Reduce the number of threads

Andrea Cavalli 2021-12-15 16:04:33 +01:00
parent c59655e844
commit 8ad622db0a
4 changed files with 48 additions and 69 deletions
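The same change repeats in all four files: component-local cached thread pools (and the CompletableFuture plumbing built on them) are replaced by Reactor's shared bounded-elastic scheduler. A minimal sketch of the pattern, with an illustrative class name that is not in the repository:

    import java.util.concurrent.Executor;
    import reactor.core.scheduler.Schedulers;

    // Sketch only: expose the shared bounded-elastic scheduler as a plain Executor,
    // as the first hunk below does, instead of owning an Executors.newCachedThreadPool(...).
    public class BoundedElasticExecutorSketch {

        private static final Executor SEARCH_EXECUTOR =
                command -> Schedulers.boundedElastic().schedule(command);

        public static void main(String[] args) throws InterruptedException {
            SEARCH_EXECUTOR.execute(() ->
                    System.out.println("running on " + Thread.currentThread().getName()));
            Thread.sleep(500); // bounded-elastic workers are daemon threads; give the task time to print
        }
    }

By default boundedElastic is capped at 10 threads per CPU core, so funnelling the Lucene work through it bounds the total thread count rather than letting every component grow its own cached pool.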

CachedIndexSearcherManager.java

@@ -34,8 +34,7 @@ import reactor.core.scheduler.Schedulers;
 public class CachedIndexSearcherManager implements IndexSearcherManager {
 
     private static final Logger logger = LoggerFactory.getLogger(CachedIndexSearcherManager.class);
-    private final Executor SEARCH_EXECUTOR = Executors
-            .newCachedThreadPool(new ShortNamedThreadFactory("lucene-search"));
+    private final Executor SEARCH_EXECUTOR = command -> Schedulers.boundedElastic().schedule(command);
     private final SearcherFactory SEARCHER_FACTORY = new ExecutorSearcherFactory(SEARCH_EXECUTOR);
 
     private final SnapshotsManager snapshotsManager;
@@ -128,7 +127,7 @@ public class CachedIndexSearcherManager implements IndexSearcherManager {
                 logger.info("Closed active searchers");
                 cachedSnapshotSearchers.invalidateAll();
                 cachedSnapshotSearchers.cleanUp();
-            })).cache();
+            }).subscribeOn(Schedulers.boundedElastic())).cache();
     }
 
     private Mono<Send<LLIndexSearcher>> generateCachedSearcher(@Nullable LLSnapshot snapshot) {
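The second hunk above pairs with the executor change: the close sequence still blocks, but subscribeOn moves that blocking work onto the bounded-elastic pool before cache() memoizes the completed Mono. A sketch under assumed names (CloseOffloadSketch and close are illustrative, not repository code):

    import reactor.core.publisher.Mono;
    import reactor.core.scheduler.Schedulers;

    public class CloseOffloadSketch {

        // Sketch only: the blocking tear-down runs on a bounded-elastic worker,
        // and cache() lets later subscribers reuse the already-completed close.
        static Mono<Void> close() {
            return Mono.<Void>fromRunnable(() ->
                            System.out.println("closing on " + Thread.currentThread().getName()))
                    .subscribeOn(Schedulers.boundedElastic())
                    .cache();
        }

        public static void main(String[] args) {
            close().block(); // prints a boundedElastic-* thread name
        }
    }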

LLLocalLuceneIndex.java

@@ -277,35 +277,29 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
     }
 
     private <V> Mono<V> runSafe(Callable<V> callable) {
-        return Mono.<V>create(sink -> {
-            var future = SAFE_EXECUTOR.submit(() -> {
-                try {
-                    var result = callable.call();
-                    if (result != null) {
-                        sink.success(result);
-                    } else {
-                        sink.success();
-                    }
-                } catch (Throwable e) {
-                    sink.error(e);
-                }
-            });
-            sink.onDispose(() -> future.cancel(false));
-        });
+        return Mono.create(sink -> Schedulers.boundedElastic().schedule(() -> {
+            try {
+                var result = callable.call();
+                if (result != null) {
+                    sink.success(result);
+                } else {
+                    sink.success();
+                }
+            } catch (Throwable e) {
+                sink.error(e);
+            }
+        }));
     }
 
     private <V> Mono<V> runSafe(IORunnable runnable) {
-        return Mono.create(sink -> {
-            var future = SAFE_EXECUTOR.submit(() -> {
-                try {
-                    runnable.run();
-                    sink.success();
-                } catch (Throwable e) {
-                    sink.error(e);
-                }
-            });
-            sink.onDispose(() -> future.cancel(false));
-        });
+        return Mono.create(sink -> Schedulers.boundedElastic().schedule(() -> {
+            try {
+                runnable.run();
+                sink.success();
+            } catch (Throwable e) {
+                sink.error(e);
+            }
+        }));
     }
 
     @Override
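For reference, the Callable case can also be written with Mono.fromCallable, which runs the callable on subscription, maps a null result to an empty completion, and turns exceptions into error signals. A sketch with illustrative names, not the repository's runSafe:

    import java.util.concurrent.Callable;
    import reactor.core.publisher.Mono;
    import reactor.core.scheduler.Schedulers;

    public class RunSafeSketch {

        // Sketch only: fromCallable + subscribeOn gives the same
        // "run blocking work on bounded-elastic" behaviour without a manual sink.
        static <V> Mono<V> runSafe(Callable<V> callable) {
            return Mono.fromCallable(callable)
                    .subscribeOn(Schedulers.boundedElastic());
        }

        public static void main(String[] args) throws InterruptedException {
            runSafe(() -> "hello").subscribe(System.out::println);
            runSafe(() -> null).subscribe(v -> {}, Throwable::printStackTrace,
                    () -> System.out.println("completed empty"));
            Thread.sleep(500); // let the asynchronous subscriptions finish before the JVM exits
        }
    }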

ReactiveCollectorMultiManager.java

@@ -15,15 +15,13 @@ import reactor.core.publisher.Sinks.Many;
 public class ReactiveCollectorMultiManager implements CollectorMultiManager<Void, Void> {
 
-    private final FluxSink<ScoreDoc> scoreDocsSink;
-    private final LongSemaphore requested;
-
-    public ReactiveCollectorMultiManager(FluxSink<ScoreDoc> scoreDocsSink, LongSemaphore requested) {
-        this.scoreDocsSink = scoreDocsSink;
-        this.requested = requested;
+    public ReactiveCollectorMultiManager() {
     }
 
-    public CollectorManager<Collector, Void> get(int shardIndex) {
+    public CollectorManager<Collector, Void> get(LongSemaphore requested,
+            FluxSink<ScoreDoc> scoreDocsSink,
+            int shardIndex) {
         return new CollectorManager<>() {
 
             @Override
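After this change the manager holds no per-query state; the demand semaphore, the sink, and the shard index all arrive through get(...) at call time. A self-contained sketch of that shape against the plain Lucene API, with illustrative names and a java.util.concurrent.Semaphore standing in for the project's LongSemaphore:

    import java.io.IOException;
    import java.util.Collection;
    import java.util.concurrent.Semaphore;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.Collector;
    import org.apache.lucene.search.CollectorManager;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.ScoreMode;
    import org.apache.lucene.search.SimpleCollector;
    import reactor.core.publisher.FluxSink;

    // Sketch only: a stateless factory whose per-call arguments carry the sink and the
    // backpressure state, mirroring the new get(requested, scoreDocsSink, shardIndex) signature.
    public class StreamingCollectorFactory {

        public CollectorManager<Collector, Void> get(Semaphore requested,
                FluxSink<ScoreDoc> scoreDocsSink,
                int shardIndex) {
            return new CollectorManager<>() {
                @Override
                public Collector newCollector() {
                    return new SimpleCollector() {
                        private int docBase;

                        @Override
                        protected void doSetNextReader(LeafReaderContext context) {
                            docBase = context.docBase; // remember the segment's doc id offset
                        }

                        @Override
                        public void collect(int doc) throws IOException {
                            try {
                                requested.acquire(); // wait for downstream demand before emitting
                            } catch (InterruptedException e) {
                                Thread.currentThread().interrupt();
                                throw new IOException(e);
                            }
                            scoreDocsSink.next(new ScoreDoc(docBase + doc, Float.NaN, shardIndex));
                        }

                        @Override
                        public ScoreMode scoreMode() {
                            return ScoreMode.COMPLETE_NO_SCORES; // unscored streaming collection
                        }
                    };
                }

                @Override
                public Void reduce(Collection<Collector> collectors) {
                    return null; // hits are streamed through the sink; nothing to merge here
                }
            };
        }
    }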

UnsortedUnscoredStreamingMultiSearcher.java

@@ -1,5 +1,8 @@
 package it.cavallium.dbengine.lucene.searcher;
 
+import static java.lang.Math.toIntExact;
+import static java.util.Objects.requireNonNull;
+
 import io.net5.buffer.api.Send;
 import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
 import it.cavallium.dbengine.database.LLKeyScore;
@@ -11,8 +14,10 @@ import it.cavallium.dbengine.lucene.searcher.LLSearchTransformer.TransformerInpu
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
+import java.util.concurrent.Executor;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -28,12 +33,10 @@ import reactor.core.publisher.FluxSink.OverflowStrategy;
 import reactor.core.publisher.Mono;
 import reactor.core.scheduler.Scheduler;
 import reactor.core.scheduler.Schedulers;
-import reactor.util.function.Tuples;
 
 public class UnsortedUnscoredStreamingMultiSearcher implements MultiSearcher {
 
-    private static final ExecutorService SCHEDULER = Executors.newCachedThreadPool(new ShortNamedThreadFactory(
-            "UnscoredStreamingSearcher"));
-
     @Override
     public Mono<LuceneSearchResult> collectMulti(Mono<Send<LLIndexSearchers>> indexSearchersMono,
             LocalQueryParams queryParams,
@@ -61,47 +64,32 @@ public class UnsortedUnscoredStreamingMultiSearcher implements MultiSearcher {
             }
 
             var shards = indexSearchers.shards();
 
-            Flux<ScoreDoc> scoreDocsFlux = Flux.<ScoreDoc>create(scoreDocsSink -> {
-                var requested = new LongSemaphore(0);
-                var cmm = new ReactiveCollectorMultiManager(scoreDocsSink, requested);
-                scoreDocsSink.onRequest(requested::release);
-
-                int mutableShardIndex = 0;
-                CompletableFuture<?>[] futures = new CompletableFuture<?>[shards.size()];
-                for (IndexSearcher shard : shards) {
-                    int shardIndex = mutableShardIndex++;
-                    assert queryParams.computePreciseHitsCount() == cmm.scoreMode().isExhaustive();
-                    var future = CompletableFuture.runAsync(() -> {
-                        try {
-                            LLUtils.ensureBlocking();
-                            var collectorManager = cmm.get(shardIndex);
-                            shard.search(localQueryParams.query(), collectorManager.newCollector());
-                        } catch (IOException e) {
-                            throw new CompletionException(e);
-                        }
-                    }, SCHEDULER);
-                    futures[shardIndex] = future;
-                }
-                var combinedFuture = CompletableFuture.allOf(futures).whenCompleteAsync((result, ex) -> {
-                    if (ex != null) {
-                        scoreDocsSink.error(ex);
-                    } else {
-                        scoreDocsSink.complete();
-                    }
-                });
-                scoreDocsSink.onDispose(() -> {
-                    for (CompletableFuture<?> future : futures) {
-                        future.cancel(true);
-                    }
-                    combinedFuture.cancel(true);
-                });
-            }, OverflowStrategy.BUFFER).subscribeOn(Schedulers.boundedElastic()).publishOn(Schedulers.boundedElastic());
-
-            Flux<LLKeyScore> resultsFlux = LuceneUtils.convertHits(scoreDocsFlux, shards, keyFieldName, false);
+            var cmm = new ReactiveCollectorMultiManager();
+
+            Flux<ScoreDoc> scoreDocsFlux = Flux.fromIterable(shards)
+                    .index()
+                    .flatMap(tuple -> Flux.<ScoreDoc>create(scoreDocsSink -> {
+                        LLUtils.ensureBlocking();
+                        var index = toIntExact(requireNonNull(tuple.getT1()));
+                        var shard = tuple.getT2();
+                        var requested = new LongSemaphore(0);
+                        var collectorManager = cmm.get(requested, scoreDocsSink, index);
+
+                        assert queryParams.computePreciseHitsCount() == cmm.scoreMode().isExhaustive();
+
+                        scoreDocsSink.onRequest(requested::release);
+
+                        try {
+                            shard.search(localQueryParams.query(), collectorManager.newCollector());
+                            scoreDocsSink.complete();
+                        } catch (IOException e) {
+                            scoreDocsSink.error(e);
+                        }
+                    }, OverflowStrategy.BUFFER).subscribeOn(Schedulers.boundedElastic()));
+
+            Flux<LLKeyScore> resultsFlux = LuceneUtils
+                    .convertHits(scoreDocsFlux.publishOn(Schedulers.boundedElastic()), shards, keyFieldName, false);
 
             var totalHitsCount = new TotalHitsCount(0, false);
             Flux<LLKeyScore> mergedFluxes = resultsFlux