Use blocking generator instead of Flux.create
parent e12e240487
commit 2b21e6a864
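For orientation, here is a minimal, self-contained sketch of the Flux.generate pattern this commit moves to. The PageState record, the fetchPage stand-in for a blocking Lucene search, and the use of Schedulers.boundedElastic() are illustrative assumptions and not this repository's API; the real code threads its own CurrentPageInfo state and subscribes on collectorScheduler.

import java.util.List;
import java.util.stream.IntStream;
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

public class BlockingGenerateSketch {

	// Illustrative paging state; a stand-in for something like CurrentPageInfo.
	record PageState(int offset, int remaining) {}

	// Stand-in for a blocking page fetch (e.g. running a Lucene search and reducing collectors).
	static List<Integer> fetchPage(int offset, int limit) {
		int total = 23; // pretend the index holds 23 hits
		return IntStream.range(offset, Math.min(offset + limit, total)).boxed().toList();
	}

	public static void main(String[] args) {
		int pageSize = 10;

		Flux<Integer> hits = Flux
				// Flux.generate: the first argument supplies the initial state once per subscriber;
				// the second is called once per requested element, may block, and returns the next state.
				.<List<Integer>, PageState>generate(
						() -> new PageState(0, 23),
						(state, sink) -> {
							if (state.remaining() <= 0) {
								sink.complete();
								return state;
							}
							List<Integer> page = fetchPage(state.offset(), Math.min(pageSize, state.remaining()));
							if (page.isEmpty()) {
								sink.complete();
								return state;
							}
							sink.next(page); // at most one next() per generator invocation
							return new PageState(state.offset() + page.size(), state.remaining() - page.size());
						})
				// keep the blocking generator off non-blocking threads
				.subscribeOn(Schedulers.boundedElastic())
				.concatMapIterable(page -> page);

		System.out.println(hits.collectList().block());
	}
}

Unlike a Flux.create emitter, the generate callback runs synchronously, one invocation per requested element, so blocking work needs no onRequest bookkeeping, cancellation sink, or manual request counting.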
@@ -6,6 +6,7 @@ import static it.cavallium.dbengine.lucene.searcher.CurrentPageInfo.TIE_BREAKER;
 import it.cavallium.dbengine.database.LLKeyScore;
 import it.cavallium.dbengine.lucene.LuceneUtils;
 import it.unimi.dsi.fastutil.objects.ObjectArrayList;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
@@ -82,7 +83,7 @@ class ScoredSimpleLuceneShardSearcher implements LuceneShardSearcher {
 			TopDocs result;
 			Mono<Void> release;
 			synchronized (lock) {
-
+				//noinspection BlockingMethodInNonBlockingContext
 				result = firstPageSharedManager.reduce(collectors);
 				release = Mono.when(indexSearcherReleasersArray);
 			}
@@ -93,30 +94,16 @@ class ScoredSimpleLuceneShardSearcher implements LuceneShardSearcher {
 		Flux<LLKeyScore> firstPageHits = LuceneUtils
 				.convertHits(result.scoreDocs, indexSearchers, keyFieldName, collectorScheduler, true);
 
-		Flux<LLKeyScore> nextHits = Flux.defer(() -> {
-			if (paginationInfo.forceSinglePage()
-					|| paginationInfo.totalLimit() - paginationInfo.firstPageLimit() <= 0) {
-				return Flux.empty();
-			}
-			return Flux
-					.<TopDocs>create(emitter -> {
+		Flux<LLKeyScore> nextHits;
+		nextHits = Flux
+				.<TopDocs, CurrentPageInfo>generate(
+						() -> new CurrentPageInfo(LuceneUtils.getLastFieldDoc(result.scoreDocs),
+								paginationInfo.totalLimit() - paginationInfo.firstPageLimit(), 1),
+						(s, emitter) -> {
 							if (Schedulers.isInNonBlockingThread()) {
-								emitter.error(new UnsupportedOperationException("Called collect in a nonblocking thread"));
-								return;
+								throw new UnsupportedOperationException("Called collect in a nonblocking thread");
 							}
-							Empty<Void> cancelEvent = Sinks.empty();
-							AtomicReference<CurrentPageInfo> currentPageInfoAtomicReference = new AtomicReference<>(new CurrentPageInfo(LuceneUtils.getLastFieldDoc(result.scoreDocs),
-									paginationInfo.totalLimit() - paginationInfo.firstPageLimit(), 1));
-							emitter.onRequest(requests -> {
-								if (Schedulers.isInNonBlockingThread()) {
-									emitter.error(new UnsupportedOperationException("Called collect"
-											+ ", onRequest in a nonblocking thread"));
-									return;
-								}
-								synchronized (currentPageInfoAtomicReference) {
-									var s = currentPageInfoAtomicReference.get();
-									while (requests > 0 && !emitter.isCancelled()) {
-										requests--;
 							if (s.last() != null && s.remainingLimit() > 0) {
 								Sort luceneSort = queryParams.sort();
 								if (luceneSort == null) {
@@ -126,57 +113,46 @@ class ScoredSimpleLuceneShardSearcher implements LuceneShardSearcher {
 										= new ScoringShardsCollectorManager(luceneSort, s.currentPageLimit(),
 												(FieldDoc) s.last(), LuceneUtils.totalHitsThreshold(), 0, s.currentPageLimit());
 
-								TopDocs pageTopDocs = Flux
-										.fromIterable(indexSearchersArray)
-										.index()
-										.<TopFieldCollector>handle((tuple, sink) -> {
 								try {
-									IndexSearcher indexSearcher = tuple.getT2();
+									var collectors = new ObjectArrayList<TopFieldCollector>(indexSearchersArray.size());
+									for (IndexSearcher indexSearcher : indexSearchersArray) {
+										//noinspection BlockingMethodInNonBlockingContext
 										TopFieldCollector collector = sharedManager.newCollector();
+										//noinspection BlockingMethodInNonBlockingContext
 										indexSearcher.search(luceneQuery, collector);
-										sink.next(collector);
-									} catch (Exception ex) {
-										sink.error(ex);
+										collectors.add(collector);
 									}
-								})
-								.collect(Collectors.toCollection(ObjectArrayList::new))
-								.<TopDocs>handle((collectors, sink) -> {
-									try {
-										sink.next(sharedManager.reduce(collectors));
-									} catch (Exception ex) {
-										sink.error(ex);
-									}
-								})
-								.single()
-								.takeUntilOther(cancelEvent.asMono())
-								.subscribeOn(Schedulers.immediate())
-								.block();
-								if (!emitter.isCancelled()) {
-									Objects.requireNonNull(pageTopDocs);
+									//noinspection BlockingMethodInNonBlockingContext
+									var pageTopDocs = sharedManager.reduce(collectors);
 									var pageLastDoc = LuceneUtils.getLastFieldDoc(pageTopDocs.scoreDocs);
 									emitter.next(pageTopDocs);
-									s = new CurrentPageInfo(pageLastDoc, s.remainingLimit() - s.currentPageLimit(), s.pageIndex() + 1);
-								} else {
+									s = new CurrentPageInfo(pageLastDoc, s.remainingLimit() - s.currentPageLimit(),
+											s.pageIndex() + 1);
+								} catch (IOException ex) {
+									emitter.error(ex);
 									s = EMPTY_STATUS;
-									requests = 0;
 								}
 							} else {
 								emitter.complete();
 								s = EMPTY_STATUS;
-								requests = 0;
 							}
+							return s;
+						})
+				.transform(flux -> {
+					if (paginationInfo.forceSinglePage()
+							|| paginationInfo.totalLimit() - paginationInfo.firstPageLimit() <= 0) {
+						return Flux.empty();
+					} else {
+						return flux;
+					}
 				}
-					currentPageInfoAtomicReference.set(s);
-				}
-			});
-
-			emitter.onCancel(cancelEvent::tryEmitEmpty);
 				})
 				.subscribeOn(collectorScheduler)
 				.flatMapSequential(topFieldDoc -> LuceneUtils
 						.convertHits(topFieldDoc.scoreDocs, indexSearchers, keyFieldName, collectorScheduler, true)
 				);
-		});
 
 		return new LuceneSearchResult(LuceneUtils.convertTotalHitsCount(result.totalHits),
 				firstPageHits
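The short-circuit for single-page queries now lives in a .transform step instead of an early return inside Flux.defer. A tiny standalone illustration of that operator follows; the forceSinglePage flag and the sample values are made up for the sketch.

import reactor.core.publisher.Flux;

public class TransformGateSketch {

	public static void main(String[] args) {
		boolean forceSinglePage = true; // illustrative stand-in for paginationInfo.forceSinglePage()

		Flux<String> nextHits = Flux.just("b", "c", "d")
				// transform runs at assembly time: when paging is disabled, the whole
				// upstream is swapped for Flux.empty() before anything is subscribed.
				.transform(flux -> forceSinglePage ? Flux.<String>empty() : flux);

		System.out.println(nextHits.collectList().block()); // prints [] while forceSinglePage is true
	}
}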