Performance bugfix
parent 047a471bf7
commit 09c7e4f730
CachedIndexSearcherManager.java

@@ -6,9 +6,11 @@ import com.google.common.cache.LoadingCache;
 import it.cavallium.dbengine.database.LLSnapshot;
 import java.io.IOException;
 import java.time.Duration;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Phaser;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.LockSupport;
 import java.util.function.Function;
@@ -44,6 +46,7 @@ public class CachedIndexSearcherManager {

 	private final Empty<Void> closeRequested = Sinks.empty();
 	private final Empty<Void> refresherClosed = Sinks.empty();
+	private final Mono<Void> closeMono;

 	public CachedIndexSearcherManager(IndexWriter indexWriter,
 			SnapshotsManager snapshotsManager,
@@ -64,7 +67,7 @@ public class CachedIndexSearcherManager {
 		Mono
 				.fromRunnable(() -> {
 					try {
-						maybeRefreshBlocking();
+						maybeRefresh();
 					} catch (Exception ex) {
 						logger.error("Failed to refresh the searcher manager", ex);
 					}
@@ -86,10 +89,49 @@ public class CachedIndexSearcherManager {
 					}
 				});
 		this.cachedMainSearcher = this.generateCachedSearcher(null);
+
+		this.closeMono = Mono
+				.fromRunnable(() -> {
+					logger.info("Closing IndexSearcherManager...");
+					this.closeRequested.tryEmitEmpty();
+				})
+				.then(refresherClosed.asMono())
+				.then(Mono.<Void>fromRunnable(() -> {
+					logger.info("Closed IndexSearcherManager");
+					logger.info("Closing refreshes...");
+					if (!activeRefreshes.isTerminated()) {
+						try {
+							activeRefreshes.awaitAdvanceInterruptibly(activeRefreshes.arrive(), 15, TimeUnit.SECONDS);
+						} catch (Exception ex) {
+							if (ex instanceof TimeoutException) {
+								logger.error("Failed to terminate active refreshes: timeout");
+							} else {
+								logger.error("Failed to terminate active refreshes", ex);
+							}
+						}
+					}
+					logger.info("Closed refreshes...");
+					logger.info("Closing active searchers...");
+					if (!activeSearchers.isTerminated()) {
+						try {
+							activeSearchers.awaitAdvanceInterruptibly(activeSearchers.arrive(), 15, TimeUnit.SECONDS);
+						} catch (Exception ex) {
+							if (ex instanceof TimeoutException) {
+								logger.error("Failed to terminate active searchers: timeout");
+							} else {
+								logger.error("Failed to terminate active searchers", ex);
+							}
+						}
+					}
+					logger.info("Closed active searchers");
+					cachedSnapshotSearchers.invalidateAll();
+					cachedSnapshotSearchers.cleanUp();
+				})).cache();
 	}

 	private Mono<CachedIndexSearcher> generateCachedSearcher(@Nullable LLSnapshot snapshot) {
 		return Mono.fromCallable(() -> {
+			activeSearchers.register();
 			IndexSearcher indexSearcher;
 			SearcherManager associatedSearcherManager;
 			if (snapshot == null) {
@@ -100,7 +142,18 @@ public class CachedIndexSearcherManager {
 				indexSearcher = snapshotsManager.resolveSnapshot(snapshot).getIndexSearcher();
 				associatedSearcherManager = null;
 			}
-			return new CachedIndexSearcher(indexSearcher, associatedSearcherManager, activeSearchers::arriveAndDeregister);
+			AtomicBoolean alreadyDeregistered = new AtomicBoolean(false);
+			return new CachedIndexSearcher(indexSearcher, associatedSearcherManager,
+					() -> {
+						// This shouldn't happen more than once,
+						// but I put this AtomicBoolean to be sure that this will NEVER happen more than once.
+						if (alreadyDeregistered.compareAndSet(false, true)) {
+							activeSearchers.arriveAndDeregister();
+						} else {
+							logger.error("Disposed CachedIndexSearcher twice! This is an implementation bug!");
+						}
+					}
+			);
 		})
 				.cacheInvalidateWhen(indexSearcher -> Mono
 						.firstWithSignal(
@@ -159,10 +212,7 @@ public class CachedIndexSearcherManager {
 		return this
 				.retrieveCachedIndexSearcher(snapshot)
 				// Increment reference count
-				.doOnNext(indexSearcher -> {
-					activeSearchers.register();
-					indexSearcher.incUsage();
-				});
+				.doOnNext(CachedIndexSearcher::incUsage);
 	}

 	private Mono<CachedIndexSearcher> retrieveCachedIndexSearcher(LLSnapshot snapshot) {
@@ -185,42 +235,6 @@ public class CachedIndexSearcherManager {
 	}

 	public Mono<Void> close() {
-		return Mono
-				.fromRunnable(() -> {
-					logger.info("Closing IndexSearcherManager...");
-					this.closeRequested.tryEmitEmpty();
-				})
-				.then(refresherClosed.asMono())
-				.then(Mono.fromRunnable(() -> {
-					logger.info("Closed IndexSearcherManager");
-					logger.info("Closing refreshes...");
-					if (!activeRefreshes.isTerminated()) {
-						try {
-							activeRefreshes.awaitAdvanceInterruptibly(activeRefreshes.arrive(), 15, TimeUnit.SECONDS);
-						} catch (Exception ex) {
-							if (ex instanceof TimeoutException) {
-								logger.error("Failed to terminate active refreshes: timeout");
-							} else {
-								logger.error("Failed to terminate active refreshes", ex);
-							}
-						}
-					}
-					logger.info("Closed refreshes...");
-					logger.info("Closing active searchers...");
-					if (!activeSearchers.isTerminated()) {
-						try {
-							activeSearchers.awaitAdvanceInterruptibly(activeSearchers.arrive(), 15, TimeUnit.SECONDS);
-						} catch (Exception ex) {
-							if (ex instanceof TimeoutException) {
-								logger.error("Failed to terminate active searchers: timeout");
-							} else {
-								logger.error("Failed to terminate active searchers", ex);
-							}
-						}
-					}
-					logger.info("Closed active searchers");
-					cachedSnapshotSearchers.invalidateAll();
-					cachedSnapshotSearchers.cleanUp();
-				}));
+		return closeMono;
 	}
 }
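Note on the change above: close() used to rebuild the whole shutdown pipeline on every call; it now returns a single closeMono built in the constructor and finished with .cache(), and the Phaser deregistration is guarded by an AtomicBoolean so it can run at most once. Below is a minimal, self-contained sketch of that idempotent-close pattern; the class and member names are illustrative only, not the project's actual code.

import java.util.concurrent.Phaser;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import reactor.core.publisher.Mono;

// Illustrative only: why the commit builds closeMono once and returns it from close().
class IdempotentCloser {

	private final Phaser activeUsers = new Phaser(1);

	// Built once; .cache() replays the same terminal signal to every subscriber,
	// so subscribing to close() twice cannot run the shutdown sequence twice.
	private final Mono<Void> closeMono = Mono
			.<Void>fromRunnable(() -> {
				try {
					activeUsers.awaitAdvanceInterruptibly(activeUsers.arrive(), 15, TimeUnit.SECONDS);
				} catch (Exception ex) {
					// log and continue, as the commit does
				}
			})
			.cache();

	public Mono<Void> close() {
		return closeMono;
	}

	// The same "at most once" idea applied to releasing a single registered user.
	public Runnable releaser() {
		activeUsers.register();
		AtomicBoolean alreadyReleased = new AtomicBoolean(false);
		return () -> {
			if (alreadyReleased.compareAndSet(false, true)) {
				activeUsers.arriveAndDeregister();
			}
		};
	}
}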
LLLocalDictionary.java

@@ -1111,7 +1111,6 @@ public class LLLocalDictionary implements LLDictionary {
 	public Flux<Send<LLEntry>> putMulti(Flux<Send<LLEntry>> entries, boolean getOldValues) {
 		return entries
 				.buffer(Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP))
-				.publishOn(dbScheduler)
 				.flatMapSequential(ew -> Mono
 						.<List<Send<LLEntry>>>fromCallable(() -> {
 							var entriesWindow = new ArrayList<LLEntry>(ew.size());
@@ -1206,7 +1205,7 @@ public class LLLocalDictionary implements LLDictionary {
 									llEntry.close();
 								}
 							}
-						}), 2) // Max concurrency is 2 to read data while preparing the next segment
+						}).subscribeOn(dbScheduler), 2) // Max concurrency is 2 to read data while preparing the next segment
 				.flatMapIterable(oldValuesList -> oldValuesList)
 				.transform(LLUtils::handleDiscard);
 	}
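Note on the putMulti change: the .publishOn(dbScheduler) that sat after .buffer(...) moved the whole downstream chain onto the database scheduler; it is replaced by a .subscribeOn(dbScheduler) scoped to the blocking callable inside flatMapSequential. A stripped-down sketch of the same scheduling pattern follows; the batch logic and names are placeholders, not the real RocksDB write path.

import java.util.List;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class SubscribeOnSketch {

	// Placeholder for the database scheduler used in the commit.
	private static final Scheduler dbScheduler = Schedulers.boundedElastic();

	public static Flux<Integer> writeBatches(Flux<Integer> entries) {
		return entries
				.buffer(128)
				// Before: .publishOn(dbScheduler) here hopped *every* downstream operator
				// onto dbScheduler, including ones that never block.
				.flatMapSequential(batch -> Mono
						.fromCallable(() -> blockingWrite(batch))
						// After: only the blocking callable is pinned to dbScheduler.
						.subscribeOn(dbScheduler), 2)
				.flatMapIterable(oldValues -> oldValues);
	}

	private static List<Integer> blockingWrite(List<Integer> batch) {
		// Stand-in for the capped write batch in the real code.
		return batch;
	}
}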
LLLocalLuceneIndex.java

@@ -77,13 +77,7 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
 			true
 	);
 	// Scheduler used to get callback values of LuceneStreamSearcher without creating deadlocks
-	protected static final Scheduler luceneSearcherScheduler = Schedulers.newBoundedElastic(
-			4,
-			Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
-			"lucene-searcher",
-			60,
-			true
-	);
+	protected final Scheduler luceneSearcherScheduler = LuceneUtils.newLuceneSearcherScheduler(false);
 	// Scheduler used to get callback values of LuceneStreamSearcher without creating deadlocks
 	private static final Scheduler luceneWriterScheduler = Schedulers.boundedElastic();

@@ -257,9 +251,8 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
 	public Mono<Void> addDocuments(Flux<Entry<LLTerm, LLDocument>> documents) {
 		return documents
 				.collectList()
-				.publishOn(luceneWriterScheduler)
 				.flatMap(documentsList -> Mono
-						.fromCallable(() -> {
+						.<Void>fromCallable(() -> {
 							activeTasks.register();
 							try {
 								indexWriter.addDocuments(LLUtils.toDocumentsFromEntries(documentsList));
@@ -267,7 +260,7 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
 							} finally {
 								activeTasks.arriveAndDeregister();
 							}
-						})
+						}).subscribeOn(luceneWriterScheduler)
 				);
 	}

@@ -510,7 +503,6 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
 			try {
 				if (activeTasks.isTerminated()) return null;
 				if (force) {
-					if (activeTasks.isTerminated()) return null;
 					//noinspection BlockingMethodInNonBlockingContext
 					searcherManager.maybeRefreshBlocking();
 				} else {
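Note on the addDocuments change: with .subscribeOn(luceneWriterScheduler) now chained directly after the callable, the fromCallable call is no longer target-typed through flatMap, which is presumably why the explicit <Void> type witness was added. A small standalone sketch of that inference detail (hypothetical helper, not the project's code):

import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

class VoidCallableSketch {

	static Mono<Void> runBlockingTask(Runnable blockingTask) {
		// Without the <Void> witness the chained call is inferred standalone and the
		// compiler picks Mono<Object>, because the callable's only return is "return null".
		return Mono
				.<Void>fromCallable(() -> {
					blockingTask.run();
					return null;
				})
				.subscribeOn(Schedulers.boundedElastic());
	}
}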
LLLocalMultiLuceneIndex.java

@@ -1,7 +1,5 @@
 package it.cavallium.dbengine.database.disk;

-import static it.cavallium.dbengine.database.disk.LLLocalLuceneIndex.luceneSearcherScheduler;
-
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader.InvalidCacheLoadException;
@@ -53,12 +51,16 @@ import org.warp.commonutils.functional.IOBiConsumer;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.GroupedFlux;
 import reactor.core.publisher.Mono;
+import reactor.core.scheduler.Scheduler;
 import reactor.core.scheduler.Schedulers;
 import reactor.util.function.Tuple2;
 import reactor.util.function.Tuples;

 public class LLLocalMultiLuceneIndex implements LLLuceneIndex {

+	// Scheduler used to get callback values of LuceneStreamSearcher without creating deadlocks
+	protected final Scheduler luceneSearcherScheduler = LuceneUtils.newLuceneSearcherScheduler(true);
+
 	private final ConcurrentHashMap<Long, LLSnapshot[]> registeredSnapshots = new ConcurrentHashMap<>();
 	private final AtomicLong nextSnapshotNumber = new AtomicLong(1);
 	private final LLLocalLuceneIndex[] luceneIndices;
LuceneUtils.java

@@ -374,8 +374,10 @@ public class LuceneUtils {
 				.transform(hitsFlux -> {
 					if (preserveOrder) {
 						return hitsFlux
-								.publishOn(scheduler)
-								.mapNotNull(hit -> mapHitBlocking(hit, indexSearchers, keyFieldName));
+								.flatMapSequential(hit -> Mono
+										.fromCallable(() -> mapHitBlocking(hit, indexSearchers, keyFieldName))
+										.subscribeOn(scheduler)
+								);
 					} else {
 						return hitsFlux
 								.parallel()
@@ -508,4 +510,14 @@ public class LuceneUtils {
 			return totalHitsCount.value() + "+";
 		}
 	}
+
+	public static Scheduler newLuceneSearcherScheduler(boolean multi) {
+		return Schedulers.newBoundedElastic(
+				4,
+				Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
+				multi ? "lucene-searcher-multi" : "lucene-searcher-shard",
+				60,
+				true
+		);
+	}
 }