Fix all warnings

This commit is contained in:
parent 65c45756d7
commit eb0ac6fbb9
@@ -31,13 +31,13 @@ public class CountedStream<T> {
 	public static <T> CountedStream<T> merge(Collection<CountedStream<T>> stream) {
 		return stream
 				.stream()
-				.reduce((a, b) -> new CountedStream<T>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
+				.reduce((a, b) -> new CountedStream<>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
 				.orElseGet(() -> new CountedStream<>(Flux.empty(), 0));
 	}
 
 	public static <T> Mono<CountedStream<T>> merge(Flux<CountedStream<T>> stream) {
 		return stream
-				.reduce((a, b) -> new CountedStream<T>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
+				.reduce((a, b) -> new CountedStream<>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
 				.switchIfEmpty(Mono.fromSupplier(() -> new CountedStream<>(Flux.empty(), 0)));
 	}
 
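Note: the two hunks above only switch to the diamond operator; the merge logic itself is a reduce over (stream, count) pairs. Below is a minimal, self-contained sketch of that same pattern, with a hypothetical Counted record standing in for the project's CountedStream (only the constructor, stream and count accessors visible in this diff are assumed).

import java.util.Collection;
import java.util.List;
import reactor.core.publisher.Flux;

public class MergeExample {

	// Hypothetical stand-in for CountedStream<T>: a flux plus the number of elements it carries.
	record Counted<T>(Flux<T> stream, long count) {}

	// Merge many counted fluxes into one, summing the counts; an empty input yields an empty flux.
	static <T> Counted<T> merge(Collection<Counted<T>> streams) {
		return streams
				.stream()
				.reduce((a, b) -> new Counted<>(Flux.merge(a.stream(), b.stream()), a.count() + b.count()))
				.orElseGet(() -> new Counted<>(Flux.empty(), 0));
	}

	public static void main(String[] args) {
		var merged = merge(List.of(
				new Counted<>(Flux.just(1, 2), 2),
				new Counted<>(Flux.just(3), 1)
		));
		System.out.println(merged.count()); // 3
		merged.stream().subscribe(System.out::println);
	}
}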
@@ -2,292 +2,47 @@ package it.cavallium.dbengine.client;
 
 import it.cavallium.dbengine.client.query.ClientQueryParams;
 import it.cavallium.dbengine.client.query.current.data.Query;
-import it.cavallium.dbengine.client.query.current.data.QueryParams;
-import it.cavallium.dbengine.database.LLLuceneIndex;
-import it.cavallium.dbengine.database.LLScoreMode;
-import it.cavallium.dbengine.database.LLSearchResult;
-import it.cavallium.dbengine.database.LLSnapshot;
 import it.cavallium.dbengine.database.LLSnapshottable;
-import it.cavallium.dbengine.database.LLTerm;
 import it.cavallium.dbengine.database.collections.Joiner.ValueGetter;
-import it.cavallium.dbengine.lucene.LuceneUtils;
-import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Set;
 import org.jetbrains.annotations.Nullable;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
-import reactor.util.function.Tuple2;
 
-public class LuceneIndex<T, U> implements LLSnapshottable {
+@SuppressWarnings("unused")
+public interface LuceneIndex<T, U> extends LLSnapshottable {
 
-	private final LLLuceneIndex luceneIndex;
-	private final Indicizer<T,U> indicizer;
+	Mono<Void> addDocument(T key, U value);
 
-	public LuceneIndex(LLLuceneIndex luceneIndex, Indicizer<T, U> indicizer) {
-		this.luceneIndex = luceneIndex;
-		this.indicizer = indicizer;
-	}
+	Mono<Void> addDocuments(Flux<Entry<T, U>> entries);
 
-	private LLSnapshot resolveSnapshot(CompositeSnapshot snapshot) {
-		if (snapshot == null) {
-			return null;
-		} else {
-			return snapshot.getSnapshot(luceneIndex);
-		}
-	}
+	Mono<Void> deleteDocument(T key);
 
-	public Mono<Void> addDocument(T key, U value) {
-		return indicizer
-				.toDocument(key, value)
-				.flatMap(doc -> luceneIndex.addDocument(indicizer.toIndex(key), doc));
-	}
+	Mono<Void> updateDocument(T key, U value);
 
-	public Mono<Void> addDocuments(Flux<Entry<T, U>> entries) {
-		return luceneIndex
-				.addDocuments(entries
-						.flatMap(entry -> indicizer
-								.toDocument(entry.getKey(), entry.getValue())
-								.map(doc -> Map.entry(indicizer.toIndex(entry.getKey()), doc)))
-						.groupBy(Entry::getKey, Entry::getValue)
-				);
-	}
+	Mono<Void> updateDocuments(Flux<Entry<T, U>> entries);
 
-	public Mono<Void> deleteDocument(T key) {
-		LLTerm id = indicizer.toIndex(key);
-		return luceneIndex.deleteDocument(id);
-	}
+	Mono<Void> deleteAll();
 
-	public Mono<Void> updateDocument(T key, U value) {
-		return indicizer
-				.toDocument(key, value)
-				.flatMap(doc -> luceneIndex.updateDocument(indicizer.toIndex(key), doc));
-	}
+	Mono<SearchResultKeys<T>> moreLikeThis(ClientQueryParams<SearchResultKey<T>> queryParams, T key, U mltDocumentValue);
 
-	public Mono<Void> updateDocuments(Flux<Entry<T, U>> entries) {
-		return luceneIndex
-				.updateDocuments(entries
-						.flatMap(entry -> indicizer
-								.toDocument(entry.getKey(), entry.getValue())
-								.map(doc -> Map.entry(indicizer.toIndex(entry.getKey()), doc)))
-						.groupBy(Entry::getKey, Entry::getValue)
-				);
-	}
+	Mono<SearchResult<T, U>> moreLikeThisWithValues(ClientQueryParams<SearchResultItem<T, U>> queryParams,
+			T key,
+			U mltDocumentValue,
+			ValueGetter<T, U> valueGetter);
 
-	public Mono<Void> deleteAll() {
-		return luceneIndex.deleteAll();
-	}
+	Mono<SearchResultKeys<T>> search(ClientQueryParams<SearchResultKey<T>> queryParams);
 
-	private static QueryParams fixOffset(LLLuceneIndex luceneIndex, QueryParams queryParams) {
-		if (luceneIndex.supportsOffset()) {
-			return queryParams;
-		} else {
-			return queryParams.setOffset(0);
-		}
-	}
+	Mono<SearchResult<T, U>> searchWithValues(ClientQueryParams<SearchResultItem<T, U>> queryParams,
+			ValueGetter<T, U> valueGetter);
 
-	private static long fixTransformOffset(LLLuceneIndex luceneIndex, long offset) {
-		if (luceneIndex.supportsOffset()) {
-			return 0;
-		} else {
-			return offset;
-		}
-	}
+	Mono<Long> count(@Nullable CompositeSnapshot snapshot, Query query);
 
-	private Mono<SearchResultKeys<T>> transformLuceneResult(LLSearchResult llSearchResult,
-			@Nullable MultiSort<SearchResultKey<T>> sort,
-			LLScoreMode scoreMode,
-			long offset,
-			@Nullable Long limit) {
-		Flux<SearchResultKeys<T>> mappedKeys = llSearchResult
-				.getResults()
-				.map(flux -> new SearchResultKeys<>(flux.getResults().map(signal -> {
-					return new SearchResultKey<T>(indicizer.getKey(signal.getKey()),
-							signal.getScore()
-					);
-				}), flux.getTotalHitsCount()));
-		MultiSort<SearchResultKey<T>> finalSort;
-		if (scoreMode != LLScoreMode.COMPLETE_NO_SCORES && sort == null) {
-			finalSort = MultiSort.topScore();
-		} else {
-			finalSort = sort;
-		}
-
-		MultiSort<SearchResultKey<T>> mappedSort;
-		if (finalSort != null) {
-			mappedSort = new MultiSort<>(finalSort.getQuerySort(), (signal1, signal2) -> {
-				return finalSort.getResultSort().compare((signal1), signal2);
-			});
-		} else {
-			mappedSort = null;
-		}
-		return LuceneUtils.mergeSignalStreamKeys(mappedKeys, mappedSort, offset, limit);
-	}
+	boolean isLowMemoryMode();
 
-	private Mono<SearchResult<T, U>> transformLuceneResultWithValues(LLSearchResult llSearchResult,
-			@Nullable MultiSort<SearchResultItem<T, U>> sort,
-			LLScoreMode scoreMode,
-			long offset,
-			@Nullable Long limit,
-			ValueGetter<T, U> valueGetter) {
-		Flux<SearchResult<T, U>> mappedKeys = llSearchResult
-				.getResults()
-				.map(flux -> new SearchResult<>(flux.getResults().flatMapSequential(signal -> {
-					var key = indicizer.getKey(signal.getKey());
-					return valueGetter
-							.get(key)
-							.map(value -> new SearchResultItem<>(key, value, signal.getScore()));
-				}), flux.getTotalHitsCount()));
-		MultiSort<SearchResultItem<T, U>> finalSort;
-		if (scoreMode != LLScoreMode.COMPLETE_NO_SCORES && sort == null) {
-			finalSort = MultiSort.topScoreWithValues();
-		} else {
-			finalSort = sort;
-		}
-
-		MultiSort<SearchResultItem<T, U>> mappedSort;
-		if (finalSort != null) {
-			mappedSort = new MultiSort<>(finalSort.getQuerySort(), (signal1, signal2) -> {
-				return finalSort.getResultSort().compare((signal1), signal2);
-			});
-		} else {
-			mappedSort = null;
-		}
-		return LuceneUtils.mergeSignalStreamItems(mappedKeys, mappedSort, offset, limit);
-	}
+	Mono<Void> close();
 
-	/**
-	 *
-	 * @param queryParams the limit is valid for each lucene instance.
-	 *                    If you have 15 instances, the number of elements returned
-	 *                    can be at most <code>limit * 15</code>
-	 * @return the collection has one or more flux
-	 */
-	public Mono<SearchResultKeys<T>> moreLikeThis(
-			ClientQueryParams<SearchResultKey<T>> queryParams,
-			T key,
-			U mltDocumentValue) {
-		Flux<Tuple2<String, Set<String>>> mltDocumentFields
-				= indicizer.getMoreLikeThisDocumentFields(key, mltDocumentValue);
-		return luceneIndex
-				.moreLikeThis(resolveSnapshot(queryParams.getSnapshot()), fixOffset(luceneIndex, queryParams.toQueryParams()), indicizer.getKeyFieldName(), mltDocumentFields)
-				.flatMap(llSearchResult -> this.transformLuceneResult(llSearchResult,
-						queryParams.getSort(),
-						queryParams.getScoreMode(),
-						fixTransformOffset(luceneIndex, queryParams.getOffset()),
-						queryParams.getLimit()
-				));
-
-	}
+	Mono<Void> flush();
 
-	/**
-	 *
-	 * @param queryParams the limit is valid for each lucene instance.
-	 *                    If you have 15 instances, the number of elements returned
-	 *                    can be at most <code>limit * 15</code>
-	 * @return the collection has one or more flux
-	 */
-	public Mono<SearchResult<T, U>> moreLikeThisWithValues(
-			ClientQueryParams<SearchResultItem<T, U>> queryParams,
-			T key,
-			U mltDocumentValue,
-			ValueGetter<T, U> valueGetter) {
-		Flux<Tuple2<String, Set<String>>> mltDocumentFields
-				= indicizer.getMoreLikeThisDocumentFields(key, mltDocumentValue);
-		return luceneIndex
-				.moreLikeThis(resolveSnapshot(queryParams.getSnapshot()),
-						fixOffset(luceneIndex, queryParams.toQueryParams()),
-						indicizer.getKeyFieldName(),
-						mltDocumentFields
-				)
-				.flatMap(llSearchResult -> this.transformLuceneResultWithValues(llSearchResult,
-						queryParams.getSort(),
-						queryParams.getScoreMode(),
-						fixTransformOffset(luceneIndex, queryParams.getOffset()),
-						queryParams.getLimit(),
-						valueGetter
-				));
-	}
+	Mono<Void> refresh();
 
-	/**
-	 *
-	 * @param queryParams the limit is valid for each lucene instance.
-	 *                    If you have 15 instances, the number of elements returned
-	 *                    can be at most <code>limit * 15</code>
-	 * @return the collection has one or more flux
-	 */
-	public Mono<SearchResultKeys<T>> search(
-			ClientQueryParams<SearchResultKey<T>> queryParams) {
-		return luceneIndex
-				.search(resolveSnapshot(queryParams.getSnapshot()),
-						fixOffset(luceneIndex, queryParams.toQueryParams()),
-						indicizer.getKeyFieldName()
-				)
-				.flatMap(llSearchResult -> this.transformLuceneResult(llSearchResult,
-						queryParams.getSort(),
-						queryParams.getScoreMode(),
-						fixTransformOffset(luceneIndex, queryParams.getOffset()),
-						queryParams.getLimit()
-				));
-	}
-
-	/**
-	 *
-	 * @param queryParams the limit is valid for each lucene instance.
-	 *                    If you have 15 instances, the number of elements returned
-	 *                    can be at most <code>limit * 15</code>
-	 * @return the collection has one or more flux
-	 */
-	public Mono<SearchResult<T, U>> searchWithValues(
-			ClientQueryParams<SearchResultItem<T, U>> queryParams,
-			ValueGetter<T, U> valueGetter) {
-		return luceneIndex
-				.search(resolveSnapshot(queryParams.getSnapshot()), fixOffset(luceneIndex, queryParams.toQueryParams()), indicizer.getKeyFieldName())
-				.flatMap(llSearchResult -> this.transformLuceneResultWithValues(llSearchResult,
-						queryParams.getSort(),
-						queryParams.getScoreMode(),
-						fixTransformOffset(luceneIndex, queryParams.getOffset()),
-						queryParams.getLimit(),
-						valueGetter
-				));
-	}
-
-	public Mono<Long> count(@Nullable CompositeSnapshot snapshot, Query query) {
-		return this.search(ClientQueryParams.<SearchResultKey<T>>builder().snapshot(snapshot).query(query).limit(0).build())
-				.map(SearchResultKeys::getTotalHitsCount);
-	}
-
-	public boolean isLowMemoryMode() {
-		return luceneIndex.isLowMemoryMode();
-	}
-
-	public Mono<Void> close() {
-		return luceneIndex.close();
-	}
-
-	/**
-	 * Flush writes to disk
-	 */
-	public Mono<Void> flush() {
-		return luceneIndex.flush();
-	}
-
-	/**
-	 * Refresh index searcher
-	 */
-	public Mono<Void> refresh() {
-		return luceneIndex.refresh();
-	}
-
-	@Override
-	public Mono<LLSnapshot> takeSnapshot() {
-		return luceneIndex.takeSnapshot();
-	}
-
-	@Override
-	public Mono<Void> releaseSnapshot(LLSnapshot snapshot) {
-		return luceneIndex.releaseSnapshot(snapshot);
-	}
 }
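Note: this hunk turns LuceneIndex from a concrete class into an interface; the implementation moves unchanged into the new LuceneIndexImpl file shown next. A minimal sketch of the same extract-interface refactor on toy types (KeyValueIndex and KeyValueIndexImpl are invented names, not project API) shows why existing call sites keep compiling: they only ever name the interface.

import java.util.concurrent.ConcurrentHashMap;
import reactor.core.publisher.Mono;

interface KeyValueIndex<K, V> {
	Mono<Void> add(K key, V value);
	Mono<Long> count();
}

final class KeyValueIndexImpl<K, V> implements KeyValueIndex<K, V> {
	private final ConcurrentHashMap<K, V> backing = new ConcurrentHashMap<>();

	@Override
	public Mono<Void> add(K key, V value) {
		return Mono.fromRunnable(() -> backing.put(key, value));
	}

	@Override
	public Mono<Long> count() {
		return Mono.fromSupplier(() -> (long) backing.size());
	}
}

class ExtractInterfaceDemo {
	public static void main(String[] args) {
		// Callers depend only on the interface; swapping the implementation needs no change here.
		KeyValueIndex<String, String> index = new KeyValueIndexImpl<>();
		index.add("k", "v").then(index.count()).subscribe(System.out::println); // 1
	}
}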
src/main/java/it/cavallium/dbengine/client/LuceneIndexImpl.java (new file, 304 lines)
@@ -0,0 +1,304 @@
+package it.cavallium.dbengine.client;
+
+import it.cavallium.dbengine.client.query.ClientQueryParams;
+import it.cavallium.dbengine.client.query.current.data.Query;
+import it.cavallium.dbengine.client.query.current.data.QueryParams;
+import it.cavallium.dbengine.database.LLLuceneIndex;
+import it.cavallium.dbengine.database.LLScoreMode;
+import it.cavallium.dbengine.database.LLSearchResult;
+import it.cavallium.dbengine.database.LLSnapshot;
+import it.cavallium.dbengine.database.LLTerm;
+import it.cavallium.dbengine.database.collections.Joiner.ValueGetter;
+import it.cavallium.dbengine.lucene.LuceneUtils;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.jetbrains.annotations.Nullable;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.util.function.Tuple2;
+
+public class LuceneIndexImpl<T, U> implements LuceneIndex<T, U> {
+
+	private final LLLuceneIndex luceneIndex;
+	private final Indicizer<T,U> indicizer;
+
+	public LuceneIndexImpl(LLLuceneIndex luceneIndex, Indicizer<T, U> indicizer) {
+		this.luceneIndex = luceneIndex;
+		this.indicizer = indicizer;
+	}
+
+	private LLSnapshot resolveSnapshot(CompositeSnapshot snapshot) {
+		if (snapshot == null) {
+			return null;
+		} else {
+			return snapshot.getSnapshot(luceneIndex);
+		}
+	}
+
+	@Override
+	public Mono<Void> addDocument(T key, U value) {
+		return indicizer
+				.toDocument(key, value)
+				.flatMap(doc -> luceneIndex.addDocument(indicizer.toIndex(key), doc));
+	}
+
+	@Override
+	public Mono<Void> addDocuments(Flux<Entry<T, U>> entries) {
+		return luceneIndex
+				.addDocuments(entries
+						.flatMap(entry -> indicizer
+								.toDocument(entry.getKey(), entry.getValue())
+								.map(doc -> Map.entry(indicizer.toIndex(entry.getKey()), doc)))
+						.groupBy(Entry::getKey, Entry::getValue)
+				);
+	}
+
+	@Override
+	public Mono<Void> deleteDocument(T key) {
+		LLTerm id = indicizer.toIndex(key);
+		return luceneIndex.deleteDocument(id);
+	}
+
+	@Override
+	public Mono<Void> updateDocument(T key, U value) {
+		return indicizer
+				.toDocument(key, value)
+				.flatMap(doc -> luceneIndex.updateDocument(indicizer.toIndex(key), doc));
+	}
+
+	@Override
+	public Mono<Void> updateDocuments(Flux<Entry<T, U>> entries) {
+		return luceneIndex
+				.updateDocuments(entries
+						.flatMap(entry -> indicizer
+								.toDocument(entry.getKey(), entry.getValue())
+								.map(doc -> Map.entry(indicizer.toIndex(entry.getKey()), doc)))
+						.groupBy(Entry::getKey, Entry::getValue)
+				);
+	}
+
+	@Override
+	public Mono<Void> deleteAll() {
+		return luceneIndex.deleteAll();
+	}
+
+	private static QueryParams fixOffset(LLLuceneIndex luceneIndex, QueryParams queryParams) {
+		if (luceneIndex.supportsOffset()) {
+			return queryParams;
+		} else {
+			return queryParams.setOffset(0);
+		}
+	}
+
+	private static long fixTransformOffset(LLLuceneIndex luceneIndex, long offset) {
+		if (luceneIndex.supportsOffset()) {
+			return 0;
+		} else {
+			return offset;
+		}
+	}
+
+	private Mono<SearchResultKeys<T>> transformLuceneResult(LLSearchResult llSearchResult,
+			@Nullable MultiSort<SearchResultKey<T>> sort,
+			LLScoreMode scoreMode,
+			long offset,
+			@Nullable Long limit) {
+		Flux<SearchResultKeys<T>> mappedKeys = llSearchResult
+				.getResults()
+				.map(flux -> new SearchResultKeys<>(flux
+						.getResults()
+						.map(signal -> new SearchResultKey<>(indicizer.getKey(signal.getKey()), signal.getScore())),
+						flux.getTotalHitsCount()
+				));
+		MultiSort<SearchResultKey<T>> finalSort;
+		if (scoreMode != LLScoreMode.COMPLETE_NO_SCORES && sort == null) {
+			finalSort = MultiSort.topScore();
+		} else {
+			finalSort = sort;
+		}
+
+		MultiSort<SearchResultKey<T>> mappedSort;
+		if (finalSort != null) {
+			mappedSort = new MultiSort<>(
+					finalSort.getQuerySort(),
+					(signal1, signal2) -> finalSort.getResultSort().compare((signal1), signal2)
+			);
+		} else {
+			mappedSort = null;
+		}
+		return LuceneUtils.mergeSignalStreamKeys(mappedKeys, mappedSort, offset, limit);
+	}
+
+	private Mono<SearchResult<T, U>> transformLuceneResultWithValues(LLSearchResult llSearchResult,
+			@Nullable MultiSort<SearchResultItem<T, U>> sort,
+			LLScoreMode scoreMode,
+			long offset,
+			@Nullable Long limit,
+			ValueGetter<T, U> valueGetter) {
+		Flux<SearchResult<T, U>> mappedKeys = llSearchResult
+				.getResults()
+				.map(flux -> new SearchResult<>(flux.getResults().flatMapSequential(signal -> {
+					var key = indicizer.getKey(signal.getKey());
+					return valueGetter
+							.get(key)
+							.map(value -> new SearchResultItem<>(key, value, signal.getScore()));
+				}), flux.getTotalHitsCount()));
+		MultiSort<SearchResultItem<T, U>> finalSort;
+		if (scoreMode != LLScoreMode.COMPLETE_NO_SCORES && sort == null) {
+			finalSort = MultiSort.topScoreWithValues();
+		} else {
+			finalSort = sort;
+		}
+
+		MultiSort<SearchResultItem<T, U>> mappedSort;
+		if (finalSort != null) {
+			mappedSort = new MultiSort<>(
+					finalSort.getQuerySort(),
+					(signal1, signal2) -> finalSort.getResultSort().compare((signal1), signal2)
+			);
+		} else {
+			mappedSort = null;
+		}
+		return LuceneUtils.mergeSignalStreamItems(mappedKeys, mappedSort, offset, limit);
+	}
+
+	/**
+	 *
+	 * @param queryParams the limit is valid for each lucene instance.
+	 *                    If you have 15 instances, the number of elements returned
+	 *                    can be at most <code>limit * 15</code>
+	 * @return the collection has one or more flux
+	 */
+	@Override
+	public Mono<SearchResultKeys<T>> moreLikeThis(ClientQueryParams<SearchResultKey<T>> queryParams,
+			T key,
+			U mltDocumentValue) {
+		Flux<Tuple2<String, Set<String>>> mltDocumentFields
+				= indicizer.getMoreLikeThisDocumentFields(key, mltDocumentValue);
+		return luceneIndex
+				.moreLikeThis(resolveSnapshot(queryParams.getSnapshot()), fixOffset(luceneIndex, queryParams.toQueryParams()), indicizer.getKeyFieldName(), mltDocumentFields)
+				.flatMap(llSearchResult -> this.transformLuceneResult(llSearchResult,
+						queryParams.getSort(),
+						queryParams.getScoreMode(),
+						fixTransformOffset(luceneIndex, queryParams.getOffset()),
+						queryParams.getLimit()
+				));
+
+	}
+
+	/**
+	 *
+	 * @param queryParams the limit is valid for each lucene instance.
+	 *                    If you have 15 instances, the number of elements returned
+	 *                    can be at most <code>limit * 15</code>
+	 * @return the collection has one or more flux
+	 */
+	@Override
+	public Mono<SearchResult<T, U>> moreLikeThisWithValues(ClientQueryParams<SearchResultItem<T, U>> queryParams,
+			T key,
+			U mltDocumentValue,
+			ValueGetter<T, U> valueGetter) {
+		Flux<Tuple2<String, Set<String>>> mltDocumentFields
+				= indicizer.getMoreLikeThisDocumentFields(key, mltDocumentValue);
+		return luceneIndex
+				.moreLikeThis(resolveSnapshot(queryParams.getSnapshot()),
+						fixOffset(luceneIndex, queryParams.toQueryParams()),
+						indicizer.getKeyFieldName(),
+						mltDocumentFields
+				)
+				.flatMap(llSearchResult -> this.transformLuceneResultWithValues(llSearchResult,
+						queryParams.getSort(),
+						queryParams.getScoreMode(),
+						fixTransformOffset(luceneIndex, queryParams.getOffset()),
+						queryParams.getLimit(),
+						valueGetter
+				));
+	}
+
+	/**
+	 *
+	 * @param queryParams the limit is valid for each lucene instance.
+	 *                    If you have 15 instances, the number of elements returned
+	 *                    can be at most <code>limit * 15</code>
+	 * @return the collection has one or more flux
+	 */
+	@Override
+	public Mono<SearchResultKeys<T>> search(ClientQueryParams<SearchResultKey<T>> queryParams) {
+		return luceneIndex
+				.search(resolveSnapshot(queryParams.getSnapshot()),
+						fixOffset(luceneIndex, queryParams.toQueryParams()),
+						indicizer.getKeyFieldName()
+				)
+				.flatMap(llSearchResult -> this.transformLuceneResult(llSearchResult,
+						queryParams.getSort(),
+						queryParams.getScoreMode(),
+						fixTransformOffset(luceneIndex, queryParams.getOffset()),
+						queryParams.getLimit()
+				));
+	}
+
+	/**
+	 *
+	 * @param queryParams the limit is valid for each lucene instance.
+	 *                    If you have 15 instances, the number of elements returned
+	 *                    can be at most <code>limit * 15</code>
+	 * @return the collection has one or more flux
+	 */
+	@Override
+	public Mono<SearchResult<T, U>> searchWithValues(ClientQueryParams<SearchResultItem<T, U>> queryParams,
+			ValueGetter<T, U> valueGetter) {
+		return luceneIndex
+				.search(resolveSnapshot(queryParams.getSnapshot()), fixOffset(luceneIndex, queryParams.toQueryParams()), indicizer.getKeyFieldName())
+				.flatMap(llSearchResult -> this.transformLuceneResultWithValues(llSearchResult,
+						queryParams.getSort(),
+						queryParams.getScoreMode(),
+						fixTransformOffset(luceneIndex, queryParams.getOffset()),
+						queryParams.getLimit(),
+						valueGetter
+				));
+	}
+
+	@Override
+	public Mono<Long> count(@Nullable CompositeSnapshot snapshot, Query query) {
+		return this.search(ClientQueryParams.<SearchResultKey<T>>builder().snapshot(snapshot).query(query).limit(0).build())
+				.map(SearchResultKeys::getTotalHitsCount);
+	}
+
+	@Override
+	public boolean isLowMemoryMode() {
+		return luceneIndex.isLowMemoryMode();
+	}
+
+	@Override
+	public Mono<Void> close() {
+		return luceneIndex.close();
+	}
+
+	/**
+	 * Flush writes to disk
+	 */
+	@Override
+	public Mono<Void> flush() {
+		return luceneIndex.flush();
+	}
+
+	/**
+	 * Refresh index searcher
+	 */
+	@Override
+	public Mono<Void> refresh() {
+		return luceneIndex.refresh();
+	}
+
+	@Override
+	public Mono<LLSnapshot> takeSnapshot() {
+		return luceneIndex.takeSnapshot();
+	}
+
+	@Override
+	public Mono<Void> releaseSnapshot(LLSnapshot snapshot) {
+		return luceneIndex.releaseSnapshot(snapshot);
+	}
+}
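Note: fixOffset() and fixTransformOffset() in the file above work as a pair. If the underlying index cannot apply an offset itself, the offset in the query is zeroed out and is instead applied later, while the merged results are transformed. A minimal, self-contained sketch of that client-side fallback (illustrative only, not the actual LuceneUtils.mergeSignalStream* implementation):

import reactor.core.publisher.Flux;

class OffsetFallbackExample {

	// Apply offset/limit on the client side over an already-merged result stream.
	static <T> Flux<T> applyOffsetAndLimit(Flux<T> mergedResults, long offset, Long limit) {
		Flux<T> skipped = mergedResults.skip(offset);
		return limit != null ? skipped.take(limit) : skipped;
	}

	public static void main(String[] args) {
		applyOffsetAndLimit(Flux.range(1, 10), 3, 4L)
				.subscribe(System.out::println); // 4, 5, 6, 7
	}
}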
@@ -4,6 +4,7 @@ import it.cavallium.dbengine.database.collections.Joiner.ValueGetter;
 import lombok.Value;
 import reactor.core.publisher.Flux;
 
+@SuppressWarnings("unused")
 @Value
 public class SearchResultKeys<T> {
 
@@ -39,6 +39,7 @@ public class QueryGson {
 		return gsonBuilder;
 	}
 
+	@SuppressWarnings("DuplicatedCode")
 	public static class DbClassesGenericSerializer<T extends IType> implements JsonSerializer<T>, JsonDeserializer<T> {
 
 		private final BiMap<String, Class<? extends IBasicType>> subTypes;
 
@@ -15,20 +15,12 @@ import it.cavallium.dbengine.client.query.current.data.TermPosition;
 import it.cavallium.dbengine.client.query.current.data.TermQuery;
 import it.cavallium.dbengine.lucene.LuceneUtils;
 import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
-import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.List;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.util.QueryBuilder;
 import org.jetbrains.annotations.NotNull;
 
+@SuppressWarnings("unused")
 public class QueryUtils {
 
 	public static Query approximateSearch(TextFieldsAnalyzer preferredAnalyzer, String field, String text) {
@@ -100,28 +92,4 @@ public class QueryUtils {
 						.toArray(TermAndBoost[]::new)
 		);
 	}
-
-	private static List<TermPosition> getTerms(TextFieldsAnalyzer preferredAnalyzer, String field, String text) throws IOException {
-		Analyzer analyzer = LuceneUtils.getAnalyzer(preferredAnalyzer);
-		TokenStream ts = analyzer.tokenStream(field, new StringReader(text));
-		return getTerms(ts, field);
-	}
-
-	private static List<TermPosition> getTerms(TokenStream ts, String field) throws IOException {
-		TermToBytesRefAttribute charTermAttr = ts.addAttribute(TermToBytesRefAttribute.class);
-		PositionIncrementAttribute positionIncrementTermAttr = ts.addAttribute(PositionIncrementAttribute.class);
-		List<TermPosition> terms = new LinkedList<>();
-		try (ts) {
-			ts.reset(); // Resets this stream to the beginning. (Required)
-			int termPosition = -1;
-			while (ts.incrementToken()) {
-				var tokenPositionIncrement = positionIncrementTermAttr.getPositionIncrement();
-				termPosition += tokenPositionIncrement;
-				terms.add(TermPosition.of(QueryParser.toQueryTerm(new Term(field, charTermAttr.getBytesRef())), termPosition));
-			}
-			ts.end(); // Perform end-of-stream operations, e.g. set the final offset.
-		}
-		// Release resources associated with this stream.
-		return terms;
-	}
 }
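Note: the removed getTerms helpers consumed a Lucene TokenStream manually to recover term positions. For reference, this is the standard consumption loop; the sketch uses StandardAnalyzer and prints terms instead of building the project's TermPosition objects, which are not reproduced here.

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

class TokenStreamExample {
	public static void main(String[] args) throws IOException {
		try (Analyzer analyzer = new StandardAnalyzer();
				TokenStream ts = analyzer.tokenStream("body", "Hello token stream world")) {
			CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
			PositionIncrementAttribute posIncr = ts.addAttribute(PositionIncrementAttribute.class);
			ts.reset(); // required before the first incrementToken()
			int position = -1;
			while (ts.incrementToken()) {
				position += posIncr.getPositionIncrement();
				System.out.println(position + ": " + term.toString());
			}
			ts.end(); // performs end-of-stream operations, e.g. sets the final offset
		}
	}
}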
@@ -1010,6 +1010,7 @@ public class EnglishItalianStopFilter extends StopFilter {
 				.collect(Collectors.toSet()));
 	}
 
+	@SuppressWarnings("unused")
 	public static CharArraySet getStopWords() {
 		return stopWords;
 	}
 
@@ -1,9 +0,0 @@
-package it.cavallium.dbengine.database;
-
-import java.io.IOException;
-import org.apache.lucene.search.CollectionStatistics;
-
-public interface LLCollectionStatisticsGetter {
-
-	CollectionStatistics collectionStatistics(String field) throws IOException;
-}
@@ -14,42 +14,35 @@ public class LLItem {
 	private final LLType type;
 	private final String name;
 	private final byte[] data;
-	// nullable
-	private final byte[] data2;
 
-	public LLItem(LLType type, String name, byte[] data, byte[] data2) {
+	public LLItem(LLType type, String name, byte[] data) {
 		this.type = type;
 		this.name = name;
 		this.data = data;
-		this.data2 = data2;
 	}
 
 	private LLItem(LLType type, String name, String data) {
 		this.type = type;
 		this.name = name;
 		this.data = data.getBytes(StandardCharsets.UTF_8);
-		this.data2 = null;
 	}
 
 	private LLItem(LLType type, String name, int data) {
 		this.type = type;
 		this.name = name;
 		this.data = Ints.toByteArray(data);
-		this.data2 = null;
 	}
 
 	private LLItem(LLType type, String name, float data) {
 		this.type = type;
 		this.name = name;
-		this.data = ByteBuffer.allocate(4).putFloat(data).array();;
+		this.data = ByteBuffer.allocate(4).putFloat(data).array();
-		this.data2 = null;
 	}
 
 	private LLItem(LLType type, String name, long data) {
 		this.type = type;
 		this.name = name;
 		this.data = Longs.toByteArray(data);
-		this.data2 = null;
 	}
 
 	public static LLItem newIntPoint(String name, int data) {
@@ -96,10 +89,6 @@ public class LLItem {
 		return data;
 	}
 
-	public byte[] getData2() {
-		return data2;
-	}
-
 	@Override
 	public boolean equals(Object o) {
 		if (this == o) {
@@ -111,15 +100,13 @@ public class LLItem {
 		LLItem llItem = (LLItem) o;
 		return type == llItem.type &&
 				Objects.equals(name, llItem.name) &&
-				Arrays.equals(data, llItem.data) &&
-				Arrays.equals(data2, llItem.data2);
+				Arrays.equals(data, llItem.data);
 	}
 
 	@Override
 	public int hashCode() {
 		int result = Objects.hash(type, name);
 		result = 31 * result + Arrays.hashCode(data);
-		result = 31 * result + Arrays.hashCode(data2);
 		return result;
 	}
 
@@ -130,9 +117,6 @@ public class LLItem {
 				.add("name='" + name + "'");
 		if (data != null && data.length > 0) {
 			sj.add("data=" + new String(data));
-		}
-		if (data2 != null && data2.length > 0) {
-			sj.add("data2=" + new String(data2));
 		}
 		return sj.toString();
 	}
 
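Note: after removing data2, equals() and hashCode() above still go through Arrays.equals and Arrays.hashCode for the remaining byte[] field. That is deliberate: the Objects variants compare array references, not contents. A tiny self-contained sketch of the difference:

import java.util.Arrays;
import java.util.Objects;

class ArrayEqualityExample {
	public static void main(String[] args) {
		byte[] a = {1, 2, 3};
		byte[] b = {1, 2, 3};
		System.out.println(Objects.equals(a, b)); // false: different array instances
		System.out.println(Arrays.equals(a, b));  // true: same contents
		// Hash codes follow the same rule, so hashCode() must use Arrays.hashCode(data).
		System.out.println(Arrays.hashCode(a) == Arrays.hashCode(b)); // true
	}
}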
@@ -2,6 +2,7 @@ package it.cavallium.dbengine.database;
 
 import java.util.Objects;
 
+@SuppressWarnings("unused")
 public class LLSort {
 
 	private final String fieldName;
@@ -3,6 +3,7 @@ package it.cavallium.dbengine.database;
 import java.util.Arrays;
 import java.util.Objects;
 
+@SuppressWarnings("unused")
 public class LLTopKeys {
 
 	private final long totalHitsCount;
@@ -6,7 +6,7 @@ import org.jetbrains.annotations.NotNull;
 
 public class DatabaseEmpty {
 
-	@SuppressWarnings("unused")
+	@SuppressWarnings({"unused", "InstantiationOfUtilityClass"})
 	public static final Nothing NOTHING = new Nothing();
 	private static final byte[] NOTHING_BYTES = new byte[0];
 	private static final Serializer<Nothing, byte[]> NOTHING_SERIALIZER = new Serializer<>() {
@@ -50,24 +50,20 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 	}
 
 	static byte[] firstRangeKey(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) {
-		return fillKeySuffixAndExt(prefixKey, prefixLength, suffixLength, extLength, (byte) 0x00);
+		return zeroFillKeySuffixAndExt(prefixKey, prefixLength, suffixLength, extLength);
 	}
 
 	static byte[] nextRangeKey(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) {
-		byte[] nonIncremented = fillKeySuffixAndExt(prefixKey, prefixLength, suffixLength, extLength, (byte) 0x00);
+		byte[] nonIncremented = zeroFillKeySuffixAndExt(prefixKey, prefixLength, suffixLength, extLength);
 		return incrementPrefix(nonIncremented, prefixLength);
 	}
 
-	protected static byte[] fillKeySuffixAndExt(byte[] prefixKey,
-			int prefixLength,
-			int suffixLength,
-			int extLength,
-			byte fillValue) {
+	protected static byte[] zeroFillKeySuffixAndExt(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) {
 		assert prefixKey.length == prefixLength;
 		assert suffixLength > 0;
 		assert extLength >= 0;
 		byte[] result = Arrays.copyOf(prefixKey, prefixLength + suffixLength + extLength);
-		Arrays.fill(result, prefixLength, result.length, fillValue);
+		Arrays.fill(result, prefixLength, result.length, (byte) 0);
 		return result;
 	}
 
@@ -76,7 +72,7 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 			int prefixLength,
 			int suffixLength,
 			int extLength) {
-		return fillKeyExt(prefixKey, suffixKey, prefixLength, suffixLength, extLength, (byte) 0x00);
+		return zeroFillKeyExt(prefixKey, suffixKey, prefixLength, suffixLength, extLength);
 	}
 
 	static byte[] nextRangeKey(byte[] prefixKey,
@@ -84,23 +80,22 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 			int prefixLength,
 			int suffixLength,
 			int extLength) {
-		byte[] nonIncremented = fillKeyExt(prefixKey, suffixKey, prefixLength, suffixLength, extLength, (byte) 0x00);
+		byte[] nonIncremented = zeroFillKeyExt(prefixKey, suffixKey, prefixLength, suffixLength, extLength);
 		return incrementPrefix(nonIncremented, prefixLength + suffixLength);
 	}
 
-	protected static byte[] fillKeyExt(byte[] prefixKey,
+	protected static byte[] zeroFillKeyExt(byte[] prefixKey,
 			byte[] suffixKey,
 			int prefixLength,
 			int suffixLength,
-			int extLength,
-			byte fillValue) {
+			int extLength) {
 		assert prefixKey.length == prefixLength;
 		assert suffixKey.length == suffixLength;
 		assert suffixLength > 0;
 		assert extLength >= 0;
 		byte[] result = Arrays.copyOf(prefixKey, prefixLength + suffixLength + extLength);
 		System.arraycopy(suffixKey, 0, result, prefixLength, suffixLength);
-		Arrays.fill(result, prefixLength + suffixLength, result.length, fillValue);
+		Arrays.fill(result, prefixLength + suffixLength, result.length, (byte) 0);
 		return result;
 	}
 
@@ -210,7 +205,6 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 		return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range);
 	}
 
-	@SuppressWarnings("ReactiveStreamsUnusedPublisher")
 	@Override
 	public Mono<US> at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
 		byte[] keySuffixData = serializeSuffix(keySuffix);
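Note: firstRangeKey/nextRangeKey above build the inclusive lower bound and exclusive upper bound for a prefix scan: pad the prefix with zero bytes, and for the upper bound increment the prefix part so every key that starts with the original prefix sorts strictly below it. A simplified, self-contained sketch of the idea; the increment here is a hypothetical stand-in (it ignores the all-0xFF overflow case) for the project's incrementPrefix, which is not shown in this diff.

import java.util.Arrays;

class RangeKeyExample {

	// Lower bound: prefix followed by zero bytes for the suffix+ext part.
	static byte[] firstRangeKey(byte[] prefix, int suffixAndExtLength) {
		return Arrays.copyOf(prefix, prefix.length + suffixAndExtLength); // copyOf zero-fills
	}

	// Upper bound: same zero-filled key, with the prefix part incremented by one.
	static byte[] nextRangeKey(byte[] prefix, int suffixAndExtLength) {
		byte[] key = Arrays.copyOf(prefix, prefix.length + suffixAndExtLength);
		for (int i = prefix.length - 1; i >= 0; i--) {
			if (key[i] != (byte) 0xFF) {
				key[i]++;
				break;
			}
			key[i] = 0; // carry into the previous byte
		}
		return key;
	}

	public static void main(String[] args) {
		byte[] prefix = {0x10, 0x20};
		System.out.println(Arrays.toString(firstRangeKey(prefix, 2))); // [16, 32, 0, 0]
		System.out.println(Arrays.toString(nextRangeKey(prefix, 2)));  // [16, 33, 0, 0]
	}
}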
@@ -1,6 +0,0 @@
-package it.cavallium.dbengine.database.collections;
-
-public interface DatabaseMappable<T, U, US extends DatabaseStage<U>> {
-
-	DatabaseStageMap<T, U, US> map();
-}
@@ -10,6 +10,7 @@ import java.util.Set;
 import org.jetbrains.annotations.Nullable;
 import reactor.core.publisher.Mono;
 
+@SuppressWarnings("unused")
 public class DatabaseSetDictionary<T> extends DatabaseMapDictionaryDeep<T, Nothing, DatabaseStageEntry<Nothing>> {
 
 	protected DatabaseSetDictionary(LLDictionary dictionary,
@@ -2,7 +2,7 @@ package it.cavallium.dbengine.database.collections;
 
 import reactor.core.publisher.Mono;
 
-public interface Joiner<KEY, DBVALUE, JOINEDVALUE> {
+public interface Joiner<KEY, DB_VALUE, JOINED_VALUE> {
 
 	interface ValueGetter<KEY, VALUE> {
 
@@ -19,9 +19,6 @@ public interface Joiner<KEY, DBVALUE, JOINEDVALUE> {
 	 *
 	 * Can return Mono error IOException
 	 */
-	Mono<JOINEDVALUE> join(ValueGetter<KEY, DBVALUE> dbValueGetter, DBVALUE value);
+	Mono<JOINED_VALUE> join(ValueGetter<KEY, DB_VALUE> dbValueGetter, DB_VALUE value);
 
-	static <KEY, DBVALUE> Joiner<KEY, DBVALUE, DBVALUE> direct() {
-		return (dbValueGetter, value) -> Mono.just(value);
-	};
 }
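Note: the hunk above only renames the type parameters and drops the unused direct() factory. As a sketch of what an implementation of the renamed interface can look like, here is a toy joiner that turns a stored user id (the DB value) into "id:name" by fetching the name through a ValueGetter. The interface shapes follow the signatures visible in the diff; everything else (names, data) is illustrative.

import reactor.core.publisher.Mono;

class JoinerExample {

	interface ValueGetter<KEY, VALUE> {
		Mono<VALUE> get(KEY key);
	}

	interface Joiner<KEY, DB_VALUE, JOINED_VALUE> {
		Mono<JOINED_VALUE> join(ValueGetter<KEY, DB_VALUE> dbValueGetter, DB_VALUE value);
	}

	public static void main(String[] args) {
		// DB value: a user id; joined value: "<id>:<name>".
		ValueGetter<String, String> namesById = id -> Mono.just("user-" + id);
		Joiner<String, String, String> joiner =
				(getter, id) -> getter.get(id).map(name -> id + ":" + name);
		joiner.join(namesById, "42").subscribe(System.out::println); // 42:user-42
	}
}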
@@ -2,8 +2,7 @@ package it.cavallium.dbengine.database.collections;
 
 import java.io.IOException;
 
-@SuppressWarnings("SpellCheckingInspection")
-public interface JoinerBlocking<KEY, DBVALUE, JOINEDVALUE> {
+public interface JoinerBlocking<KEY, DB_VALUE, JOINED_VALUE> {
 
 	interface ValueGetterBlocking<KEY, VALUE> {
 		VALUE get(KEY key) throws IOException;
@@ -14,9 +13,6 @@ public interface JoinerBlocking<KEY, DBVALUE, JOINEDVALUE> {
 	 * Good examples: message id, send date, ...
 	 * Bad examples: message content, views, edited, ...
 	 */
-	JOINEDVALUE join(ValueGetterBlocking<KEY, DBVALUE> dbValueGetter, DBVALUE value) throws IOException;
+	JOINED_VALUE join(ValueGetterBlocking<KEY, DB_VALUE> dbValueGetter, DB_VALUE value) throws IOException;
 
-	static <KEY, DBVALUE> JoinerBlocking<KEY, DBVALUE, DBVALUE> direct() {
-		return (dbValueGetter, value) -> value;
-	};
 }
@@ -40,12 +40,7 @@ public class SubStageGetterSingle<T> implements SubStageGetter<T, DatabaseStageE
 					return null;
 				})
 		)
-				.then(Mono.fromSupplier(() -> {
-					return new DatabaseSingle<>(dictionary,
-							keyPrefix,
-							serializer
-					);
-				}));
+				.then(Mono.fromSupplier(() -> new DatabaseSingle<>(dictionary, keyPrefix, serializer)));
 	}
 
 	@Override
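Note: the change above only collapses the supplier into a one-liner; the reason for Mono.fromSupplier (rather than Mono.just) is laziness, i.e. the value is produced per subscription instead of once at assembly time. A short sketch of the difference:

import reactor.core.publisher.Mono;

class FromSupplierExample {
	public static void main(String[] args) {
		Mono<Long> eager = Mono.just(System.nanoTime());       // value captured once, right now
		Mono<Long> lazy = Mono.fromSupplier(System::nanoTime); // supplier evaluated on each subscribe

		Long e1 = eager.block();
		Long e2 = eager.block();
		Long l1 = lazy.block();
		Long l2 = lazy.block();
		System.out.println(e1.equals(e2)); // true: same captured value
		System.out.println(l1.equals(l2)); // false: supplier re-evaluated
	}
}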
@@ -44,6 +44,8 @@ import org.warp.commonutils.log.LoggerFactory;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 import reactor.core.scheduler.Scheduler;
+import reactor.util.function.Tuple3;
+import reactor.util.function.Tuples;
 
 @NotAtomic
 public class LLLocalDictionary implements LLDictionary {
@@ -299,6 +301,7 @@ public class LLLocalDictionary implements LLDictionary {
 
 					Optional<byte[]> newData = value.apply(prevData);
 					if (prevData.isPresent() && newData.isEmpty()) {
+						//noinspection DuplicatedCode
 						if (updateMode == UpdateMode.ALLOW) {
 							var ws = lock.tryConvertToWriteLock(stamp);
 							if (ws != 0) {
@@ -315,6 +318,7 @@ public class LLLocalDictionary implements LLDictionary {
 						db.delete(cfh, key);
 					} else if (newData.isPresent()
 							&& (prevData.isEmpty() || !Arrays.equals(prevData.get(), newData.get()))) {
+						//noinspection DuplicatedCode
 						if (updateMode == UpdateMode.ALLOW) {
 							var ws = lock.tryConvertToWriteLock(stamp);
 							if (ws != 0) {
@@ -519,12 +523,10 @@ public class LLLocalDictionary implements LLDictionary {
 
 	@NotNull
 	private Mono<Entry<byte[], byte[]>> putEntryToWriteBatch(Entry<byte[], byte[]> newEntry,
-			boolean getOldValues,
-			boolean existsAlmostCertainly,
-			CappedWriteBatch writeBatch) {
+			boolean getOldValues, CappedWriteBatch writeBatch) {
 		Mono<byte[]> getOldValueMono;
 		if (getOldValues) {
-			getOldValueMono = get(null, newEntry.getKey(), existsAlmostCertainly);
+			getOldValueMono = get(null, newEntry.getKey(), false);
 		} else {
 			getOldValueMono = Mono.empty();
 		}
@@ -656,7 +658,7 @@ public class LLLocalDictionary implements LLDictionary {
 						})
 						.subscribeOn(dbScheduler)
 						.thenMany(entries)
-						.flatMapSequential(newEntry -> putEntryToWriteBatch(newEntry, getOldValues, false, writeBatch)),
+						.flatMapSequential(newEntry -> putEntryToWriteBatch(newEntry, getOldValues, writeBatch)),
 				writeBatch -> Mono
 						.fromCallable(() -> {
 							try (writeBatch) {
@@ -670,29 +672,6 @@ public class LLLocalDictionary implements LLDictionary {
 				.onErrorMap(cause -> new IOException("Failed to write range", cause));
 	}
 
-	private void deleteSmallRange(LLRange range)
-			throws RocksDBException {
-		var readOpts = getReadOptions(null);
-		readOpts.setFillCache(false);
-		if (range.hasMin()) {
-			readOpts.setIterateLowerBound(new Slice(range.getMin()));
-		}
-		if (range.hasMax()) {
-			readOpts.setIterateUpperBound(new Slice(range.getMax()));
-		}
-		try (var rocksIterator = db.newIterator(cfh, readOpts)) {
-			if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) {
-				rocksIterator.seek(range.getMin());
-			} else {
-				rocksIterator.seekToFirst();
-			}
-			while (rocksIterator.isValid()) {
-				db.delete(cfh, rocksIterator.key());
-				rocksIterator.next();
-			}
-		}
-	}
-
 	private void deleteSmallRangeWriteBatch(CappedWriteBatch writeBatch, LLRange range)
 			throws RocksDBException {
 		var readOpts = getReadOptions(null);
@@ -716,29 +695,6 @@ public class LLLocalDictionary implements LLDictionary {
 		}
 	}
 
-	private static byte[] incrementLexicographically(byte[] key) {
-		boolean remainder = true;
-		int prefixLength = key.length;
-		final byte ff = (byte) 0xFF;
-		for (int i = prefixLength - 1; i >= 0; i--) {
-			if (key[i] != ff) {
-				key[i]++;
-				remainder = false;
-				break;
-			} else {
-				key[i] = 0x00;
-				remainder = true;
-			}
-		}
-
-		if (remainder) {
-			Arrays.fill(key, 0, prefixLength, (byte) 0xFF);
-			return Arrays.copyOf(key, key.length + 1);
-		} else {
-			return key;
-		}
-	}
-
 	public Mono<Void> clear() {
 		return Mono
 				.<Void>fromCallable(() -> {
@@ -1025,4 +981,32 @@ public class LLLocalDictionary implements LLDictionary {
 				.onErrorMap(cause -> new IOException("Failed to delete " + range.toString(), cause))
 				.subscribeOn(dbScheduler);
 	}
+
+	@NotNull
+	public static Tuple3<RocksIterator, Optional<Slice>, Optional<Slice>> getRocksIterator(ReadOptions readOptions,
+			LLRange range,
+			RocksDB db,
+			ColumnFamilyHandle cfh) {
+		Slice sliceMin;
+		Slice sliceMax;
+		if (range.hasMin()) {
+			sliceMin = new Slice(range.getMin());
+			readOptions.setIterateLowerBound(sliceMin);
+		} else {
+			sliceMin = null;
+		}
+		if (range.hasMax()) {
+			sliceMax = new Slice(range.getMax());
+			readOptions.setIterateUpperBound(sliceMax);
+		} else {
+			sliceMax = null;
+		}
+		var rocksIterator = db.newIterator(cfh, readOptions);
+		if (!PREFER_SEEK_TO_FIRST && range.hasMin()) {
+			rocksIterator.seek(range.getMin());
+		} else {
+			rocksIterator.seekToFirst();
+		}
+		return Tuples.of(rocksIterator, Optional.ofNullable(sliceMin), Optional.ofNullable(sliceMax));
+	}
 }
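Note: the new getRocksIterator helper returns the iterator together with the optional lower/upper bound Slices because ReadOptions only references those slices; the caller has to keep them alive for the whole iteration and release them afterwards. A hedged, standalone RocksDB sketch of that consumption pattern (plain RocksDB API; LLRange, the column-family plumbing and the helper itself are not reproduced, and the /tmp path is just an example):

import java.util.Optional;
import org.rocksdb.Options;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;
import org.rocksdb.Slice;

class RangeScanExample {
	public static void main(String[] args) throws RocksDBException {
		RocksDB.loadLibrary();
		try (Options options = new Options().setCreateIfMissing(true);
				RocksDB db = RocksDB.open(options, "/tmp/example-db");
				ReadOptions readOptions = new ReadOptions()) {
			db.put(new byte[] {0x10, 0x01}, new byte[] {1});
			db.put(new byte[] {0x11, 0x00}, new byte[] {2});

			// These play the role of the Optional<Slice> values in the returned Tuple3.
			Optional<Slice> lower = Optional.of(new Slice(new byte[] {0x10}));
			Optional<Slice> upper = Optional.of(new Slice(new byte[] {0x11}));
			lower.ifPresent(readOptions::setIterateLowerBound);
			upper.ifPresent(readOptions::setIterateUpperBound);

			try (RocksIterator it = db.newIterator(readOptions)) {
				for (it.seekToFirst(); it.isValid(); it.next()) {
					System.out.println(it.key().length + " -> " + it.value()[0]);
				}
			} finally {
				// The slices must outlive the iteration; close them only afterwards.
				lower.ifPresent(Slice::close);
				upper.ifPresent(Slice::close);
			}
		}
	}
}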
@@ -16,7 +16,7 @@ public class LLLocalGroupedEntryReactiveRocksIterator extends
 			LLRange range,
 			ReadOptions readOptions,
 			String debugName) {
-		super(db, cfh, prefixLength, range, readOptions, false, true, debugName);
+		super(db, cfh, prefixLength, range, readOptions, false, true);
 	}
 
 	@Override
@@ -13,7 +13,7 @@ public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReacti
 			LLRange range,
 			ReadOptions readOptions,
 			String debugName) {
-		super(db, cfh, prefixLength, range, readOptions, true, false, debugName);
+		super(db, cfh, prefixLength, range, readOptions, true, false);
 	}
 
 	@Override
@ -1,17 +1,16 @@
|
|||||||
package it.cavallium.dbengine.database.disk;
|
package it.cavallium.dbengine.database.disk;
|
||||||
|
|
||||||
|
import static it.cavallium.dbengine.database.disk.LLLocalDictionary.getRocksIterator;
|
||||||
|
|
||||||
import it.cavallium.dbengine.database.LLRange;
|
import it.cavallium.dbengine.database.LLRange;
|
||||||
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
|
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Optional;
|
|
||||||
import org.rocksdb.ColumnFamilyHandle;
|
import org.rocksdb.ColumnFamilyHandle;
|
||||||
import org.rocksdb.ReadOptions;
|
import org.rocksdb.ReadOptions;
|
||||||
import org.rocksdb.RocksDB;
|
import org.rocksdb.RocksDB;
|
||||||
import org.rocksdb.RocksMutableObject;
|
import org.rocksdb.RocksMutableObject;
|
||||||
import org.rocksdb.Slice;
|
|
||||||
import reactor.core.publisher.Flux;
|
import reactor.core.publisher.Flux;
|
||||||
import reactor.util.function.Tuples;
|
|
||||||
|
|
||||||
public abstract class LLLocalGroupedReactiveRocksIterator<T> {
|
public abstract class LLLocalGroupedReactiveRocksIterator<T> {
|
||||||
|
|
||||||
@ -24,7 +23,6 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
|
|||||||
private final ReadOptions readOptions;
|
private final ReadOptions readOptions;
|
||||||
private final boolean canFillCache;
|
private final boolean canFillCache;
|
||||||
private final boolean readValues;
|
private final boolean readValues;
|
||||||
private final String debugName;
|
|
||||||
|
|
||||||
public LLLocalGroupedReactiveRocksIterator(RocksDB db,
|
public LLLocalGroupedReactiveRocksIterator(RocksDB db,
|
||||||
ColumnFamilyHandle cfh,
|
ColumnFamilyHandle cfh,
|
||||||
@ -32,8 +30,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
|
|||||||
LLRange range,
|
LLRange range,
|
||||||
ReadOptions readOptions,
|
ReadOptions readOptions,
|
||||||
boolean canFillCache,
|
boolean canFillCache,
|
||||||
boolean readValues,
|
boolean readValues) {
|
||||||
String debugName) {
|
|
||||||
this.db = db;
|
this.db = db;
|
||||||
this.cfh = cfh;
|
this.cfh = cfh;
|
||||||
this.prefixLength = prefixLength;
|
this.prefixLength = prefixLength;
|
||||||
@ -41,37 +38,15 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
    this.readOptions = readOptions;
    this.canFillCache = canFillCache;
    this.readValues = readValues;
-   this.debugName = debugName;
  }

- @SuppressWarnings("Convert2MethodRef")
  public Flux<List<T>> flux() {
    return Flux
        .generate(() -> {
          var readOptions = new ReadOptions(this.readOptions);
          readOptions.setFillCache(canFillCache && range.hasMin() && range.hasMax());
-         Slice sliceMin;
-         Slice sliceMax;
-         if (range.hasMin()) {
-           sliceMin = new Slice(range.getMin());
-           readOptions.setIterateLowerBound(sliceMin);
-         } else {
-           sliceMin = null;
-         }
-         if (range.hasMax()) {
-           sliceMax = new Slice(range.getMax());
-           readOptions.setIterateUpperBound(sliceMax);
-         } else {
-           sliceMax = null;
-         }
-         var rocksIterator = db.newIterator(cfh, readOptions);
-         if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) {
-           rocksIterator.seek(range.getMin());
-         } else {
-           rocksIterator.seekToFirst();
-         }
-         return Tuples.of(rocksIterator, Optional.ofNullable(sliceMin), Optional.ofNullable(sliceMax));
+         return getRocksIterator(readOptions, range, db, cfh);
        }, (tuple, sink) -> {
          var rocksIterator = tuple.getT1();
          ObjectArrayList<T> values = new ObjectArrayList<>();
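For context: the removed bound-setup block above is now centralized in LLLocalDictionary.getRocksIterator, whose body is not part of this diff; the same helper also replaces the equivalent blocks in the iterator classes further down. A plausible sketch of that helper, assuming it returns the same Tuple3<RocksIterator, Optional<Slice>, Optional<Slice>> shape the callers unpack with getT1()..getT3(), and that PREFER_SEEK_TO_FIRST is the existing field of LLLocalDictionary:

// Sketch only, not the repository's actual implementation.
public static Tuple3<RocksIterator, Optional<Slice>, Optional<Slice>> getRocksIterator(
    ReadOptions readOptions,
    LLRange range,
    RocksDB db,
    ColumnFamilyHandle cfh) {
  Slice sliceMin = null;
  Slice sliceMax = null;
  if (range.hasMin()) {
    // Keep a reference to the Slice so the caller can close it after iteration.
    sliceMin = new Slice(range.getMin());
    readOptions.setIterateLowerBound(sliceMin);
  }
  if (range.hasMax()) {
    sliceMax = new Slice(range.getMax());
    readOptions.setIterateUpperBound(sliceMax);
  }
  var rocksIterator = db.newIterator(cfh, readOptions);
  if (!PREFER_SEEK_TO_FIRST && range.hasMin()) {
    rocksIterator.seek(range.getMin());
  } else {
    rocksIterator.seekToFirst();
  }
  return Tuples.of(rocksIterator, Optional.ofNullable(sliceMin), Optional.ofNullable(sliceMax));
}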
@ -2,14 +2,11 @@ package it.cavallium.dbengine.database.disk;
  import it.cavallium.dbengine.database.LLRange;
  import java.util.Arrays;
- import java.util.Optional;
  import org.rocksdb.ColumnFamilyHandle;
  import org.rocksdb.ReadOptions;
  import org.rocksdb.RocksDB;
  import org.rocksdb.RocksMutableObject;
- import org.rocksdb.Slice;
  import reactor.core.publisher.Flux;
- import reactor.util.function.Tuples;

  public class LLLocalKeyPrefixReactiveRocksIterator {
@ -48,27 +45,7 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
          //readOptions.setReadaheadSize(2 * 1024 * 1024);
          readOptions.setFillCache(canFillCache);
        }
-       Slice sliceMin;
-       Slice sliceMax;
-       if (range.hasMin()) {
-         sliceMin = new Slice(range.getMin());
-         readOptions.setIterateLowerBound(sliceMin);
-       } else {
-         sliceMin = null;
-       }
-       if (range.hasMax()) {
-         sliceMax = new Slice(range.getMax());
-         readOptions.setIterateUpperBound(sliceMax);
-       } else {
-         sliceMax = null;
-       }
-       var rocksIterator = db.newIterator(cfh, readOptions);
-       if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) {
-         rocksIterator.seek(range.getMin());
-       } else {
-         rocksIterator.seekToFirst();
-       }
-       return Tuples.of(rocksIterator, Optional.ofNullable(sliceMin), Optional.ofNullable(sliceMax));
+       return LLLocalDictionary.getRocksIterator(readOptions, range, db, cfh);
      }, (tuple, sink) -> {
        var rocksIterator = tuple.getT1();
        byte[] firstGroupKey = null;
@ -96,4 +73,5 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
          tuple.getT3().ifPresent(RocksMutableObject::close);
        });
  }

}
@ -143,7 +143,8 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
    // end force flush
  }

- private void compactDb(RocksDB db, List<ColumnFamilyHandle> handles) throws RocksDBException {
+ @SuppressWarnings("unused")
+ private void compactDb(RocksDB db, List<ColumnFamilyHandle> handles) {
    // force compact the database
    for (ColumnFamilyHandle cfh : handles) {
      var t = new Thread(() -> {
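Only the signature changes here; the checked RocksDBException is now handled inside the worker threads instead of being declared. For readers unfamiliar with the pattern, a manual per-column-family compaction with the RocksDB Java API typically looks like this sketch (illustrative, not this repository's exact code):

// Sketch: one worker thread per column family, each asking RocksDB to compact
// the full key range of that family and swallowing the checked exception locally.
for (ColumnFamilyHandle cfh : handles) {
  var t = new Thread(() -> {
    try {
      db.compactRange(cfh);
    } catch (RocksDBException ex) {
      ex.printStackTrace();
    }
  }, "force-compaction");
  t.start();
}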
@ -95,10 +95,13 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
  private static final Supplier<Scheduler> lowMemorySchedulerSupplier = Suppliers.memoize(() ->
      Schedulers.newBoundedElastic(1, Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
          "lucene-low-memory", Integer.MAX_VALUE))::get;
+ @SuppressWarnings("FieldCanBeLocal")
  private final Supplier<Scheduler> querySchedulerSupplier = USE_STANDARD_SCHEDULERS ?
      Schedulers::boundedElastic : Suppliers.memoize(() -> boundedSchedulerSupplier.apply("query"))::get;
+ @SuppressWarnings("FieldCanBeLocal")
  private final Supplier<Scheduler> blockingSchedulerSupplier = USE_STANDARD_SCHEDULERS ?
      Schedulers::boundedElastic : Suppliers.memoize(() -> boundedSchedulerSupplier.apply("blocking"))::get;
+ @SuppressWarnings("FieldCanBeLocal")
  private final Supplier<Scheduler> blockingLuceneSearchSchedulerSupplier = USE_STANDARD_SCHEDULERS ?
      Schedulers::boundedElastic : Suppliers.memoize(() -> boundedSchedulerSupplier.apply("search-blocking"))::get;
  /**
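These fields combine Guava's Suppliers.memoize with Reactor schedulers so a named bounded-elastic scheduler is only created on first use and then reused. A small self-contained sketch of that pattern (class and field names here are illustrative, not from the project):

import com.google.common.base.Suppliers;
import java.util.function.Supplier;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

class LazySchedulerExample {
  // Memoize the factory: the scheduler is built once, on the first get(), then cached.
  private final Supplier<Scheduler> lazyScheduler = Suppliers.memoize(() ->
      Schedulers.newBoundedElastic(4, Integer.MAX_VALUE, "example-pool"))::get;

  void run() {
    // Both calls return the same Scheduler instance.
    Scheduler s1 = lazyScheduler.get();
    Scheduler s2 = lazyScheduler.get();
    assert s1 == s2;
  }
}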
@ -603,15 +606,9 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {

    AtomicBoolean cancelled = new AtomicBoolean();
    Semaphore requests = new Semaphore(0);
-   sink.onDispose(() -> {
-     cancelled.set(true);
-   });
-   sink.onCancel(() -> {
-     cancelled.set(true);
-   });
-   sink.onRequest(delta -> {
-     requests.release((int) Math.min(delta, Integer.MAX_VALUE));
-   });
+   sink.onDispose(() -> cancelled.set(true));
+   sink.onCancel(() -> cancelled.set(true));
+   sink.onRequest(delta -> requests.release((int) Math.min(delta, Integer.MAX_VALUE)));

    try {
      //noinspection BlockingMethodInNonBlockingContext
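The change above only collapses the FluxSink callbacks to single-expression lambdas; the cancellation and backpressure wiring is unchanged. A self-contained sketch of the same wiring inside Flux.create, with a made-up emitter loop standing in for the index's actual search loop:

import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import reactor.core.publisher.Flux;

class SinkWiringExample {
  // Emits integers, honouring downstream requests via a Semaphore and stopping on cancel/dispose.
  Flux<Integer> numbers(int max) {
    return Flux.create(sink -> {
      AtomicBoolean cancelled = new AtomicBoolean();
      Semaphore requests = new Semaphore(0);
      sink.onDispose(() -> cancelled.set(true));
      sink.onCancel(() -> cancelled.set(true));
      sink.onRequest(delta -> requests.release((int) Math.min(delta, Integer.MAX_VALUE)));
      try {
        for (int i = 0; i < max && !cancelled.get(); i++) {
          requests.acquire(); // wait until the subscriber has requested at least one item
          sink.next(i);
        }
        sink.complete();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        sink.error(e);
      }
    });
  }
}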
@ -247,6 +247,7 @@ public class LLLocalMultiLuceneIndex implements LLLuceneIndex {
      distributedPre = Mono.empty();
    }

+   //noinspection DuplicatedCode
    return distributedPre.then(Flux
        .fromArray(luceneIndices)
        .index()
@ -314,6 +315,7 @@ public class LLLocalMultiLuceneIndex implements LLLuceneIndex {
            )
            .then();
    }
+   //noinspection DuplicatedCode
    return distributedPre
        .then(Flux
            .fromArray(luceneIndices)
@ -1,14 +1,13 @@
  package it.cavallium.dbengine.database.disk;

+ import static it.cavallium.dbengine.database.disk.LLLocalDictionary.getRocksIterator;
+
  import it.cavallium.dbengine.database.LLRange;
- import java.util.Optional;
  import org.rocksdb.ColumnFamilyHandle;
  import org.rocksdb.ReadOptions;
  import org.rocksdb.RocksDB;
  import org.rocksdb.RocksMutableObject;
- import org.rocksdb.Slice;
  import reactor.core.publisher.Flux;
- import reactor.util.function.Tuples;

  public abstract class LLLocalReactiveRocksIterator<T> {
@ -40,27 +39,7 @@ public abstract class LLLocalReactiveRocksIterator<T> {
          readOptions.setReadaheadSize(2 * 1024 * 1024);
          readOptions.setFillCache(false);
        }
-       Slice sliceMin;
-       Slice sliceMax;
-       if (range.hasMin()) {
-         sliceMin = new Slice(range.getMin());
-         readOptions.setIterateLowerBound(sliceMin);
-       } else {
-         sliceMin = null;
-       }
-       if (range.hasMax()) {
-         sliceMax = new Slice(range.getMax());
-         readOptions.setIterateUpperBound(sliceMax);
-       } else {
-         sliceMax = null;
-       }
-       var rocksIterator = db.newIterator(cfh, readOptions);
-       if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) {
-         rocksIterator.seek(range.getMin());
-       } else {
-         rocksIterator.seekToFirst();
-       }
-       return Tuples.of(rocksIterator, Optional.ofNullable(sliceMin), Optional.ofNullable(sliceMax));
+       return getRocksIterator(readOptions, range, db, cfh);
      }, (tuple, sink) -> {
        var rocksIterator = tuple.getT1();
        if (rocksIterator.isValid()) {
@ -71,6 +71,7 @@ public class CodecSerializer<A> implements Serializer<A, byte[]> {
    }
  }

+ @SuppressWarnings("unused")
  public int getCodecHeadersBytes() {
    if (microCodecs) {
      return Byte.BYTES;
@ -247,8 +247,11 @@ public class LuceneUtils {
        Long limit) {
    return mappedKeys.reduce(
        new SearchResultKeys<>(Flux.empty(), 0L),
-       (a, b) -> new SearchResultKeys<T>(LuceneUtils
-           .mergeStream(Flux.just(a.getResults(), b.getResults()), sort, offset, limit), a.getTotalHitsCount() + b.getTotalHitsCount())
+       (a, b) -> new SearchResultKeys<>(LuceneUtils.mergeStream(Flux.just(a.getResults(), b.getResults()),
+           sort,
+           offset,
+           limit
+       ), a.getTotalHitsCount() + b.getTotalHitsCount())
    );
  }

@ -258,8 +261,11 @@ public class LuceneUtils {
        Long limit) {
    return mappedKeys.reduce(
        new SearchResult<>(Flux.empty(), 0L),
-       (a, b) -> new SearchResult<T, U>(LuceneUtils
-           .mergeStream(Flux.just(a.getResults(), b.getResults()), sort, offset, limit), a.getTotalHitsCount() + b.getTotalHitsCount())
+       (a, b) -> new SearchResult<>(LuceneUtils.mergeStream(Flux.just(a.getResults(), b.getResults()),
+           sort,
+           offset,
+           limit
+       ), a.getTotalHitsCount() + b.getTotalHitsCount())
    );
  }

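Both reduce lambdas above drop the explicit type arguments in favour of the diamond operator, which is the IDE warning these hunks address. A tiny self-contained illustration of why the explicit arguments are redundant inside a reduce accumulator (the Pair record is made up for the example, not from the project):

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

class DiamondExample {
  // Hypothetical generic holder, only for illustration.
  record Pair<T>(List<T> items, long count) {}

  static <T> Optional<Pair<T>> mergeAll(List<Pair<T>> pairs) {
    return pairs.stream()
        // The compiler infers <T> for the diamond from the accumulator's target type,
        // so writing new Pair<T>(...) explicitly only triggers the warning.
        .reduce((a, b) -> new Pair<>(
            concat(a.items(), b.items()),
            a.count() + b.count()));
  }

  static <T> List<T> concat(List<T> left, List<T> right) {
    var merged = new ArrayList<T>(left);
    merged.addAll(right);
    return List.copyOf(merged);
  }
}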
@ -26,11 +26,10 @@ public class NCharGramAnalyzer extends Analyzer {
    TokenStream tokenStream;
    if (words) {
      tokenizer = new StandardTokenizer();
-     tokenStream = tokenizer;
    } else {
      tokenizer = new KeywordTokenizer();
-     tokenStream = tokenizer;
    }
+   tokenStream = tokenizer;
    tokenStream = LuceneUtils.newCommonFilter(tokenStream, words);
    tokenStream = new NGramTokenFilter(tokenStream, minGram, maxGram, false);

@ -26,11 +26,10 @@ public class NCharGramEdgeAnalyzer extends Analyzer {
    TokenStream tokenStream;
    if (words) {
      tokenizer = new StandardTokenizer();
-     tokenStream = tokenizer;
    } else {
      tokenizer = new KeywordTokenizer();
-     tokenStream = tokenizer;
    }
+   tokenStream = tokenizer;
    tokenStream = LuceneUtils.newCommonFilter(tokenStream, words);
    tokenStream = new EdgeNGramTokenFilter(tokenStream, minGram, maxGram, false);

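In both analyzers the duplicated assignment is hoisted out of the if/else inside Analyzer.createComponents. A hedged sketch of the resulting shape; the tokenizer and filter choices are taken from the diff, everything else (class name, fields, and the omission of LuceneUtils.newCommonFilter to keep it self-contained) is assumed:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;

// Sketch only, not copied from the repository.
public class NGramSketchAnalyzer extends Analyzer {

  private final boolean words;
  private final int minGram;
  private final int maxGram;

  public NGramSketchAnalyzer(boolean words, int minGram, int maxGram) {
    this.words = words;
    this.minGram = minGram;
    this.maxGram = maxGram;
  }

  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer tokenizer;
    TokenStream tokenStream;
    if (words) {
      tokenizer = new StandardTokenizer();  // split on word boundaries
    } else {
      tokenizer = new KeywordTokenizer();   // whole input as a single token
    }
    tokenStream = tokenizer;                 // single assignment, hoisted out of the if/else
    tokenStream = new NGramTokenFilter(tokenStream, minGram, maxGram, false);
    return new TokenStreamComponents(tokenizer, tokenStream);
  }
}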
@ -80,12 +80,12 @@ public class AllowOnlyQueryParsingCollectorStreamSearcher implements LuceneStrea

    return new LuceneSearchInstance() {
      @Override
-     public long getTotalHitsCount() throws IOException {
+     public long getTotalHitsCount() {
        throw new IllegalArgumentException("Total hits consumer not allowed");
      }

      @Override
-     public void getResults(ResultItemConsumer consumer) throws IOException {
+     public void getResults(ResultItemConsumer consumer) {
        throw new IllegalArgumentException("Results consumer not allowed");
      }
    };
@ -24,8 +24,6 @@ public interface LuceneStreamSearcher {
   * @param scoreMode score mode
   * @param minCompetitiveScore minimum score accepted
   * @param keyFieldName the name of the key field
-  * @param resultsConsumer the consumer of results
-  * @param totalHitsConsumer the consumer of total count of results
   * @throws IOException thrown if there is an error
   */
  LuceneSearchInstance search(IndexSearcher indexSearcher,
@ -46,7 +46,7 @@ public class ParallelCollectorStreamSearcher implements LuceneStreamSearcher {
    long totalHitsCount = countStreamSearcher.countLong(indexSearcher, query);

      @Override
-     public long getTotalHitsCount() throws IOException {
+     public long getTotalHitsCount() {
        return totalHitsCount;
      }
