CavalliumDBEngine/src/main/java/it/cavallium/dbengine/database/LLLuceneIndex.java

package it.cavallium.dbengine.database;

import com.google.common.collect.Multimap;
import it.cavallium.dbengine.client.IBackuppable;
import it.cavallium.dbengine.client.query.current.data.NoSort;
import it.cavallium.dbengine.client.query.current.data.Query;
import it.cavallium.dbengine.client.query.current.data.QueryParams;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import it.cavallium.dbengine.lucene.collector.Buckets;
import it.cavallium.dbengine.lucene.searcher.BucketParams;
import java.io.IOException;
import java.time.Duration;
import java.util.List;
import java.util.Map.Entry;
import java.util.stream.Stream;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

public interface LLLuceneIndex extends LLSnapshottable, IBackuppable, SafeCloseable {
    String getLuceneIndexName();

    void addDocument(LLTerm id, LLUpdateDocument doc);

    long addDocuments(boolean atomic, Stream<Entry<LLTerm, LLUpdateDocument>> documents);

    void deleteDocument(LLTerm id);

    void update(LLTerm id, LLIndexRequest request);

    long updateDocuments(Stream<Entry<LLTerm, LLUpdateDocument>> documents);

    void deleteAll();
    /**
     * @param queryParams the limit applies to each Lucene instance separately: with 15 instances, the number of
     *                    elements returned can be at most <code>limit * 15</code>.
     *                    <p>
     *                    The additional query (the query inside <code>queryParams</code>) is combined with the
     *                    moreLikeThis query as "mltQuery AND additionalQuery".
     * @return a stream of one or more result shards
     */
    Stream<LLSearchResultShard> moreLikeThis(@Nullable LLSnapshot snapshot,
            QueryParams queryParams,
            @Nullable String keyFieldName,
            Multimap<String, String> mltDocumentFields);
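    // Usage sketch (illustrative caller code, not part of this interface). It assumes a
    // QueryParams instance built elsewhere; the field name and value are made up. The
    // mltDocumentFields multimap presumably maps the reference document's field names to
    // their values, here built with Guava's ArrayListMultimap:
    //
    //   Multimap<String, String> mltFields = ArrayListMultimap.create();
    //   mltFields.put("title", "the title of the reference document");
    //   try (var shards = index.moreLikeThis(null, queryParams, null, mltFields)) {
    //       long shardCount = shards.count();
    //   }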
    /**
     * @param queryParams the limit applies to each Lucene instance separately: with 15 instances, the number of
     *                    elements returned can be at most <code>limit * 15</code>.
     * @return a stream of one or more result shards
     */
    Stream<LLSearchResultShard> search(@Nullable LLSnapshot snapshot,
            QueryParams queryParams,
            @Nullable String keyFieldName);
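    // Usage sketch (illustrative caller code, not part of this interface). The returned
    // stream is closed with try-with-resources, mirroring what count(...) does below;
    // "uid" is a hypothetical key field name and queryParams is assumed to be built elsewhere:
    //
    //   try (var shards = index.search(snapshot, queryParams, "uid")) {
    //       shards.forEach(shard -> System.out.println(shard.totalHitsCount()));
    //   }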
    /**
     * @return the buckets, with each matching value collected into one of them
     */
    Buckets computeBuckets(@Nullable LLSnapshot snapshot,
            @NotNull List<Query> queries,
            @Nullable Query normalizationQuery,
            BucketParams bucketParams);
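    // Usage sketch (illustrative caller code, not part of this interface). It assumes two
    // Query objects and a BucketParams instance built elsewhere (their construction is not
    // shown here); values matching the queries are collected into the returned buckets:
    //
    //   Buckets buckets = index.computeBuckets(null, List.of(queryA, queryB), null, bucketParams);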
    /**
     * Count the documents matching the given query by aggregating the total hits count of every shard.
     *
     * @param timeout the maximum time to spend counting, or null for no limit
     * @return the aggregated total hits count; it is marked as non-exact if any shard reports a non-exact count
     */
    default TotalHitsCount count(@Nullable LLSnapshot snapshot, Query query, @Nullable Duration timeout) {
        QueryParams params = QueryParams.of(query,
                0,
                0,
                NoSort.of(),
                false,
                timeout == null ? Long.MAX_VALUE : timeout.toMillis()
        );
        try (var stream = this.search(snapshot, params, null)) {
            return stream.parallel().map(LLSearchResultShard::totalHitsCount).reduce(TotalHitsCount.of(0, true),
                    (a, b) -> TotalHitsCount.of(a.value() + b.value(), a.exact() && b.exact())
            );
        }
    }
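    // Usage sketch (illustrative caller code, not part of this interface). The Query is
    // assumed to be built elsewhere (its construction is not shown here) and the
    // five-second budget is arbitrary:
    //
    //   TotalHitsCount hits = index.count(null, query, Duration.ofSeconds(5));
    //   if (!hits.exact()) {
    //       // the reported value is a non-exact count
    //   }
    //   System.out.println("Matching documents: " + hits.value());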
    boolean isLowMemoryMode();

    /**
     * Flush writes to disk.
     * This does not commit; it only syncs the data to disk.
     */
    void flush();

    void waitForMerges();

    /**
     * Wait for the latest pending merge.
     * This disables future merges until shutdown!
     */
    void waitForLastMerges();

    /**
     * Refresh the index searcher.
     */
    void refresh(boolean force);
}