Faster mapList, bugfixes, remove composite buffers

This commit is contained in:
Andrea Cavalli 2023-03-09 00:16:57 +01:00
parent 81f1c5643d
commit af7c3dfd65
9 changed files with 31 additions and 61 deletions

View File

@@ -14,7 +14,7 @@
<dbengine.ci>false</dbengine.ci>
<micrometer.version>1.9.5</micrometer.version>
<lucene.version>9.5.0</lucene.version>
<rocksdb.version>7.9.2</rocksdb.version>
<rocksdb.version>7.10.2</rocksdb.version>
<junit.jupiter.version>5.9.0</junit.jupiter.version>
<data.generator.version>1.0.258</data.generator.version>
</properties>

View File

@@ -1,5 +1,7 @@
package it.cavallium.dbengine.client.query;
import static it.cavallium.dbengine.database.LLUtils.mapList;
import it.cavallium.dbengine.client.query.current.data.BooleanQuery;
import it.cavallium.dbengine.client.query.current.data.BooleanQueryPart;
import it.cavallium.dbengine.client.query.current.data.Occur;
@@ -86,11 +88,7 @@ public class QueryUtils {
}
org.apache.lucene.search.SynonymQuery synonymQuery = (org.apache.lucene.search.SynonymQuery) luceneQuery;
return SynonymQuery.of(field,
synonymQuery
.getTerms()
.stream()
.map(term -> TermAndBoost.of(QueryParser.toQueryTerm(term), 1))
.toList()
mapList(synonymQuery.getTerms(), term -> TermAndBoost.of(QueryParser.toQueryTerm(term), 1))
);
}
}

View File

@@ -1,5 +1,6 @@
package it.cavallium.dbengine.database;
import static it.cavallium.dbengine.database.LLUtils.mapList;
import static it.cavallium.dbengine.lucene.LuceneUtils.getLuceneIndexId;
import static it.cavallium.dbengine.utils.StreamUtils.LUCENE_SCHEDULER;
import static it.cavallium.dbengine.utils.StreamUtils.collect;
@@ -174,11 +175,11 @@ public class LLMultiLuceneIndex implements LLLuceneIndex {
@NotNull List<Query> queries,
@Nullable Query normalizationQuery,
BucketParams bucketParams) {
return mergeShards(luceneIndicesSet.stream().map(luceneIndex -> luceneIndex.computeBuckets(snapshot,
return mergeShards(mapList(luceneIndicesSet, luceneIndex -> luceneIndex.computeBuckets(snapshot,
queries,
normalizationQuery,
bucketParams
)).toList());
)));
}
@Override

View File

@@ -513,30 +513,6 @@ public class LLUtils {
return Buf.copyOf(array);
}
public static Buf compositeBuffer(Buf buffer) {
return buffer;
}
@NotNull
public static Buf compositeBuffer(Buf buffer1, Buf buffer2) {
// todo: create a composite buffer without allocating a new array
var out = Buf.create(buffer1.size() + buffer2.size());
out.addAll(buffer1);
out.addAll(buffer2);
return out;
}
@NotNull
public static Buf compositeBuffer(Buf buffer1, Buf buffer2, Buf buffer3) {
// todo: create a composite buffer without allocating a new array
var out = Buf.create(buffer1.size() + buffer2.size());
out.addAll(buffer1);
out.addAll(buffer2);
out.addAll(buffer3);
return out;
}
public static <T> T resolveDelta(Delta<T> delta, UpdateReturnMode updateReturnMode) {
return switch (updateReturnMode) {
case GET_NEW_VALUE -> delta.current();
@@ -666,9 +642,10 @@ public class LLUtils {
}
}
public static <T, U> List<U> mapList(List<T> input, Function<T, U> mapper) {
//todo: optimize hits mapping
return input.stream().map(mapper).toList();
public static <T, U> List<U> mapList(Collection<T> input, Function<T, U> mapper) {
var result = new ArrayList<U>(input.size());
input.forEach(t -> result.add(mapper.apply(t)));
return result;
}
private static class FakeBytesRefBuilder extends BytesRefBuilder {
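
For reference, the new mapList above swaps the Stream pipeline for a pre-sized ArrayList that is filled eagerly, so each element is mapped exactly once and no intermediate Stream objects are created. A minimal, self-contained sketch of the helper plus an illustrative call (the MapListDemo class name and the sample data are made up for illustration; the method body matches the diff):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.Function;

public final class MapListDemo {

	// Eager mapping: pre-size the output and apply the mapper once per element,
	// instead of input.stream().map(mapper).toList().
	public static <T, U> List<U> mapList(Collection<T> input, Function<T, U> mapper) {
		var result = new ArrayList<U>(input.size());
		input.forEach(t -> result.add(mapper.apply(t)));
		return result;
	}

	public static void main(String[] args) {
		// Illustrative data only: maps each string to its length.
		List<Integer> lengths = mapList(List.of("alpha", "beta"), String::length);
		System.out.println(lengths); // [5, 4]
	}
}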

View File

@@ -3,6 +3,7 @@ package it.cavallium.dbengine.database.disk;
import static it.cavallium.dbengine.database.LLUtils.ALLOW_STATIC_OPTIONS;
import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
import static it.cavallium.dbengine.database.LLUtils.isBoundedRange;
import static it.cavallium.dbengine.database.LLUtils.mapList;
import static it.cavallium.dbengine.database.LLUtils.toStringSafe;
import static it.cavallium.dbengine.database.disk.UpdateAtomicResultMode.DELTA;
import static it.cavallium.dbengine.utils.StreamUtils.LUCENE_SCHEDULER;
@@ -542,7 +543,7 @@ public class LLLocalDictionary implements LLDictionary {
{
var readOptions = generateReadOptionsOrStatic(null);
try {
var inputs = db.multiGetAsList(readOptions, Lists.transform(keyBufsWindow, Buf::asArray));
var inputs = db.multiGetAsList(readOptions, mapList(keyBufsWindow, Buf::asArray));
mappedInputs = new ArrayList<>(inputs.size());
for (int i = 0; i < inputs.size(); i++) {
var val = inputs.get(i);
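
Worth noting: the replaced call above went through Guava's Lists.transform, which returns a lazy view that re-applies the mapping function on every access, while mapList materializes the byte arrays once before they reach multiGetAsList. A small illustrative sketch of the difference (String keys stand in for the Buf values used in the real code; the class and method names are made up):

import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.List;

class LazyVsEagerMapping {
	static void demo(List<String> keys) {
		// Guava view: getBytes() runs again on every get() or iteration over 'lazy'.
		List<byte[]> lazy = Lists.transform(keys, String::getBytes);

		// Eager copy, equivalent to mapList(keys, String::getBytes): getBytes() runs once per key.
		List<byte[]> eager = new ArrayList<>(keys.size());
		keys.forEach(k -> eager.add(k.getBytes()));
	}
}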

View File

@@ -1,6 +1,7 @@
package it.cavallium.dbengine.database.disk;
import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
import static it.cavallium.dbengine.database.LLUtils.mapList;
import static it.cavallium.dbengine.utils.StreamUtils.collect;
import static it.cavallium.dbengine.utils.StreamUtils.iterating;
import static java.lang.Boolean.parseBoolean;
@@ -276,11 +277,7 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
columnFamilyOptions.setBottommostCompressionType(lastLevelOptions.compressionType);
columnFamilyOptions.setBottommostCompressionOptions(lastLevelOptions.compressionOptions);
columnFamilyOptions.setCompressionPerLevel(columnOptions
.levels()
.stream()
.map(v -> v.compression().getType())
.toList());
columnFamilyOptions.setCompressionPerLevel(mapList(columnOptions.levels(), v -> v.compression().getType()));
} else {
columnFamilyOptions.setNumLevels(7);
List<CompressionType> compressionTypes = new ArrayList<>(7);
@@ -932,10 +929,9 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
requireNonNull(databasesDirPath);
requireNonNull(path.getFileName());
List<DbPath> paths = convertPaths(databasesDirPath, path.getFileName(), databaseOptions.volumes())
.stream()
.map(p -> new DbPath(p.path, p.targetSize))
.toList();
List<DbPath> paths = mapList(convertPaths(databasesDirPath, path.getFileName(), databaseOptions.volumes()),
p -> new DbPath(p.path, p.targetSize)
);
options.setDbPaths(paths);
options.setMaxOpenFiles(databaseOptions.maxOpenFiles().orElse(-1));
if (databaseOptions.spinning()) {

View File

@@ -1,5 +1,7 @@
package it.cavallium.dbengine.lucene.collector;
import static it.cavallium.dbengine.database.LLUtils.mapList;
import it.cavallium.dbengine.lucene.IntSmear;
import it.unimi.dsi.fastutil.ints.IntHash;
import java.io.IOException;
@@ -31,10 +33,9 @@ public class FastFacetsCollectorManager implements CollectorManager<FacetsCollec
@Override
public FacetsCollector reduce(Collection<FacetsCollector> collectors) throws IOException {
return FacetsCollector.wrap(facetsCollectorManager.reduce(collectors
.stream()
.map(facetsCollector -> facetsCollector.getLuceneFacetsCollector())
.toList()));
return FacetsCollector.wrap(facetsCollectorManager.reduce(mapList(collectors,
facetsCollector -> facetsCollector.getLuceneFacetsCollector()
)));
}
private static class FastFacetsCollector implements FacetsCollector {

View File

@@ -1,5 +1,7 @@
package it.cavallium.dbengine.lucene.searcher;
import static it.cavallium.dbengine.database.LLUtils.mapList;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import it.cavallium.dbengine.database.LLKeyScore;
import it.cavallium.dbengine.database.LLUtils;
@@ -39,16 +41,9 @@ public class CountMultiSearcher implements MultiSearcher {
"Scored queries are not supported by SimpleUnsortedUnscoredLuceneMultiSearcher");
}
var results = indexSearchers
.llShards()
.stream()
.map(searcher -> this.collect(searcher,
queryParams,
keyFieldName,
transformer,
f -> filterer.apply(f).limit(0)
))
.toList();
var results = mapList(indexSearchers.llShards(),
searcher -> this.collect(searcher, queryParams, keyFieldName, transformer, f -> filterer.apply(f).limit(0))
);
boolean exactTotalHitsCount = true;
long totalHitsCountValue = 0;
for (LuceneSearchResult result : results) {

View File

@@ -1,5 +1,6 @@
package it.cavallium.dbengine.lucene.searcher;
import static it.cavallium.dbengine.database.LLUtils.mapList;
import static it.cavallium.dbengine.utils.StreamUtils.toList;
import static java.util.Objects.requireNonNull;
@@ -64,7 +65,7 @@ public class StandardSearcher implements MultiSearcher {
sharedManager = TopScoreDocCollector.createSharedManager(queryParams.limitInt(), null, totalHitsThreshold);
}
;
var collectors = indexSearchers.stream().map(shard -> {
var collectors = mapList(indexSearchers, shard -> {
try {
TopDocsCollector<?> collector;
collector = sharedManager.newCollector();
@@ -77,7 +78,7 @@ public class StandardSearcher implements MultiSearcher {
} catch (IOException e) {
throw new DBException(e);
}
}).toList();
});
try {
if (collectors.size() <= 1) {