Compare commits
114 Commits
SHA1 (author and date columns were not captured in this view):

a14c823941, 12669e4eb5, 6af06ca90e, 86377a4e65, 591963f630, 6a68c8452b, c859b8238f, e1e6065036,
ceff8f5022, 06e754d437, 8dbfe7a488, ce356e3c33, 52863bf498, fe6ec9afe3, 8bd0e7cf12, 852a5e1d51,
bc9d274c58, e8342b783a, 18191ef2fd, 6564db6c4f, fe9370294b, 632dd41e9e, 4695f29a76, 5888bc96b4,
ff3cbc11b6, ae8e8441ac, 2792add2b9, 89a0fa9408, 251240996e, 024db43de8, a5502af24a, fa865a654d,
361e2f04f6, fe0256dbf9, 495cbdea64, 117df4cb30, d0c79a57d9, 50ce5984ac, b3631c5513, 0ab75623ba,
7cffc853b3, 6fd7d249de, a6e73b83d8, 8b747db386, 3df0dcf36a, 0cb1ad55a8, 065281a4e4, 85bfdc33e9,
3b35c18517, e4ec49e9aa, 0caffbfa79, ce5516bd28, 7692b21e3d, 40cd756f35, a4c322e96a, fdb504d9cd,
e83270906e, 0e2b3677c4, faa15d64ce, 7f7f13d7f3, e9c765f7da, dec229ac78, e66bc6ce53, 3a6883c274,
2a817cbc58, bee2fe1bf5, 0062a36ed5, 3e90ba3704, 7e7e1c410b, dc03d25fdc, 26961125c0, df946146a1,
93fc28101a, 2f5c8b618f, 8499dcf89c, 0c3afa5839, 14e00a1857, 7c67676a25, 2b6b447e0c, 2810571d7f,
249403016a, 7ac452d52a, 977dd472c1, cce49a50ee, 97df3bf725, 09bdfed0aa, 0a325c6ef6, 161876c1ee,
f54388efa8, 3180b751ef, 003799b468, 8ac067b639, 09113207ed, dfa1fc3ecc, ed981581ec, a83f1ff1a6,
468886d154, 1aeb0c99d3, af7c3dfd65, 81f1c5643d, 404092106b, 6037a906dc, a21c1f3cf4, ea1b464ddf,
011c8f839c, 024c4ee226, 0e21c72e0a, daa7047614, e0d929dbaa, 1b83c95856, 3f88ff8f83, 59f9f01268,
cd15f8d23d, a9857f7553
pom.xml (333 changed lines)
@@ -5,17 +5,17 @@
  <groupId>it.cavallium</groupId>
  <artifactId>dbengine</artifactId>
  <version>3.0.${revision}</version>
  <version>4.3.${revision}</version>
  <packaging>jar</packaging>
  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <revision>0-SNAPSHOT</revision>
    <dbengine.ci>false</dbengine.ci>
    <micrometer.version>1.9.5</micrometer.version>
    <lucene.version>9.4.2</lucene.version>
    <micrometer.version>1.10.4</micrometer.version>
    <rocksdb.version>9.7.3</rocksdb.version>
    <junit.jupiter.version>5.9.0</junit.jupiter.version>
    <data.generator.version>1.0.244</data.generator.version>
    <data.generator.version>1.1.18</data.generator.version>
  </properties>
  <repositories>
    <repository>
@@ -39,17 +39,6 @@
        <enabled>true</enabled>
      </snapshots>
    </repository>
    <repository>
      <id>netty5-snapshots</id>
      <name>Netty 5 snapshots</name>
      <url>https://oss.sonatype.org/content/repositories/snapshots</url>
      <releases>
        <enabled>true</enabled>
      </releases>
      <snapshots>
        <enabled>true</enabled>
      </snapshots>
    </repository>
    <repository>
      <id>apache.snapshots</id>
      <name>Apache Snapshot Repository</name>
@@ -58,7 +47,12 @@
        <enabled>false</enabled>
      </releases>
    </repository>
  </repositories>
    <repository>
      <id>maven_central</id>
      <name>Maven Central</name>
      <url>https://repo.maven.apache.org/maven2/</url>
    </repository>
  </repositories>
  <pluginRepositories>
    <pluginRepository>
      <id>mchv-release</id>
@@ -76,11 +70,23 @@
      <id>mchv-release-distribution</id>
      <name>MCHV Release Apache Maven Packages Distribution</name>
      <url>https://mvn.mchv.eu/repository/mchv</url>
      <releases>
        <enabled>true</enabled>
      </releases>
      <snapshots>
        <enabled>false</enabled>
      </snapshots>
    </repository>
    <snapshotRepository>
      <id>mchv-snapshot-distribution</id>
      <name>MCHV Snapshot Apache Maven Packages Distribution</name>
      <url>https://mvn.mchv.eu/repository/mchv-snapshot</url>
      <releases>
        <enabled>false</enabled>
      </releases>
      <snapshots>
        <enabled>true</enabled>
      </snapshots>
    </snapshotRepository>
  </distributionManagement>
  <scm>
@@ -96,71 +102,14 @@
        <artifactId>hamcrest-library</artifactId>
        <version>2.2</version>
      </dependency>
      <dependency>
        <groupId>io.projectreactor</groupId>
        <artifactId>reactor-bom</artifactId>
        <version>2022.0.2</version>
        <type>pom</type>
        <scope>import</scope>
      </dependency>
      <dependency>
        <groupId>io.netty</groupId>
        <artifactId>netty-bom</artifactId>
        <version>4.1.86.Final</version>
        <type>pom</type>
        <scope>import</scope>
      </dependency>
    </dependencies>
  </dependencyManagement>
  <dependencies>
    <dependency>
      <groupId>io.projectreactor</groupId>
      <artifactId>reactor-tools</artifactId>
      <classifier>original</classifier>
      <scope>runtime</scope>
      <version>3.5.1</version>
    </dependency>
    <dependency>
      <groupId>com.google.guava</groupId>
      <artifactId>guava</artifactId>
      <version>31.1-jre</version>
      <version>33.0.0-jre</version>
    </dependency>
    <dependency>
      <groupId>io.netty</groupId>
      <artifactId>netty5-buffer</artifactId>
      <version>5.0.0.Alpha5</version>
      <exclusions>
        <exclusion>
          <groupId>io.netty</groupId>
          <artifactId>netty-common</artifactId>
        </exclusion>
        <exclusion>
          <groupId>io.netty</groupId>
          <artifactId>netty-buffer</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>io.netty</groupId>
      <artifactId>netty-buffer</artifactId>
    </dependency>
    <dependency>
      <groupId>io.netty</groupId>
      <artifactId>netty-transport</artifactId>
    </dependency>
    <dependency>
      <groupId>io.netty</groupId>
      <artifactId>netty-codec</artifactId>
    </dependency>
    <dependency>
      <groupId>io.netty</groupId>
      <artifactId>netty-handler</artifactId>
    </dependency>
    <dependency>
      <groupId>io.projectreactor.netty.incubator</groupId>
      <artifactId>reactor-netty-incubator-quic</artifactId>
      <version>0.1.2</version>
    </dependency>
    <dependency>
      <groupId>org.yaml</groupId>
      <artifactId>snakeyaml</artifactId>
@@ -226,7 +175,7 @@
    <dependency>
      <groupId>org.apache.logging.log4j</groupId>
      <artifactId>log4j-slf4j2-impl</artifactId>
      <version>2.19.0</version>
      <version>2.23.1</version>
      <scope>test</scope>
      <exclusions>
        <exclusion>
@@ -250,84 +199,28 @@
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
      <version>2.0.3</version>
      <version>2.0.12</version>
    </dependency>
    <dependency>
      <groupId>org.apache.logging.log4j</groupId>
      <artifactId>log4j-api</artifactId>
      <version>2.19.0</version>
      <version>2.23.1</version>
    </dependency>
    <dependency>
      <groupId>com.lmax</groupId>
      <artifactId>disruptor</artifactId>
      <version>3.4.4</version>
      <version>4.0.0</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.rocksdb</groupId>
      <artifactId>rocksdbjni</artifactId>
      <version>7.9.2</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-core</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-join</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-analysis-common</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-analysis-icu</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-codecs</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-backward-codecs</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-queries</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-queryparser</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-misc</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-facet</artifactId>
      <version>${lucene.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-test-framework</artifactId>
      <version>${lucene.version}</version>
      <scope>test</scope>
      <version>${rocksdb.version}</version>
    </dependency>
    <dependency>
      <groupId>org.jetbrains</groupId>
      <artifactId>annotations</artifactId>
      <version>23.0.0</version>
      <version>24.0.1</version>
      <scope>compile</scope>
    </dependency>
    <dependency>
@@ -356,59 +249,14 @@
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>io.projectreactor.netty</groupId>
      <artifactId>reactor-netty</artifactId>
      <exclusions>
        <exclusion>
          <groupId>io.netty.incubator</groupId>
          <artifactId>netty-incubator-codec-native-quic</artifactId>
        </exclusion>
        <exclusion>
          <groupId>io.netty</groupId>
          <artifactId>reactor-netty-core</artifactId>
        </exclusion>
        <exclusion>
          <groupId>io.netty</groupId>
          <artifactId>netty-common</artifactId>
        </exclusion>
        <exclusion>
          <groupId>io.netty</groupId>
          <artifactId>netty-codec</artifactId>
        </exclusion>
        <exclusion>
          <groupId>io.netty</groupId>
          <artifactId>netty-handler</artifactId>
        </exclusion>
        <exclusion>
          <groupId>io.netty</groupId>
          <artifactId>netty-transport</artifactId>
        </exclusion>
        <exclusion>
          <groupId>io.netty</groupId>
          <artifactId>netty-buffer</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>org.bouncycastle</groupId>
      <artifactId>bcpkix-jdk15on</artifactId>
      <version>1.70</version>
    </dependency>
    <dependency>
      <groupId>org.novasearch</groupId>
      <artifactId>lucene-relevance</artifactId>
      <version>9.0.1.0.0-SNAPSHOT</version>
      <exclusions>
        <exclusion>
          <groupId>org.apache.lucene</groupId>
          <artifactId>lucene-core</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>it.cavallium</groupId>
      <artifactId>data-generator-runtime</artifactId>
      <artifactId>datagen</artifactId>
      <version>${data.generator.version}</version>
      <exclusions>
        <exclusion>
@@ -448,7 +296,7 @@
    <dependency>
      <groupId>io.soabase.record-builder</groupId>
      <artifactId>record-builder-core</artifactId>
      <version>34</version>
      <version>36</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
@@ -457,22 +305,9 @@
      <version>3.12.0</version>
      <scope>compile</scope>
    </dependency>
    <dependency>
      <groupId>io.projectreactor</groupId>
      <artifactId>reactor-test</artifactId>
      <scope>test</scope>
    </dependency>
  </dependencies>
  <build>
    <testSourceDirectory>src/test/java</testSourceDirectory>
    <resources>
      <resource>
        <directory>../src/main/libs</directory>
        <excludes>
          <exclude>**/*.jar</exclude>
        </excludes>
      </resource>
    </resources>
    <extensions>
      <extension>
        <groupId>kr.motd.maven</groupId>
@@ -489,26 +324,26 @@
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.8.1</version>
        <version>3.11.0</version>
        <configuration>
          <release>17</release>
          <annotationProcessorPaths>
            <annotationProcessorPath>
              <groupId>io.soabase.record-builder</groupId>
              <artifactId>record-builder-processor</artifactId>
              <version>33</version>
            </annotationProcessorPath>
          </annotationProcessorPaths>
          <annotationProcessors>
            <annotationProcessor>io.soabase.recordbuilder.processor.RecordBuilderProcessor</annotationProcessor>
          </annotationProcessors>
          <source>17</source>
          <target>17</target>
        </configuration>
        <configuration>
          <release>21</release>
          <annotationProcessorPaths>
            <annotationProcessorPath>
              <groupId>io.soabase.record-builder</groupId>
              <artifactId>record-builder-processor</artifactId>
              <version>33</version>
            </annotationProcessorPath>
          </annotationProcessorPaths>
          <annotationProcessors>
            <annotationProcessor>io.soabase.recordbuilder.processor.RecordBuilderProcessor</annotationProcessor>
          </annotationProcessors>
          <source>21</source>
          <target>21</target>
        </configuration>
      </plugin>
      <plugin>
        <groupId>it.cavallium</groupId>
        <artifactId>data-generator-plugin</artifactId>
        <artifactId>datagen-plugin</artifactId>
        <version>${data.generator.version}</version>
        <executions>
          <execution>
@@ -640,37 +475,83 @@
  </build>
  <profiles>
    <profile>
      <id>reactor-agent</id>
      <id>repair</id>
      <activation>
        <activeByDefault>false</activeByDefault>
        <property>
          <name>reactor.agent.enable</name>
          <value>true</value>
          <name>dbengine.build</name>
          <value>repair</value>
        </property>
      </activation>
      <build>
        <plugins>
          <plugin>
            <groupId>net.bytebuddy</groupId>
            <artifactId>byte-buddy-maven-plugin</artifactId>
            <version>1.12.22</version>
            <groupId>org.codehaus.mojo</groupId>
            <artifactId>build-helper-maven-plugin</artifactId>
            <version>3.4.0</version>
            <executions>
              <execution>
                <id>add-source</id>
                <phase>generate-sources</phase>
                <goals>
                  <goal>transform</goal>
                  <goal>add-source</goal>
                </goals>
                <configuration>
                  <sources>
                    <source>src/repair/java</source>
                  </sources>
                </configuration>
              </execution>
            </executions>
          </plugin>
          <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-assembly-plugin</artifactId>
            <version>3.6.0</version>
            <configuration>
              <descriptorRefs>
                <descriptorRef>jar-with-dependencies</descriptorRef>
              </descriptorRefs>
              <appendAssemblyId>false</appendAssemblyId>
              <finalName>dbengine-repair</finalName>
              <archive>
                <manifest>
                  <mainClass>it.cavallium.dbengine.repair.Repair</mainClass>
                </manifest>
                <manifestEntries>
                  <Multi-Release>true</Multi-Release>
                </manifestEntries>
              </archive>
            </configuration>
            <executions>
              <execution>
                <id>make-assembly</id>
                <phase>package</phase>
                <goals>
                  <goal>single</goal>
                </goals>
              </execution>
            </executions>
            <configuration>
              <transformations>
                <transformation>
                  <plugin>reactor.tools.agent.ReactorDebugByteBuddyPlugin</plugin>
                </transformation>
              </transformations>
            </configuration>
          </plugin>
        </plugins>
        <resources>
          <resource>
            <directory>src/repair/resources</directory>
          </resource>
        </resources>
      </build>
      <dependencies>
        <dependency>
          <groupId>org.apache.logging.log4j</groupId>
          <artifactId>log4j-slf4j2-impl</artifactId>
          <version>2.20.0</version>
        </dependency>
        <dependency>
          <groupId>com.lmax</groupId>
          <artifactId>disruptor</artifactId>
          <version>3.4.4</version>
        </dependency>
      </dependencies>
    </profile>
  </profiles>
</project>
CodecsExample.java

@@ -80,7 +80,7 @@ public class CodecsExample {
        .then(),
        SpeedExample.numRepeats,
        tuple -> tuple.getT1().close()
    )).transform(LLUtils::handleDiscard).subscribeOn(Schedulers.parallel()).blockOptional();
    )).subscribeOn(Schedulers.parallel()).blockOptional();
  }

  private static void testConversion() {
@@ -88,7 +88,6 @@ public class CodecsExample {
        .then()
        .then(readNew())
        .subscribeOn(Schedulers.parallel())
        .transform(LLUtils::handleDiscard)
        .blockOptional();
  }
IndicizationExample.java (deleted)

@@ -1,193 +0,0 @@
package it.cavallium.dbengine.client;

import it.cavallium.dbengine.client.query.QueryUtils;
import it.cavallium.dbengine.client.query.current.data.QueryParams;
import it.cavallium.dbengine.client.query.current.data.ScoreMode;
import it.cavallium.dbengine.client.query.current.data.ScoreSort;
import it.cavallium.dbengine.database.LLDocument;
import it.cavallium.dbengine.database.LLItem;
import it.cavallium.dbengine.database.LLLuceneIndex;
import it.cavallium.dbengine.database.LLSignal;
import it.cavallium.dbengine.database.LLTerm;
import it.cavallium.dbengine.database.disk.LLLocalDatabaseConnection;
import it.cavallium.dbengine.lucene.LuceneUtils;
import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
import it.cavallium.dbengine.lucene.analyzer.TextFieldsSimilarity;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.util.Comparator;
import java.util.StringJoiner;
import java.util.concurrent.CompletionException;
import org.apache.lucene.document.Field.Store;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

public class IndicizationExample {

    public static void main(String[] args) {
        tempIndex(true)
            .flatMap(index -> index
                .addDocument(new LLTerm("id", "123"),
                    new LLDocument(new LLItem[]{
                        LLItem.newStringField("id", "123", Store.YES),
                        LLItem.newTextField("name", "Mario", Store.NO),
                        LLItem.newStringField("surname", "Rossi", Store.NO)
                    })
                )
                .then(index.refresh())
                .then(index.search(null,
                    QueryParams
                        .builder()
                        .query(QueryUtils.exactSearch(TextFieldsAnalyzer.N4GramPartialString, "name", "Mario"))
                        .limit(1)
                        .sort(ScoreSort.of())
                        .scoreMode(ScoreMode.of(false, true))
                        .build(),
                    "id"
                ))
                .flatMap(results -> Mono.from(results
                    .results()
                    .flatMap(r -> r)
                    .doOnNext(signal -> {
                        if (signal.isValue()) {
                            System.out.println("Value: " + signal.getValue());
                        }
                    })
                    .filter(LLSignal::isTotalHitsCount))
                )
                .doOnNext(count -> System.out.println("Total hits: " + count))
                .doOnTerminate(() -> System.out.println("Completed"))
                .then(index.close())
            )
            .subscribeOn(Schedulers.parallel())
            .transform(LLUtils::handleDiscard)
            .block();
        tempIndex(true)
            .flatMap(index ->
                index
                    .addDocument(new LLTerm("id", "126"),
                        new LLDocument(new LLItem[]{
                            LLItem.newStringField("id", "126", Store.YES),
                            LLItem.newTextField("name", "Marioxq", Store.NO),
                            LLItem.newStringField("surname", "Rossi", Store.NO)
                        })
                    )
                    .then(index
                        .addDocument(new LLTerm("id", "123"),
                            new LLDocument(new LLItem[]{
                                LLItem.newStringField("id", "123", Store.YES),
                                LLItem.newTextField("name", "Mario", Store.NO),
                                LLItem.newStringField("surname", "Rossi", Store.NO)
                            })
                        ))
                    .then(index
                        .addDocument(new LLTerm("id", "124"),
                            new LLDocument(new LLItem[]{
                                LLItem.newStringField("id", "124", Store.YES),
                                LLItem.newTextField("name", "Mariossi", Store.NO),
                                LLItem.newStringField("surname", "Rossi", Store.NO)
                            })
                        ))
                    .then(index
                        .addDocument(new LLTerm("id", "125"),
                            new LLDocument(new LLItem[]{
                                LLItem.newStringField("id", "125", Store.YES),
                                LLItem.newTextField("name", "Mario marios", Store.NO),
                                LLItem.newStringField("surname", "Rossi", Store.NO)
                            })
                        ))
                    .then(index
                        .addDocument(new LLTerm("id", "128"),
                            new LLDocument(new LLItem[]{
                                LLItem.newStringField("id", "128", Store.YES),
                                LLItem.newTextField("name", "Marion", Store.NO),
                                LLItem.newStringField("surname", "Rossi", Store.NO)
                            })
                        ))
                    .then(index
                        .addDocument(new LLTerm("id", "127"),
                            new LLDocument(new LLItem[]{
                                LLItem.newStringField("id", "127", Store.YES),
                                LLItem.newTextField("name", "Mariotto", Store.NO),
                                LLItem.newStringField("surname", "Rossi", Store.NO)
                            })
                        ))
                    .then(index.refresh())
                    .then(index.search(null,
                        QueryParams
                            .builder()
                            .query(QueryUtils.exactSearch(TextFieldsAnalyzer.N4GramPartialString, "name", "Mario"))
                            .limit(10)
                            .sort(MultiSort.topScore().getQuerySort())
                            .scoreMode(ScoreMode.of(false, true))
                            .build(),
                        "id"
                    ))
                    .flatMap(results -> LuceneUtils.mergeSignalStreamRaw(results
                            .results(), MultiSort.topScoreRaw(), 10L)
                        .doOnNext(value -> System.out.println("Value: " + value))
                        .then(Mono.from(results
                            .results()
                            .flatMap(part -> part)
                            .filter(LLSignal::isTotalHitsCount)
                            .map(LLSignal::getTotalHitsCount)))
                    )
                    .doOnNext(count -> System.out.println("Total hits: " + count))
                    .doOnTerminate(() -> System.out.println("Completed"))
                    .then(index.close())
            )
            .subscribeOn(Schedulers.parallel())
            .transform(LLUtils::handleDiscard)
            .block();
    }

    public static final class CurrentCustomType {

        private final int number;

        public CurrentCustomType(int number) {
            this.number = number;
        }

        public int getNumber() {
            return number;
        }

        @Override
        public String toString() {
            return new StringJoiner(", ", CurrentCustomType.class.getSimpleName() + "[", "]")
                .add("number=" + number)
                .toString();
        }
    }

    private static <U> Mono<? extends LLLuceneIndex> tempIndex(boolean delete) {
        var wrkspcPath = Path.of("/tmp/tempdb/");
        return Mono
            .fromCallable(() -> {
                if (delete && Files.exists(wrkspcPath)) {
                    Files.walk(wrkspcPath).sorted(Comparator.reverseOrder()).forEach(file -> {
                        try {
                            Files.delete(file);
                        } catch (IOException ex) {
                            throw new CompletionException(ex);
                        }
                    });
                }
                Files.createDirectories(wrkspcPath);
                return null;
            })
            .subscribeOn(Schedulers.boundedElastic())
            .then(new LLLocalDatabaseConnection(wrkspcPath, true).connect())
            .flatMap(conn -> conn.getLuceneIndex("testindices",
                10,
                TextFieldsAnalyzer.N4GramPartialString,
                TextFieldsSimilarity.NGramBM25Plus,
                Duration.ofSeconds(5),
                Duration.ofSeconds(5),
                false
            ));
    }
}
SpeedExample.java

@@ -56,7 +56,6 @@ public class SpeedExample {
        .then(test3LevelPut())
        .then(test4LevelPut())
        .subscribeOn(Schedulers.parallel())
        .transform(LLUtils::handleDiscard)
        .blockOptional();
  }

@@ -6,14 +6,14 @@ superTypesData:
  Query: [
    BoxedQuery, TermQuery, IntTermQuery, IntNDTermQuery, LongTermQuery, LongNDTermQuery, FloatTermQuery,
    FloatNDTermQuery, DoubleTermQuery, DoubleNDTermQuery,
    PhraseQuery, WildcardQuery, SynonymQuery, FuzzyQuery, MatchAllDocsQuery, MatchNoDocsQuery,
    PhraseQuery, SolrTextQuery, WildcardQuery, SynonymQuery, FuzzyQuery, MatchAllDocsQuery, MatchNoDocsQuery,
    BooleanQuery, SortedNumericDocValuesFieldSlowRangeQuery, SortedDocFieldExistsQuery,
    ConstantScoreQuery, BoostQuery, IntPointRangeQuery, IntNDPointRangeQuery, LongPointRangeQuery,
    FloatPointRangeQuery, DoublePointRangeQuery, LongNDPointRangeQuery, FloatNDPointRangeQuery,
    DoubleNDPointRangeQuery, IntPointExactQuery, IntNDPointExactQuery, LongPointExactQuery, FloatPointExactQuery,
    FloatPointExactQuery, DoublePointExactQuery, LongNDPointExactQuery, FloatNDPointExactQuery,
    DoubleNDPointExactQuery, IntPointSetQuery, LongPointSetQuery, FloatPointSetQuery, DoublePointSetQuery,
    StandardQuery, FieldExistsQuery
    StandardQuery, FieldExistsQuery, FilterConfigQuery, SolrFunctionQuery, MoreLikeThisQuery
  ]
  Occur: [OccurMust, OccurMustNot, OccurShould, OccurFilter]
  Sort: [NoSort, NumericSort, ScoreSort, DocSort, RandomSort]
@@ -129,6 +129,30 @@ baseTypesData:
  FieldExistsQuery:
    data:
      field: String
  # Query used to configure the Solr cache.
  FilterConfigQuery:
    data:
      query: Query
      cached: boolean
  # Query that represents a Solr Function Query (https://solr.apache.org/guide/solr/latest/query-guide/function-queries.html)
  SolrFunctionQuery:
    data:
      query: String
      boost: double
  MoreLikeThisQuery:
    data:
      id: String
      fieldList: String[]
      minTf: -int
      minDf: -int
      maxDf: -int
      maxDfPct: -int
      minWl: -int
      maxWl: -int
      maxQt: -int
      maxNtp: -int
      boost: -boolean
      qf: String[]
  # Query that matches a phrase.
  PhraseQuery:
    data:
@@ -136,6 +160,14 @@ baseTypesData:
      # counted as characters from the beginning of the phrase.
      phrase: TermPosition[]
      slop: int
  # Query that matches a phrase. (Solr)
  SolrTextQuery:
    data:
      # Field name
      field: String
      # Text query
      phrase: String
      slop: int
  # Advanced query that matches text allowing asterisks in the query
  WildcardQuery:
    data:
@@ -350,7 +382,7 @@ baseTypesData:
  DocSort:
    data: { }
  TotalHitsCount:
    stringRepresenter: "it.cavallium.dbengine.lucene.LuceneUtils.toHumanReadableString"
    stringRepresenter: "it.cavallium.dbengine.client.query.QueryUtil.toHumanReadableString"
    data:
      value: long
      exact: boolean
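A note on the notation above: a type prefixed with "-" marks an optional field (the next file's header comment spells this out), and in the generated Java such fields surface as the nullable wrapper types that appear later in this compare (it.cavallium.datagen.nativedata.Nullableint and friends). As a rough illustration, constructing the new SolrTextQuery record could look like the sketch below; the generated class's package and of(...) factory are assumptions based on the other generated records visible in this diff (ScoreSort.of(), TotalHitsCount.of(...)), not something the diff confirms.

```java
// Hypothetical usage of the record generated from the SolrTextQuery
// definition above; the package and the of(...) factory are assumed.
import it.cavallium.dbengine.client.query.current.data.SolrTextQuery;

class SolrTextQueryExample {

	// field: String, phrase: String, slop: int — as declared in the YAML
	static SolrTextQuery nameQuery() {
		return SolrTextQuery.of("name", "Mario", 0);
	}
}
```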
@@ -1,10 +1,6 @@
# A type that starts with "-" is an optional type, otherwise it can't be null
currentVersion: "0.0.0"
interfacesData:
  StandardFSDirectoryOptions:
    extendInterfaces: [PathDirectoryOptions]
  PathDirectoryOptions:
    extendInterfaces: [LuceneDirectoryOptions]
  ClientBoundRequest:
    extendInterfaces: [RPCEvent]
  ClientBoundResponse:
@@ -13,20 +9,6 @@ interfacesData:
    extendInterfaces: [RPCEvent]
  ServerBoundResponse:
    extendInterfaces: [RPCEvent]
  ColumnOptions:
    commonGetters:
      levels: DatabaseLevel[]
      memtableMemoryBudgetBytes: -long
      cacheIndexAndFilterBlocks: -boolean
      partitionFilters: -boolean
      filter: -Filter
      blockSize: -int
      persistentCacheId: -String
      writeBufferSize: -long
      blobFiles: boolean
      minBlobSize: -long
      blobFileSize: -long
      blobCompressionType: -Compression
superTypesData:
  RPCEvent: [
    Empty,
@@ -35,7 +17,6 @@ superTypesData:
    SingletonUpdateOldData,
    GeneratedEntityId,
    GetDatabase,
    GetLuceneIndex,
    Disconnect,
    GetSingleton,
    SingletonGet,
@@ -43,19 +24,16 @@ superTypesData:
    SingletonUpdateInit,
    SingletonUpdateEnd,
    RPCCrash,
    CloseDatabase,
    CloseLuceneIndex
    CloseDatabase
  ]
  ServerBoundRequest: [
    GetDatabase,
    GetLuceneIndex,
    Disconnect,
    GetSingleton,
    SingletonGet,
    SingletonSet,
    SingletonUpdateInit,
    CloseDatabase,
    CloseLuceneIndex
    CloseDatabase
  ]
  ClientBoundResponse: [
    Empty,
@@ -71,36 +49,10 @@ superTypesData:
    Empty,
    SingletonUpdateEnd
  ]
  LuceneDirectoryOptions: [
    ByteBuffersDirectory,
    MemoryMappedFSDirectory,
    NIOFSDirectory,
    RAFFSDirectory,
    DirectIOFSDirectory,
    RocksDBStandaloneDirectory,
    RocksDBSharedDirectory,
    NRTCachingDirectory
  ]
  StandardFSDirectoryOptions: [
    MemoryMappedFSDirectory,
    NIOFSDirectory,
    RAFFSDirectory
  ]
  PathDirectoryOptions: [
    MemoryMappedFSDirectory,
    NIOFSDirectory,
    RAFFSDirectory,
    RocksDBStandaloneDirectory,
    StandardFSDirectoryOptions
  ]
  Filter: [
    NoFilter,
    BloomFilter
  ]
  ColumnOptions: [
    DefaultColumnOptions,
    NamedColumnOptions
  ]
customTypesData:
  Path:
    javaClass: java.nio.file.Path
@@ -108,12 +60,6 @@ customTypesData:
  Compression:
    javaClass: it.cavallium.dbengine.client.Compression
    serializer: it.cavallium.dbengine.database.remote.CompressionSerializer
  TextFieldsAnalyzer:
    javaClass: it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer
    serializer: it.cavallium.dbengine.database.remote.TextFieldsAnalyzerSerializer
  TextFieldsSimilarity:
    javaClass: it.cavallium.dbengine.lucene.analyzer.TextFieldsSimilarity
    serializer: it.cavallium.dbengine.database.remote.TextFieldsSimilaritySerializer
  Duration:
    javaClass: java.time.Duration
    serializer: it.cavallium.dbengine.database.remote.DurationSerializer
@@ -123,9 +69,6 @@ customTypesData:
  ColumnFamilyHandle:
    javaClass: org.rocksdb.ColumnFamilyHandle
    serializer: it.cavallium.dbengine.database.remote.ColumnFamilyHandleSerializer
  LuceneHacks:
    javaClass: it.cavallium.dbengine.lucene.LuceneHacks
    serializer: it.cavallium.dbengine.database.remote.LuceneHacksSerializer
  UpdateReturnMode:
    javaClass: it.cavallium.dbengine.database.UpdateReturnMode
    serializer: it.cavallium.dbengine.database.remote.UpdateReturnModeSerializer
@@ -134,17 +77,11 @@ customTypesData:
    serializer: it.cavallium.dbengine.database.remote.LLSnapshotSerializer

  Bytes:
    javaClass: it.unimi.dsi.fastutil.bytes.ByteList
    serializer: it.cavallium.dbengine.database.remote.ByteListSerializer
    javaClass: it.cavallium.buffer.Buf
    serializer: it.cavallium.dbengine.database.remote.BufSerializer
  StringMap:
    javaClass: java.util.Map<java.lang.String, java.lang.String>
    serializer: it.cavallium.dbengine.database.remote.StringMapSerializer
  String2FieldAnalyzerMap:
    javaClass: java.util.Map<java.lang.String, it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer>
    serializer: it.cavallium.dbengine.database.remote.String2FieldAnalyzerMapSerializer
  String2FieldSimilarityMap:
    javaClass: java.util.Map<java.lang.String, it.cavallium.dbengine.lucene.analyzer.TextFieldsSimilarity>
    serializer: it.cavallium.dbengine.database.remote.String2FieldSimilarityMapSerializer
  String2ColumnFamilyHandleMap:
    javaClass: java.util.Map<java.lang.String, org.rocksdb.ColumnFamilyHandle>
    serializer: it.cavallium.dbengine.database.remote.String2ColumnFamilyHandleMapSerializer
@@ -160,13 +97,6 @@ baseTypesData:
      name: String
      columns: Column[]
      databaseOptions: DatabaseOptions
  GetLuceneIndex:
    data:
      clusterName: String
      structure: LuceneIndexStructure
      indicizerAnalyzers: IndicizerAnalyzers
      indicizerSimilarities: IndicizerSimilarities
      luceneOptions: LuceneOptions
  Disconnect: { data: { } }
  GetSingleton:
    data:
@@ -193,9 +123,6 @@ baseTypesData:
  CloseDatabase:
    data:
      databaseId: long
  CloseLuceneIndex:
    data:
      luceneIndexId: long

# Client-bound responses

@@ -219,17 +146,6 @@ baseTypesData:

# Data

  LuceneIndexStructure:
    data:
      totalShards: int
      activeShards: int[]
  SingleIndex:
    data:
      name: String
  ClusteredShardIndex:
    data:
      clusterName: String
      shard: int
  BinaryOptional:
    data:
      val: -Binary
@@ -248,20 +164,19 @@ baseTypesData:
      lowMemory: boolean
      useDirectIO: boolean
      allowMemoryMapping: boolean
      allowNettyDirect: boolean
      optimistic: boolean
      maxOpenFiles: -int
      blockCache: -long
      compressedBlockCache: -long
      persistentCaches: PersistentCache[]
      writeBufferManager: -long
      spinning: boolean
      defaultColumnOptions: DefaultColumnOptions
      defaultColumnOptions: ColumnOptions
      columnOptions: NamedColumnOptions[]
      logPath: -String
      walPath: -String
  # Remember to update ColumnOptions common getters
  DefaultColumnOptions:
      openAsSecondary: boolean
      secondaryDirectoryName: -String
  ColumnOptions:
    data:
      levels: DatabaseLevel[]
      memtableMemoryBudgetBytes: -long
@@ -275,22 +190,10 @@ baseTypesData:
      minBlobSize: -long
      blobFileSize: -long
      blobCompressionType: -Compression
  # Remember to update ColumnOptions common getters
  NamedColumnOptions:
    data:
      columnName: String
      levels: DatabaseLevel[]
      memtableMemoryBudgetBytes: -long
      cacheIndexAndFilterBlocks: -boolean
      partitionFilters: -boolean
      filter: -Filter
      blockSize: -int
      persistentCacheId: -String
      writeBufferSize: -long
      blobFiles: boolean
      minBlobSize: -long
      blobFileSize: -long
      blobCompressionType: -Compression
      name: String
      options: ColumnOptions
  NoFilter:
    data: {}
  BloomFilter:
@@ -311,67 +214,6 @@ baseTypesData:
    data:
      maxDictBytes: int
      compression: Compression
  IndicizerAnalyzers:
    data:
      defaultAnalyzer: TextFieldsAnalyzer
      fieldAnalyzer: String2FieldAnalyzerMap
  IndicizerSimilarities:
    data:
      defaultSimilarity: TextFieldsSimilarity
      fieldSimilarity: String2FieldSimilarityMap
  LuceneOptions:
    data:
      extraFlags: StringMap
      queryRefreshDebounceTime: Duration
      commitDebounceTime: Duration
      lowMemory: boolean
      directoryOptions: LuceneDirectoryOptions
      indexWriterReaderPooling: -boolean
      indexWriterRAMBufferSizeMB: -double
      indexWriterMaxBufferedDocs: -int
      applyAllDeletes: -boolean
      writeAllDeletes: -boolean
      allowNonVolatileCollection: boolean
      maxInMemoryResultEntries: int
      mergePolicy: TieredMergePolicy
  TieredMergePolicy:
    data:
      forceMergeDeletesPctAllowed: -double
      deletesPctAllowed: -double
      maxMergeAtOnce: -int
      maxMergedSegmentBytes: -long
      floorSegmentBytes: -long
      segmentsPerTier: -double
      maxCFSSegmentSizeBytes: -long
      noCFSRatio: -double
  ByteBuffersDirectory: { data: { } }
  MemoryMappedFSDirectory:
    data:
      managedPath: Path
  NIOFSDirectory:
    data:
      managedPath: Path
  RAFFSDirectory:
    data:
      managedPath: Path
  DirectIOFSDirectory:
    data:
      delegate: StandardFSDirectoryOptions
      mergeBufferSize: -int
      minBytesDirect: -long
  RocksDBStandaloneDirectory:
    data:
      managedPath: Path
      blockSize: int
  RocksDBSharedDirectory:
    data:
      managedPath: Path
      blockSize: int
  NRTCachingDirectory:
    data:
      delegate: LuceneDirectoryOptions
      maxMergeSizeBytes: long
      maxCachedBytes: long
versions:
  0.0.0:
    details:
MetricUtils.java (deleted)

@@ -1,55 +0,0 @@
package it.cavallium.dbengine;

import io.netty5.buffer.pool.PoolArenaMetric;
import io.netty5.buffer.pool.PooledBufferAllocator;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.util.List;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

/**
 * Netty5 hides some metrics. This utility class can read them.
 */
public class MetricUtils {

    private static final Logger LOG = LogManager.getLogger(MetricUtils.class);
    private static final MethodHandle GET_ARENA_METRICS;

    static {
        var lookup = MethodHandles.lookup();

        // Get the method handle that returns the metrics of each pool arena
        MethodHandle handle = null;
        try {
            // Find the class
            var pooledBufferClass = Class.forName("io.netty5.buffer.pool.PooledBufferAllocatorMetric");
            // Find the handle of the method
            handle = lookup.findVirtual(pooledBufferClass, "arenaMetrics", MethodType.methodType(List.class));
        } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ex) {
            logMetricsNotAccessible(ex);
        }
        GET_ARENA_METRICS = handle;
    }

    private static void logMetricsNotAccessible(Throwable ex) {
        LOG.debug("Failed to open pooled buffer allocator metrics", ex);
    }

    /**
     * Get the metrics of each pool arena of a pooled allocator
     * @param allocator Pooled allocator
     * @return A list of {@link PoolArenaMetric}
     */
    @SuppressWarnings("unchecked")
    public static List<PoolArenaMetric> getPoolArenaMetrics(PooledBufferAllocator allocator) {
        var metric = allocator.metric();
        try {
            // Invoke the method to get the metrics
            return (List<PoolArenaMetric>) GET_ARENA_METRICS.invoke(metric);
        } catch (Throwable e) {
            return List.of();
        }
    }
}
Backuppable.java

@@ -1,8 +1,7 @@
package it.cavallium.dbengine.client;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SignalType;

public abstract class Backuppable implements IBackuppable {

@@ -13,29 +12,29 @@ public abstract class Backuppable implements IBackuppable {
    private final AtomicInteger state = new AtomicInteger();

    @Override
    public final Mono<Void> pauseForBackup() {
        return Mono.defer(() -> {
            if (state.compareAndSet(State.RUNNING.ordinal(), State.PAUSING.ordinal())) {
                return onPauseForBackup().doFinally(type -> state.compareAndSet(State.PAUSING.ordinal(),
                    type == SignalType.ON_ERROR ? State.RUNNING.ordinal() : State.PAUSED.ordinal()
                ));
            } else {
                return Mono.empty();
    public final void pauseForBackup() {
        if (state.compareAndSet(State.RUNNING.ordinal(), State.PAUSING.ordinal())) {
            try {
                onPauseForBackup();
                state.compareAndSet(State.PAUSING.ordinal(), State.PAUSED.ordinal());
            } catch (Throwable ex) {
                state.compareAndSet(State.PAUSING.ordinal(), State.RUNNING.ordinal());
                throw ex;
            }
        });
    }
    }

    @Override
    public final Mono<Void> resumeAfterBackup() {
        return Mono.defer(() -> {
            if (state.compareAndSet(State.PAUSED.ordinal(), State.RESUMING.ordinal())) {
                return onResumeAfterBackup().doFinally(type -> state.compareAndSet(State.RESUMING.ordinal(),
                    type == SignalType.ON_ERROR ? State.PAUSED.ordinal() : State.RUNNING.ordinal()
                ));
            } else {
                return Mono.empty();
    public final void resumeAfterBackup() {
        if (state.compareAndSet(State.PAUSED.ordinal(), State.RESUMING.ordinal())) {
            try {
                onResumeAfterBackup();
                state.compareAndSet(State.RESUMING.ordinal(), State.RUNNING.ordinal());
            } catch (Throwable ex) {
                state.compareAndSet(State.RESUMING.ordinal(), State.PAUSED.ordinal());
                throw ex;
            }
        });
    }
    }

    @Override
@@ -47,9 +46,9 @@ public abstract class Backuppable implements IBackuppable {
        return State.values()[state.get()];
    }

    protected abstract Mono<Void> onPauseForBackup();
    protected abstract void onPauseForBackup();

    protected abstract Mono<Void> onResumeAfterBackup();
    protected abstract void onResumeAfterBackup();

    public final void setStopped() {
        state.set(State.STOPPED.ordinal());
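With this change the backup hooks become plain synchronous methods: pauseForBackup() drives the RUNNING → PAUSING → PAUSED transition itself and rolls the state back to RUNNING if the hook throws. A minimal sketch of a subclass under the new API, assuming no abstract members beyond the two hooks shown in the diff; the FlushingStore class and its flush()/reopen() helpers are hypothetical:

```java
// Minimal sketch of a Backuppable subclass after the reactive-to-sync change.
public class FlushingStore extends Backuppable {

	@Override
	protected void onPauseForBackup() {
		// Called only after the state moved RUNNING -> PAUSING; if this throws,
		// pauseForBackup() resets the state to RUNNING and rethrows.
		flush();
	}

	@Override
	protected void onResumeAfterBackup() {
		// Called only after the state moved PAUSED -> RESUMING.
		reopen();
	}

	private void flush() { /* persist pending writes */ }

	private void reopen() { /* accept writes again */ }
}
```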
BadBlock.java (deleted)

@@ -1,8 +0,0 @@
package it.cavallium.dbengine.client;

import it.cavallium.dbengine.rpc.current.data.Column;
import it.unimi.dsi.fastutil.bytes.ByteList;
import org.jetbrains.annotations.Nullable;

public record BadBlock(String databaseName, @Nullable Column column, @Nullable ByteList rawKey,
        @Nullable Throwable ex) {}
CastMapper.java

@@ -1,14 +1,14 @@
package it.cavallium.dbengine.client;

import it.cavallium.dbengine.client.Mapper;

public class CastMapper<T, U> implements Mapper<T, U> {

    @SuppressWarnings("unchecked")
    @Override
    public U map(T key) {
        return (U) key;
    }

    @SuppressWarnings("unchecked")
    @Override
    public T unmap(U key) {
        return (T) key;
CompositeDatabase.java

@@ -1,36 +1,32 @@
package it.cavallium.dbengine.client;

import io.micrometer.core.instrument.MeterRegistry;
import io.netty5.buffer.BufferAllocator;
import it.cavallium.dbengine.database.DatabaseOperations;
import it.cavallium.dbengine.database.DatabaseProperties;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import java.util.stream.Stream;

public interface CompositeDatabase extends DatabaseProperties, DatabaseOperations {

    Mono<Void> preClose();
    void preClose();

    Mono<Void> close();
    void close();

    /**
     * Can return SnapshotException
     */
    Mono<CompositeSnapshot> takeSnapshot();
    CompositeSnapshot takeSnapshot();

    /**
     * Can return SnapshotException
     */
    Mono<Void> releaseSnapshot(CompositeSnapshot snapshot);

    BufferAllocator getAllocator();
    void releaseSnapshot(CompositeSnapshot snapshot);

    MeterRegistry getMeterRegistry();

    /**
     * Find corrupted items
     */
    Flux<BadBlock> badBlocks();
    Stream<DbProgress<SSTVerificationProgress>> verify();

    Mono<Void> verifyChecksum();
    void verifyChecksum();
}
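The interface now returns plain values and java.util.stream.Stream instead of Mono/Flux, and the Flux-based badBlocks() gives way to verify(). A hedged usage sketch of the resulting API; any CompositeDatabase implementation stands in for db:

```java
// Sketch of driving the synchronous CompositeDatabase API shown above.
static void backupAndVerify(CompositeDatabase db) {
	CompositeSnapshot snapshot = db.takeSnapshot(); // can throw SnapshotException
	try {
		// verify() now streams per-SST progress instead of emitting Flux<BadBlock>
		db.verify().forEach(progress -> System.out.println(progress.databaseName()));
	} finally {
		db.releaseSnapshot(snapshot);
	}
	db.verifyChecksum();
	db.close();
}
```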
CompositeDatabasePartLocation.java

@@ -18,8 +18,7 @@ public class CompositeDatabasePartLocation {
    }

    public enum CompositeDatabasePartType {
        KV_DATABASE,
        LUCENE_INDEX
        KV_DATABASE
    }

    public CompositeDatabasePartType getPartType() {
CompositeSnapshot.java

@@ -2,7 +2,6 @@ package it.cavallium.dbengine.client;

import it.cavallium.dbengine.client.CompositeDatabasePartLocation.CompositeDatabasePartType;
import it.cavallium.dbengine.database.LLKeyValueDatabaseStructure;
import it.cavallium.dbengine.database.LLLuceneIndex;
import it.cavallium.dbengine.database.LLSnapshot;
import java.util.Map;
import java.util.Objects;
@@ -20,12 +19,6 @@ public class CompositeSnapshot {
        )), () -> "No snapshot for database with name \"" + database.getDatabaseName() + "\"");
    }

    public LLSnapshot getSnapshot(LLLuceneIndex luceneIndex) {
        return Objects.requireNonNull(snapshots.get(CompositeDatabasePartLocation.of(CompositeDatabasePartType.LUCENE_INDEX,
            luceneIndex.getLuceneIndexName()
        )), () -> "No snapshot for lucene index with name \"" + luceneIndex.getLuceneIndexName() + "\"");
    }

    public Map<CompositeDatabasePartLocation, LLSnapshot> getAllSnapshots() {
        return snapshots;
    }
ConnectionSettings.java

@@ -17,9 +17,6 @@ public sealed interface ConnectionSettings {

    record LocalConnectionSettings(Path dataPath) implements PrimaryConnectionSettings, SubConnectionSettings {}

    record QuicConnectionSettings(SocketAddress bindAddress, SocketAddress remoteAddress) implements
        PrimaryConnectionSettings, SubConnectionSettings {}

    record MultiConnectionSettings(Map<ConnectionPart, SubConnectionSettings> parts) implements
        PrimaryConnectionSettings {

@@ -32,8 +29,6 @@ public sealed interface ConnectionSettings {

    sealed interface ConnectionPart {

        record ConnectionPartLucene(@Nullable String name) implements ConnectionPart {}

        record ConnectionPartRocksDB(@Nullable String name) implements ConnectionPart {}
    }
}
CountedStream.java (deleted)

@@ -1,47 +0,0 @@
package it.cavallium.dbengine.client;

import java.util.Collection;
import java.util.List;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class CountedStream<T> {

    private final Flux<T> stream;
    private final long count;

    public CountedStream(Flux<T> stream, long count) {
        this.stream = stream;
        this.count = count;
    }

    public Flux<T> getStream() {
        return stream;
    }

    public long getCount() {
        return count;
    }

    @SafeVarargs
    public static <T> CountedStream<T> merge(CountedStream<T>... stream) {
        return merge(List.of(stream));
    }

    public static <T> CountedStream<T> merge(Collection<CountedStream<T>> stream) {
        return stream
            .stream()
            .reduce((a, b) -> new CountedStream<>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
            .orElseGet(() -> new CountedStream<>(Flux.empty(), 0));
    }

    public static <T> Mono<CountedStream<T>> merge(Flux<CountedStream<T>> stream) {
        return stream
            .reduce((a, b) -> new CountedStream<>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
            .switchIfEmpty(Mono.fromSupplier(() -> new CountedStream<>(Flux.empty(), 0)));
    }

    public Mono<List<T>> collectList() {
        return stream.collectList();
    }
}
src/main/java/it/cavallium/dbengine/client/DbProgress.java (new file, 45 lines)
@@ -0,0 +1,45 @@
package it.cavallium.dbengine.client;

import it.cavallium.dbengine.client.SSTProgress.SSTProgressReport;
import it.cavallium.dbengine.client.SSTProgress.SSTStart;
import it.cavallium.dbengine.rpc.current.data.Column;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Stream;
import org.jetbrains.annotations.Nullable;

public interface DbProgress<T extends SSTProgress> {

    String databaseName();

    record DbSSTProgress<T extends SSTProgress>(String databaseName, Column column, @Nullable Path file, long scanned,
            long total, T sstProgress) implements DbProgress<T> {

        public double getProgress() {
            if (total == 0) {
                return 0d;
            }
            return scanned / (double) total;
        }

        public String fileString() {
            return file != null ? file.normalize().toString() : null;
        }
    }

    static <T extends SSTProgress> Stream<DbProgress<T>> toDbProgress(String dbName,
            String columnName,
            LongProgressTracker totalTracker,
            Stream<T> stream) {
        Column column = Column.of(columnName);
        AtomicReference<Path> filePath = new AtomicReference<>();
        return stream.map(state -> {
            switch (state) {
                case SSTStart start -> filePath.set(start.metadata().filePath());
                case SSTProgressReport progress -> totalTracker.incrementAndGet();
                default -> {}
            }
            return new DbSSTProgress<>(dbName, column, filePath.get(), totalTracker.getCurrent(), totalTracker.getTotal(), state);
        });
    }
}
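DbSSTProgress tracks scanned against total, so a consumer can derive a percentage directly from getProgress(). A small sketch of draining the stream that toDbProgress (or CompositeDatabase.verify()) produces, using only members defined in the new file above:

```java
// Sketch: print verification progress from a DbProgress stream.
import java.util.stream.Stream;

class ProgressPrinter {

	static <T extends SSTProgress> void printProgress(Stream<DbProgress<T>> progress) {
		progress.forEach(p -> {
			if (p instanceof DbProgress.DbSSTProgress<T> sst) {
				// getProgress() is scanned / total (0 when total == 0)
				System.out.printf("%s %s: %.1f%%%n",
						sst.databaseName(), sst.fileString(), sst.getProgress() * 100.0);
			}
		});
	}
}
```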
DefaultDatabaseOptions.java

@@ -1,13 +1,13 @@
package it.cavallium.dbengine.client;

import it.cavallium.data.generator.nativedata.NullableString;
import it.cavallium.data.generator.nativedata.Nullableboolean;
import it.cavallium.data.generator.nativedata.Nullableint;
import it.cavallium.data.generator.nativedata.Nullablelong;
import it.cavallium.datagen.nativedata.NullableString;
import it.cavallium.datagen.nativedata.Nullableboolean;
import it.cavallium.datagen.nativedata.Nullableint;
import it.cavallium.datagen.nativedata.Nullablelong;
import it.cavallium.dbengine.rpc.current.data.ColumnOptions;
import it.cavallium.dbengine.rpc.current.data.ColumnOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.DefaultColumnOptions;
import it.cavallium.dbengine.rpc.current.data.DefaultColumnOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.NamedColumnOptions;
import it.cavallium.dbengine.rpc.current.data.NamedColumnOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.nullables.NullableCompression;
@@ -20,7 +20,7 @@ import org.rocksdb.RocksDB;

public class DefaultDatabaseOptions {

    public static DefaultColumnOptions DEFAULT_DEFAULT_COLUMN_OPTIONS = new DefaultColumnOptions(
    public static ColumnOptions DEFAULT_DEFAULT_COLUMN_OPTIONS = new ColumnOptions(
        Collections.emptyList(),
        Nullablelong.empty(),
        Nullableboolean.empty(),
@@ -37,18 +37,7 @@ public class DefaultDatabaseOptions {

    public static NamedColumnOptions DEFAULT_NAMED_COLUMN_OPTIONS = new NamedColumnOptions(
        new String(RocksDB.DEFAULT_COLUMN_FAMILY, StandardCharsets.UTF_8),
        Collections.emptyList(),
        Nullablelong.empty(),
        Nullableboolean.empty(),
        Nullableboolean.empty(),
        NullableFilter.empty(),
        Nullableint.empty(),
        NullableString.empty(),
        Nullablelong.empty(),
        false,
        Nullablelong.empty(),
        Nullablelong.empty(),
        NullableCompression.empty()
        DEFAULT_DEFAULT_COLUMN_OPTIONS
    );

    public static DatabaseOptions DEFAULT_DATABASE_OPTIONS = new DatabaseOptions(List.of(),
@@ -58,16 +47,16 @@ public class DefaultDatabaseOptions {
        false,
        false,
        true,
        true,
        Nullableint.empty(),
        Nullablelong.empty(),
        Nullablelong.empty(),
        Collections.emptyList(),
        Nullablelong.empty(),
        false,
        DEFAULT_DEFAULT_COLUMN_OPTIONS,
        List.of(),
        NullableString.empty(),
        NullableString.empty(),
        false,
        NullableString.empty()
    );

@@ -75,8 +64,8 @@ public class DefaultDatabaseOptions {
        return DatabaseOptionsBuilder.builder(DEFAULT_DATABASE_OPTIONS);
    }

    public static DefaultColumnOptionsBuilder defaultColumnOptionsBuilder() {
        return DefaultColumnOptionsBuilder.builder(DEFAULT_DEFAULT_COLUMN_OPTIONS);
    public static ColumnOptionsBuilder defaultColumnOptionsBuilder() {
        return ColumnOptionsBuilder.builder(DEFAULT_DEFAULT_COLUMN_OPTIONS);
    }

    public static NamedColumnOptionsBuilder namedColumnOptionsBuilder() {
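After the merge of DefaultColumnOptions into ColumnOptions, NamedColumnOptions reduces to a name plus a ColumnOptions, matching the YAML definition earlier in this compare. A sketch of the resulting builder usage; the of(...) factory and build() method follow the datagen/record-builder patterns seen elsewhere in this diff and are assumptions, not confirmed signatures:

```java
// Sketch of building column options after the DefaultColumnOptions ->
// ColumnOptions merge; "mycolumn" is a placeholder column name.
import it.cavallium.dbengine.rpc.current.data.ColumnOptions;
import it.cavallium.dbengine.rpc.current.data.NamedColumnOptions;

class ColumnOptionsExample {

	static NamedColumnOptions exampleColumnOptions() {
		ColumnOptions options = DefaultDatabaseOptions
				.defaultColumnOptionsBuilder() // now returns ColumnOptionsBuilder
				.build();
		// NamedColumnOptions is now (name: String, options: ColumnOptions)
		return NamedColumnOptions.of("mycolumn", options);
	}
}
```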
HitEntry.java

@@ -1,13 +1,26 @@
package it.cavallium.dbengine.client;

import java.util.Map;
import java.util.Map.Entry;
import org.jetbrains.annotations.Contract;
import org.jetbrains.annotations.NotNull;
import reactor.core.publisher.Mono;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.Unmodifiable;

public record HitEntry<T, U>(T key, U value, float score)
public record HitEntry<T, U>(T key, @Nullable U value, float score)
        implements Comparable<HitEntry<T, U>> {

    @Override
    public int compareTo(@NotNull HitEntry<T, U> o) {
        return Float.compare(o.score, this.score);
    }

    @Contract(pure = true)
    public @Nullable @Unmodifiable Entry<T, U> toEntry() {
        if (value != null) {
            return Map.entry(key, value);
        } else {
            return null;
        }
    }
}
HitKey.java

@@ -1,16 +1,13 @@
package it.cavallium.dbengine.client;

import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
import java.util.Comparator;
import java.util.function.Function;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;

public record HitKey<T>(T key, float score) implements Comparable<HitKey<T>> {

    public <U> Mono<HitEntry<T, U>> withValue(Function<T, Mono<U>> valueGetter) {
        return valueGetter.apply(key).map(value -> new HitEntry<>(key, value, score));
    public <U> HitEntry<T, U> withValue(Function<T, U> valueGetter) {
        return new HitEntry<>(key, valueGetter.apply(key), score);
    }

    public <U> HitEntry<T, U> withNullValue() {
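withValue() is now a plain synchronous mapping, and since HitEntry's value became @Nullable, the companion toEntry() (in the HitEntry diff above) converts to a Map.Entry only when a value is present. A short sketch combining the two; the lookup function is a stand-in:

```java
// Sketch: resolve a HitKey into a HitEntry and print it when a value exists.
import java.util.Map;
import java.util.function.Function;

class HitExample {

	static <T> void printHit(HitKey<T> hit, Function<T, String> lookup) {
		HitEntry<T, String> entry = hit.withValue(lookup); // value may be null
		Map.Entry<T, String> asMapEntry = entry.toEntry(); // null when value is null
		if (asMapEntry != null) {
			System.out.println(asMapEntry.getKey() + " -> " + asMapEntry.getValue()
					+ " (score " + entry.score() + ")");
		}
	}
}
```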
@@ -1,33 +1,27 @@
 package it.cavallium.dbengine.client;
 
 import com.google.common.collect.Lists;
 import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
-import it.cavallium.dbengine.database.DiscardingCloseable;
 import it.cavallium.dbengine.database.LLUtils;
-import it.cavallium.dbengine.database.SafeCloseable;
 import it.cavallium.dbengine.database.collections.ValueGetter;
-import it.cavallium.dbengine.database.collections.ValueTransformer;
-import it.cavallium.dbengine.lucene.LuceneCloseable;
-import it.cavallium.dbengine.utils.SimpleResource;
-import java.util.Map.Entry;
-import java.util.Optional;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.function.Function;
+import java.util.stream.Stream;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
 
-public class Hits<T> extends SimpleResource implements DiscardingCloseable {
+public class Hits<T> {
 
-	private static final Logger LOG = LogManager.getLogger(Hits.class);
-	private static final Hits<?> EMPTY_HITS = new Hits<>(Flux.empty(), TotalHitsCount.of(0, true), false);
-	private final Flux<T> results;
+	private static final Hits<?> EMPTY_HITS = new Hits<>(List.of(), TotalHitsCount.of(0, true));
+	private final List<T> results;
 	private final TotalHitsCount totalHitsCount;
 
-	public Hits(Flux<T> results, TotalHitsCount totalHitsCount) {
-		this(results, totalHitsCount, true);
-	}
-
-	private Hits(Flux<T> results, TotalHitsCount totalHitsCount, boolean canClose) {
-		super(canClose);
+	public Hits(List<T> results, TotalHitsCount totalHitsCount) {
 		this.results = results;
 		this.totalHitsCount = totalHitsCount;
 	}
@@ -37,58 +31,21 @@ public class Hits<T> extends SimpleResource implements DiscardingCloseable {
 		return (Hits<T>) EMPTY_HITS;
 	}
 
-	public static <T, U> Function<Hits<HitKey<T>>, Hits<LazyHitEntry<T, U>>> generateMapper(
+	public static <T, U> Function<Hits<HitKey<T>>, Hits<HitEntry<T, U>>> generateMapper(
 			ValueGetter<T, U> valueGetter) {
 		return result -> {
-			var hitsToTransform = result.results()
-					.map(hit -> new LazyHitEntry<>(Mono.just(hit.key()), valueGetter.get(hit.key()), hit.score()));
-			return Hits.withResource(hitsToTransform, result.totalHitsCount(), result);
+			List<HitEntry<T, U>> hitsToTransform = LLUtils.mapList(result.results,
+					hit -> new HitEntry<>(hit.key(), valueGetter.get(hit.key()), hit.score())
+			);
+			return new Hits<>(hitsToTransform, result.totalHitsCount());
 		};
 	}
 
-	public static <T, U> Function<Hits<HitKey<T>>, Hits<LazyHitEntry<T, U>>> generateMapper(
-			ValueTransformer<T, U> valueTransformer) {
-		return result -> {
-			try {
-				var sharedHitsFlux = result.results().publish().refCount(3);
-				var scoresFlux = sharedHitsFlux.map(HitKey::score);
-				var keysFlux = sharedHitsFlux.map(HitKey::key);
-
-				var valuesFlux = valueTransformer.transform(keysFlux);
-
-				var transformedFlux = Flux.zip((Object[] data) -> {
-					//noinspection unchecked
-					var keyMono = Mono.just((T) data[0]);
-					//noinspection unchecked
-					var val = (Entry<T, Optional<U>>) data[1];
-					var valMono = Mono.justOrEmpty(val.getValue());
-					var score = (Float) data[2];
-					return new LazyHitEntry<>(keyMono, valMono, score);
-				}, keysFlux, valuesFlux, scoresFlux);
-
-				return Hits.withResource(transformedFlux, result.totalHitsCount(), result);
-			} catch (Throwable t) {
-				result.close();
-				throw t;
-			}
-		};
-	}
-
-	public static <T> Hits<T> withResource(Flux<T> hits, TotalHitsCount count, SafeCloseable resource) {
-		if (resource instanceof LuceneCloseable luceneCloseable) {
-			return new LuceneHits<>(hits, count, luceneCloseable);
-		} else {
-			return new CloseableHits<>(hits, count, resource);
-		}
-	}
-
-	public Flux<T> results() {
-		ensureOpen();
+	public List<T> results() {
 		return results;
 	}
 
 	public TotalHitsCount totalHitsCount() {
-		ensureOpen();
 		return totalHitsCount;
 	}
@@ -96,48 +53,4 @@ public class Hits<T> extends SimpleResource implements DiscardingCloseable {
 	public String toString() {
 		return "Hits[" + "results=" + results + ", " + "totalHitsCount=" + totalHitsCount + ']';
 	}
-
-	@Override
-	protected void onClose() {
-	}
-
-	public static final class LuceneHits<U> extends Hits<U> implements LuceneCloseable {
-
-		private final LuceneCloseable resource;
-
-		public LuceneHits(Flux<U> hits, TotalHitsCount count, LuceneCloseable resource) {
-			super(hits, count);
-			this.resource = resource;
-		}
-
-		@Override
-		protected void onClose() {
-			try {
-				resource.close();
-			} catch (Throwable ex) {
-				LOG.error("Failed to close resource", ex);
-			}
-			super.onClose();
-		}
-	}
-
-	public static final class CloseableHits<U> extends Hits<U> {
-
-		private final SafeCloseable resource;
-
-		public CloseableHits(Flux<U> hits, TotalHitsCount count, SafeCloseable resource) {
-			super(hits, count);
-			this.resource = resource;
-		}
-
-		@Override
-		protected void onClose() {
-			try {
-				resource.close();
-			} catch (Throwable ex) {
-				LOG.error("Failed to close resource", ex);
-			}
-			super.onClose();
-		}
-	}
 }
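For illustration only: a sketch of the simplified Hits, which is now a plain value holder over a List with no SimpleResource lifecycle or closeable subclasses to manage.

import it.cavallium.dbengine.client.HitKey;
import it.cavallium.dbengine.client.Hits;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import java.util.List;

class HitsExample {
	public static void main(String[] args) {
		// Results are an in-memory List now; no ensureOpen()/close() calls needed.
		Hits<HitKey<String>> hits = new Hits<>(
				List.of(new HitKey<>("doc1", 1.0f)),
				TotalHitsCount.of(1, true));
		for (HitKey<String> hit : hits.results()) {
			System.out.println(hit.key() + " scored " + hit.score());
		}
	}
}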
@@ -1,12 +1,10 @@
 package it.cavallium.dbengine.client;
 
-import reactor.core.publisher.Mono;
-
 public interface IBackuppable {
 
-	Mono<Void> pauseForBackup();
+	void pauseForBackup();
 
-	Mono<Void> resumeAfterBackup();
+	void resumeAfterBackup();
 
 	boolean isPaused();
 }
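For illustration only: with the blocking IBackuppable a backup window can be expressed as an ordinary try/finally block. The helper and the file-copy step below are hypothetical.

import it.cavallium.dbengine.client.IBackuppable;

class BackupExample {
	// Hypothetical helper: pause writes, run the caller's copy step, then resume.
	static void withBackupPause(IBackuppable db, Runnable copyFiles) {
		db.pauseForBackup();
		try {
			copyFiles.run(); // e.g. snapshot the database directory
		} finally {
			if (db.isPaused()) {
				db.resumeAfterBackup();
			}
		}
	}
}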
@@ -1,128 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import it.cavallium.dbengine.client.IndexAction.Add;
-import it.cavallium.dbengine.client.IndexAction.AddMulti;
-import it.cavallium.dbengine.client.IndexAction.Update;
-import it.cavallium.dbengine.client.IndexAction.UpdateMulti;
-import it.cavallium.dbengine.client.IndexAction.Delete;
-import it.cavallium.dbengine.client.IndexAction.DeleteAll;
-import it.cavallium.dbengine.client.IndexAction.TakeSnapshot;
-import it.cavallium.dbengine.client.IndexAction.ReleaseSnapshot;
-import it.cavallium.dbengine.client.IndexAction.Flush;
-import it.cavallium.dbengine.client.IndexAction.Refresh;
-import it.cavallium.dbengine.client.IndexAction.Close;
-import it.cavallium.dbengine.database.LLUpdateDocument;
-import it.cavallium.dbengine.database.LLSnapshot;
-import it.cavallium.dbengine.database.LLTerm;
-import java.util.Map;
-import java.util.Map.Entry;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.MonoSink;
-
-sealed interface IndexAction permits Add, AddMulti, Update, UpdateMulti, Delete, DeleteAll, TakeSnapshot,
-		ReleaseSnapshot, Flush, Refresh, Close {
-
-	IndexActionType getType();
-
-	final record Add(LLTerm key, LLUpdateDocument doc, MonoSink<Void> addedFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.ADD;
-		}
-	}
-
-	final record AddMulti(Flux<Entry<LLTerm, LLUpdateDocument>> docsFlux, MonoSink<Void> addedMultiFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.ADD_MULTI;
-		}
-	}
-
-	final record Update(LLTerm key, LLUpdateDocument doc, MonoSink<Void> updatedFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.UPDATE;
-		}
-	}
-
-	final record UpdateMulti(Map<LLTerm, LLUpdateDocument> docs, MonoSink<Void> updatedMultiFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.UPDATE_MULTI;
-		}
-	}
-
-	final record Delete(LLTerm key, MonoSink<Void> deletedFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.DELETE;
-		}
-	}
-
-	final record DeleteAll(MonoSink<Void> deletedAllFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.DELETE_ALL;
-		}
-	}
-
-	final record TakeSnapshot(MonoSink<LLSnapshot> snapshotFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.TAKE_SNAPSHOT;
-		}
-	}
-
-	final record ReleaseSnapshot(LLSnapshot snapshot, MonoSink<Void> releasedFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.RELEASE_SNAPSHOT;
-		}
-	}
-
-	final record Flush(MonoSink<Void> flushFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.FLUSH;
-		}
-	}
-
-	final record Refresh(boolean force, MonoSink<Void> refreshFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.REFRESH;
-		}
-	}
-
-	final record Close(MonoSink<Void> closeFuture) implements IndexAction {
-
-		@Override
-		public IndexActionType getType() {
-			return IndexActionType.CLOSE;
-		}
-	}
-
-	enum IndexActionType {
-		ADD,
-		ADD_MULTI,
-		UPDATE,
-		UPDATE_MULTI,
-		DELETE,
-		DELETE_ALL,
-		TAKE_SNAPSHOT,
-		RELEASE_SNAPSHOT,
-		FLUSH,
-		REFRESH,
-		CLOSE
-	}
-}
@@ -1,56 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Multimaps;
-import it.cavallium.dbengine.database.LLIndexRequest;
-import it.cavallium.dbengine.database.LLSoftUpdateDocument;
-import it.cavallium.dbengine.database.LLUpdateDocument;
-import it.cavallium.dbengine.database.LLTerm;
-import it.cavallium.dbengine.database.LLUpdateFields;
-import it.cavallium.dbengine.database.LLUtils;
-import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers;
-import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities;
-import java.util.Map;
-import java.util.Set;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.util.BytesRef;
-import org.jetbrains.annotations.NotNull;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-import reactor.util.function.Tuple2;
-
-public abstract class Indicizer<T, U> {
-
-	/**
-	 * Transform a value to an IndexRequest.
-	 */
-	public abstract @NotNull Mono<? extends LLIndexRequest> toIndexRequest(@NotNull T key, @NotNull U value);
-
-	public final @NotNull Mono<LLUpdateDocument> toDocument(@NotNull T key, @NotNull U value) {
-		return toIndexRequest(key, value).map(req -> {
-			if (req instanceof LLUpdateFields updateFields) {
-				return new LLUpdateDocument(updateFields.items());
-			} else if (req instanceof LLUpdateDocument updateDocument) {
-				return updateDocument;
-			} else if (req instanceof LLSoftUpdateDocument softUpdateDocument) {
-				return new LLUpdateDocument(softUpdateDocument.items());
-			} else {
-				throw new UnsupportedOperationException("Unexpected request type: " + req);
-			}
-		});
-	}
-
-	public abstract @NotNull LLTerm toIndex(@NotNull T key);
-
-	public abstract @NotNull String getKeyFieldName();
-
-	public abstract @NotNull T getKey(IndexableField key);
-
-	public abstract IndicizerAnalyzers getPerFieldAnalyzer();
-
-	public abstract IndicizerSimilarities getPerFieldSimilarity();
-
-	public Multimap<String, String> getMoreLikeThisDocumentFields(T key, U value) {
-		return Multimaps.forMap(Map.of());
-	}
-}
@@ -1,20 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
-import it.cavallium.dbengine.rpc.current.serializers.IndicizerAnalyzersSerializer;
-import java.util.Map;
-
-public class IndicizerAnalyzers {
-
-	public static it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers of() {
-		return of(TextFieldsAnalyzer.ICUCollationKey);
-	}
-
-	public static it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers of(TextFieldsAnalyzer defaultAnalyzer) {
-		return of(defaultAnalyzer, Map.of());
-	}
-
-	public static it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers of(TextFieldsAnalyzer defaultAnalyzer, Map<String, TextFieldsAnalyzer> fieldAnalyzer) {
-		return new it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers(defaultAnalyzer, fieldAnalyzer);
-	}
-}
@@ -1,20 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import it.cavallium.dbengine.lucene.analyzer.TextFieldsSimilarity;
-import java.util.Map;
-
-public class IndicizerSimilarities {
-
-	public static it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities of() {
-		return of(TextFieldsSimilarity.BM25Standard);
-	}
-
-	public static it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities of(TextFieldsSimilarity defaultSimilarity) {
-		return of(defaultSimilarity, Map.of());
-	}
-
-	public static it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities of(TextFieldsSimilarity defaultSimilarity,
-			Map<String, TextFieldsSimilarity> fieldSimilarity) {
-		return it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities.of(defaultSimilarity, fieldSimilarity);
-	}
-}
@@ -2,7 +2,6 @@ package it.cavallium.dbengine.client;
 
 import com.squareup.moshi.JsonReader;
 import com.squareup.moshi.JsonWriter;
-import it.cavallium.data.generator.nativedata.Int52;
 import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
 import java.io.IOException;
 import org.jetbrains.annotations.NotNull;
@@ -1,15 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import org.jetbrains.annotations.NotNull;
-import reactor.core.publisher.Mono;
-
-public record LazyHitEntry<T, U>(Mono<T> key, Mono<U> value, float score) {
-
-	public Mono<HitEntry<T, U>> resolve() {
-		return Mono.zip(key, value, (k, v) -> new HitEntry<>(k, v, score));
-	}
-
-	public Mono<HitKey<T>> resolveKey() {
-		return key.map(k -> new HitKey<>(k, score));
-	}
-}
@@ -1,19 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import java.util.function.Function;
-import reactor.core.publisher.Mono;
-
-public record LazyHitKey<T>(Mono<T> key, float score) {
-
-	public <U> LazyHitEntry<T, U> withValue(Function<T, Mono<U>> valueGetter) {
-		return new LazyHitEntry<>(key, key.flatMap(valueGetter), score);
-	}
-
-	public Mono<HitKey<T>> resolve() {
-		return key.map(k -> new HitKey<>(k, score));
-	}
-
-	public <U> Mono<HitEntry<T, U>> resolveWithValue(Function<T, Mono<U>> valueGetter) {
-		return resolve().flatMap(key -> key.withValue(valueGetter));
-	}
-}
@@ -0,0 +1,42 @@
+package it.cavallium.dbengine.client;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+public class LongProgressTracker {
+
+	private final AtomicLong current = new AtomicLong();
+	private final AtomicLong total = new AtomicLong();
+
+	public LongProgressTracker(long size) {
+		setTotal(size);
+	}
+
+	public LongProgressTracker() {
+
+	}
+
+	public LongProgressTracker setTotal(long estimate) {
+		total.set(estimate);
+		return this;
+	}
+
+	public long getCurrent() {
+		return current.get();
+	}
+
+	public long incrementAndGet() {
+		return current.incrementAndGet();
+	}
+
+	public long getAndIncrement() {
+		return current.getAndIncrement();
+	}
+
+	public long getTotal() {
+		return Math.max(current.get(), total.get());
+	}
+
+	public double progress() {
+		return getCurrent() / (double) Math.max(1L, getTotal());
+	}
+}
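For illustration only: a sketch exercising the new LongProgressTracker. The total is only an estimate; getTotal() never reports less than the current count, so progress() stays within [0, 1].

import it.cavallium.dbengine.client.LongProgressTracker;

class ProgressExample {
	public static void main(String[] args) {
		LongProgressTracker tracker = new LongProgressTracker(1_000);
		for (int i = 0; i < 1_000; i++) {
			tracker.incrementAndGet();
			if (tracker.getCurrent() % 250 == 0) {
				System.out.printf("progress: %.0f%%%n", tracker.progress() * 100);
			}
		}
	}
}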
@@ -1,78 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import io.netty5.util.Send;
-import it.cavallium.dbengine.client.query.ClientQueryParams;
-import it.cavallium.dbengine.client.query.current.data.Query;
-import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
-import it.cavallium.dbengine.database.Delta;
-import it.cavallium.dbengine.database.LLSnapshottable;
-import it.cavallium.dbengine.database.collections.ValueGetter;
-import it.cavallium.dbengine.database.collections.ValueTransformer;
-import it.cavallium.dbengine.lucene.collector.Buckets;
-import it.cavallium.dbengine.lucene.searcher.BucketParams;
-import it.unimi.dsi.fastutil.doubles.DoubleArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-public interface LuceneIndex<T, U> extends LLSnapshottable {
-
-	Mono<Void> addDocument(T key, U value);
-
-	Mono<Long> addDocuments(boolean atomic, Flux<Entry<T, U>> entries);
-
-	Mono<Void> deleteDocument(T key);
-
-	Mono<Void> updateDocument(T key, @NotNull U value);
-
-	Mono<Long> updateDocuments(Flux<Entry<T, U>> entries);
-
-	default Mono<Void> updateOrDeleteDocument(T key, @Nullable U value) {
-		if (value == null) {
-			return deleteDocument(key);
-		} else {
-			return updateDocument(key, value);
-		}
-	}
-
-	default Mono<Void> updateOrDeleteDocumentIfModified(T key, @NotNull Delta<U> delta) {
-		return updateOrDeleteDocumentIfModified(key, delta.current(), delta.isModified());
-	}
-
-	default Mono<Void> updateOrDeleteDocumentIfModified(T key, @Nullable U currentValue, boolean modified) {
-		if (modified) {
-			return updateOrDeleteDocument(key, currentValue);
-		} else {
-			return Mono.empty();
-		}
-	}
-
-	Mono<Void> deleteAll();
-
-	Mono<Hits<HitKey<T>>> moreLikeThis(ClientQueryParams queryParams, T key,
-			U mltDocumentValue);
-
-	Mono<Hits<HitKey<T>>> search(ClientQueryParams queryParams);
-
-	Mono<Buckets> computeBuckets(@Nullable CompositeSnapshot snapshot,
-			@NotNull List<Query> queries,
-			@Nullable Query normalizationQuery,
-			BucketParams bucketParams);
-
-	Mono<TotalHitsCount> count(@Nullable CompositeSnapshot snapshot, Query query);
-
-	boolean isLowMemoryMode();
-
-	void close();
-
-	Mono<Void> flush();
-
-	Mono<Void> waitForMerges();
-
-	Mono<Void> waitForLastMerges();
-
-	Mono<Void> refresh(boolean force);
-}
@@ -1,252 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import it.cavallium.dbengine.client.Hits.CloseableHits;
-import it.cavallium.dbengine.client.Hits.LuceneHits;
-import it.cavallium.dbengine.client.query.ClientQueryParams;
-import it.cavallium.dbengine.client.query.current.data.Query;
-import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
-import it.cavallium.dbengine.database.DiscardingCloseable;
-import it.cavallium.dbengine.database.LLKeyScore;
-import it.cavallium.dbengine.database.LLLuceneIndex;
-import it.cavallium.dbengine.database.LLSearchResultShard;
-import it.cavallium.dbengine.database.LLSearchResultShard.LuceneLLSearchResultShard;
-import it.cavallium.dbengine.database.LLSearchResultShard.ResourcesLLSearchResultShard;
-import it.cavallium.dbengine.database.LLSnapshot;
-import it.cavallium.dbengine.database.LLTerm;
-import it.cavallium.dbengine.database.LLUpdateDocument;
-import it.cavallium.dbengine.database.LLUtils;
-import it.cavallium.dbengine.database.SafeCloseable;
-import it.cavallium.dbengine.lucene.LuceneCloseable;
-import it.cavallium.dbengine.lucene.LuceneUtils;
-import it.cavallium.dbengine.lucene.collector.Buckets;
-import it.cavallium.dbengine.lucene.searcher.BucketParams;
-import it.unimi.dsi.fastutil.objects.ObjectArrayList;
-import java.time.Duration;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Objects;
-import java.util.logging.Level;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-import reactor.core.publisher.SignalType;
-
-public class LuceneIndexImpl<T, U> implements LuceneIndex<T, U> {
-
-	private static final Duration MAX_COUNT_TIME = Duration.ofSeconds(30);
-	private static final Logger LOG = LogManager.getLogger(LuceneIndex.class);
-	private final LLLuceneIndex luceneIndex;
-	private final Indicizer<T,U> indicizer;
-
-	public LuceneIndexImpl(LLLuceneIndex luceneIndex, Indicizer<T, U> indicizer) {
-		this.luceneIndex = luceneIndex;
-		this.indicizer = indicizer;
-	}
-
-	private LLSnapshot resolveSnapshot(CompositeSnapshot snapshot) {
-		if (snapshot == null) {
-			return null;
-		} else {
-			return snapshot.getSnapshot(luceneIndex);
-		}
-	}
-
-	@Override
-	public Mono<Void> addDocument(T key, U value) {
-		return indicizer
-				.toDocument(key, value)
-				.flatMap(doc -> luceneIndex.addDocument(indicizer.toIndex(key), doc));
-	}
-
-	@Override
-	public Mono<Long> addDocuments(boolean atomic, Flux<Entry<T, U>> entries) {
-		return luceneIndex.addDocuments(atomic, entries.flatMap(entry -> indicizer
-				.toDocument(entry.getKey(), entry.getValue())
-				.map(doc -> Map.entry(indicizer.toIndex(entry.getKey()), doc))));
-	}
-
-	@Override
-	public Mono<Void> deleteDocument(T key) {
-		LLTerm id = indicizer.toIndex(key);
-		return luceneIndex.deleteDocument(id);
-	}
-
-	@Override
-	public Mono<Void> updateDocument(T key, @NotNull U value) {
-		return indicizer
-				.toIndexRequest(key, value)
-				.flatMap(doc -> luceneIndex.update(indicizer.toIndex(key), doc));
-	}
-
-	@Override
-	public Mono<Long> updateDocuments(Flux<Entry<T, U>> entries) {
-		Flux<Entry<LLTerm, LLUpdateDocument>> mappedEntries = entries
-				.flatMap(entry -> Mono
-						.zip(Mono.just(indicizer.toIndex(entry.getKey())),
-								indicizer.toDocument(entry.getKey(), entry.getValue()).single(),
-								Map::entry
-						)
-						.single()
-				)
-				.log("impl-update-documents", Level.FINEST, false, SignalType.ON_NEXT, SignalType.ON_COMPLETE);
-		return luceneIndex.updateDocuments(mappedEntries);
-	}
-
-	@Override
-	public Mono<Void> deleteAll() {
-		return luceneIndex.deleteAll();
-	}
-
-	@Override
-	public Mono<Hits<HitKey<T>>> moreLikeThis(ClientQueryParams queryParams,
-			T key,
-			U mltDocumentValue) {
-		var mltDocumentFields
-				= indicizer.getMoreLikeThisDocumentFields(key, mltDocumentValue);
-
-		return luceneIndex
-				.moreLikeThis(resolveSnapshot(queryParams.snapshot()),
-						queryParams.toQueryParams(),
-						indicizer.getKeyFieldName(),
-						mltDocumentFields
-				)
-				.collectList()
-				.mapNotNull(shards -> mergeResults(queryParams, shards))
-				.map(llSearchResult -> mapResults(llSearchResult))
-				.defaultIfEmpty(Hits.empty())
-				.doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard);
-	}
-
-	@Override
-	public Mono<Hits<HitKey<T>>> search(ClientQueryParams queryParams) {
-		return luceneIndex
-				.search(resolveSnapshot(queryParams.snapshot()),
-						queryParams.toQueryParams(),
-						indicizer.getKeyFieldName()
-				)
-				.collectList()
-				.mapNotNull(shards -> mergeResults(queryParams, shards))
-				.map(llSearchResult -> mapResults(llSearchResult))
-				.defaultIfEmpty(Hits.empty())
-				.doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard);
-	}
-
-	@Override
-	public Mono<Buckets> computeBuckets(@Nullable CompositeSnapshot snapshot,
-			@NotNull List<Query> query,
-			@Nullable Query normalizationQuery,
-			BucketParams bucketParams) {
-		return luceneIndex.computeBuckets(resolveSnapshot(snapshot), query,
-				normalizationQuery, bucketParams).single();
-	}
-
-	private Hits<HitKey<T>> mapResults(LLSearchResultShard llSearchResult) {
-		Flux<HitKey<T>> scoresWithKeysFlux = llSearchResult.results()
-				.map(hit -> new HitKey<>(indicizer.getKey(hit.key()), hit.score()));
-
-		if (llSearchResult instanceof LuceneCloseable luceneCloseable) {
-			return new LuceneHits<>(scoresWithKeysFlux, llSearchResult.totalHitsCount(), luceneCloseable);
-		} else {
-			return new CloseableHits<>(scoresWithKeysFlux, llSearchResult.totalHitsCount(), llSearchResult);
-		}
-	}
-
-	@Override
-	public Mono<TotalHitsCount> count(@Nullable CompositeSnapshot snapshot, Query query) {
-		return luceneIndex
-				.count(resolveSnapshot(snapshot), query, MAX_COUNT_TIME)
-				.doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard);
-	}
-
-	@Override
-	public boolean isLowMemoryMode() {
-		return luceneIndex.isLowMemoryMode();
-	}
-
-	@Override
-	public void close() {
-		luceneIndex.close();
-	}
-
-	/**
-	 * Flush writes to disk
-	 */
-	@Override
-	public Mono<Void> flush() {
-		return luceneIndex.flush();
-	}
-
-	@Override
-	public Mono<Void> waitForMerges() {
-		return luceneIndex.waitForMerges();
-	}
-
-	@Override
-	public Mono<Void> waitForLastMerges() {
-		return luceneIndex.waitForLastMerges();
-	}
-
-	/**
-	 * Refresh index searcher
-	 */
-	@Override
-	public Mono<Void> refresh(boolean force) {
-		return luceneIndex.refresh(force);
-	}
-
-	@Override
-	public Mono<LLSnapshot> takeSnapshot() {
-		return luceneIndex.takeSnapshot();
-	}
-
-	@Override
-	public Mono<Void> releaseSnapshot(LLSnapshot snapshot) {
-		return luceneIndex.releaseSnapshot(snapshot);
-	}
-
-	@SuppressWarnings({"unchecked", "rawtypes"})
-	@Nullable
-	private static LLSearchResultShard mergeResults(ClientQueryParams queryParams, List<LLSearchResultShard> shards) {
-		if (shards.size() == 0) {
-			return null;
-		} else if (shards.size() == 1) {
-			return shards.get(0);
-		}
-		TotalHitsCount count = null;
-		ObjectArrayList<Flux<LLKeyScore>> results = new ObjectArrayList<>(shards.size());
-		ObjectArrayList resources = new ObjectArrayList(shards.size());
-		boolean luceneResources = false;
-		for (LLSearchResultShard shard : shards) {
-			if (!luceneResources && shard instanceof LuceneCloseable) {
-				luceneResources = true;
-			}
-			if (count == null) {
-				count = shard.totalHitsCount();
-			} else {
-				count = LuceneUtils.sum(count, shard.totalHitsCount());
-			}
-			var maxLimit = queryParams.offset() + queryParams.limit();
-			results.add(shard.results().take(maxLimit, true));
-			resources.add(shard);
-		}
-		Objects.requireNonNull(count);
-		Flux<LLKeyScore> resultsFlux;
-		if (results.size() == 0) {
-			resultsFlux = Flux.empty();
-		} else if (results.size() == 1) {
-			resultsFlux = results.get(0);
-		} else {
-			resultsFlux = Flux.merge(results);
-		}
-		if (luceneResources) {
-			return new LuceneLLSearchResultShard(resultsFlux, count, (List<LuceneCloseable>) resources);
-		} else {
-			return new ResourcesLLSearchResultShard(resultsFlux, count, (List<SafeCloseable>) resources);
-		}
-	}
-
-}
@@ -1,11 +1,12 @@
 package it.cavallium.dbengine.client;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.util.Send;
+import it.cavallium.buffer.Buf;
+import it.cavallium.buffer.BufDataInput;
+import it.cavallium.buffer.BufDataOutput;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.Serializer;
 import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
 public class MappedSerializer<A, B> implements Serializer<B> {
 
@@ -18,14 +19,24 @@ public class MappedSerializer<A, B> implements Serializer<B> {
 		this.keyMapper = keyMapper;
 	}
 
+	public static <A, B> Serializer<B> of(Serializer<A> ser,
+			Mapper<A, B> keyMapper) {
+		if (keyMapper.getClass() == NoMapper.class) {
+			//noinspection unchecked
+			return (Serializer<B>) ser;
+		} else {
+			return new MappedSerializer<>(ser, keyMapper);
+		}
+	}
+
 	@Override
-	public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
-		return keyMapper.map(serializer.deserialize(serialized));
+	public @NotNull B deserialize(@NotNull BufDataInput in) throws SerializationException {
+		return keyMapper.map(serializer.deserialize(in));
 	}
 
 	@Override
-	public void serialize(@NotNull B deserialized, Buffer output) throws SerializationException {
-		serializer.serialize(keyMapper.unmap(deserialized), output);
+	public void serialize(@NotNull B deserialized, BufDataOutput out) throws SerializationException {
+		serializer.serialize(keyMapper.unmap(deserialized), out);
 	}
 
 	@Override
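For illustration only: a sketch of the new MappedSerializer.of factory, which skips the wrapper entirely when the mapper is a NoMapper. The base serializer below is left abstract.

import it.cavallium.dbengine.client.MappedSerializer;
import it.cavallium.dbengine.client.NoMapper;
import it.cavallium.dbengine.database.serialization.Serializer;

class SerializerExample {
	static Serializer<String> wrap(Serializer<String> base) {
		// With a NoMapper the factory returns base itself instead of allocating
		// a MappedSerializer around it.
		return MappedSerializer.of(base, new NoMapper<>());
	}
}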
@@ -1,11 +1,11 @@
 package it.cavallium.dbengine.client;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.util.Send;
+import it.cavallium.buffer.Buf;
+import it.cavallium.buffer.BufDataInput;
+import it.cavallium.buffer.BufDataOutput;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
 
 public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B> {
 
@@ -18,14 +18,24 @@ public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B> {
 		this.keyMapper = keyMapper;
 	}
 
+	public static <A, B> SerializerFixedBinaryLength<B> of(SerializerFixedBinaryLength<A> fixedLengthSerializer,
+			Mapper<A, B> keyMapper) {
+		if (keyMapper.getClass() == NoMapper.class) {
+			//noinspection unchecked
+			return (SerializerFixedBinaryLength<B>) fixedLengthSerializer;
+		} else {
+			return new MappedSerializerFixedLength<>(fixedLengthSerializer, keyMapper);
+		}
+	}
+
 	@Override
-	public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
-		return keyMapper.map(fixedLengthSerializer.deserialize(serialized));
+	public @NotNull B deserialize(@NotNull BufDataInput in) throws SerializationException {
+		return keyMapper.map(fixedLengthSerializer.deserialize(in));
 	}
 
 	@Override
-	public void serialize(@NotNull B deserialized, Buffer output) throws SerializationException {
-		fixedLengthSerializer.serialize(keyMapper.unmap(deserialized), output);
+	public void serialize(@NotNull B deserialized, BufDataOutput out) throws SerializationException {
+		fixedLengthSerializer.serialize(keyMapper.unmap(deserialized), out);
 	}
 
 	@Override
@@ -1,5 +1,5 @@
 package it.cavallium.dbengine.client;
 
 public record MemoryStats(long estimateTableReadersMem, long sizeAllMemTables,
-		long curSizeAllMemTables, long estimateNumKeys, long blockCacheUsage,
-		long blockCachePinnedUsage) {}
+		long curSizeAllMemTables, long estimateNumKeys, long blockCacheCapacity,
+		long blockCacheUsage, long blockCachePinnedUsage, long liveVersions) {}
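For illustration only: reading the fields added to MemoryStats; the accessor producing the record instance is hypothetical.

import it.cavallium.dbengine.client.MemoryStats;

class MemoryStatsExample {
	// Hypothetical reporting helper over the extended record.
	static String report(MemoryStats stats) {
		long cacheFree = stats.blockCacheCapacity() - stats.blockCacheUsage();
		return "block cache free: " + cacheFree
				+ ", live versions: " + stats.liveVersions();
	}
}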
@@ -1,7 +1,5 @@
 package it.cavallium.dbengine.client;
 
-import it.cavallium.dbengine.client.Mapper;
-
 public class NoMapper<T> implements Mapper<T, T> {
 
 	@Override
@@ -0,0 +1,17 @@
+package it.cavallium.dbengine.client;
+
+import it.cavallium.buffer.Buf;
+import it.cavallium.dbengine.client.SSTDumpProgress.SSTBlockFail;
+import it.cavallium.dbengine.client.SSTDumpProgress.SSTBlockKeyValue;
+import it.cavallium.dbengine.client.SSTProgress.SSTOk;
+import it.cavallium.dbengine.client.SSTProgress.SSTProgressReport;
+import it.cavallium.dbengine.client.SSTProgress.SSTStart;
+import org.rocksdb.RocksDBException;
+
+public sealed interface SSTDumpProgress extends SSTProgress permits SSTBlockFail, SSTBlockKeyValue, SSTOk,
+		SSTProgressReport, SSTStart {
+
+	record SSTBlockKeyValue(Buf rawKey, Buf rawValue) implements SSTDumpProgress {}
+
+	record SSTBlockFail(RocksDBException ex) implements SSTDumpProgress {}
+}
src/main/java/it/cavallium/dbengine/client/SSTProgress.java (new file, 21 lines)
@@ -0,0 +1,21 @@
+package it.cavallium.dbengine.client;
+
+import it.cavallium.dbengine.database.disk.RocksDBFile.IterationMetadata;
+import it.cavallium.dbengine.rpc.current.data.Column;
+import org.jetbrains.annotations.Nullable;
+
+public interface SSTProgress {
+
+	record SSTStart(IterationMetadata metadata) implements SSTProgress, SSTVerificationProgress, SSTDumpProgress {}
+
+	record SSTOk(long scannedCount) implements SSTProgress, SSTVerificationProgress, SSTDumpProgress {}
+
+	record SSTProgressReport(long fileScanned, long fileTotal) implements SSTProgress, SSTVerificationProgress,
+			SSTDumpProgress {
+
+		public double getFileProgress() {
+			if (fileTotal == 0) return 0d;
+			return fileScanned / (double) fileTotal;
+		}
+	}
+}
@@ -0,0 +1,17 @@
+package it.cavallium.dbengine.client;
+
+import it.cavallium.buffer.Buf;
+import it.cavallium.dbengine.client.DbProgress.DbSSTProgress;
+import it.cavallium.dbengine.client.SSTProgress.SSTOk;
+import it.cavallium.dbengine.client.SSTProgress.SSTProgressReport;
+import it.cavallium.dbengine.client.SSTProgress.SSTStart;
+import it.cavallium.dbengine.client.SSTVerificationProgress.SSTBlockBad;
+import it.cavallium.dbengine.rpc.current.data.Column;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Stream;
+
+public sealed interface SSTVerificationProgress extends SSTProgress permits SSTOk, SSTProgressReport, SSTStart,
+		SSTBlockBad {
+
+	record SSTBlockBad(Buf rawKey, Throwable ex) implements SSTVerificationProgress {}
+}
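For illustration only: because the new progress interfaces are sealed, a consumer can switch over the event subtypes exhaustively (Java 21 pattern matching assumed; the event source is hypothetical).

import it.cavallium.dbengine.client.SSTProgress.SSTOk;
import it.cavallium.dbengine.client.SSTProgress.SSTProgressReport;
import it.cavallium.dbengine.client.SSTProgress.SSTStart;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.client.SSTVerificationProgress.SSTBlockBad;

class SstProgressExample {
	// The sealed hierarchy makes this switch exhaustive without a default branch.
	static void onEvent(SSTVerificationProgress event) {
		switch (event) {
			case SSTStart start -> System.out.println("scanning " + start.metadata());
			case SSTProgressReport report ->
					System.out.printf("%.1f%%%n", report.getFileProgress() * 100);
			case SSTBlockBad bad -> System.err.println("corrupted block: " + bad.ex());
			case SSTOk ok -> System.out.println("done, " + ok.scannedCount() + " entries");
		}
	}
}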
@@ -1,6 +1,6 @@
 package it.cavallium.dbengine.client;
 
-public class SnapshotException extends RuntimeException {
+public class SnapshotException extends IllegalStateException {
 
 	public SnapshotException(Throwable ex) {
 		super(ex);
@@ -1,90 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import java.util.concurrent.TimeUnit;
-import org.jetbrains.annotations.NotNull;
-import reactor.core.Disposable;
-import reactor.core.scheduler.Scheduler;
-
-public class UninterruptibleScheduler {
-
-	public static Scheduler uninterruptibleScheduler(Scheduler scheduler) {
-		return new Scheduler() {
-			@Override
-			public @NotNull Disposable schedule(@NotNull Runnable task) {
-				scheduler.schedule(task);
-				return () -> {};
-			}
-
-			@Override
-			public @NotNull Disposable schedule(@NotNull Runnable task, long delay, @NotNull TimeUnit unit) {
-				scheduler.schedule(task, delay, unit);
-				return () -> {};
-			}
-
-			@Override
-			public @NotNull Disposable schedulePeriodically(@NotNull Runnable task,
-					long initialDelay,
-					long period,
-					@NotNull TimeUnit unit) {
-				scheduler.schedulePeriodically(task, initialDelay, period, unit);
-				return () -> {};
-			}
-
-			@Override
-			public boolean isDisposed() {
-				return scheduler.isDisposed();
-			}
-
-			@Override
-			public void dispose() {
-				scheduler.dispose();
-			}
-
-			@Override
-			public void start() {
-				scheduler.start();
-			}
-
-			@Override
-			public long now(@NotNull TimeUnit unit) {
-				return Scheduler.super.now(unit);
-			}
-
-			@Override
-			public @NotNull Worker createWorker() {
-				var worker = scheduler.createWorker();
-				return new Worker() {
-					@Override
-					public @NotNull Disposable schedule(@NotNull Runnable task) {
-						worker.schedule(task);
-						return () -> {};
-					}
-
-					@Override
-					public void dispose() {
-					}
-
-					@Override
-					public boolean isDisposed() {
-						return worker.isDisposed();
-					}
-
-					@Override
-					public @NotNull Disposable schedule(@NotNull Runnable task, long delay, @NotNull TimeUnit unit) {
-						worker.schedule(task, delay, unit);
-						return () -> {};
-					}
-
-					@Override
-					public @NotNull Disposable schedulePeriodically(@NotNull Runnable task,
-							long initialDelay,
-							long period,
-							@NotNull TimeUnit unit) {
-						worker.schedulePeriodically(task, initialDelay, period, unit);
-						return () -> {};
-					}
-				};
-			}
-		};
-	}
-}
@@ -1,51 +0,0 @@
-package it.cavallium.dbengine.client.query;
-
-import io.soabase.recordbuilder.core.RecordBuilder;
-import it.cavallium.data.generator.nativedata.Nullablefloat;
-import it.cavallium.dbengine.client.CompositeSnapshot;
-import it.cavallium.dbengine.client.Sort;
-import it.cavallium.dbengine.client.query.current.data.NoSort;
-import it.cavallium.dbengine.client.query.current.data.Query;
-import it.cavallium.dbengine.client.query.current.data.QueryParams;
-import it.cavallium.dbengine.client.query.current.data.QueryParamsBuilder;
-import java.time.Duration;
-import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
-
-@RecordBuilder
-public record ClientQueryParams(@Nullable CompositeSnapshot snapshot,
-		@NotNull Query query,
-		long offset,
-		long limit,
-		@Nullable Sort sort,
-		boolean computePreciseHitsCount,
-		@NotNull Duration timeout) {
-
-	public static ClientQueryParamsBuilder builder() {
-		return ClientQueryParamsBuilder
-				.builder()
-				.snapshot(null)
-				.offset(0)
-				.limit(Long.MAX_VALUE)
-				.sort(null)
-				// Default timeout: 4 minutes
-				.timeout(Duration.ofMinutes(4))
-				.computePreciseHitsCount(true);
-	}
-
-	public boolean isSorted() {
-		return sort != null && sort.isSorted();
-	}
-
-	public QueryParams toQueryParams() {
-		return QueryParamsBuilder
-				.builder()
-				.query(query())
-				.sort(sort != null ? sort.querySort() : new NoSort())
-				.offset(offset())
-				.limit(limit())
-				.computePreciseHitsCount(computePreciseHitsCount())
-				.timeoutMilliseconds(timeout.toMillis())
-				.build();
-	}
-}
@@ -1,12 +0,0 @@
-package it.cavallium.dbengine.client.query;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.core.KeywordTokenizer;
-
-public class NoOpAnalyzer extends Analyzer {
-
-	@Override
-	protected TokenStreamComponents createComponents(String fieldName) {
-		return new TokenStreamComponents(new KeywordTokenizer());
-	}
-}
@@ -1,88 +0,0 @@
-package it.cavallium.dbengine.client.query;
-
-import com.squareup.moshi.JsonAdapter;
-import it.cavallium.dbengine.client.IntOpenHashSetJsonAdapter;
-import it.cavallium.dbengine.client.query.current.CurrentVersion;
-import it.cavallium.dbengine.client.query.current.IBaseType;
-import it.cavallium.dbengine.client.query.current.IType;
-import it.unimi.dsi.fastutil.booleans.BooleanList;
-import it.unimi.dsi.fastutil.bytes.ByteList;
-import it.unimi.dsi.fastutil.chars.CharList;
-import it.unimi.dsi.fastutil.ints.IntList;
-import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
-import it.unimi.dsi.fastutil.longs.LongList;
-import it.unimi.dsi.fastutil.objects.Object2ObjectMap;
-import it.unimi.dsi.fastutil.objects.Object2ObjectMaps;
-import it.unimi.dsi.fastutil.objects.Object2ObjectOpenHashMap;
-import it.unimi.dsi.fastutil.shorts.ShortList;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import it.cavallium.dbengine.utils.BooleanListJsonAdapter;
-import it.cavallium.dbengine.utils.ByteListJsonAdapter;
-import it.cavallium.dbengine.utils.CharListJsonAdapter;
-import it.cavallium.dbengine.utils.IntListJsonAdapter;
-import it.cavallium.dbengine.utils.LongListJsonAdapter;
-import it.cavallium.dbengine.utils.MoshiPolymorphic;
-import it.cavallium.dbengine.utils.ShortListJsonAdapter;
-
-public class QueryMoshi extends MoshiPolymorphic<IType> {
-
-	private final Set<Class<IType>> abstractClasses;
-	private final Set<Class<IType>> concreteClasses;
-	private final Map<Class<?>, JsonAdapter<?>> extraAdapters;
-
-	@SuppressWarnings({"unchecked", "RedundantCast", "rawtypes"})
-	public QueryMoshi() {
-		super(true, GetterStyle.RECORDS_GETTERS);
-		HashSet<Class<IType>> abstractClasses = new HashSet<>();
-		HashSet<Class<IType>> concreteClasses = new HashSet<>();
-
-		// Add all super types with their implementations
-		for (var superTypeClass : CurrentVersion.getSuperTypeClasses()) {
-			for (Class<? extends IBaseType> superTypeSubtypesClass : CurrentVersion.getSuperTypeSubtypesClasses(
-					superTypeClass)) {
-				concreteClasses.add((Class<IType>) (Class) superTypeSubtypesClass);
-			}
-			abstractClasses.add((Class<IType>) (Class) superTypeClass);
-		}
-
-		// Add IBaseType with all basic types
-		abstractClasses.add((Class<IType>) (Class) IBaseType.class);
-		for (BaseType BaseType : BaseType.values()) {
-			concreteClasses.add((Class<IType>) (Class) CurrentVersion.getClass(BaseType));
-		}
-
-		this.abstractClasses = abstractClasses;
-		this.concreteClasses = concreteClasses;
-		Object2ObjectMap<Class<?>, JsonAdapter<?>> extraAdapters = new Object2ObjectOpenHashMap<>();
-		extraAdapters.put(BooleanList.class, new BooleanListJsonAdapter());
-		extraAdapters.put(ByteList.class, new ByteListJsonAdapter());
-		extraAdapters.put(ShortList.class, new ShortListJsonAdapter());
-		extraAdapters.put(CharList.class, new CharListJsonAdapter());
-		extraAdapters.put(IntList.class, new IntListJsonAdapter());
-		extraAdapters.put(LongList.class, new LongListJsonAdapter());
-		extraAdapters.put(IntOpenHashSet.class, new IntOpenHashSetJsonAdapter());
-		this.extraAdapters = Object2ObjectMaps.unmodifiable(extraAdapters);
-	}
-
-	@Override
-	public Map<Class<?>, JsonAdapter<?>> getExtraAdapters() {
-		return extraAdapters;
-	}
-
-	@Override
-	protected Set<Class<IType>> getAbstractClasses() {
-		return abstractClasses;
-	}
-
-	@Override
-	protected Set<Class<IType>> getConcreteClasses() {
-		return concreteClasses;
-	}
-
-	@Override
-	protected boolean shouldIgnoreField(String fieldName) {
-		return fieldName.contains("$");
-	}
-}
@@ -1,5 +1,7 @@
 package it.cavallium.dbengine.client.query;
 
+import com.google.common.xml.XmlEscapers;
 import it.cavallium.dbengine.client.query.current.data.BooleanQuery;
 import it.cavallium.dbengine.client.query.current.data.BooleanQueryPart;
 import it.cavallium.dbengine.client.query.current.data.BoostQuery;
 import it.cavallium.dbengine.client.query.current.data.BoxedQuery;
@@ -19,7 +21,6 @@ import it.cavallium.dbengine.client.query.current.data.FloatPointExactQuery;
 import it.cavallium.dbengine.client.query.current.data.FloatPointRangeQuery;
 import it.cavallium.dbengine.client.query.current.data.FloatPointSetQuery;
 import it.cavallium.dbengine.client.query.current.data.FloatTermQuery;
-import it.cavallium.dbengine.client.query.current.data.IntNDPointExactQuery;
 import it.cavallium.dbengine.client.query.current.data.IntNDPointRangeQuery;
 import it.cavallium.dbengine.client.query.current.data.IntNDTermQuery;
 import it.cavallium.dbengine.client.query.current.data.IntPointExactQuery;
@@ -33,337 +34,421 @@ import it.cavallium.dbengine.client.query.current.data.LongPointExactQuery;
 import it.cavallium.dbengine.client.query.current.data.LongPointRangeQuery;
 import it.cavallium.dbengine.client.query.current.data.LongPointSetQuery;
 import it.cavallium.dbengine.client.query.current.data.LongTermQuery;
 import it.cavallium.dbengine.client.query.current.data.NumericSort;
 import it.cavallium.dbengine.client.query.current.data.OccurShould;
 import it.cavallium.dbengine.client.query.current.data.PhraseQuery;
 import it.cavallium.dbengine.client.query.current.data.PointConfig;
 import it.cavallium.dbengine.client.query.current.data.PointType;
 import it.cavallium.dbengine.client.query.current.data.SolrTextQuery;
 import it.cavallium.dbengine.client.query.current.data.SortedDocFieldExistsQuery;
 import it.cavallium.dbengine.client.query.current.data.SortedNumericDocValuesFieldSlowRangeQuery;
 import it.cavallium.dbengine.client.query.current.data.SynonymQuery;
 import it.cavallium.dbengine.client.query.current.data.TermAndBoost;
 import it.cavallium.dbengine.client.query.current.data.TermPosition;
 import it.cavallium.dbengine.client.query.current.data.TermQuery;
 import it.cavallium.dbengine.client.query.current.data.WildcardQuery;
 import it.cavallium.dbengine.lucene.RandomSortField;
 import java.text.DecimalFormat;
 import java.text.NumberFormat;
 import java.util.Map;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseFilter;
 import org.apache.lucene.analysis.StopFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
 import org.apache.lucene.analysis.en.PorterStemFilter;
 import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
 import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.FloatPoint;
 import org.apache.lucene.document.IntPoint;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
 import org.apache.lucene.queryparser.flexible.standard.config.PointsConfig;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery.Builder;
 import org.apache.lucene.search.DocValuesFieldExistsQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortField.Type;
 import org.apache.lucene.search.SortedNumericSortField;
 import java.text.BreakIterator;
 import java.util.Comparator;
 import java.util.Locale;
 import org.apache.commons.lang3.StringUtils;
 import org.jetbrains.annotations.Nullable;
 
 public class QueryParser {
 
-	public static Query toQuery(it.cavallium.dbengine.client.query.current.data.Query query, Analyzer analyzer) {
+	private static final String[] QUERY_STRING_FIND = {"\\", "\""};
+	private static final String[] QUERY_STRING_REPLACE = {"\\\\", "\\\""};
+
+	public static void toQueryXML(StringBuilder out,
+			it.cavallium.dbengine.client.query.current.data.Query query,
+			@Nullable Float boost) {
 		if (query == null) {
-			return null;
+			return;
 		}
 		switch (query.getBaseType$()) {
-			case StandardQuery:
+			case StandardQuery -> {
 				var standardQuery = (it.cavallium.dbengine.client.query.current.data.StandardQuery) query;
-
-				// Fix the analyzer
-				Map<String, Analyzer> customAnalyzers = standardQuery
-						.termFields()
-						.stream()
-						.collect(Collectors.toMap(Function.identity(), term -> new NoOpAnalyzer()));
-				analyzer = new PerFieldAnalyzerWrapper(analyzer, customAnalyzers);
-
-				var standardQueryParser = new StandardQueryParser(analyzer);
-
-				standardQueryParser.setPointsConfigMap(standardQuery
-						.pointsConfig()
-						.stream()
-						.collect(Collectors.toMap(
-								PointConfig::field,
-								pointConfig -> new PointsConfig(
-										toNumberFormat(pointConfig.data().numberFormat()),
-										toType(pointConfig.data().type())
-								)
-						)));
-				var defaultFields = standardQuery.defaultFields();
-				try {
-					Query parsed;
-					if (defaultFields.size() > 1) {
-						standardQueryParser.setMultiFields(defaultFields.toArray(String[]::new));
-						parsed = standardQueryParser.parse(standardQuery.query(), null);
-					} else if (defaultFields.size() == 1) {
-						parsed = standardQueryParser.parse(standardQuery.query(), defaultFields.get(0));
-					} else {
-						throw new IllegalStateException("Can't parse a standard query expression that has 0 default fields");
-					}
-					return parsed;
-				} catch (QueryNodeException e) {
-					throw new IllegalStateException("Can't parse query expression \"" + standardQuery.query() + "\"", e);
-				}
+				out.append("<UserQuery");
+				if (standardQuery.defaultFields().size() > 1) {
+					throw new UnsupportedOperationException("Maximum supported default fields count: 1");
+				}
+				if (boost != null) {
+					out.append(" boost=\"").append(boost).append("\"");
+				}
+				if (standardQuery.defaultFields().size() == 1) {
+					out
+							.append(" fieldName=\"")
+							.append(XmlEscapers.xmlAttributeEscaper().escape(standardQuery.defaultFields().get(0)))
+							.append("\"");
+				}
+				if (!standardQuery.termFields().isEmpty()) {
+					throw new UnsupportedOperationException("Term fields unsupported");
+				}
+				if (!standardQuery.pointsConfig().isEmpty()) {
+					throw new UnsupportedOperationException("Points config unsupported");
+				}
+				out.append(">");
+				out.append(XmlEscapers.xmlContentEscaper().escape(standardQuery.query()));
+				out.append("</UserQuery>\n");
+			}
-			case BooleanQuery:
+			case BooleanQuery -> {
 				var booleanQuery = (it.cavallium.dbengine.client.query.current.data.BooleanQuery) query;
-				var bq = new Builder();
-				for (BooleanQueryPart part : booleanQuery.parts()) {
-					Occur occur = switch (part.occur().getBaseType$()) {
-						case OccurFilter -> Occur.FILTER;
-						case OccurMust -> Occur.MUST;
-						case OccurShould -> Occur.SHOULD;
-						case OccurMustNot -> Occur.MUST_NOT;
-						default -> throw new IllegalStateException("Unexpected value: " + part.occur().getBaseType$());
-					};
-					bq.add(toQuery(part.query(), analyzer), occur);
-				}
-				bq.setMinimumNumberShouldMatch(booleanQuery.minShouldMatch());
-				return bq.build();
+				if (booleanQuery.parts().size() == 1
+						&& booleanQuery.parts().get(0).occur().getBaseType$() == BaseType.OccurMust) {
+					toQueryXML(out, booleanQuery.parts().get(0).query(), boost);
+				} else {
+					out.append("<BooleanQuery");
+					if (boost != null) {
+						out.append(" boost=\"").append(boost).append("\"");
+					}
+					out.append(" minimumNumberShouldMatch=\"").append(booleanQuery.minShouldMatch()).append("\"");
+					out.append(">\n");
+
+					for (BooleanQueryPart part : booleanQuery.parts()) {
+						out.append("<Clause");
+						out.append(" occurs=\"").append(switch (part.occur().getBaseType$()) {
+							case OccurFilter -> "filter";
+							case OccurMust -> "must";
+							case OccurShould -> "should";
+							case OccurMustNot -> "mustNot";
+							default -> throw new IllegalStateException("Unexpected value: " + part.occur().getBaseType$());
+						}).append("\"");
+						out.append(">\n");
+						toQueryXML(out, part.query(), null);
+						out.append("</Clause>\n");
+					}
+					out.append("</BooleanQuery>\n");
+				}
+			}
-			case IntPointExactQuery:
+			case IntPointExactQuery -> {
 				var intPointExactQuery = (IntPointExactQuery) query;
-				return IntPoint.newExactQuery(intPointExactQuery.field(), intPointExactQuery.value());
+				out.append("<PointRangeQuery type=\"int\"");
+				if (boost != null) {
+					out.append(" boost=\"").append(boost).append("\"");
+				}
+				out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(intPointExactQuery.field())).append("\"");
+				out.append(" lowerTerm=\"").append(intPointExactQuery.value()).append("\"");
+				out.append(" upperTerm=\"").append(intPointExactQuery.value()).append("\"");
+				out.append(" />\n");
+			}
-			case IntNDPointExactQuery:
-				var intndPointExactQuery = (IntNDPointExactQuery) query;
-				var intndValues = intndPointExactQuery.value().toIntArray();
-				return IntPoint.newRangeQuery(intndPointExactQuery.field(), intndValues, intndValues);
+			case IntNDPointExactQuery -> {
+				var intPointExactQuery = (IntPointExactQuery) query;
+				throw new UnsupportedOperationException("N-dimensional point queries are not supported");
+			}
-			case LongPointExactQuery:
+			case LongPointExactQuery -> {
 				var longPointExactQuery = (LongPointExactQuery) query;
-				return LongPoint.newExactQuery(longPointExactQuery.field(), longPointExactQuery.value());
+				out.append("<PointRangeQuery type=\"long\"");
+				if (boost != null) {
+					out.append(" boost=\"").append(boost).append("\"");
+				}
+				out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(longPointExactQuery.field())).append("\"");
+				out.append(" lowerTerm=\"").append(longPointExactQuery.value()).append("\"");
+				out.append(" upperTerm=\"").append(longPointExactQuery.value()).append("\"");
+				out.append(" />\n");
+			}
-			case FloatPointExactQuery:
+			case FloatPointExactQuery -> {
 				var floatPointExactQuery = (FloatPointExactQuery) query;
-				return FloatPoint.newExactQuery(floatPointExactQuery.field(), floatPointExactQuery.value());
+				out.append("<PointRangeQuery type=\"float\"");
+				if (boost != null) {
+					out.append(" boost=\"").append(boost).append("\"");
+				}
+				out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(floatPointExactQuery.field())).append("\"");
+				out.append(" lowerTerm=\"").append(floatPointExactQuery.value()).append("\"");
+				out.append(" upperTerm=\"").append(floatPointExactQuery.value()).append("\"");
+				out.append(" />\n");
+			}
-			case DoublePointExactQuery:
+			case DoublePointExactQuery -> {
 				var doublePointExactQuery = (DoublePointExactQuery) query;
-				return DoublePoint.newExactQuery(doublePointExactQuery.field(), doublePointExactQuery.value());
+				out.append("<PointRangeQuery type=\"double\"");
+				if (boost != null) {
+					out.append(" boost=\"").append(boost).append("\"");
+				}
+				out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(doublePointExactQuery.field())).append("\"");
+				out.append(" lowerTerm=\"").append(doublePointExactQuery.value()).append("\"");
+				out.append(" upperTerm=\"").append(doublePointExactQuery.value()).append("\"");
+				out.append(" />\n");
+			}
-			case LongNDPointExactQuery:
-				var longndPointExactQuery = (LongNDPointExactQuery) query;
-				var longndValues = longndPointExactQuery.value().toLongArray();
-				return LongPoint.newRangeQuery(longndPointExactQuery.field(), longndValues, longndValues);
+			case LongNDPointExactQuery -> {
+				throw new UnsupportedOperationException("N-dimensional point queries are not supported");
+			}
-			case FloatNDPointExactQuery:
-				var floatndPointExactQuery = (FloatNDPointExactQuery) query;
-				var floatndValues = floatndPointExactQuery.value().toFloatArray();
-				return FloatPoint.newRangeQuery(floatndPointExactQuery.field(), floatndValues, floatndValues);
+			case FloatNDPointExactQuery -> {
+				throw new UnsupportedOperationException("N-dimensional point queries are not supported");
+			}
-			case DoubleNDPointExactQuery:
-				var doublendPointExactQuery = (DoubleNDPointExactQuery) query;
-				var doublendValues = doublendPointExactQuery.value().toDoubleArray();
-				return DoublePoint.newRangeQuery(doublendPointExactQuery.field(), doublendValues, doublendValues);
+			case DoubleNDPointExactQuery -> {
+				throw new UnsupportedOperationException("N-dimensional point queries are not supported");
+			}
-			case IntPointSetQuery:
+			case IntPointSetQuery -> {
 				var intPointSetQuery = (IntPointSetQuery) query;
-				return IntPoint.newSetQuery(intPointSetQuery.field(), intPointSetQuery.values().toIntArray());
+				// Polyfill
+				toQueryXML(out, BooleanQuery.of(intPointSetQuery.values().intStream()
+						.mapToObj(val -> IntPointExactQuery.of(intPointSetQuery.field(), val))
+						.map(q -> BooleanQueryPart.of(q, OccurShould.of()))
+						.toList(), 1), boost);
+			}
-			case LongPointSetQuery:
+			case LongPointSetQuery -> {
 				var longPointSetQuery = (LongPointSetQuery) query;
-				return LongPoint.newSetQuery(longPointSetQuery.field(), longPointSetQuery.values().toLongArray());
+				// Polyfill
+				toQueryXML(out, BooleanQuery.of(longPointSetQuery.values().longStream()
+						.mapToObj(val -> LongPointExactQuery.of(longPointSetQuery.field(), val))
+						.map(q -> BooleanQueryPart.of(q, OccurShould.of()))
+						.toList(), 1), boost);
+			}
-			case FloatPointSetQuery:
+			case FloatPointSetQuery -> {
 				var floatPointSetQuery = (FloatPointSetQuery) query;
-				return FloatPoint.newSetQuery(floatPointSetQuery.field(), floatPointSetQuery.values().toFloatArray());
+				// Polyfill
+				toQueryXML(out, BooleanQuery.of(floatPointSetQuery.values().stream()
+						.map(val -> FloatPointExactQuery.of(floatPointSetQuery.field(), val))
+						.map(q -> BooleanQueryPart.of(q, OccurShould.of()))
+						.toList(), 1), boost);
+			}
-			case DoublePointSetQuery:
+			case DoublePointSetQuery -> {
 				var doublePointSetQuery = (DoublePointSetQuery) query;
-				return DoublePoint.newSetQuery(doublePointSetQuery.field(), doublePointSetQuery.values().toDoubleArray());
-			case TermQuery:
+				// Polyfill
+				toQueryXML(out, BooleanQuery.of(doublePointSetQuery.values().doubleStream()
|
||||
.mapToObj(val -> DoublePointExactQuery.of(doublePointSetQuery.field(), val))
|
||||
.map(q -> BooleanQueryPart.of(q, OccurShould.of()))
|
||||
.toList(), 1), boost);
|
||||
}
|
||||
case TermQuery -> {
|
||||
var termQuery = (TermQuery) query;
|
||||
return new org.apache.lucene.search.TermQuery(toTerm(termQuery.term()));
|
||||
case IntTermQuery:
|
||||
out
|
||||
.append("<TermQuery");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out
|
||||
.append(" fieldName=\"")
|
||||
.append(XmlEscapers.xmlAttributeEscaper().escape(termQuery.term().field()))
|
||||
.append("\"");
|
||||
out.append(">");
|
||||
out.append(XmlEscapers.xmlContentEscaper().escape(termQuery.term().value()));
|
||||
out.append("</TermQuery>\n");
|
||||
}
|
||||
case IntTermQuery -> {
|
||||
var intTermQuery = (IntTermQuery) query;
|
||||
return new org.apache.lucene.search.TermQuery(new Term(intTermQuery.field(),
|
||||
IntPoint.pack(intTermQuery.value())
|
||||
));
|
||||
case IntNDTermQuery:
|
||||
throw new UnsupportedOperationException("Non-string term fields are not supported");
|
||||
}
|
||||
case IntNDTermQuery -> {
|
||||
var intNDTermQuery = (IntNDTermQuery) query;
|
||||
return new org.apache.lucene.search.TermQuery(new Term(intNDTermQuery.field(),
|
||||
IntPoint.pack(intNDTermQuery.value().toIntArray())
|
||||
));
|
||||
case LongTermQuery:
|
||||
throw new UnsupportedOperationException("Non-string term fields are not supported");
|
||||
}
|
||||
case LongTermQuery -> {
|
||||
var longTermQuery = (LongTermQuery) query;
|
||||
return new org.apache.lucene.search.TermQuery(new Term(longTermQuery.field(),
|
||||
LongPoint.pack(longTermQuery.value())
|
||||
));
|
||||
case LongNDTermQuery:
|
||||
throw new UnsupportedOperationException("Non-string term fields are not supported");
|
||||
}
|
||||
case LongNDTermQuery -> {
|
||||
var longNDTermQuery = (LongNDTermQuery) query;
|
||||
return new org.apache.lucene.search.TermQuery(new Term(longNDTermQuery.field(),
|
||||
LongPoint.pack(longNDTermQuery.value().toLongArray())
|
||||
));
|
||||
case FloatTermQuery:
|
||||
throw new UnsupportedOperationException("Non-string term fields are not supported");
|
||||
}
|
||||
case FloatTermQuery -> {
|
||||
var floatTermQuery = (FloatTermQuery) query;
|
||||
return new org.apache.lucene.search.TermQuery(new Term(floatTermQuery.field(),
|
||||
FloatPoint.pack(floatTermQuery.value())
|
||||
));
|
||||
case FloatNDTermQuery:
|
||||
throw new UnsupportedOperationException("Non-string term fields are not supported");
|
||||
}
|
||||
case FloatNDTermQuery -> {
|
||||
var floatNDTermQuery = (FloatNDTermQuery) query;
|
||||
return new org.apache.lucene.search.TermQuery(new Term(floatNDTermQuery.field(),
|
||||
FloatPoint.pack(floatNDTermQuery.value().toFloatArray())
|
||||
));
|
||||
case DoubleTermQuery:
|
||||
throw new UnsupportedOperationException("Non-string term fields are not supported");
|
||||
}
|
||||
case DoubleTermQuery -> {
|
||||
var doubleTermQuery = (DoubleTermQuery) query;
|
||||
return new org.apache.lucene.search.TermQuery(new Term(doubleTermQuery.field(),
|
||||
DoublePoint.pack(doubleTermQuery.value())
|
||||
));
|
||||
case DoubleNDTermQuery:
|
||||
throw new UnsupportedOperationException("Non-string term fields are not supported");
|
||||
}
|
||||
case DoubleNDTermQuery -> {
|
||||
var doubleNDTermQuery = (DoubleNDTermQuery) query;
|
||||
return new org.apache.lucene.search.TermQuery(new Term(doubleNDTermQuery.field(),
|
||||
DoublePoint.pack(doubleNDTermQuery.value().toDoubleArray())
|
||||
));
|
||||
case FieldExistsQuery:
|
||||
throw new UnsupportedOperationException("Non-string term fields are not supported");
|
||||
}
|
||||
case FieldExistsQuery -> {
|
||||
var fieldExistQuery = (FieldExistsQuery) query;
|
||||
return new org.apache.lucene.search.FieldExistsQuery(fieldExistQuery.field());
|
||||
case BoostQuery:
|
||||
out.append("<UserQuery");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out.append(">");
|
||||
ensureValidField(fieldExistQuery.field());
|
||||
out.append(fieldExistQuery.field());
|
||||
out.append(":[* TO *]");
|
||||
out.append("</UserQuery>\n");
|
||||
}
|
||||
case SolrTextQuery -> {
|
||||
var solrTextQuery = (SolrTextQuery) query;
|
||||
out.append("<UserQuery");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out.append(">");
|
||||
ensureValidField(solrTextQuery.field());
|
||||
out.append(solrTextQuery.field());
|
||||
out.append(":");
|
||||
out.append("\"").append(XmlEscapers.xmlContentEscaper().escape(escapeQueryStringValue(solrTextQuery.phrase()))).append("\"");
|
||||
if (solrTextQuery.slop() > 0 && hasMoreThanOneWord(solrTextQuery.phrase())) {
|
||||
out.append("~").append(solrTextQuery.slop());
|
||||
}
|
||||
out.append("</UserQuery>\n");
|
||||
}
|
||||
case BoostQuery -> {
|
||||
var boostQuery = (BoostQuery) query;
|
||||
return new org.apache.lucene.search.BoostQuery(toQuery(boostQuery.query(), analyzer), boostQuery.scoreBoost());
|
||||
case ConstantScoreQuery:
|
||||
toQueryXML(out, boostQuery.query(), boostQuery.scoreBoost());
|
||||
}
|
||||
case ConstantScoreQuery -> {
|
||||
var constantScoreQuery = (ConstantScoreQuery) query;
|
||||
return new org.apache.lucene.search.ConstantScoreQuery(toQuery(constantScoreQuery.query(), analyzer));
|
||||
case BoxedQuery:
|
||||
return toQuery(((BoxedQuery) query).query(), analyzer);
|
||||
case FuzzyQuery:
|
||||
var fuzzyQuery = (it.cavallium.dbengine.client.query.current.data.FuzzyQuery) query;
|
||||
return new FuzzyQuery(toTerm(fuzzyQuery.term()),
|
||||
fuzzyQuery.maxEdits(),
|
||||
fuzzyQuery.prefixLength(),
|
||||
fuzzyQuery.maxExpansions(),
|
||||
fuzzyQuery.transpositions()
|
||||
);
|
||||
case IntPointRangeQuery:
|
||||
out.append("<ConstantScoreQuery");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out.append(">\n");
|
||||
toQueryXML(out, query, null);
|
||||
out.append("</ConstantScoreQuery>\n");
|
||||
}
|
||||
case BoxedQuery -> {
|
||||
toQueryXML(out, ((BoxedQuery) query).query(), boost);
|
||||
}
|
||||
case FuzzyQuery -> {
|
||||
throw new UnsupportedOperationException("Fuzzy query is not supported, use span queries");
|
||||
}
|
||||
case IntPointRangeQuery -> {
|
||||
var intPointRangeQuery = (IntPointRangeQuery) query;
|
||||
return IntPoint.newRangeQuery(intPointRangeQuery.field(), intPointRangeQuery.min(), intPointRangeQuery.max());
|
||||
case IntNDPointRangeQuery:
|
||||
out.append("<PointRangeQuery type=\"int\"");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(intPointRangeQuery.field())).append("\"");
|
||||
out.append(" lowerTerm=\"").append(intPointRangeQuery.min()).append("\"");
|
||||
out.append(" upperTerm=\"").append(intPointRangeQuery.max()).append("\"");
|
||||
out.append(" />\n");
|
||||
}
|
||||
case IntNDPointRangeQuery -> {
|
||||
var intndPointRangeQuery = (IntNDPointRangeQuery) query;
|
||||
return IntPoint.newRangeQuery(intndPointRangeQuery.field(),
|
||||
intndPointRangeQuery.min().toIntArray(),
|
||||
intndPointRangeQuery.max().toIntArray()
|
||||
);
|
||||
case LongPointRangeQuery:
|
||||
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
|
||||
}
|
||||
case LongPointRangeQuery -> {
|
||||
var longPointRangeQuery = (LongPointRangeQuery) query;
|
||||
return LongPoint.newRangeQuery(longPointRangeQuery.field(),
|
||||
longPointRangeQuery.min(),
|
||||
longPointRangeQuery.max()
|
||||
);
|
||||
case FloatPointRangeQuery:
|
||||
out.append("<PointRangeQuery type=\"long\"");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(longPointRangeQuery.field())).append("\"");
|
||||
out.append(" lowerTerm=\"").append(longPointRangeQuery.min()).append("\"");
|
||||
out.append(" upperTerm=\"").append(longPointRangeQuery.max()).append("\"");
|
||||
out.append(" />\n");
|
||||
}
|
||||
case FloatPointRangeQuery -> {
|
||||
var floatPointRangeQuery = (FloatPointRangeQuery) query;
|
||||
return FloatPoint.newRangeQuery(floatPointRangeQuery.field(),
|
||||
floatPointRangeQuery.min(),
|
||||
floatPointRangeQuery.max()
|
||||
);
|
||||
case DoublePointRangeQuery:
|
||||
out.append("<PointRangeQuery type=\"float\"");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(floatPointRangeQuery.field())).append("\"");
|
||||
out.append(" lowerTerm=\"").append(floatPointRangeQuery.min()).append("\"");
|
||||
out.append(" upperTerm=\"").append(floatPointRangeQuery.max()).append("\"");
|
||||
out.append(" />\n");
|
||||
}
|
||||
case DoublePointRangeQuery -> {
|
||||
var doublePointRangeQuery = (DoublePointRangeQuery) query;
|
||||
return DoublePoint.newRangeQuery(doublePointRangeQuery.field(),
|
||||
doublePointRangeQuery.min(),
|
||||
doublePointRangeQuery.max()
|
||||
);
|
||||
case LongNDPointRangeQuery:
|
||||
out.append("<PointRangeQuery type=\"double\"");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out.append(" fieldName=\"").append(XmlEscapers.xmlAttributeEscaper().escape(doublePointRangeQuery.field())).append("\"");
|
||||
out.append(" lowerTerm=\"").append(doublePointRangeQuery.min()).append("\"");
|
||||
out.append(" upperTerm=\"").append(doublePointRangeQuery.max()).append("\"");
|
||||
out.append(" />\n");
|
||||
}
|
||||
case LongNDPointRangeQuery -> {
|
||||
var longndPointRangeQuery = (LongNDPointRangeQuery) query;
|
||||
return LongPoint.newRangeQuery(longndPointRangeQuery.field(),
|
||||
longndPointRangeQuery.min().toLongArray(),
|
||||
longndPointRangeQuery.max().toLongArray()
|
||||
);
|
||||
case FloatNDPointRangeQuery:
|
||||
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
|
||||
}
|
||||
case FloatNDPointRangeQuery -> {
|
||||
var floatndPointRangeQuery = (FloatNDPointRangeQuery) query;
|
||||
return FloatPoint.newRangeQuery(floatndPointRangeQuery.field(),
|
||||
floatndPointRangeQuery.min().toFloatArray(),
|
||||
floatndPointRangeQuery.max().toFloatArray()
|
||||
);
|
||||
case DoubleNDPointRangeQuery:
|
||||
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
|
||||
}
|
||||
case DoubleNDPointRangeQuery -> {
|
||||
var doublendPointRangeQuery = (DoubleNDPointRangeQuery) query;
|
||||
return DoublePoint.newRangeQuery(doublendPointRangeQuery.field(),
|
||||
doublendPointRangeQuery.min().toDoubleArray(),
|
||||
doublendPointRangeQuery.max().toDoubleArray()
|
||||
);
|
||||
case MatchAllDocsQuery:
|
||||
return new MatchAllDocsQuery();
|
||||
case MatchNoDocsQuery:
|
||||
return new MatchNoDocsQuery();
|
||||
case PhraseQuery:
|
||||
throw new UnsupportedOperationException("N-dimensional point queries are not supported");
|
||||
}
|
||||
case MatchAllDocsQuery -> {
|
||||
out.append("<UserQuery");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out.append(">");
|
||||
out.append("*:*");
|
||||
out.append("</UserQuery>\n");
|
||||
}
|
||||
case MatchNoDocsQuery -> {
|
||||
out.append("<UserQuery");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
out.append(">");
|
||||
//todo: check if it's correct
|
||||
out.append("!*:*");
|
||||
out.append("</UserQuery>\n");
|
||||
}
|
||||
case PhraseQuery -> {
|
||||
//todo: check if it's correct
|
||||
|
||||
var phraseQuery = (PhraseQuery) query;
|
||||
var pqb = new org.apache.lucene.search.PhraseQuery.Builder();
|
||||
for (TermPosition phrase : phraseQuery.phrase()) {
|
||||
pqb.add(toTerm(phrase.term()), phrase.position());
|
||||
out.append("<SpanNear");
|
||||
if (boost != null) {
|
||||
out.append(" boost=\"").append(boost).append("\"");
|
||||
}
|
||||
pqb.setSlop(phraseQuery.slop());
|
||||
return pqb.build();
|
||||
case SortedDocFieldExistsQuery:
|
||||
out.append(" inOrder=\"true\"");
|
||||
out.append(">\n");
|
||||
phraseQuery.phrase().stream().sorted(Comparator.comparingInt(TermPosition::position)).forEach(term -> {
|
||||
out
|
||||
.append("<SpanTerm fieldName=\"")
|
||||
.append(XmlEscapers.xmlAttributeEscaper().escape(term.term().field()))
|
||||
.append("\">")
|
||||
.append(XmlEscapers.xmlContentEscaper().escape(term.term().value()))
|
||||
.append("</SpanTerm>\n");
|
||||
});
|
||||
out.append("</SpanNear>\n");
|
||||
}
|
||||
case SortedDocFieldExistsQuery -> {
|
||||
var sortedDocFieldExistsQuery = (SortedDocFieldExistsQuery) query;
|
||||
return new DocValuesFieldExistsQuery(sortedDocFieldExistsQuery.field());
|
||||
case SynonymQuery:
|
||||
throw new UnsupportedOperationException("Field existence query is not supported");
|
||||
}
|
||||
case SynonymQuery -> {
|
||||
var synonymQuery = (SynonymQuery) query;
|
||||
var sqb = new org.apache.lucene.search.SynonymQuery.Builder(synonymQuery.field());
|
||||
for (TermAndBoost part : synonymQuery.parts()) {
|
||||
sqb.addTerm(toTerm(part.term()), part.boost());
|
||||
}
|
||||
return sqb.build();
|
||||
case SortedNumericDocValuesFieldSlowRangeQuery:
|
||||
var sortedNumericDocValuesFieldSlowRangeQuery = (SortedNumericDocValuesFieldSlowRangeQuery) query;
|
||||
return SortedNumericDocValuesField.newSlowRangeQuery(sortedNumericDocValuesFieldSlowRangeQuery.field(),
|
||||
sortedNumericDocValuesFieldSlowRangeQuery.min(),
|
||||
sortedNumericDocValuesFieldSlowRangeQuery.max()
|
||||
);
|
||||
case WildcardQuery:
|
||||
throw new UnsupportedOperationException("Synonym query is not supported");
|
||||
}
|
||||
case SortedNumericDocValuesFieldSlowRangeQuery -> {
|
||||
throw new UnsupportedOperationException("Slow range query is not supported");
|
||||
}
|
||||
case WildcardQuery -> {
|
||||
var wildcardQuery = (WildcardQuery) query;
|
||||
return new org.apache.lucene.search.WildcardQuery(new Term(wildcardQuery.field(), wildcardQuery.pattern()));
|
||||
default:
|
||||
throw new IllegalStateException("Unexpected value: " + query.getBaseType$());
|
||||
throw new UnsupportedOperationException("Wildcard query is not supported");
|
||||
}
|
||||
default -> throw new IllegalStateException("Unexpected value: " + query.getBaseType$());
|
||||
}
|
||||
}
|
||||
|
||||
private static NumberFormat toNumberFormat(it.cavallium.dbengine.client.query.current.data.NumberFormat numberFormat) {
|
||||
return switch (numberFormat.getBaseType$()) {
|
||||
case NumberFormatDecimal -> new DecimalFormat();
|
||||
default -> throw new UnsupportedOperationException("Unsupported type: " + numberFormat.getBaseType$());
|
||||
};
|
||||
}
|
||||
private static boolean hasMoreThanOneWord(String sentence) {
|
||||
BreakIterator iterator = BreakIterator.getWordInstance(Locale.ENGLISH);
|
||||
iterator.setText(sentence);
|
||||
|
||||
private static Class<? extends Number> toType(PointType type) {
|
||||
return switch (type.getBaseType$()) {
|
||||
case PointTypeInt -> Integer.class;
|
||||
case PointTypeLong -> Long.class;
|
||||
case PointTypeFloat -> Float.class;
|
||||
case PointTypeDouble -> Double.class;
|
||||
default -> throw new UnsupportedOperationException("Unsupported type: " + type.getBaseType$());
|
||||
};
|
||||
}
|
||||
|
||||
private static Term toTerm(it.cavallium.dbengine.client.query.current.data.Term term) {
|
||||
return new Term(term.field(), term.value());
|
||||
}
|
||||
|
||||
public static Sort toSort(it.cavallium.dbengine.client.query.current.data.Sort sort) {
|
||||
switch (sort.getBaseType$()) {
|
||||
case NoSort:
|
||||
return null;
|
||||
case ScoreSort:
|
||||
return new Sort(SortField.FIELD_SCORE);
|
||||
case DocSort:
|
||||
return new Sort(SortField.FIELD_DOC);
|
||||
case NumericSort:
|
||||
NumericSort numericSort = (NumericSort) sort;
|
||||
return new Sort(new SortedNumericSortField(numericSort.field(), Type.LONG, numericSort.reverse()));
|
||||
case RandomSort:
|
||||
return new Sort(new RandomSortField());
|
||||
default:
|
||||
throw new IllegalStateException("Unexpected value: " + sort.getBaseType$());
|
||||
boolean firstWord = false;
|
||||
iterator.first();
|
||||
int end = iterator.next();
|
||||
while (end != BreakIterator.DONE) {
|
||||
if (!firstWord) {
|
||||
firstWord = true;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
end = iterator.next();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public static it.cavallium.dbengine.client.query.current.data.Term toQueryTerm(Term term) {
|
||||
return it.cavallium.dbengine.client.query.current.data.Term.of(term.field(), term.text());
|
||||
private static String escapeQueryStringValue(String text) {
|
||||
return StringUtils.replaceEach(text, QUERY_STRING_FIND, QUERY_STRING_REPLACE);
|
||||
}
|
||||
|
||||
private static void ensureValidField(String field) {
|
||||
field.codePoints().forEach(codePoint -> {
|
||||
if (!Character.isLetterOrDigit(codePoint) && codePoint != '_') {
|
||||
throw new UnsupportedOperationException(
|
||||
"Invalid character \"" + codePoint + "\" in field name \"" + field + "\"");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
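For orientation, a minimal sketch of how the XML serializer above could be driven. The StringBuilder target matches the out.append(...) calls; MatchAllDocsQuery.of() is an assumed generated factory, following the .of(...) convention the other data classes in this diff use:

// Hypothetical caller: serialize a query tree into Solr XML query syntax.
StringBuilder out = new StringBuilder();
toQueryXML(out, it.cavallium.dbengine.client.query.current.data.MatchAllDocsQuery.of(), null); // null = no boost
System.out.println(out); // expected output: <UserQuery>*:*</UserQuery>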
@@ -0,0 +1,16 @@
+package it.cavallium.dbengine.client.query;
+
+import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
+
+public class QueryUtil {
+
+	@SuppressWarnings("unused")
+	public static String toHumanReadableString(TotalHitsCount totalHitsCount) {
+		if (totalHitsCount.exact()) {
+			return Long.toString(totalHitsCount.value());
+		} else {
+			return totalHitsCount.value() + "+";
+		}
+	}
+
+}
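A quick usage note for the new helper; TotalHitsCount.of(value, exact) is an assumption about the generated factory's argument order:

// Exact counts print verbatim, lower bounds get a "+" suffix
QueryUtil.toHumanReadableString(TotalHitsCount.of(100, true));  // "100"
QueryUtil.toHumanReadableString(TotalHitsCount.of(100, false)); // "100+"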
@@ -1,109 +0,0 @@
-package it.cavallium.dbengine.client.query;
-
-import it.cavallium.dbengine.client.query.current.data.BooleanQuery;
-import it.cavallium.dbengine.client.query.current.data.BooleanQueryPart;
-import it.cavallium.dbengine.client.query.current.data.Occur;
-import it.cavallium.dbengine.client.query.current.data.OccurFilter;
-import it.cavallium.dbengine.client.query.current.data.OccurMust;
-import it.cavallium.dbengine.client.query.current.data.OccurMustNot;
-import it.cavallium.dbengine.client.query.current.data.OccurShould;
-import it.cavallium.dbengine.client.query.current.data.PhraseQuery;
-import it.cavallium.dbengine.client.query.current.data.Query;
-import it.cavallium.dbengine.client.query.current.data.SynonymQuery;
-import it.cavallium.dbengine.client.query.current.data.TermAndBoost;
-import it.cavallium.dbengine.client.query.current.data.TermPosition;
-import it.cavallium.dbengine.client.query.current.data.TermQuery;
-import it.cavallium.dbengine.lucene.LuceneUtils;
-import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.util.QueryBuilder;
-import org.jetbrains.annotations.NotNull;
-
-@SuppressWarnings("unused")
-public class QueryUtils {
-
-	/**
-	 * @param fraction of query terms [0..1] that should match
-	 */
-	public static Query sparseWordsSearch(TextFieldsAnalyzer preferredAnalyzer,
-			String field,
-			String text,
-			float fraction) {
-		var qb = new QueryBuilder(LuceneUtils.getAnalyzer(preferredAnalyzer));
-		var luceneQuery = qb.createMinShouldMatchQuery(field, text, fraction);
-		return transformQuery(field, luceneQuery);
-	}
-
-	public static Query phraseSearch(TextFieldsAnalyzer preferredAnalyzer, String field, String text, int slop) {
-		var qb = new QueryBuilder(LuceneUtils.getAnalyzer(preferredAnalyzer));
-		var luceneQuery = qb.createPhraseQuery(field, text, slop);
-		return transformQuery(field, luceneQuery);
-	}
-
-	public static Query exactSearch(TextFieldsAnalyzer preferredAnalyzer, String field, String text) {
-		var qb = new QueryBuilder(LuceneUtils.getAnalyzer(preferredAnalyzer));
-		var luceneQuery = qb.createPhraseQuery(field, text);
-		return transformQuery(field, luceneQuery);
-	}
-
-	@NotNull
-	private static Query transformQuery(String field, org.apache.lucene.search.Query luceneQuery) {
-		if (luceneQuery == null) {
-			return TermQuery.of(it.cavallium.dbengine.client.query.current.data.Term.of(field, ""));
-		}
-		if (luceneQuery instanceof org.apache.lucene.search.TermQuery) {
-			return TermQuery.of(QueryParser.toQueryTerm(((org.apache.lucene.search.TermQuery) luceneQuery).getTerm()));
-		}
-		if (luceneQuery instanceof org.apache.lucene.search.BooleanQuery) {
-			var booleanQuery = (org.apache.lucene.search.BooleanQuery) luceneQuery;
-			var queryParts = new ArrayList<BooleanQueryPart>();
-			for (BooleanClause booleanClause : booleanQuery) {
-				org.apache.lucene.search.Query queryPartQuery = booleanClause.getQuery();
-
-				Occur occur;
-				switch (booleanClause.getOccur()) {
-					case MUST:
-						occur = OccurMust.of();
-						break;
-					case FILTER:
-						occur = OccurFilter.of();
-						break;
-					case SHOULD:
-						occur = OccurShould.of();
-						break;
-					case MUST_NOT:
-						occur = OccurMustNot.of();
-						break;
-					default:
-						throw new IllegalArgumentException();
-				}
-				queryParts.add(BooleanQueryPart.of(transformQuery(field, queryPartQuery), occur));
-			}
-			return BooleanQuery.of(List.copyOf(queryParts), booleanQuery.getMinimumNumberShouldMatch());
-		}
-		if (luceneQuery instanceof org.apache.lucene.search.PhraseQuery) {
-			var phraseQuery = (org.apache.lucene.search.PhraseQuery) luceneQuery;
-			int slop = phraseQuery.getSlop();
-			var terms = phraseQuery.getTerms();
-			var positions = phraseQuery.getPositions();
-			TermPosition[] termPositions = new TermPosition[terms.length];
-			for (int i = 0; i < terms.length; i++) {
-				var term = terms[i];
-				var position = positions[i];
-				termPositions[i] = TermPosition.of(QueryParser.toQueryTerm(term), position);
-			}
-			return PhraseQuery.of(List.of(termPositions), slop);
-		}
-		org.apache.lucene.search.SynonymQuery synonymQuery = (org.apache.lucene.search.SynonymQuery) luceneQuery;
-		return SynonymQuery.of(field,
-				synonymQuery
-						.getTerms()
-						.stream()
-						.map(term -> TermAndBoost.of(QueryParser.toQueryTerm(term), 1))
-						.toList()
-		);
-	}
-}
@@ -1,62 +0,0 @@
-package it.cavallium.dbengine.database;
-
-import io.netty5.buffer.Buffer;
-import io.netty5.util.Send;
-import java.util.function.Supplier;
-
-public abstract class BufSupplier implements SafeCloseable, DiscardingCloseable, Supplier<Buffer> {
-
-	public static BufSupplier of(Supplier<Buffer> supplier) {
-		return new SimpleBufSupplier(supplier);
-	}
-
-	public static BufSupplier of(Send<Buffer> supplier) {
-		return new CopyBufSupplier(supplier.receive());
-	}
-
-	public static BufSupplier ofOwned(Buffer supplier) {
-		return new CopyBufSupplier(supplier);
-	}
-
-	public static BufSupplier ofShared(Buffer supplier) {
-		return new SimpleBufSupplier(() -> supplier.copy());
-	}
-
-	private static final class SimpleBufSupplier extends BufSupplier {
-
-		private final Supplier<Buffer> supplier;
-
-		public SimpleBufSupplier(Supplier<Buffer> supplier) {
-			this.supplier = supplier;
-		}
-
-		@Override
-		public Buffer get() {
-			return supplier.get();
-		}
-
-		@Override
-		public void close() {
-
-		}
-	}
-
-	private static final class CopyBufSupplier extends BufSupplier {
-
-		private final Buffer supplier;
-
-		public CopyBufSupplier(Buffer supplier) {
-			this.supplier = supplier;
-		}
-
-		@Override
-		public Buffer get() {
-			return supplier.copy();
-		}
-
-		@Override
-		public void close() {
-			supplier.close();
-		}
-	}
-}
@@ -2,10 +2,9 @@ package it.cavallium.dbengine.database;

 import it.cavallium.dbengine.rpc.current.data.Column;
 import java.nio.file.Path;
-import org.reactivestreams.Publisher;
-import reactor.core.publisher.Mono;
+import java.util.stream.Stream;

 public interface DatabaseOperations {

-	Mono<Void> ingestSST(Column column, Publisher<Path> files, boolean replaceExisting);
+	void ingestSST(Column column, Stream<Path> files, boolean replaceExisting);
 }
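The Publisher<Path> parameter becomes a plain Stream<Path>, so callers can feed files straight from java.nio.file.Files; a minimal sketch (the directory and column are illustrative assumptions):

// Ingest every .sst file in a directory; the call blocks until ingestion completes.
try (java.util.stream.Stream<java.nio.file.Path> files = java.nio.file.Files.list(java.nio.file.Path.of("/tmp/sst-import"))) {
	db.ingestSST(column, files.filter(f -> f.toString().endsWith(".sst")), false);
}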
@@ -2,30 +2,30 @@ package it.cavallium.dbengine.database;

 import it.cavallium.dbengine.client.MemoryStats;
 import it.cavallium.dbengine.rpc.current.data.Column;
 import java.io.IOException;
 import java.util.Map;
+import java.util.stream.Stream;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;

 public interface DatabaseProperties {

-	Mono<MemoryStats> getMemoryStats();
+	MemoryStats getMemoryStats();

-	Mono<String> getRocksDBStats();
+	String getRocksDBStats();

-	Mono<Map<String, String>> getMapProperty(@Nullable Column column, RocksDBMapProperty property);
+	Map<String, String> getMapProperty(@Nullable Column column, RocksDBMapProperty property);

-	Flux<ColumnProperty<Map<String, String>>> getMapColumnProperties(RocksDBMapProperty property);
+	Stream<ColumnProperty<Map<String, String>>> getMapColumnProperties(RocksDBMapProperty property);

-	Mono<String> getStringProperty(@Nullable Column column, RocksDBStringProperty property);
+	String getStringProperty(@Nullable Column column, RocksDBStringProperty property);

-	Flux<ColumnProperty<String>> getStringColumnProperties(RocksDBStringProperty property);
+	Stream<ColumnProperty<String>> getStringColumnProperties(RocksDBStringProperty property);

-	Mono<Long> getLongProperty(@Nullable Column column, RocksDBLongProperty property);
+	Long getLongProperty(@Nullable Column column, RocksDBLongProperty property);

-	Flux<ColumnProperty<Long>> getLongColumnProperties(RocksDBLongProperty property);
+	Stream<ColumnProperty<Long>> getLongColumnProperties(RocksDBLongProperty property);

-	Mono<Long> getAggregatedLongProperty(RocksDBLongProperty property);
+	Long getAggregatedLongProperty(RocksDBLongProperty property);

-	Flux<TableWithProperties> getTableProperties();
+	Stream<TableWithProperties> getTableProperties();
 }
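Every property getter now returns a plain value or a Stream instead of Mono/Flux; a sketch of the resulting call style:

MemoryStats stats = db.getMemoryStats();    // direct value, no block()/subscribe()
String rocksDbStats = db.getRocksDBStats();
// Streams are lazy; consume (or close) them on the calling thread
db.getTableProperties().forEach(System.out::println);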
@@ -5,6 +5,7 @@ import org.jetbrains.annotations.Nullable;

 public class Delta<T> {

+	private static final Delta<?> EMPTY = new Delta<>(null, null);
 	private final @Nullable T previous;
 	private final @Nullable T current;

@@ -25,6 +26,11 @@ public class Delta<T> {
 		return current;
 	}

+	public static <X> Delta<X> empty() {
+		//noinspection unchecked
+		return (Delta<X>) EMPTY;
+	}
+
 	@Override
 	public boolean equals(Object obj) {
 		if (obj == this)
@@ -1,39 +1,20 @@
 package it.cavallium.dbengine.database;

 import io.micrometer.core.instrument.MeterRegistry;
-import io.netty5.buffer.BufferAllocator;
-import it.cavallium.dbengine.lucene.LuceneHacks;
-import it.cavallium.dbengine.lucene.LuceneRocksDBManager;
 import it.cavallium.dbengine.rpc.current.data.Column;
 import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
-import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers;
-import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities;
-import it.cavallium.dbengine.rpc.current.data.LuceneIndexStructure;
-import it.cavallium.dbengine.rpc.current.data.LuceneOptions;
 import java.util.List;
-import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Mono;

 @SuppressWarnings("UnusedReturnValue")
 public interface LLDatabaseConnection {

-	BufferAllocator getAllocator();
-
 	MeterRegistry getMeterRegistry();

-	Mono<? extends LLDatabaseConnection> connect();
+	LLDatabaseConnection connect();

-	Mono<? extends LLKeyValueDatabase> getDatabase(String name,
+	LLKeyValueDatabase getDatabase(String name,
 			List<Column> columns,
 			DatabaseOptions databaseOptions);

-	Mono<? extends LLLuceneIndex> getLuceneIndex(String clusterName,
-			LuceneIndexStructure indexStructure,
-			IndicizerAnalyzers indicizerAnalyzers,
-			IndicizerSimilarities indicizerSimilarities,
-			LuceneOptions luceneOptions,
-			@Nullable LuceneHacks luceneHacks);
-
-	Mono<Void> disconnect();
+	void disconnect();
 }
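The connection lifecycle is now blocking end to end; a sketch, with the concrete connection class and the options left abstract because they are not part of this diff:

// conn is some LLDatabaseConnection implementation (not shown in this changeset)
LLDatabaseConnection connected = conn.connect();
LLKeyValueDatabase db = connected.getDatabase("main", columns, databaseOptions);
// ... use db ...
connected.disconnect();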
@@ -1,71 +1,37 @@
 package it.cavallium.dbengine.database;

-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.Drop;
-import io.netty5.buffer.Owned;
-import io.netty5.util.Send;
-import io.netty5.buffer.internal.ResourceSupport;
-import it.cavallium.dbengine.utils.SimpleResource;
+import static it.cavallium.dbengine.database.LLUtils.unmodifiableBytes;
+
+import it.cavallium.buffer.Buf;
 import java.util.StringJoiner;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 import org.jetbrains.annotations.Nullable;

-public class LLDelta extends SimpleResource implements DiscardingCloseable {
+public class LLDelta {

 	@Nullable
-	private final Buffer previous;
+	private final Buf previous;
 	@Nullable
-	private final Buffer current;
+	private final Buf current;

-	private LLDelta(@Nullable Buffer previous, @Nullable Buffer current) {
+	private LLDelta(@Nullable Buf previous, @Nullable Buf current) {
 		super();
-		this.previous = previous != null ? previous.makeReadOnly() : null;
-		this.current = current != null ? current.makeReadOnly() : null;
+		this.previous = unmodifiableBytes(previous);
+		this.current = unmodifiableBytes(current);
 	}

-	@Override
-	protected void ensureOpen() {
-		super.ensureOpen();
-		assert previous == null || previous.isAccessible();
-		assert current == null || current.isAccessible();
-	}
-
-	@Override
-	protected void onClose() {
-		if (previous != null && previous.isAccessible()) {
-			previous.close();
-		}
-		if (current != null && current.isAccessible()) {
-			current.close();
-		}
-	}
-
-	public static LLDelta of(Buffer previous, Buffer current) {
+	public static LLDelta of(Buf previous, Buf current) {
 		assert (previous == null && current == null) || (previous != current);
 		return new LLDelta(previous, current);
 	}

-	public Send<Buffer> previous() {
-		ensureOpen();
-		return previous != null ? previous.copy().send() : null;
-	}
-
-	public Send<Buffer> current() {
-		ensureOpen();
-		return current != null ? current.copy().send() : null;
-	}
-
-	public Buffer currentUnsafe() {
-		ensureOpen();
-		return current;
-	}
-
-	public Buffer previousUnsafe() {
-		ensureOpen();
+	public Buf previous() {
 		return previous;
 	}

+	public Buf current() {
 		return current;
 	}

 	public boolean isModified() {
 		return !LLUtils.equals(previous, current);
 	}
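LLDelta is now an immutable pair of Buf views with no reference counting; a sketch, where Buf.wrap(byte[]) is an assumed factory of the it.cavallium.buffer API:

LLDelta delta = LLDelta.of(null, Buf.wrap(new byte[] {42})); // previous = null: key was absent
if (delta.isModified()) {
	Buf newValue = delta.current(); // plain accessor, no send()/receive()/close()
}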
@@ -1,105 +1,93 @@
 package it.cavallium.dbengine.database;

-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.BufferAllocator;
-import io.netty5.util.Send;
-import it.cavallium.dbengine.client.BadBlock;
-import it.cavallium.dbengine.database.disk.BinarySerializationFunction;
+import it.cavallium.buffer.Buf;
+import it.cavallium.dbengine.client.DbProgress;
+import it.cavallium.dbengine.client.SSTVerificationProgress;
 import it.cavallium.dbengine.database.serialization.KVSerializationFunction;
+import it.cavallium.dbengine.database.serialization.SerializationFunction;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.ForkJoinPool;
 import java.util.function.Function;
+import java.util.stream.Stream;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;

 @SuppressWarnings("unused")
 public interface LLDictionary extends LLKeyValueDatabaseStructure {

 	String getColumnName();

-	BufferAllocator getAllocator();
+	Buf get(@Nullable LLSnapshot snapshot, Buf key);

-	Mono<Buffer> get(@Nullable LLSnapshot snapshot, Mono<Buffer> key);
-
-	Mono<Buffer> put(Mono<Buffer> key, Mono<Buffer> value, LLDictionaryResultType resultType);
+	Buf put(Buf key, Buf value, LLDictionaryResultType resultType);

 	UpdateMode getUpdateMode();

-	default Mono<Buffer> update(Mono<Buffer> key,
-			BinarySerializationFunction updater,
-			UpdateReturnMode updateReturnMode) {
-		return this
-				.updateAndGetDelta(key, updater)
-				.transform(prev -> LLUtils.resolveLLDelta(prev, updateReturnMode));
+	default Buf update(Buf key, SerializationFunction<@Nullable Buf, @Nullable Buf> updater, UpdateReturnMode updateReturnMode) {
+		LLDelta prev = this.updateAndGetDelta(key, updater);
+		return LLUtils.resolveLLDelta(prev, updateReturnMode);
 	}

-	Mono<LLDelta> updateAndGetDelta(Mono<Buffer> key, BinarySerializationFunction updater);
+	LLDelta updateAndGetDelta(Buf key, SerializationFunction<@Nullable Buf, @Nullable Buf> updater);

-	Mono<Void> clear();
+	void clear();

-	Mono<Buffer> remove(Mono<Buffer> key, LLDictionaryResultType resultType);
+	Buf remove(Buf key, LLDictionaryResultType resultType);

-	Flux<OptionalBuf> getMulti(@Nullable LLSnapshot snapshot, Flux<Buffer> keys);
+	Stream<OptionalBuf> getMulti(@Nullable LLSnapshot snapshot, Stream<Buf> keys);

-	Mono<Void> putMulti(Flux<LLEntry> entries);
+	void putMulti(Stream<LLEntry> entries);

-	<K> Flux<Boolean> updateMulti(Flux<K> keys, Flux<Buffer> serializedKeys,
-			KVSerializationFunction<K, @Nullable Buffer, @Nullable Buffer> updateFunction);
+	<K> Stream<Boolean> updateMulti(Stream<SerializedKey<K>> keys,
+			KVSerializationFunction<K, @Nullable Buf, @Nullable Buf> updateFunction);

-	Flux<LLEntry> getRange(@Nullable LLSnapshot snapshot,
-			Mono<LLRange> range,
+	Stream<LLEntry> getRange(@Nullable LLSnapshot snapshot,
+			LLRange range,
 			boolean reverse,
 			boolean smallRange);

-	Flux<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
-			Mono<LLRange> range,
+	Stream<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
+			LLRange range,
 			int prefixLength,
 			boolean smallRange);

-	Flux<Buffer> getRangeKeys(@Nullable LLSnapshot snapshot,
-			Mono<LLRange> range,
+	Stream<Buf> getRangeKeys(@Nullable LLSnapshot snapshot,
+			LLRange range,
 			boolean reverse,
 			boolean smallRange);

-	Flux<List<Buffer>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot,
-			Mono<LLRange> range,
+	Stream<List<Buf>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot,
+			LLRange range,
 			int prefixLength,
 			boolean smallRange);

-	Flux<Buffer> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot,
-			Mono<LLRange> range,
+	Stream<Buf> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot,
+			LLRange range,
 			int prefixLength,
 			boolean smallRange);

-	Flux<BadBlock> badBlocks(Mono<LLRange> range);
+	Stream<DbProgress<SSTVerificationProgress>> verifyChecksum(LLRange range);

-	Mono<Void> setRange(Mono<LLRange> range, Flux<LLEntry> entries, boolean smallRange);
+	void setRange(LLRange range, Stream<LLEntry> entries, boolean smallRange);

-	default Mono<Void> replaceRange(Mono<LLRange> range,
+	default void replaceRange(LLRange range,
 			boolean canKeysChange,
-			Function<LLEntry, Mono<LLEntry>> entriesReplacer,
+			Function<@NotNull LLEntry, @NotNull LLEntry> entriesReplacer,
 			boolean smallRange) {
-		return Mono.defer(() -> {
-			if (canKeysChange) {
-				return this
-						.setRange(range, this
-								.getRange(null, range, false, smallRange)
-								.flatMap(entriesReplacer), smallRange);
-			} else {
-				return this.putMulti(this.getRange(null, range, false, smallRange).flatMap(entriesReplacer));
-			}
-		});
+		if (canKeysChange) {
+			this.setRange(range, this.getRange(null, range, false, smallRange).map(entriesReplacer), smallRange);
+		} else {
+			this.putMulti(this.getRange(null, range, false, smallRange).map(entriesReplacer));
+		}
 	}

-	Mono<Boolean> isRangeEmpty(@Nullable LLSnapshot snapshot, Mono<LLRange> range, boolean fillCache);
+	boolean isRangeEmpty(@Nullable LLSnapshot snapshot, LLRange range, boolean fillCache);

-	Mono<Long> sizeRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range, boolean fast);
+	long sizeRange(@Nullable LLSnapshot snapshot, LLRange range, boolean fast);

-	Mono<LLEntry> getOne(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
+	LLEntry getOne(@Nullable LLSnapshot snapshot, LLRange range);

-	Mono<Buffer> getOneKey(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
+	Buf getOneKey(@Nullable LLSnapshot snapshot, LLRange range);

-	Mono<LLEntry> removeOne(Mono<LLRange> range);
+	LLEntry removeOne(LLRange range);
 }
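Updates are now a direct function application over Buf values; a sketch of the synchronous update path (the Buf factory and the UpdateReturnMode constant name are assumptions):

Buf key = Buf.wrap("counter".getBytes(java.nio.charset.StandardCharsets.UTF_8));
Buf updated = dict.update(key, prev -> {
	// prev is null when the key is absent; returning null would delete the entry
	return prev; // no-op update shown here
}, UpdateReturnMode.NEW);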
@@ -1,67 +1,37 @@
 package it.cavallium.dbengine.database;

-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.Drop;
-import io.netty5.buffer.Owned;
-import io.netty5.util.Resource;
-import io.netty5.util.Send;
-import io.netty5.buffer.internal.ResourceSupport;
-import it.cavallium.dbengine.utils.SimpleResource;
+import it.cavallium.buffer.Buf;
 import java.util.Objects;
 import java.util.StringJoiner;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;

-public class LLEntry extends SimpleResource implements DiscardingCloseable {
+public class LLEntry {

-	private static final Logger logger = LogManager.getLogger(LLEntry.class);
-	private Buffer key;
-	private Buffer value;
+	private final Buf key;
+	private final Buf value;

-	private LLEntry(@NotNull Send<Buffer> key, @NotNull Send<Buffer> value) {
-		this.key = key.receive();
-		this.value = value.receive();
-		assert isAllAccessible();
-	}
-	private LLEntry(@NotNull Buffer key, @NotNull Buffer value) {
+	private LLEntry(@NotNull Buf key, @NotNull Buf value) {
 		this.key = key;
 		this.value = value;
-		assert isAllAccessible();
-	}
-
-	private boolean isAllAccessible() {
-		assert key != null && key.isAccessible();
-		assert value != null && value.isAccessible();
-		return true;
 	}

-	public static LLEntry of(@NotNull Buffer key, @NotNull Buffer value) {
+	public static LLEntry of(@NotNull Buf key, @NotNull Buf value) {
 		return new LLEntry(key, value);
 	}

-	public Send<Buffer> getKey() {
-		ensureOwned();
-		return Objects.requireNonNull(key).copy().send();
+	public static LLEntry copyOf(Buf keyView, Buf valueView) {
+		return new LLEntry(keyView.copy(), valueView.copy());
 	}

-	public Buffer getKeyUnsafe() {
-		return key;
+	public Buf getKey() {
+		return Objects.requireNonNull(key);
 	}

-	public Send<Buffer> getValue() {
-		ensureOwned();
-		return Objects.requireNonNull(value).copy().send();
-	}
-
-	public Buffer getValueUnsafe() {
-		return value;
-	}
-
-	private void ensureOwned() {
-		assert isAllAccessible();
+	public Buf getValue() {
+		return Objects.requireNonNull(value);
 	}

 	@Override
@@ -90,24 +60,4 @@ public class LLEntry extends SimpleResource implements DiscardingCloseable {
 				.add("value=" + LLUtils.toString(value))
 				.toString();
 	}
-
-	@Override
-	protected void onClose() {
-		try {
-			if (key != null && key.isAccessible()) {
-				key.close();
-			}
-		} catch (Throwable ex) {
-			logger.error("Failed to close key", ex);
-		}
-		try {
-			if (value != null && value.isAccessible()) {
-				value.close();
-			}
-		} catch (Throwable ex) {
-			logger.error("Failed to close value", ex);
-		}
-		key = null;
-		value = null;
-	}
 }
@@ -1,3 +0,0 @@
-package it.cavallium.dbengine.database;
-
-public sealed interface LLIndexRequest permits LLSoftUpdateDocument, LLUpdateDocument, LLUpdateFields {}
@@ -1,253 +0,0 @@
-package it.cavallium.dbengine.database;
-
-import com.google.common.primitives.Floats;
-import com.google.common.primitives.Ints;
-import com.google.common.primitives.Longs;
-import java.nio.Buffer;
-import java.nio.ByteBuffer;
-import java.nio.FloatBuffer;
-import java.nio.charset.StandardCharsets;
-import java.util.Arrays;
-import java.util.Objects;
-import java.util.StringJoiner;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LongPoint;
-import org.apache.lucene.index.VectorSimilarityFunction;
-import org.apache.lucene.util.BytesRef;
-
-public class LLItem {
-
-	private final LLType type;
-	private final String name;
-	private final Object data;
-
-	public LLItem(LLType type, String name, ByteBuffer data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	public LLItem(LLType type, String name, BytesRef data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	public LLItem(LLType type, String name, KnnFieldData data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	private LLItem(LLType type, String name, String data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	private LLItem(LLType type, String name, int data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	private LLItem(LLType type, String name, float data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	private LLItem(LLType type, String name, long data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	private LLItem(LLType type, String name, int... data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	private LLItem(LLType type, String name, float... data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	private LLItem(LLType type, String name, double... data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	private LLItem(LLType type, String name, long... data) {
-		this.type = type;
-		this.name = name;
-		this.data = data;
-	}
-
-	public static LLItem newIntPoint(String name, int data) {
-		return new LLItem(LLType.IntPoint, name, data);
-	}
-
-	public static LLItem newIntPointND(String name, int... data) {
-		return new LLItem(LLType.IntPointND, name, data);
-	}
-
-	public static LLItem newLongPoint(String name, long data) {
-		return new LLItem(LLType.LongPoint, name, data);
-	}
-
-	public static LLItem newFloatPoint(String name, float data) {
-		return new LLItem(LLType.FloatPoint, name, data);
-	}
-
-	public static LLItem newDoublePoint(String name, double data) {
-		return new LLItem(LLType.DoublePoint, name, data);
-	}
-
-	public static LLItem newLongPointND(String name, long... data) {
-		return new LLItem(LLType.LongPointND, name, data);
-	}
-
-	public static LLItem newFloatPointND(String name, float... data) {
-		return new LLItem(LLType.FloatPointND, name, data);
-	}
-
-	public static LLItem newDoublePointND(String name, double... data) {
-		return new LLItem(LLType.DoublePointND, name, data);
-	}
-
-	public static LLItem newLongStoredField(String name, long data) {
-		return new LLItem(LLType.LongStoredField, name, data);
-	}
-
-	public static LLItem newLongStoredFieldND(String name, long... data) {
-		BytesRef packed = LongPoint.pack(data);
-		return new LLItem(LLType.BytesStoredField, name, packed);
-	}
-
-	public static LLItem newTextField(String name, String data, Field.Store store) {
-		if (store == Field.Store.YES) {
-			return new LLItem(LLType.TextFieldStored, name, data);
-		} else {
-			return new LLItem(LLType.TextField, name, data);
-		}
-	}
-
-	public static LLItem newStringField(String name, String data, Field.Store store) {
-		if (store == Field.Store.YES) {
-			return new LLItem(LLType.StringFieldStored, name, data);
-		} else {
-			return new LLItem(LLType.StringField, name, data);
-		}
-	}
-
-	public static LLItem newStringField(String name, BytesRef bytesRef, Field.Store store) {
-		if (store == Field.Store.YES) {
-			return new LLItem(LLType.StringFieldStored, name, bytesRef);
-		} else {
-			return new LLItem(LLType.StringField, name, bytesRef);
-		}
-	}
-
-	public static LLItem newSortedNumericDocValuesField(String name, long data) {
-		return new LLItem(LLType.SortedNumericDocValuesField, name, data);
-	}
-
-	public static LLItem newNumericDocValuesField(String name, long data) {
-		return new LLItem(LLType.NumericDocValuesField, name, data);
-	}
-
-	public static LLItem newKnnField(String name, KnnFieldData knnFieldData) {
-		return new LLItem(LLType.NumericDocValuesField, name, knnFieldData);
-	}
-
-	public String getName() {
-		return name;
-	}
-
-	public LLType getType() {
-		return type;
-	}
-
-	public Object getData() {
-		return data;
-	}
-
-	@Override
-	public boolean equals(Object o) {
-		if (this == o) {
-			return true;
-		}
-		if (o == null || getClass() != o.getClass()) {
-			return false;
-		}
-
-		LLItem llItem = (LLItem) o;
-
-		if (type != llItem.type) {
-			return false;
-		}
-		return Objects.equals(name, llItem.name);
-	}
-
-	@Override
-	public int hashCode() {
-		int result = type != null ? type.hashCode() : 0;
-		result = 31 * result + (name != null ? name.hashCode() : 0);
-		return result;
-	}
-
-	@Override
-	public String toString() {
-		return new StringJoiner(", ", LLItem.class.getSimpleName() + "[", "]")
-				.add("type=" + type)
-				.add("name='" + name + "'")
-				.add("data=" + data)
-				.toString();
-	}
-
-	public int intData() {
-		return (int) data;
-	}
-
-	public int[] intArrayData() {
-		return (int[]) data;
-	}
-
-	public long longData() {
-		return (long) data;
-	}
-
-	public long[] longArrayData() {
-		return (long[]) data;
-	}
-
-	public float floatData() {
-		return (float) data;
-	}
-
-	public float[] floatArrayData() {
-		return (float[]) data;
-	}
-
-	public double doubleData() {
-		return (double) data;
-	}
-
-	public double[] doubleArrayData() {
-		return (double[]) data;
-	}
-
-	public KnnFieldData knnFieldData() {
-		return (KnnFieldData) data;
-	}
-
-	public String stringValue() {
-		return (String) data;
-	}
-
-	public record KnnFieldData(float[] data, VectorSimilarityFunction vectorSimilarityFunction) {}
-}
@@ -1,10 +0,0 @@
-package it.cavallium.dbengine.database;
-
-import java.util.Objects;
-import java.util.StringJoiner;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.util.BytesRef;
-import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Mono;
-
-public record LLKeyScore(int docId, int shardId, float score, @Nullable IndexableField key) {}
@@ -3,69 +3,67 @@ package it.cavallium.dbengine.database;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import io.micrometer.core.instrument.MeterRegistry;
import io.netty5.buffer.BufferAllocator;
import it.cavallium.dbengine.client.IBackuppable;
import it.cavallium.dbengine.client.MemoryStats;
import it.cavallium.dbengine.database.collections.DatabaseInt;
import it.cavallium.dbengine.database.collections.DatabaseLong;
import it.cavallium.dbengine.rpc.current.data.Column;
import java.io.Closeable;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import org.rocksdb.RocksDBException;

public interface LLKeyValueDatabase extends LLSnapshottable, LLKeyValueDatabaseStructure, DatabaseProperties,
		IBackuppable, DatabaseOperations {
		IBackuppable, DatabaseOperations, Closeable {

	Mono<? extends LLSingleton> getSingleton(byte[] singletonListColumnName, byte[] name, byte @Nullable[] defaultValue);
	LLSingleton getSingleton(byte[] singletonListColumnName, byte[] name, byte @Nullable [] defaultValue);

	Mono<? extends LLDictionary> getDictionary(byte[] columnName, UpdateMode updateMode);
	LLDictionary getDictionary(byte[] columnName, UpdateMode updateMode);

	@Deprecated
	default Mono<? extends LLDictionary> getDeprecatedSet(String name, UpdateMode updateMode) {
	default LLDictionary getDeprecatedSet(String name, UpdateMode updateMode) {
		return getDictionary(ColumnUtils.deprecatedSet(name).name().getBytes(StandardCharsets.US_ASCII), updateMode);
	}

	default Mono<? extends LLDictionary> getDictionary(String name, UpdateMode updateMode) {
	default LLDictionary getDictionary(String name, UpdateMode updateMode) {
		return getDictionary(ColumnUtils.dictionary(name).name().getBytes(StandardCharsets.US_ASCII), updateMode);
	}

	default Mono<? extends LLSingleton> getSingleton(String singletonListName, String name) {
	default LLSingleton getSingleton(String singletonListName, String name) {
		return getSingleton(ColumnUtils.special(singletonListName).name().getBytes(StandardCharsets.US_ASCII),
				name.getBytes(StandardCharsets.US_ASCII),
				null
		);
	}

	default Mono<DatabaseInt> getInteger(String singletonListName, String name, int defaultValue) {
		return this
				.getSingleton(ColumnUtils.special(singletonListName).name().getBytes(StandardCharsets.US_ASCII),
						name.getBytes(StandardCharsets.US_ASCII),
						Ints.toByteArray(defaultValue)
				)
				.map(DatabaseInt::new);
	default DatabaseInt getInteger(String singletonListName, String name, int defaultValue) {
		return new DatabaseInt(this.getSingleton(ColumnUtils
				.special(singletonListName)
				.name()
				.getBytes(StandardCharsets.US_ASCII),
				name.getBytes(StandardCharsets.US_ASCII),
				Ints.toByteArray(defaultValue)
		));
	}

	default Mono<DatabaseLong> getLong(String singletonListName, String name, long defaultValue) {
		return this
				.getSingleton(ColumnUtils.special(singletonListName).name().getBytes(StandardCharsets.US_ASCII),
						name.getBytes(StandardCharsets.US_ASCII),
						Longs.toByteArray(defaultValue)
				)
				.map(DatabaseLong::new);
	default DatabaseLong getLong(String singletonListName, String name, long defaultValue) {
		return new DatabaseLong(this.getSingleton(ColumnUtils
				.special(singletonListName)
				.name()
				.getBytes(StandardCharsets.US_ASCII),
				name.getBytes(StandardCharsets.US_ASCII),
				Longs.toByteArray(defaultValue)
		));
	}

	Mono<Void> verifyChecksum();
	void verifyChecksum();

	Mono<Void> compact();
	void compact();

	Mono<Void> flush();

	BufferAllocator getAllocator();
	void flush();

	MeterRegistry getMeterRegistry();

	Mono<Void> preClose();
	Mono<Void> close();
	void preClose();

	void close();
}
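A minimal usage sketch of the new synchronous surface in the hunk above, assuming an already-connected LLKeyValueDatabase named db ("users" and "config" are illustrative column names, not from the diff):

	LLDictionary users = db.getDictionary("users", UpdateMode.ALLOW); // direct return, no Mono
	LLSingleton lastRun = db.getSingleton("config", "last-run");
	db.flush(); // synchronous; failures surface as unchecked exceptions
	db.close(); // the interface now extends Closeable, so try-with-resources works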
@@ -1,6 +1,12 @@
package it.cavallium.dbengine.database;

import java.util.concurrent.ForkJoinPool;

public interface LLKeyValueDatabaseStructure {

	String getDatabaseName();

	ForkJoinPool getDbReadPool();

	ForkJoinPool getDbWritePool();
}
@@ -1,101 +0,0 @@
package it.cavallium.dbengine.database;

import com.google.common.collect.Multimap;
import it.cavallium.dbengine.client.IBackuppable;
import it.cavallium.dbengine.client.query.current.data.NoSort;
import it.cavallium.dbengine.client.query.current.data.Query;
import it.cavallium.dbengine.client.query.current.data.QueryParams;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import it.cavallium.dbengine.lucene.collector.Buckets;
import it.cavallium.dbengine.lucene.searcher.BucketParams;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public interface LLLuceneIndex extends LLSnapshottable, IBackuppable, SafeCloseable {

	String getLuceneIndexName();

	Mono<Void> addDocument(LLTerm id, LLUpdateDocument doc);

	Mono<Long> addDocuments(boolean atomic, Flux<Entry<LLTerm, LLUpdateDocument>> documents);

	Mono<Void> deleteDocument(LLTerm id);

	Mono<Void> update(LLTerm id, LLIndexRequest request);

	Mono<Long> updateDocuments(Flux<Entry<LLTerm, LLUpdateDocument>> documents);

	Mono<Void> deleteAll();

	/**
	 * @param queryParams the limit is valid for each lucene instance. If you have 15 instances, the number of elements
	 *                    returned can be at most <code>limit * 15</code>.
	 *                    <p>
	 *                    The additional query will be used with the moreLikeThis query: "mltQuery AND additionalQuery"
	 * @return the collection has one or more flux
	 */
	Flux<LLSearchResultShard> moreLikeThis(@Nullable LLSnapshot snapshot,
			QueryParams queryParams,
			@Nullable String keyFieldName,
			Multimap<String, String> mltDocumentFields);

	/**
	 * @param queryParams the limit is valid for each lucene instance. If you have 15 instances, the number of elements
	 *                    returned can be at most <code>limit * 15</code>
	 * @return the collection has one or more flux
	 */
	Flux<LLSearchResultShard> search(@Nullable LLSnapshot snapshot,
			QueryParams queryParams,
			@Nullable String keyFieldName);

	/**
	 * @return buckets with each value collected into one of the buckets
	 */
	Mono<Buckets> computeBuckets(@Nullable LLSnapshot snapshot,
			@NotNull List<Query> queries,
			@Nullable Query normalizationQuery,
			BucketParams bucketParams);

	default Mono<TotalHitsCount> count(@Nullable LLSnapshot snapshot, Query query, @Nullable Duration timeout) {
		QueryParams params = QueryParams.of(query,
				0,
				0,
				NoSort.of(),
				false,
				timeout == null ? Long.MAX_VALUE : timeout.toMillis()
		);
		return Mono
				.usingWhen(this.search(snapshot, params, null).singleOrEmpty(),
						llSearchResultShard -> Mono.just(llSearchResultShard.totalHitsCount()),
						LLUtils::finalizeResource
				)
				.defaultIfEmpty(TotalHitsCount.of(0, true));
	}

	boolean isLowMemoryMode();

	/**
	 * Flush writes to disk.
	 * This does not commit, it syncs the data to the disk
	 */
	Mono<Void> flush();

	Mono<Void> waitForMerges();

	/**
	 * Wait for the latest pending merge
	 * This disables future merges until shutdown!
	 */
	Mono<Void> waitForLastMerges();

	/**
	 * Refresh index searcher
	 */
	Mono<Void> refresh(boolean force);
}
@@ -1,23 +1,14 @@
package it.cavallium.dbengine.database;

import static it.cavallium.dbengine.utils.StreamUtils.collect;
import static it.cavallium.dbengine.utils.StreamUtils.executing;

import com.google.common.collect.Multimap;
import io.micrometer.core.instrument.MeterRegistry;
import io.netty5.buffer.BufferAllocator;
import it.cavallium.dbengine.client.ConnectionSettings.ConnectionPart;
import it.cavallium.dbengine.client.ConnectionSettings.ConnectionPart.ConnectionPartLucene;
import it.cavallium.dbengine.client.ConnectionSettings.ConnectionPart.ConnectionPartRocksDB;
import it.cavallium.dbengine.client.IndicizerAnalyzers;
import it.cavallium.dbengine.client.IndicizerSimilarities;
import it.cavallium.dbengine.lucene.LuceneHacks;
import it.cavallium.dbengine.lucene.LuceneRocksDBManager;
import it.cavallium.dbengine.lucene.LuceneUtils;
import it.cavallium.dbengine.rpc.current.data.Column;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
import it.cavallium.dbengine.rpc.current.data.LuceneIndexStructure;
import it.cavallium.dbengine.rpc.current.data.LuceneOptions;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
import it.unimi.dsi.fastutil.ints.IntSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -25,36 +16,24 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.StringJoiner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;

public class LLMultiDatabaseConnection implements LLDatabaseConnection {

	private static final Logger LOG = LogManager.getLogger(LLMultiDatabaseConnection.class);
	private final Map<String, LLDatabaseConnection> databaseShardConnections = new HashMap<>();
	private final Map<String, LLDatabaseConnection> luceneShardConnections = new HashMap<>();
	private final Set<LLDatabaseConnection> allConnections = new HashSet<>();
	private final LLDatabaseConnection defaultDatabaseConnection;
	private final LLDatabaseConnection defaultLuceneConnection;
	private final LLDatabaseConnection anyConnection;

	public LLMultiDatabaseConnection(Multimap<LLDatabaseConnection, ConnectionPart> subConnections) {
		LLDatabaseConnection defaultDatabaseConnection = null;
		LLDatabaseConnection defaultLuceneConnection = null;
		for (Entry<LLDatabaseConnection, ConnectionPart> entry : subConnections.entries()) {
			var subConnectionSettings = entry.getKey();
			var connectionPart = entry.getValue();
			if (connectionPart instanceof ConnectionPartLucene connectionPartLucene) {
				if (connectionPartLucene.name() == null) {
					defaultLuceneConnection = subConnectionSettings;
				} else {
					luceneShardConnections.put(connectionPartLucene.name(), subConnectionSettings);
				}
			} else if (connectionPart instanceof ConnectionPartRocksDB connectionPartRocksDB) {
			if (connectionPart instanceof ConnectionPartRocksDB connectionPartRocksDB) {
				if (connectionPartRocksDB.name() == null) {
					defaultDatabaseConnection = subConnectionSettings;
				} else {
@@ -65,48 +44,36 @@ public class LLMultiDatabaseConnection implements LLDatabaseConnection {
			}
		}
		this.defaultDatabaseConnection = defaultDatabaseConnection;
		this.defaultLuceneConnection = defaultLuceneConnection;
		if (defaultDatabaseConnection != null) {
			anyConnection = defaultDatabaseConnection;
		} else if (defaultLuceneConnection != null) {
			anyConnection = defaultLuceneConnection;
		} else {
			anyConnection = subConnections.keySet().stream().findAny().orElse(null);
		}
		if (defaultDatabaseConnection != null) {
			allConnections.add(defaultDatabaseConnection);
		}
		if (defaultLuceneConnection != null) {
			allConnections.add(defaultLuceneConnection);
		}
		allConnections.addAll(luceneShardConnections.values());
		allConnections.addAll(databaseShardConnections.values());
	}

	@Override
	public BufferAllocator getAllocator() {
		return anyConnection.getAllocator();
	}

	@Override
	public MeterRegistry getMeterRegistry() {
		return anyConnection.getMeterRegistry();
	}

	@Override
	public Mono<? extends LLDatabaseConnection> connect() {
		return Flux
				.fromIterable(allConnections)
				.flatMap((LLDatabaseConnection databaseConnection) -> databaseConnection
						.connect()
						.doOnError(ex -> LOG.error("Failed to open connection", ex))
				)
				.then()
				.thenReturn(this);
	public LLDatabaseConnection connect() {
		collect(allConnections.stream(), executing(connection -> {
			try {
				connection.connect();
			} catch (Exception ex) {
				LOG.error("Failed to open connection", ex);
			}
		}));
		return this;
	}

	@Override
	public Mono<? extends LLKeyValueDatabase> getDatabase(String name,
	public LLKeyValueDatabase getDatabase(String name,
			List<Column> columns,
			DatabaseOptions databaseOptions) {
		var conn = databaseShardConnections.getOrDefault(name, defaultDatabaseConnection);
@@ -115,86 +82,23 @@ public class LLMultiDatabaseConnection implements LLDatabaseConnection {
	}

	@Override
	public Mono<? extends LLLuceneIndex> getLuceneIndex(String clusterName,
			LuceneIndexStructure indexStructure,
			it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers indicizerAnalyzers,
			it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities indicizerSimilarities,
			LuceneOptions luceneOptions,
			@Nullable LuceneHacks luceneHacks) {
		IntSet registeredShards = new IntOpenHashSet();
		Map<LLDatabaseConnection, IntSet> connectionToShardMap = new HashMap<>();
		for (int activeShard : indexStructure.activeShards()) {
			if (activeShard >= indexStructure.totalShards()) {
				throw new IllegalArgumentException(
						"ActiveShard " + activeShard + " is bigger than total shards count " + indexStructure.totalShards());
	public void disconnect() {
		collect(allConnections.stream(), executing(connection -> {
			try {
				connection.disconnect();
			} catch (Exception ex) {
				LOG.error("Failed to close connection", ex);
			}
			if (!registeredShards.add(activeShard)) {
				throw new IllegalArgumentException("ActiveShard " + activeShard + " has been specified twice");
			}
			var shardName = LuceneUtils.getStandardName(clusterName, activeShard);
			var connection = luceneShardConnections.getOrDefault(shardName, defaultLuceneConnection);
			Objects.requireNonNull(connection, "Null connection");
			connectionToShardMap.computeIfAbsent(connection, k -> new IntOpenHashSet()).add(activeShard);
		}
		if (connectionToShardMap.keySet().size() == 1) {
			return connectionToShardMap
					.keySet()
					.stream()
					.findFirst()
					.orElseThrow()
					.getLuceneIndex(clusterName,
							indexStructure,
							indicizerAnalyzers,
							indicizerSimilarities,
							luceneOptions,
							luceneHacks
					);
		} else {
			return Flux
					.fromIterable(connectionToShardMap.entrySet())
					.flatMap(entry -> {
						var connectionIndexStructure = indexStructure
								.setActiveShards(new IntArrayList(entry.getValue()));

						Flux<LLLuceneIndex> connIndex = entry.getKey()
								.getLuceneIndex(clusterName,
										connectionIndexStructure,
										indicizerAnalyzers,
										indicizerSimilarities,
										luceneOptions,
										luceneHacks
								).cast(LLLuceneIndex.class).cache().repeat();
						return Flux
								.fromIterable(entry.getValue())
								.zipWith(connIndex);
					})
					.collectList()
					.map(indices -> {
						var luceneIndices = new LLLuceneIndex[indexStructure.totalShards()];
						for (var index : indices) {
							luceneIndices[index.getT1()] = index.getT2();
						}
						return new LLMultiLuceneIndex(clusterName,
								indexStructure,
								indicizerAnalyzers,
								indicizerSimilarities,
								luceneOptions,
								luceneHacks,
								luceneIndices
						);
					});
		}
		}));
	}

	@Override
	public Mono<Void> disconnect() {
		return Flux
				.fromIterable(allConnections)
				.flatMap(databaseConnection -> databaseConnection
						.disconnect()
						.doOnError(ex -> LOG.error("Failed to close connection", ex))
						.onErrorResume(ex -> Mono.empty())
				)
				.then();
	public String toString() {
		return new StringJoiner(", ", LLMultiDatabaseConnection.class.getSimpleName() + "[", "]")
				.add("databaseShardConnections=" + databaseShardConnections)
				.add("allConnections=" + allConnections)
				.add("defaultDatabaseConnection=" + defaultDatabaseConnection)
				.add("anyConnection=" + anyConnection)
				.toString();
	}
}
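For orientation, a hedged sketch of how a multi-shard connection could be assembled against the constructor above; HashMultimap comes from Guava, and the ConnectionPartRocksDB constructor shape is an assumption based on the names in the diff, not a confirmed signature:

	Multimap<LLDatabaseConnection, ConnectionPart> parts = HashMultimap.create();
	parts.put(localConnection, new ConnectionPartRocksDB(null));       // null name = default database connection
	parts.put(remoteConnection, new ConnectionPartRocksDB("shard-1")); // named shard routed to another connection
	var multi = new LLMultiDatabaseConnection(parts);
	multi.connect();    // now synchronous: connects every sub-connection, logging failures
	multi.disconnect(); // synchronous teardown, also logging failures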
@@ -1,267 +0,0 @@
package it.cavallium.dbengine.database;

import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;
import it.cavallium.dbengine.client.IBackuppable;
import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers;
import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities;
import it.cavallium.dbengine.client.query.current.data.Query;
import it.cavallium.dbengine.client.query.current.data.QueryParams;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import it.cavallium.dbengine.lucene.LuceneHacks;
import it.cavallium.dbengine.lucene.LuceneUtils;
import it.cavallium.dbengine.lucene.collector.Buckets;
import it.cavallium.dbengine.lucene.searcher.BucketParams;
import it.cavallium.dbengine.rpc.current.data.LuceneIndexStructure;
import it.cavallium.dbengine.rpc.current.data.LuceneOptions;
import it.unimi.dsi.fastutil.doubles.DoubleArrayList;
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.reactivestreams.Publisher;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SignalType;

public class LLMultiLuceneIndex implements LLLuceneIndex {


	private final ConcurrentHashMap<Long, List<LLSnapshot>> registeredSnapshots = new ConcurrentHashMap<>();
	private final AtomicLong nextSnapshotNumber = new AtomicLong(1);

	private final String clusterName;
	private final LuceneIndexStructure indexStructure;
	private final IndicizerAnalyzers indicizerAnalyzers;
	private final IndicizerSimilarities indicizerSimilarities;
	private final LuceneOptions luceneOptions;
	private final LuceneHacks luceneHacks;
	private final LLLuceneIndex[] luceneIndicesById;
	private final List<LLLuceneIndex> luceneIndicesSet;
	private final int totalShards;
	private final Flux<LLLuceneIndex> luceneIndicesFlux;

	public LLMultiLuceneIndex(String clusterName,
			LuceneIndexStructure indexStructure,
			IndicizerAnalyzers indicizerAnalyzers,
			IndicizerSimilarities indicizerSimilarities,
			LuceneOptions luceneOptions,
			LuceneHacks luceneHacks,
			LLLuceneIndex[] luceneIndices) {
		this.clusterName = clusterName;
		this.indexStructure = indexStructure;
		this.indicizerAnalyzers = indicizerAnalyzers;
		this.indicizerSimilarities = indicizerSimilarities;
		this.luceneOptions = luceneOptions;
		this.luceneHacks = luceneHacks;
		this.luceneIndicesById = luceneIndices;
		this.totalShards = indexStructure.totalShards();
		var luceneIndicesSet = new HashSet<LLLuceneIndex>();
		for (LLLuceneIndex luceneIndex : luceneIndices) {
			if (luceneIndex != null) {
				luceneIndicesSet.add(luceneIndex);
			}
		}
		this.luceneIndicesSet = new ArrayList<>(luceneIndicesSet);
		this.luceneIndicesFlux = Flux.fromIterable(luceneIndicesSet);
	}

	@Override
	public String getLuceneIndexName() {
		return clusterName;
	}

	private LLLuceneIndex getLuceneIndex(LLTerm id) {
		return luceneIndicesById[LuceneUtils.getLuceneIndexId(id, totalShards)];
	}

	@Override
	public Mono<Void> addDocument(LLTerm id, LLUpdateDocument doc) {
		return getLuceneIndex(id).addDocument(id, doc);
	}

	@Override
	public Mono<Long> addDocuments(boolean atomic, Flux<Entry<LLTerm, LLUpdateDocument>> documents) {
		return documents
				.groupBy(term -> LuceneUtils.getLuceneIndexId(term.getKey(), totalShards))
				.flatMap(group -> {
					var index = luceneIndicesById[group.key()];
					return index.addDocuments(atomic, group);
				})
				.reduce(0L, Long::sum);
	}

	@Override
	public Mono<Void> deleteDocument(LLTerm id) {
		return getLuceneIndex(id).deleteDocument(id);
	}

	@Override
	public Mono<Void> update(LLTerm id, LLIndexRequest request) {
		return getLuceneIndex(id).update(id, request);
	}

	@Override
	public Mono<Long> updateDocuments(Flux<Entry<LLTerm, LLUpdateDocument>> documents) {
		return documents
				.log("multi-update-documents", Level.FINEST, false, SignalType.ON_NEXT, SignalType.ON_COMPLETE)
				.groupBy(term -> getLuceneIndex(term.getKey()))
				.flatMap(groupFlux -> groupFlux.key().updateDocuments(groupFlux))
				.reduce(0L, Long::sum);
	}

	@Override
	public Mono<Void> deleteAll() {
		Iterable<Mono<Void>> it = () -> luceneIndicesSet.stream().map(llLuceneIndex -> llLuceneIndex.deleteAll()).iterator();
		return Mono.whenDelayError(it);
	}

	@Override
	public Flux<LLSearchResultShard> moreLikeThis(@Nullable LLSnapshot snapshot,
			QueryParams queryParams,
			@Nullable String keyFieldName,
			Multimap<String, String> mltDocumentFields) {
		return luceneIndicesFlux.flatMap(luceneIndex -> luceneIndex.moreLikeThis(snapshot,
				queryParams,
				keyFieldName,
				mltDocumentFields
		)).doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard);
	}

	private Mono<Buckets> mergeShards(List<Buckets> shards) {
		return Mono.fromCallable(() -> {
			List<DoubleArrayList> seriesValues = new ArrayList<>();
			DoubleArrayList totals = new DoubleArrayList(shards.get(0).totals());

			for (Buckets shard : shards) {
				if (seriesValues.isEmpty()) {
					seriesValues.addAll(shard.seriesValues());
				} else {
					for (int serieIndex = 0; serieIndex < seriesValues.size(); serieIndex++) {
						DoubleArrayList mergedSerieValues = seriesValues.get(serieIndex);
						for (int dataIndex = 0; dataIndex < mergedSerieValues.size(); dataIndex++) {
							mergedSerieValues.set(dataIndex, mergedSerieValues.getDouble(dataIndex)
									+ shard.seriesValues().get(serieIndex).getDouble(dataIndex)
							);
						}
					}
				}
				for (int i = 0; i < totals.size(); i++) {
					totals.set(i, totals.getDouble(i) + shard.totals().getDouble(i));
				}
			}
			return new Buckets(seriesValues, totals);
		});
	}

	@Override
	public Flux<LLSearchResultShard> search(@Nullable LLSnapshot snapshot,
			QueryParams queryParams,
			@Nullable String keyFieldName) {
		return luceneIndicesFlux.flatMap(luceneIndex -> luceneIndex.search(snapshot,
				queryParams,
				keyFieldName
		)).doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard);
	}

	@Override
	public Mono<Buckets> computeBuckets(@Nullable LLSnapshot snapshot,
			@NotNull List<Query> queries,
			@Nullable Query normalizationQuery,
			BucketParams bucketParams) {
		return luceneIndicesFlux.flatMap(luceneIndex -> luceneIndex.computeBuckets(snapshot,
				queries,
				normalizationQuery,
				bucketParams
		)).collectList().flatMap(this::mergeShards).doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard);
	}

	@Override
	public boolean isLowMemoryMode() {
		return luceneOptions.lowMemory();
	}

	@Override
	public void close() {
		Iterable<Mono<Void>> it = () -> luceneIndicesSet.stream().map(e -> Mono.<Void>fromRunnable(e::close)).iterator();
		Mono.whenDelayError(it).transform(LLUtils::handleDiscard).block();
	}

	@Override
	public Mono<Void> flush() {
		Iterable<Mono<Void>> it = () -> luceneIndicesSet.stream().map(LLLuceneIndex::flush).iterator();
		return Mono.whenDelayError(it);
	}

	@Override
	public Mono<Void> waitForMerges() {
		Iterable<Mono<Void>> it = () -> luceneIndicesSet.stream().map(LLLuceneIndex::waitForMerges).iterator();
		return Mono.whenDelayError(it);
	}

	@Override
	public Mono<Void> waitForLastMerges() {
		Iterable<Mono<Void>> it = () -> luceneIndicesSet.stream().map(LLLuceneIndex::waitForLastMerges).iterator();
		return Mono.whenDelayError(it);
	}

	@Override
	public Mono<Void> refresh(boolean force) {
		Iterable<Mono<Void>> it = () -> luceneIndicesSet.stream().map(index -> index.refresh(force)).iterator();
		return Mono.whenDelayError(it);
	}

	@Override
	public Mono<LLSnapshot> takeSnapshot() {
		return Mono
				// Generate next snapshot index
				.fromCallable(nextSnapshotNumber::getAndIncrement)
				.flatMap(snapshotIndex -> luceneIndicesFlux
						.flatMapSequential(llLuceneIndex -> llLuceneIndex.takeSnapshot())
						.collectList()
						.doOnNext(instancesSnapshotsArray -> registeredSnapshots.put(snapshotIndex, instancesSnapshotsArray))
						.thenReturn(new LLSnapshot(snapshotIndex))
				);
	}

	@Override
	public Mono<Void> releaseSnapshot(LLSnapshot snapshot) {
		return Mono
				.fromCallable(() -> registeredSnapshots.remove(snapshot.getSequenceNumber()))
				.flatMapIterable(list -> list)
				.index()
				.flatMap(tuple -> {
					int index = (int) (long) tuple.getT1();
					LLSnapshot instanceSnapshot = tuple.getT2();
					return luceneIndicesSet.get(index).releaseSnapshot(instanceSnapshot);
				})
				.then();
	}

	@Override
	public Mono<Void> pauseForBackup() {
		return Mono.whenDelayError(Iterables.transform(this.luceneIndicesSet, IBackuppable::pauseForBackup));
	}

	@Override
	public Mono<Void> resumeAfterBackup() {
		return Mono.whenDelayError(Iterables.transform(this.luceneIndicesSet, IBackuppable::resumeAfterBackup));
	}

	@Override
	public boolean isPaused() {
		for (LLLuceneIndex llLuceneIndex : this.luceneIndicesSet) {
			if (llLuceneIndex.isPaused()) {
				return true;
			}
		}
		return false;
	}
}
@@ -1,195 +1,145 @@
package it.cavallium.dbengine.database;

import io.netty5.buffer.Buffer;
import io.netty5.buffer.Drop;
import io.netty5.buffer.Owned;
import io.netty5.util.Send;
import io.netty5.buffer.internal.ResourceSupport;
import it.cavallium.dbengine.utils.SimpleResource;
import java.util.StringJoiner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import it.cavallium.buffer.Buf;
import java.util.Objects;
import org.jetbrains.annotations.Nullable;

/**
 * Range of data, from min (inclusive), to max (exclusive)
 */
public class LLRange extends SimpleResource {
public class LLRange {

	private static final LLRange RANGE_ALL = new LLRange( null, null, (Buffer) null, false);
	private static final LLRange RANGE_ALL = new LLRange( null, null, (Buf) null);
	@Nullable
	private final Buffer min;
	private final Buf min;
	@Nullable
	private final Buffer max;
	private final Buf max;
	@Nullable
	private final Buffer single;
	private final Buf single;

	private LLRange(Send<Buffer> min, Send<Buffer> max, Send<Buffer> single, boolean closeable) {
		super(closeable);
	private LLRange(@Nullable Buf min, @Nullable Buf max, @Nullable Buf single) {
		assert single == null || (min == null && max == null);
		this.min = min != null ? min.receive().makeReadOnly() : null;
		this.max = max != null ? max.receive().makeReadOnly() : null;
		this.single = single != null ? single.receive().makeReadOnly() : null;
	}

	private LLRange(Buffer min, Buffer max, Buffer single, boolean closeable) {
		super(closeable);
		assert single == null || (min == null && max == null);
		this.min = min != null ? min.makeReadOnly() : null;
		this.max = max != null ? max.makeReadOnly() : null;
		this.single = single != null ? single.makeReadOnly() : null;
		assert min == null || max == null || min.compareTo(max) <= 0
				: "Minimum buffer is bigger than maximum buffer: " + min + " > " + max;
		this.min = min;
		this.max = max;
		this.single = single;
	}

	public static LLRange all() {
		return RANGE_ALL;
	}

	public static LLRange from(Send<Buffer> min) {
		return new LLRange(min, null, null, true);
	public static LLRange from(Buf min) {
		return new LLRange(min, null, null);
	}

	public static LLRange to(Send<Buffer> max) {
		return new LLRange(null, max, null, true);
	public static LLRange to(Buf max) {
		return new LLRange(null, max, null);
	}

	public static LLRange single(Send<Buffer> single) {
		return new LLRange(null, null, single, true);
	public static LLRange single(Buf single) {
		return new LLRange(null, null, single);
	}

	public static LLRange singleUnsafe(Buffer single) {
		return new LLRange(null, null, single, true);
	public static LLRange of(Buf min, Buf max) {
		return new LLRange(min, max, null);
	}

	public static LLRange of(Send<Buffer> min, Send<Buffer> max) {
		return new LLRange(min, max, null, true);
	public static boolean isInside(LLRange rangeSub, LLRange rangeParent) {
		if (rangeParent.isAll()) {
			return true;
		} else if (rangeParent.isSingle()) {
			return Objects.equals(rangeSub, rangeParent);
		} else {
			return ((!rangeParent.hasMin() || (rangeSub.hasMin() && rangeParent.getMin().compareTo(rangeSub.getMin()) <= 0)))
					&& ((!rangeParent.hasMax() || (rangeSub.hasMax() && rangeParent.getMax().compareTo(rangeSub.getMax()) >= 0)));
		}
	}

	public static LLRange ofUnsafe(Buffer min, Buffer max) {
		return new LLRange(min, max, null, true);
	@Nullable
	public static LLRange intersect(LLRange rangeA, LLRange rangeB) {
		boolean aEndInclusive = rangeA.isSingle();
		boolean bEndInclusive = rangeB.isSingle();
		Buf min = rangeA.isAll()
				? rangeB.getMin()
				: (rangeB.isAll()
						? rangeA.getMin()
						: (rangeA.getMin().compareTo(rangeB.getMin()) <= 0 ? rangeB.getMin() : rangeA.getMin()));
		int aComparedToB;
		Buf max;
		boolean maxInclusive;
		if (rangeA.isAll()) {
			max = rangeB.getMax();
			maxInclusive = bEndInclusive;
		} else if (rangeB.isAll()) {
			max = rangeA.getMax();
			maxInclusive = aEndInclusive;
		} else if ((aComparedToB = rangeA.getMax().compareTo(rangeB.getMax())) >= 0) {
			max = rangeB.getMax();
			if (aComparedToB == 0) {
				maxInclusive = bEndInclusive && aEndInclusive;
			} else {
				maxInclusive = bEndInclusive;
			}
		} else {
			max = rangeA.getMax();
			maxInclusive = aEndInclusive;
		}
		if (min != null && max != null && min.compareTo(max) >= (maxInclusive ? 1 : 0)) {
			return null;
		} else {
			if (min != null && min.equals(max)) {
				return LLRange.single(min);
			} else {
				return LLRange.of(min, max);
			}
		}
	}

	public boolean isAll() {
		ensureOpen();
		return min == null && max == null && single == null;
	}

	public boolean isSingle() {
		ensureOpen();
		return single != null;
	}

	public boolean hasMin() {
		ensureOpen();
		return min != null || single != null;
	}

	public Send<Buffer> getMin() {
		ensureOpen();
		if (min != null) {
			// todo: use a read-only copy
			return min.copy().send();
		} else if (single != null) {
			// todo: use a read-only copy
			return single.copy().send();
		} else {
			return null;
		}
	}

	public Buffer getMinUnsafe() {
		ensureOpen();
	public Buf getMin() {
		// todo: use a read-only copy
		if (min != null) {
			return min;
		} else if (single != null) {
		} else {
			return single;
		} else {
			return null;
		}
	}

	public Buffer getMinCopy() {
		ensureOpen();
		if (min != null) {
			return min.copy();
		} else if (single != null) {
			return single.copy();
		} else {
			return null;
		}
	}

	public boolean hasMax() {
		ensureOpen();
		return max != null || single != null;
	}

	public Send<Buffer> getMax() {
		ensureOpen();
		if (max != null) {
			// todo: use a read-only copy
			return max.copy().send();
		} else if (single != null) {
			// todo: use a read-only copy
			return single.copy().send();
		} else {
			return null;
		}
	}

	public Buffer getMaxUnsafe() {
		ensureOpen();
	public Buf getMax() {
		// todo: use a read-only copy
		if (max != null) {
			return max;
		} else if (single != null) {
		} else {
			return single;
		} else {
			return null;
		}
	}

	public Buffer getMaxCopy() {
		ensureOpen();
		if (max != null) {
			return max.copy();
		} else if (single != null) {
			return single.copy();
		} else {
			return null;
		}
	}

	public Send<Buffer> getSingle() {
		ensureOpen();
	public Buf getSingle() {
		assert isSingle();
		// todo: use a read-only copy
		return single != null ? single.copy().send() : null;
	}

	public Buffer getSingleUnsafe() {
		ensureOpen();
		assert isSingle();
		return single;
	}

	@Override
	protected void ensureOpen() {
		super.ensureOpen();
		assert min == null || min.isAccessible() : "Range min not owned";
		assert max == null || max.isAccessible() : "Range max not owned";
		assert single == null || single.isAccessible() : "Range single not owned";
	}

	@Override
	protected void onClose() {
		if (min != null && min.isAccessible()) {
			min.close();
		}
		if (max != null && max.isAccessible()) {
			max.close();
		}
		if (single != null && single.isAccessible()) {
			single.close();
		}
	public Buf getSingleUnsafe() {
		assert isSingle();
		return single;
	}

	@Override
@@ -211,21 +161,24 @@ public class LLRange extends SimpleResource {
		return result;
	}

	@SuppressWarnings("UnnecessaryUnicodeEscape")
	@Override
	public String toString() {
		return new StringJoiner(", ", LLRange.class.getSimpleName() + "[", "]")
				.add("min=" + LLUtils.toString(min))
				.add("max=" + LLUtils.toString(max))
				.toString();
		if (single != null) {
			return "[" + single + "]";
		} else if (min != null && max != null) {
			return "[" + LLUtils.toString(min) + "," + LLUtils.toString(max) + ")";
		} else if (min != null) {
			return "[" + min + ",\u221E)";
		} else if (max != null) {
			return "[\u2205," + max + ")";
		} else {
			return "[\u221E)";
		}
	}

	public LLRange copy() {
		ensureOpen();
		// todo: use a read-only copy
		return new LLRange(min != null ? min.copy().send() : null,
				max != null ? max.copy().send() : null,
				single != null ? single.copy().send() : null,
				true
		);
		return new LLRange(min, max, single);
	}
}
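A short usage sketch of the reworked LLRange above: instances are now plain immutable values over Buf, so nothing needs closing. Buf.wrap(byte[]) is an assumed factory, not confirmed by the diff:

	Buf one = Buf.wrap(new byte[] {0x01});
	Buf three = Buf.wrap(new byte[] {0x03});
	Buf five = Buf.wrap(new byte[] {0x05});
	LLRange a = LLRange.of(one, five);                            // [0x01, 0x05)
	LLRange b = LLRange.of(three, Buf.wrap(new byte[] {0x08}));   // [0x03, 0x08)
	LLRange common = LLRange.intersect(a, b);                     // [0x03, 0x05): larger min, smaller max
	boolean inside = LLRange.isInside(LLRange.single(three), a);  // true: 0x03 falls within [0x01, 0x05)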
@@ -1,7 +1,5 @@
package it.cavallium.dbengine.database;

import org.apache.lucene.search.Scorer;

public enum LLScoreMode {
	/**
	 * Produced scorers will allow visiting all matches and get their score.
@@ -15,7 +13,7 @@ public enum LLScoreMode {
	COMPLETE_NO_SCORES,
	/**
	 * Produced scorers will optionally allow skipping over non-competitive
	 * hits using the {@link Scorer#setMinCompetitiveScore(float)} API.
	 * hits using the Scorer#setMinCompetitiveScore(float) API.
	 * This can reduce time if using setMinCompetitiveScore.
	 */
	TOP_SCORES,
@@ -1,13 +0,0 @@
package it.cavallium.dbengine.database;

import java.util.function.BiFunction;
import org.jetbrains.annotations.NotNull;
import reactor.core.publisher.Flux;

public record LLSearchResult(Flux<LLSearchResultShard> results) {

	@NotNull
	public static BiFunction<LLSearchResult, LLSearchResult, LLSearchResult> accumulator() {
		return (a, b) -> new LLSearchResult(Flux.merge(a.results, b.results));
	}
}
@@ -1,128 +0,0 @@
package it.cavallium.dbengine.database;

import io.netty5.buffer.Drop;
import io.netty5.buffer.Owned;
import io.netty5.buffer.internal.ResourceSupport;
import it.cavallium.dbengine.client.LuceneIndexImpl;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import it.cavallium.dbengine.lucene.LuceneCloseable;
import it.cavallium.dbengine.utils.SimpleResource;
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
import java.util.List;
import java.util.Objects;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import reactor.core.publisher.Flux;

public class LLSearchResultShard extends SimpleResource implements DiscardingCloseable {

	private static final Logger LOG = LogManager.getLogger(LLSearchResultShard.class);

	private final Flux<LLKeyScore> results;
	private final TotalHitsCount totalHitsCount;

	public LLSearchResultShard(Flux<LLKeyScore> results, TotalHitsCount totalHitsCount) {
		this.results = results;
		this.totalHitsCount = totalHitsCount;
	}

	public static LLSearchResultShard withResource(Flux<LLKeyScore> results,
			TotalHitsCount totalHitsCount,
			SafeCloseable closeableResource) {
		if (closeableResource instanceof LuceneCloseable luceneCloseable) {
			return new LuceneLLSearchResultShard(results, totalHitsCount, List.of(luceneCloseable));
		} else {
			return new ResourcesLLSearchResultShard(results, totalHitsCount, List.of(closeableResource));
		}
	}

	public Flux<LLKeyScore> results() {
		ensureOpen();
		return results;
	}

	public TotalHitsCount totalHitsCount() {
		ensureOpen();
		return totalHitsCount;
	}

	@Override
	public boolean equals(Object obj) {
		if (obj == this)
			return true;
		if (obj == null || obj.getClass() != this.getClass())
			return false;
		var that = (LLSearchResultShard) obj;
		return Objects.equals(this.results, that.results) && Objects.equals(this.totalHitsCount, that.totalHitsCount);
	}

	@Override
	public int hashCode() {
		return Objects.hash(results, totalHitsCount);
	}

	@Override
	public String toString() {
		return "LLSearchResultShard[" + "results=" + results + ", " + "totalHitsCount=" + totalHitsCount + ']';
	}

	@Override
	public void onClose() {
	}

	public static class ResourcesLLSearchResultShard extends LLSearchResultShard {

		private final List<SafeCloseable> resources;

		public ResourcesLLSearchResultShard(Flux<LLKeyScore> resultsFlux,
				TotalHitsCount count,
				List<SafeCloseable> resources) {
			super(resultsFlux, count);
			this.resources = resources;
		}

		@Override
		public void onClose() {
			try {
				for (SafeCloseable resource : resources) {
					try {
						resource.close();
					} catch (Throwable ex) {
						LOG.error("Failed to close resource", ex);
					}
				}
			} catch (Throwable ex) {
				LOG.error("Failed to close resources", ex);
			}
			super.onClose();
		}
	}

	public static class LuceneLLSearchResultShard extends LLSearchResultShard implements LuceneCloseable {

		private final List<LuceneCloseable> resources;

		public LuceneLLSearchResultShard(Flux<LLKeyScore> resultsFlux,
				TotalHitsCount count,
				List<LuceneCloseable> resources) {
			super(resultsFlux, count);
			this.resources = resources;
		}

		@Override
		public void onClose() {
			try {
				for (LuceneCloseable resource : resources) {
					try {
						resource.close();
					} catch (Throwable ex) {
						LOG.error("Failed to close resource", ex);
					}
				}
			} catch (Throwable ex) {
				LOG.error("Failed to close resources", ex);
			}
			super.onClose();
		}
	}
}
@@ -1,32 +1,22 @@
package it.cavallium.dbengine.database;

import io.netty5.buffer.Buffer;
import io.netty5.buffer.BufferAllocator;
import io.netty5.util.Send;
import it.cavallium.dbengine.database.disk.BinarySerializationFunction;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.unimi.dsi.fastutil.bytes.ByteList;
import java.util.function.Function;
import java.io.IOException;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;

public interface LLSingleton extends LLKeyValueDatabaseStructure {

	Buf get(@Nullable LLSnapshot snapshot);

	BufferAllocator getAllocator();
	void set(Buf value);

	Mono<Buffer> get(@Nullable LLSnapshot snapshot);

	Mono<Void> set(Mono<Buffer> value);

	default Mono<Buffer> update(BinarySerializationFunction updater,
			UpdateReturnMode updateReturnMode) {
		return this
				.updateAndGetDelta(updater)
				.transform(prev -> LLUtils.resolveLLDelta(prev, updateReturnMode));
	default Buf update(SerializationFunction<@Nullable Buf, @Nullable Buf> updater, UpdateReturnMode updateReturnMode) {
		var prev = this.updateAndGetDelta(updater);
		return LLUtils.resolveLLDelta(prev, updateReturnMode);
	}

	Mono<LLDelta> updateAndGetDelta(BinarySerializationFunction updater);
	LLDelta updateAndGetDelta(SerializationFunction<@Nullable Buf, @Nullable Buf> updater);

	String getColumnName();

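A hedged sketch of the synchronous update flow defined above: the updater receives the previous serialized value (possibly null) and returns the replacement. encodeCounter and decodeCounter are hypothetical helpers, and 'singleton' is an assumed LLSingleton instance:

	Buf updated = singleton.update(prev -> {
		long old = prev == null ? 0L : decodeCounter(prev); // hypothetical decoder
		return encodeCounter(old + 1);                      // hypothetical encoder
	}, UpdateReturnMode.GET_NEW_VALUE);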
@@ -1,10 +1,10 @@
package it.cavallium.dbengine.database;

import reactor.core.publisher.Mono;
import java.io.IOException;

public interface LLSnapshottable {

	Mono<LLSnapshot> takeSnapshot();
	LLSnapshot takeSnapshot();

	Mono<Void> releaseSnapshot(LLSnapshot snapshot);
	void releaseSnapshot(LLSnapshot snapshot);
}
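With both operations synchronous, a snapshot now pairs naturally with try/finally; here 'db' stands for any assumed LLSnapshottable implementation:

	LLSnapshot snapshot = db.takeSnapshot();
	try {
		// read from the frozen view, passing 'snapshot' to the read methods
	} finally {
		db.releaseSnapshot(snapshot); // always release, or the snapshot leaks
	}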
@@ -1,5 +0,0 @@
package it.cavallium.dbengine.database;

import java.util.List;

public record LLSoftUpdateDocument(List<LLItem> items, List<LLItem> softDeleteItems) implements LLIndexRequest {}
@@ -1,59 +0,0 @@
package it.cavallium.dbengine.database;

import java.util.Objects;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;

public class LLTerm {

	private final String key;
	private final BytesRef value;

	public LLTerm(String key, String value) {
		this.key = key;
		this.value = new BytesRef(value);
	}

	public LLTerm(String key, BytesRef value) {
		this.key = key;
		this.value = value;
	}

	public String getKey() {
		return key;
	}

	public String getValueUTF8() {
		return value.utf8ToString();
	}

	public BytesRef getValueBytesRef() {
		return value;
	}

	@Override
	public String toString() {
		return "LLTerm{" +
				"key='" + key + '\'' +
				", value='" + value + '\'' +
				'}';
	}

	@Override
	public boolean equals(Object o) {
		if (this == o) {
			return true;
		}
		if (o == null || getClass() != o.getClass()) {
			return false;
		}
		LLTerm llTerm = (LLTerm) o;
		return Objects.equals(key, llTerm.key) &&
				Objects.equals(value, llTerm.value);
	}

	@Override
	public int hashCode() {
		return Objects.hash(key, value);
	}
}
@@ -1,52 +0,0 @@
package it.cavallium.dbengine.database;

import java.util.Arrays;
import java.util.Objects;

@SuppressWarnings("unused")
public class LLTopKeys {

	private final long totalHitsCount;
	private final LLKeyScore[] hits;

	public LLTopKeys(long totalHitsCount, LLKeyScore[] hits) {
		this.totalHitsCount = totalHitsCount;
		this.hits = hits;
	}

	public long getTotalHitsCount() {
		return totalHitsCount;
	}

	public LLKeyScore[] getHits() {
		return hits;
	}

	@Override
	public boolean equals(Object o) {
		if (this == o) {
			return true;
		}
		if (o == null || getClass() != o.getClass()) {
			return false;
		}
		LLTopKeys llTopKeys = (LLTopKeys) o;
		return totalHitsCount == llTopKeys.totalHitsCount &&
				Arrays.equals(hits, llTopKeys.hits);
	}

	@Override
	public int hashCode() {
		int result = Objects.hash(totalHitsCount);
		result = 31 * result + Arrays.hashCode(hits);
		return result;
	}

	@Override
	public String toString() {
		return "LLTopKeys{" +
				"totalHitsCount=" + totalHitsCount +
				", hits=" + Arrays.toString(hits) +
				'}';
	}
}
@@ -1,7 +1,7 @@
package it.cavallium.dbengine.database;

/**
 * https://lucene.apache.org/core/8_0_0/core/org/apache/lucene/document/Field.html
 * <a href="https://lucene.apache.org/core/8_0_0/core/org/apache/lucene/document/Field.html">Field.html</a>
 */
public enum LLType {
	StringField,
@@ -1,5 +0,0 @@
package it.cavallium.dbengine.database;

import java.util.List;

public record LLUpdateDocument(List<LLItem> items) implements LLIndexRequest {}
@@ -1,5 +0,0 @@
package it.cavallium.dbengine.database;

import java.util.List;

public record LLUpdateFields(List<LLItem> items) implements LLIndexRequest {}
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
package it.cavallium.dbengine.database;

import io.netty5.buffer.Buffer;
import it.cavallium.buffer.Buf;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;
@@ -9,20 +9,20 @@ import java.util.function.Function;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

public final class OptionalBuf implements DiscardingCloseable {
public final class OptionalBuf {

	private static final OptionalBuf EMPTY = new OptionalBuf(null);
	private final Buffer buffer;
	private final Buf buffer;

	private OptionalBuf(@Nullable Buffer buffer) {
	private OptionalBuf(@Nullable Buf buffer) {
		this.buffer = buffer;
	}

	public static OptionalBuf ofNullable(@Nullable Buffer buffer) {
	public static OptionalBuf ofNullable(@Nullable Buf buffer) {
		return new OptionalBuf(buffer);
	}

	public static OptionalBuf of(@NotNull Buffer buffer) {
	public static OptionalBuf of(@NotNull Buf buffer) {
		Objects.requireNonNull(buffer);
		return new OptionalBuf(buffer);
	}
@@ -31,13 +31,6 @@ public final class OptionalBuf implements DiscardingCloseable {
		return EMPTY;
	}

	@Override
	public void close() {
		if (buffer != null && buffer.isAccessible()) {
			buffer.close();
		}
	}

	@Override
	public String toString() {
		if (buffer != null) {
@@ -66,21 +59,21 @@ public final class OptionalBuf implements DiscardingCloseable {
		return buffer != null ? buffer.hashCode() : 0;
	}

	public Buffer get() {
	public Buf get() {
		if (buffer == null) {
			throw new NoSuchElementException();
		}
		return buffer;
	}

	public Buffer orElse(Buffer alternative) {
	public Buf orElse(Buf alternative) {
		if (buffer == null) {
			return alternative;
		}
		return buffer;
	}

	public void ifPresent(Consumer<Buffer> consumer) {
	public void ifPresent(Consumer<Buf> consumer) {
		if (buffer != null) {
			consumer.accept(buffer);
		}
@@ -94,7 +87,7 @@ public final class OptionalBuf implements DiscardingCloseable {
		return buffer == null;
	}

	public <U> Optional<U> map(Function<Buffer, U> mapper) {
	public <U> Optional<U> map(Function<Buf, U> mapper) {
		if (buffer != null) {
			return Optional.of(mapper.apply(buffer));
		} else {
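After this change OptionalBuf behaves like a plain Optional over Buf, with no close() obligation. A small sketch; readValue(), fallbackBuf and Buf#size() are assumptions for illustration:

	OptionalBuf maybe = OptionalBuf.ofNullable(readValue()); // hypothetical lookup returning Buf or null
	int length = maybe.map(Buf::size).orElse(0);             // map() returns java.util.Optional
	Buf value = maybe.orElse(fallbackBuf);                   // no reference counting anymore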
@@ -1,57 +0,0 @@
package it.cavallium.dbengine.database;

import io.netty5.util.Send;
import java.util.function.Supplier;

public abstract class RangeSupplier implements DiscardingCloseable, Supplier<LLRange> {

	public static RangeSupplier of(Supplier<LLRange> supplier) {
		return new SimpleSupplier(supplier);
	}

	public static RangeSupplier ofOwned(LLRange supplier) {
		return new CopySupplier(supplier);
	}

	public static RangeSupplier ofShared(LLRange supplier) {
		return new SimpleSupplier(supplier::copy);
	}

	private static final class SimpleSupplier extends RangeSupplier {

		private final Supplier<LLRange> supplier;

		public SimpleSupplier(Supplier<LLRange> supplier) {
			this.supplier = supplier;
		}

		@Override
		public LLRange get() {
			return supplier.get();
		}

		@Override
		public void close() {

		}
	}

	private static final class CopySupplier extends RangeSupplier {

		private final LLRange supplier;

		public CopySupplier(LLRange supplier) {
			this.supplier = supplier;
		}

		@Override
		public LLRange get() {
			return supplier.copy();
		}

		@Override
		public void close() {
			supplier.close();
		}
	}
}
@@ -60,7 +60,8 @@ public enum RocksDBLongProperty implements RocksDBProperty {
	NUM_BLOB_FILES("num-blob-files"),
	TOTAL_BLOB_FILE_SIZE("total-blob-file-size"),
	LIVE_BLOB_FILE_SIZE("live-blob-file-size"),
	LIVE_BLOB_FILE_GARBAGE_SIZE("live-blob-file-garbage-size")
	LIVE_BLOB_FILE_GARBAGE_SIZE("live-blob-file-garbage-size"),
	FILE_READ_DB_OPEN_MICROS("file.read.db.open.micros")
	;

	private final String name;
@@ -2,6 +2,10 @@ package it.cavallium.dbengine.database;

public interface RocksDBProperty {

	/**
	 * Get rocksdb property name
	 * @return name, with the "rocksdb." prefix included
	 */
	String getName();

	boolean isNumeric();
@@ -1,7 +1,6 @@
package it.cavallium.dbengine.database;

public interface SafeCloseable extends io.netty5.util.SafeCloseable {
public interface SafeCloseable extends AutoCloseable {

	@Override
	void close();
}
@@ -0,0 +1,5 @@
package it.cavallium.dbengine.database;

import it.cavallium.buffer.Buf;

public record SerializedKey<T>(T key, Buf serialized) {}
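The new record simply carries a key together with the bytes it was encoded from, so both forms can travel through iteration APIs without re-encoding. Trivial usage (serializedForm is an assumed Buf):

	SerializedKey<String> entry = new SerializedKey<>("user:42", serializedForm);
	String key = entry.key();        // the deserialized key
	Buf raw = entry.serialized();    // its serialized representation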
@@ -4,7 +4,7 @@ import it.cavallium.dbengine.database.collections.DatabaseStage;
import java.util.Map.Entry;
import java.util.Objects;

public final class SubStageEntry<T, U extends DatabaseStage<?>> implements DiscardingCloseable, Entry<T, U> {
public final class SubStageEntry<T, U extends DatabaseStage<?>> implements Entry<T, U> {

	private final T key;
	private final U value;
@@ -14,13 +14,6 @@ public final class SubStageEntry<T, U extends DatabaseStage<?>> implements Disca
		this.value = value;
	}

	@Override
	public void close() {
		if (value != null) {
			value.close();
		}
	}

	@Override
	public T getKey() {
		return key;
@@ -1,11 +1,11 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Buffer;
import io.netty5.buffer.BufferAllocator;
import it.cavallium.dbengine.database.BufSupplier;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;
import java.util.function.Supplier;
import org.jetbrains.annotations.NotNull;

public class DatabaseEmpty {
@@ -13,16 +13,16 @@ public class DatabaseEmpty {
	@SuppressWarnings({"unused", "InstantiationOfUtilityClass"})
	public static final Nothing NOTHING = new Nothing();

	public static Serializer<Nothing> nothingSerializer(BufferAllocator bufferAllocator) {
	public static Serializer<Nothing> nothingSerializer() {
		return new Serializer<>() {

			@Override
			public @NotNull Nothing deserialize(@NotNull Buffer serialized) {
			public @NotNull Nothing deserialize(@NotNull BufDataInput in) throws SerializationException {
				return NOTHING;
			}

			@Override
			public void serialize(@NotNull Nothing deserialized, Buffer output) {
			public void serialize(@NotNull Nothing deserialized, BufDataOutput out) throws SerializationException {

			}

@@ -36,8 +36,8 @@ public class DatabaseEmpty {
	private DatabaseEmpty() {
	}

	public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, BufSupplier key) {
		return new DatabaseMapSingle<>(dictionary, key, nothingSerializer(dictionary.getAllocator()));
	public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, Buf key) {
		return new DatabaseMapSingle<>(dictionary, key, nothingSerializer());
	}

	public static final class Nothing {
@@ -1,14 +1,13 @@
package it.cavallium.dbengine.database.collections;

import com.google.common.primitives.Ints;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.database.LLKeyValueDatabaseStructure;
import it.cavallium.dbengine.database.LLSingleton;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import java.util.concurrent.ForkJoinPool;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;

public class DatabaseInt implements LLKeyValueDatabaseStructure {

@@ -17,32 +16,32 @@ public class DatabaseInt implements LLKeyValueDatabaseStructure {

public DatabaseInt(LLSingleton singleton) {
this.singleton = singleton;
this.serializer = SerializerFixedBinaryLength.intSerializer(singleton.getAllocator());
this.serializer = SerializerFixedBinaryLength.intSerializer();
}

public Mono<Integer> get(@Nullable LLSnapshot snapshot) {
var resultMono = singleton.get(snapshot);
return Mono.usingWhen(resultMono,
result -> Mono.fromSupplier(() -> serializer.deserialize(result)),
LLUtils::finalizeResource
);
public Integer get(@Nullable LLSnapshot snapshot) {
var result = singleton.get(snapshot);
return serializer.deserialize(BufDataInput.create(result));
}

public Mono<Void> set(int value) {
return singleton.set(Mono.fromCallable(() -> {
var buf = singleton.getAllocator().allocate(Integer.BYTES);
try {
serializer.serialize(value, buf);
return buf;
} catch (Throwable ex) {
buf.close();
throw ex;
}
}));
public void set(int value) {
var buf = BufDataOutput.createLimited(Integer.BYTES);
serializer.serialize(value, buf);
singleton.set(buf.asList());
}

@Override
public String getDatabaseName() {
return singleton.getDatabaseName();
}

@Override
public ForkJoinPool getDbReadPool() {
return singleton.getDbReadPool();
}

@Override
public ForkJoinPool getDbWritePool() {
return singleton.getDbWritePool();
}
}
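
The DatabaseInt hunk is representative of the whole changeset: Mono<Integer> get(...) and Mono<Void> set(...) become plain blocking methods, and the hand-rolled allocate/serialize/close dance is replaced by a size-limited BufDataOutput. A hedged usage sketch (the LLSingleton wiring is elided; `singleton` is assumed to come from an already-open database):

```java
// Before: callers had to compose and subscribe, e.g.
//   databaseInt.set(42).then(databaseInt.get(null)).subscribe(v -> ...);
// After: straight-line blocking calls.
DatabaseInt databaseInt = new DatabaseInt(singleton);
databaseInt.set(42);
Integer value = databaseInt.get(null); // null snapshot = read the latest state
```
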
@@ -1,16 +1,15 @@
package it.cavallium.dbengine.database.collections;

import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.database.LLKeyValueDatabaseStructure;
import it.cavallium.dbengine.database.LLSingleton;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import java.util.concurrent.ForkJoinPool;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;

public class DatabaseLong implements LLKeyValueDatabaseStructure {

@@ -20,86 +19,77 @@ public class DatabaseLong implements LLKeyValueDatabaseStructure {

public DatabaseLong(LLSingleton singleton) {
this.singleton = singleton;
this.serializer = SerializerFixedBinaryLength.longSerializer(singleton.getAllocator());
this.bugSerializer = SerializerFixedBinaryLength.intSerializer(singleton.getAllocator());
this.serializer = SerializerFixedBinaryLength.longSerializer();
this.bugSerializer = SerializerFixedBinaryLength.intSerializer();
}

public Mono<Long> get(@Nullable LLSnapshot snapshot) {
var resultMono = singleton.get(snapshot);
return Mono.usingWhen(resultMono,
result -> Mono.fromSupplier(() -> {
if (result.readableBytes() == 4) {
return (long) (int) bugSerializer.deserialize(result);
} else {
return serializer.deserialize(result);
}
}),
LLUtils::finalizeResource
);
public Long get(@Nullable LLSnapshot snapshot) {
var result = BufDataInput.create(singleton.get(snapshot));
if (result.available() == 4) {
return (long) (int) bugSerializer.deserialize(result);
} else {
return serializer.deserialize(result);
}
}

public Mono<Long> incrementAndGet() {
public Long incrementAndGet() {
return addAnd(1, UpdateReturnMode.GET_NEW_VALUE);
}

public Mono<Long> getAndIncrement() {
public Long getAndIncrement() {
return addAnd(1, UpdateReturnMode.GET_OLD_VALUE);
}

public Mono<Long> decrementAndGet() {
public Long decrementAndGet() {
return addAnd(-1, UpdateReturnMode.GET_NEW_VALUE);
}

public Mono<Long> getAndDecrement() {
public Long getAndDecrement() {
return addAnd(-1, UpdateReturnMode.GET_OLD_VALUE);
}

public Mono<Long> addAndGet(long count) {
public Long addAndGet(long count) {
return addAnd(count, UpdateReturnMode.GET_NEW_VALUE);
}

public Mono<Long> getAndAdd(long count) {
public Long getAndAdd(long count) {
return addAnd(count, UpdateReturnMode.GET_OLD_VALUE);
}

private Mono<Long> addAnd(long count, UpdateReturnMode updateReturnMode) {
var resultMono = singleton.update(prev -> {
try (prev) {
if (prev != null) {
var prevLong = prev.readLong();
var alloc = singleton.getAllocator();
var buf = alloc.allocate(Long.BYTES);
buf.writeLong(prevLong + count);
return buf;
} else {
var alloc = singleton.getAllocator();
var buf = alloc.allocate(Long.BYTES);
buf.writeLong(count);
return buf;
}
private Long addAnd(long count, UpdateReturnMode updateReturnMode) {
var result = singleton.update(prev -> {
if (prev != null) {
var prevLong = prev.getLong(0);
var buf = Buf.createZeroes(Long.BYTES);
buf.setLong(0, prevLong + count);
return buf;
} else {
var buf = Buf.createZeroes(Long.BYTES);
buf.setLong(0, count);
return buf;
}
}, updateReturnMode);
return Mono.usingWhen(resultMono,
result -> Mono.fromSupplier(result::readLong),
LLUtils::finalizeResource
).single();
return result.getLong(0);
}

public Mono<Void> set(long value) {
return singleton.set(Mono.fromCallable(() -> {
var buf = singleton.getAllocator().allocate(Long.BYTES);
try {
serializer.serialize(value, buf);
} catch (Throwable ex) {
buf.close();
throw ex;
}
return buf;
}));
public void set(long value) {
var buf = BufDataOutput.createLimited(Long.BYTES);
serializer.serialize(value, buf);
singleton.set(buf.asList());
}

@Override
public String getDatabaseName() {
return singleton.getDatabaseName();
}

@Override
public ForkJoinPool getDbReadPool() {
return singleton.getDbReadPool();
}

@Override
public ForkJoinPool getDbWritePool() {
return singleton.getDbWritePool();
}
}
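
Note that the rewritten DatabaseLong.get keeps the "bugSerializer" fallback: a stored value that is 4 bytes wide was written as an int by an old buggy version, so it is decoded as an int and widened, while 8-byte values are decoded as longs. The same decision, rendered as a self-contained sketch over a plain byte array (buffer classes elided):

```java
// Width fallback preserved by DatabaseLong.get(), sketched with java.nio.
class CounterDecodeSketch {
    static long decodeStoredCounter(byte[] stored) {
        if (stored.length == Integer.BYTES) {
            // legacy 4-byte value: decode as int and widen
            return java.nio.ByteBuffer.wrap(stored).getInt();
        }
        return java.nio.ByteBuffer.wrap(stored).getLong();
    }
}
```
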
@@ -1,43 +1,54 @@
package it.cavallium.dbengine.database.collections;

import static java.util.Objects.requireNonNullElseGet;
import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;

import io.netty5.buffer.Buffer;
import io.netty5.util.Resource;
import io.netty5.buffer.internal.ResourceSupport;
import com.google.common.collect.Lists;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.BufSupplier;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.SerializedKey;
import it.cavallium.dbengine.database.SubStageEntry;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.disk.BinarySerializationFunction;
import it.cavallium.dbengine.database.disk.CachedSerializationFunction;
import it.cavallium.dbengine.database.disk.LLLocalDictionary;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationKeyState.RocksDBFileIterationStateKeyError;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationKeyState.RocksDBFileIterationStateKeyOk;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationState.RocksDBFileIterationStateBegin;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationState.RocksDBFileIterationStateEnd;
import it.cavallium.dbengine.database.disk.RocksDBFile.RocksDBFileIterationState.RocksDBFileIterationStateKey;
import it.cavallium.dbengine.database.disk.SSTRange.SSTRangeFull;
import it.cavallium.dbengine.database.serialization.KVSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.cavallium.dbengine.utils.InternalMonoUtils;
import it.cavallium.dbengine.utils.StreamUtils;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMaps;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import org.rocksdb.RocksDBException;

/**
 * Optimized implementation of "DatabaseMapDictionary with SubStageGetterSingle"
@@ -50,11 +61,11 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
private final Serializer<U> valueSerializer;

protected DatabaseMapDictionary(LLDictionary dictionary,
@Nullable BufSupplier prefixKeySupplier,
@Nullable Buf prefixKey,
SerializerFixedBinaryLength<T> keySuffixSerializer,
Serializer<U> valueSerializer) {
// Do not retain or release or use the prefixKey here
super(dictionary, prefixKeySupplier, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0);
super(dictionary, prefixKey, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0);
this.valueSerializer = valueSerializer;
}

@@ -65,65 +76,53 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
}

public static <T, U> DatabaseMapDictionary<T, U> tail(LLDictionary dictionary,
@Nullable BufSupplier prefixKeySupplier,
@Nullable Buf prefixKey,
SerializerFixedBinaryLength<T> keySuffixSerializer,
Serializer<U> valueSerializer) {
return new DatabaseMapDictionary<>(dictionary, prefixKeySupplier, keySuffixSerializer, valueSerializer);
return new DatabaseMapDictionary<>(dictionary, prefixKey, keySuffixSerializer, valueSerializer);
}

public static <K, V> Flux<Entry<K, V>> getLeavesFrom(DatabaseMapDictionary<K, V> databaseMapDictionary,
public static <K, V> Stream<Entry<K, V>> getLeavesFrom(DatabaseMapDictionary<K, V> databaseMapDictionary,
CompositeSnapshot snapshot,
Mono<K> keyMin,
Mono<K> keyMax,
boolean reverse, boolean smallRange) {
Mono<Optional<K>> keyMinOptMono = keyMin.map(Optional::of).defaultIfEmpty(Optional.empty());
Mono<Optional<K>> keyMaxOptMono = keyMax.map(Optional::of).defaultIfEmpty(Optional.empty());
@Nullable K keyMin,
@Nullable K keyMax,
boolean reverse,
boolean smallRange) {

return Mono.zip(keyMinOptMono, keyMaxOptMono).flatMapMany(entry -> {
var keyMinOpt = entry.getT1();
var keyMaxOpt = entry.getT2();
if (keyMinOpt.isPresent() || keyMaxOpt.isPresent()) {
return databaseMapDictionary.getAllValues(snapshot,
keyMinOpt.orElse(null),
keyMaxOpt.orElse(null),
reverse,
smallRange
);
} else {
return databaseMapDictionary.getAllValues(snapshot, smallRange);
}
});
if (keyMin != null || keyMax != null) {
return databaseMapDictionary.getAllEntries(snapshot,
keyMin,
keyMax,
reverse,
smallRange,
Map::entry
);
} else {
return databaseMapDictionary.getAllEntries(snapshot, smallRange, Map::entry);
}
}

public static <K> Flux<K> getKeyLeavesFrom(DatabaseMapDictionary<K, ?> databaseMapDictionary,
public static <K> Stream<K> getKeyLeavesFrom(DatabaseMapDictionary<K, ?> databaseMapDictionary,
CompositeSnapshot snapshot,
Mono<K> keyMin,
Mono<K> keyMax,
boolean reverse, boolean smallRange) {
Mono<Optional<K>> keyMinOptMono = keyMin.map(Optional::of).defaultIfEmpty(Optional.empty());
Mono<Optional<K>> keyMaxOptMono = keyMax.map(Optional::of).defaultIfEmpty(Optional.empty());
@Nullable K keyMin,
@Nullable K keyMax,
boolean reverse,
boolean smallRange) {

return Mono.zip(keyMinOptMono, keyMaxOptMono).flatMapMany(keys -> {
var keyMinOpt = keys.getT1();
var keyMaxOpt = keys.getT2();
Flux<? extends Entry<K, ? extends DatabaseStageEntry<?>>> stagesFlux;
if (keyMinOpt.isPresent() || keyMaxOpt.isPresent()) {
stagesFlux = databaseMapDictionary
.getAllStages(snapshot, keyMinOpt.orElse(null), keyMaxOpt.orElse(null), reverse, smallRange);
} else {
stagesFlux = databaseMapDictionary.getAllStages(snapshot, smallRange);
}
return stagesFlux.doOnNext(e -> e.getValue().close())
.doOnDiscard(Entry.class, e -> {
if (e.getValue() instanceof DatabaseStageEntry<?> resource) {
LLUtils.onDiscard(resource);
}
})
.map(Entry::getKey);
});
Stream<? extends Entry<K, ? extends DatabaseStageEntry<?>>> stagesFlux;
if (keyMin != null || keyMax != null) {
stagesFlux = databaseMapDictionary.getAllStages(snapshot, keyMin, keyMax, reverse, smallRange);
} else {
stagesFlux = databaseMapDictionary.getAllStages(snapshot, smallRange);
}
return stagesFlux.map(Entry::getKey);
}
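
getLeavesFrom and getKeyLeavesFrom show the boundary-handling change well: the old signatures took Mono<K> bounds and zipped them into Optionals, while the new ones take @Nullable K directly, so "no bound" is simply null. A hedged call-site sketch (`dict` and `snapshot` are assumed to exist; the key and value types are placeholders):

```java
// Full scan: both bounds null.
Stream<Entry<String, Long>> all =
        DatabaseMapDictionary.getLeavesFrom(dict, snapshot, null, null, false, false);

// Bounded scan: non-null bounds select the sub-range, smallRange hints a short scan.
Stream<Entry<String, Long>> some =
        DatabaseMapDictionary.getLeavesFrom(dict, snapshot, "aaa", "mmm", false, true);
```
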
private @Nullable U deserializeValue(T keySuffix, Buffer value) {
private U deserializeValue(Buf value) {
return valueSerializer.deserialize(BufDataInput.create(value));
}

private @Nullable U deserializeValue(T keySuffix, BufDataInput value) {
try {
return valueSerializer.deserialize(value);
} catch (IndexOutOfBoundsException ex) {
@@ -131,19 +130,16 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
if (exMessage != null && exMessage.contains("read 0 to 0, write 0 to ")) {
var totalZeroBytesErrors = this.totalZeroBytesErrors.incrementAndGet();
if (totalZeroBytesErrors < 512 || totalZeroBytesErrors % 10000 == 0) {
try (var keyPrefix = keyPrefixSupplier.get()) {
try (var keySuffixBytes = serializeKeySuffixToKey(keySuffix)) {
LOG.error(
"Unexpected zero-bytes value at "
+ dictionary.getDatabaseName() + ":" + dictionary.getColumnName()
+ ":" + LLUtils.toStringSafe(keyPrefix) + ":" + keySuffix
+ "(" + LLUtils.toStringSafe(keySuffixBytes) + ") total=" + totalZeroBytesErrors);
} catch (SerializationException e) {
LOG.error(
"Unexpected zero-bytes value at " + dictionary.getDatabaseName() + ":" + dictionary.getColumnName()
+ ":" + LLUtils.toStringSafe(keyPrefix) + ":" + keySuffix + "(?) total="
+ totalZeroBytesErrors);
}
var keySuffixBytes = serializeKeySuffixToKey(keySuffix);
try {
LOG.error(
"Unexpected zero-bytes value at " + dictionary.getDatabaseName() + ":" + dictionary.getColumnName()
+ ":" + LLUtils.toStringSafe(keyPrefix) + ":" + keySuffix + "(" + LLUtils.toStringSafe(
keySuffixBytes) + ") total=" + totalZeroBytesErrors);
} catch (SerializationException e) {
LOG.error(
"Unexpected zero-bytes value at " + dictionary.getDatabaseName() + ":" + dictionary.getColumnName()
+ ":" + LLUtils.toStringSafe(keyPrefix) + ":" + keySuffix + "(?) total=" + totalZeroBytesErrors);
}
}
return null;
@@ -153,139 +149,120 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
}
}
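
The zero-bytes branch above throttles its own logging so a corrupted range cannot flood the log: the first occurrences are always reported, later ones only at a fixed sampling interval. The condition, restated on its own:

```java
// Log the first 512 zero-bytes errors, then only every 10000th one.
class ZeroBytesLogSketch {
    static boolean shouldLog(long totalZeroBytesErrors) {
        return totalZeroBytesErrors < 512 || totalZeroBytesErrors % 10000 == 0;
    }
}
```
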
private Buffer serializeValue(U value) throws SerializationException {
private Buf serializeValue(U value) throws SerializationException {
var valSizeHint = valueSerializer.getSerializedSizeHint();
if (valSizeHint == -1) valSizeHint = 128;
var valBuf = dictionary.getAllocator().allocate(valSizeHint);
var valBuf = BufDataOutput.create(valSizeHint);
try {
valueSerializer.serialize(value, valBuf);
return valBuf;
} catch (Throwable t) {
valBuf.close();
throw t;
} catch (SerializationException ex) {
throw ex;
} catch (Exception ex) {
throw new SerializationException("Failed to serialize value", ex);
}
return valBuf.asList();
}

private Buffer serializeKeySuffixToKey(T keySuffix) throws SerializationException {
Buffer keyBuf;
if (keyPrefixSupplier != null) {
keyBuf = keyPrefixSupplier.get();
} else {
keyBuf = this.dictionary.getAllocator().allocate(keyPrefixLength + keySuffixLength + keyExtLength);
}
try {
assert keyBuf.readableBytes() == keyPrefixLength;
keyBuf.ensureWritable(keySuffixLength + keyExtLength);
serializeSuffix(keySuffix, keyBuf);
assert keyBuf.readableBytes() == keyPrefixLength + keySuffixLength + keyExtLength;
return keyBuf;
} catch (Throwable t) {
keyBuf.close();
throw t;
private Buf serializeKeySuffixToKey(T keySuffix) throws SerializationException {
BufDataOutput keyBuf = BufDataOutput.createLimited(keyPrefixLength + keySuffixLength + keyExtLength);
if (keyPrefix != null) {
keyBuf.writeBytes(keyPrefix);
}
assert keyBuf.size() == keyPrefixLength;
serializeSuffixTo(keySuffix, keyBuf);
assert keyBuf.size() == keyPrefixLength + keySuffixLength + keyExtLength;
return keyBuf.asList();
}
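
serializeKeySuffixToKey makes the key layout explicit: a full key is the fixed prefix, then the fixed-length serialized suffix, then the ext bytes, and the two asserts pin the sizes at each step. A simplified sketch of that layout with plain arrays (lengths hypothetical; for this class the ext length is 0, as the `super(..., 0)` call above shows):

```java
// [ prefix | fixed-length suffix | ext ]: the invariant asserted in serializeKeySuffixToKey.
class KeyLayoutSketch {
    static byte[] composeKey(byte[] prefix, byte[] suffix, int extLength) {
        byte[] key = new byte[prefix.length + suffix.length + extLength];
        System.arraycopy(prefix, 0, key, 0, prefix.length);
        System.arraycopy(suffix, 0, key, prefix.length, suffix.length);
        // ext bytes stay zeroed in this sketch
        return key;
    }
}
```
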
private Buffer toKey(Buffer suffixKey) {
assert suffixKeyLengthConsistency(suffixKey.readableBytes());
if (keyPrefixSupplier != null) {
var result = LLUtils.compositeBuffer(dictionary.getAllocator(), keyPrefixSupplier.get().send(), suffixKey.send());
try {
assert result.readableBytes() == keyPrefixLength + keySuffixLength + keyExtLength;
return result;
} catch (Throwable t) {
result.close();
throw t;
}
private Buf toKey(Buf suffixKey) {
assert suffixKeyLengthConsistency(suffixKey.size());
if (keyPrefix != null) {
var result = keyPrefix.copy();
result.addAll(suffixKey);
assert result.size() == keyPrefixLength + keySuffixLength + keyExtLength;
return result;
} else {
assert suffixKey.readableBytes() == keyPrefixLength + keySuffixLength + keyExtLength;
assert suffixKey.size() == keyPrefixLength + keySuffixLength + keyExtLength;
return suffixKey;
}
}

@Override
public Mono<Object2ObjectSortedMap<T, U>> get(@Nullable CompositeSnapshot snapshot) {
return dictionary
.getRange(resolveSnapshot(snapshot), rangeMono, false, true)
public Object2ObjectSortedMap<T, U> get(@Nullable CompositeSnapshot snapshot) {
Stream<Entry<T, U>> stream = dictionary
.getRange(resolveSnapshot(snapshot), range, false, true)
.map(entry -> {
Entry<T, U> deserializedEntry;
try (entry) {
T key;
var serializedKey = entry.getKeyUnsafe();
var serializedValue = entry.getValueUnsafe();
splitPrefix(serializedKey).close();
suffixKeyLengthConsistency(serializedKey.readableBytes());
key = deserializeSuffix(serializedKey);
U value = valueSerializer.deserialize(serializedValue);
deserializedEntry = Map.entry(key, value);
}
T key;
// serializedKey
var buf1 = BufDataInput.create(entry.getKey());
var serializedValue = BufDataInput.create(entry.getValue());
// after this, it becomes serializedSuffixAndExt
buf1.skipNBytes(keyPrefixLength);
suffixAndExtKeyConsistency(buf1.available());

key = deserializeSuffix(buf1);
U value = valueSerializer.deserialize(serializedValue);
deserializedEntry = Map.entry(key, value);
return deserializedEntry;
})
.collectMap(Entry::getKey, Entry::getValue, Object2ObjectLinkedOpenHashMap::new)
.map(map -> (Object2ObjectSortedMap<T, U>) map)
.filter(map -> !map.isEmpty());
}

@Override
public Mono<Object2ObjectSortedMap<T, U>> setAndGetPrevious(Object2ObjectSortedMap<T, U> value) {
return this
.get(null)
.concatWith(dictionary
.setRange(rangeMono,
Flux.fromIterable(Collections.unmodifiableMap(value).entrySet()).map(entry -> serializeEntry(entry)),
true
)
.as(InternalMonoUtils::toAny))
.singleOrEmpty();
}

@Override
public Mono<Object2ObjectSortedMap<T, U>> clearAndGetPrevious() {
return this
.setAndGetPrevious(Object2ObjectSortedMaps.emptyMap());
}

@Override
public Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return dictionary.sizeRange(resolveSnapshot(snapshot), rangeMono, fast);
}

@Override
public Mono<Boolean> isEmpty(@Nullable CompositeSnapshot snapshot) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), rangeMono, false);
}

@Override
public Mono<DatabaseStageEntry<U>> at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
return Mono.fromCallable(() ->
new DatabaseMapSingle<>(dictionary, BufSupplier.ofOwned(serializeKeySuffixToKey(keySuffix)), valueSerializer));
}

@Override
public Mono<Boolean> containsKey(@Nullable CompositeSnapshot snapshot, T keySuffix) {
return dictionary
.isRangeEmpty(resolveSnapshot(snapshot),
Mono.fromCallable(() -> LLRange.singleUnsafe(serializeKeySuffixToKey(keySuffix))),
true
)
.map(empty -> !empty);
}

@Override
public Mono<U> getValue(@Nullable CompositeSnapshot snapshot, T keySuffix) {
return Mono.usingWhen(dictionary
.get(resolveSnapshot(snapshot), Mono.fromCallable(() -> serializeKeySuffixToKey(keySuffix))),
value -> Mono.fromCallable(() -> deserializeValue(keySuffix, value)),
LLUtils::finalizeResource);
}

@Override
public Mono<Void> putValue(T keySuffix, U value) {
var keyMono = Mono.fromCallable(() -> serializeKeySuffixToKey(keySuffix)).single();
var valueMono = Mono.fromCallable(() -> serializeValue(value)).single();
return Mono.usingWhen(dictionary.put(keyMono, valueMono, LLDictionaryResultType.VOID),
v -> Mono.empty(),
LLUtils::finalizeResource
});
// serializedKey
// after this, it becomes serializedSuffixAndExt
var map = StreamUtils.collect(stream,
Collectors.toMap(Entry::getKey, Entry::getValue, (a, b) -> a, Object2ObjectLinkedOpenHashMap::new)
);
return map == null || map.isEmpty() ? null : map;
}

@Override
public Object2ObjectSortedMap<T, U> setAndGetPrevious(Object2ObjectSortedMap<T, U> value) {
Object2ObjectSortedMap<T, U> prev = this.get(null);
if (value == null || value.isEmpty()) {
dictionary.clear();
} else {
dictionary.setRange(range, value.entrySet().stream().map(this::serializeEntry), true);
}
return prev != null && prev.isEmpty() ? null : prev;
}

@Override
public Object2ObjectSortedMap<T, U> clearAndGetPrevious() {
return this.setAndGetPrevious(Object2ObjectSortedMaps.emptyMap());
}

@Override
public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return dictionary.sizeRange(resolveSnapshot(snapshot), range, fast);
}

@Override
public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range, false);
}

@Override
public @NotNull DatabaseStageEntry<U> at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
return new DatabaseMapSingle<>(dictionary, serializeKeySuffixToKey(keySuffix), valueSerializer);
}

@Override
public boolean containsKey(@Nullable CompositeSnapshot snapshot, T keySuffix) {
return !dictionary.isRangeEmpty(resolveSnapshot(snapshot),
LLRange.single(serializeKeySuffixToKey(keySuffix)), true);
}

@Override
public U getValue(@Nullable CompositeSnapshot snapshot, T keySuffix) {
var keySuffixBuf = serializeKeySuffixToKey(keySuffix);
Buf value = dictionary.get(resolveSnapshot(snapshot), keySuffixBuf);
return value != null ? deserializeValue(keySuffix, BufDataInput.create(value)) : null;
}

@Override
public void putValue(T keySuffix, U value) {
var keyMono = serializeKeySuffixToKey(keySuffix);
var valueMono = serializeValue(value);
dictionary.put(keyMono, valueMono, LLDictionaryResultType.VOID);
}

@Override
@@ -294,32 +271,35 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
}

@Override
public Mono<U> updateValue(T keySuffix, UpdateReturnMode updateReturnMode,
public U updateValue(T keySuffix,
UpdateReturnMode updateReturnMode,
SerializationFunction<@Nullable U, @Nullable U> updater) {
var keyMono = Mono.fromCallable(() -> serializeKeySuffixToKey(keySuffix));
return Mono.usingWhen(dictionary.update(keyMono, getSerializedUpdater(updater), updateReturnMode),
result -> Mono.fromCallable(() -> deserializeValue(keySuffix, result)),
LLUtils::finalizeResource
);
var keyMono = serializeKeySuffixToKey(keySuffix);
var serializedUpdater = getSerializedUpdater(updater);
dictionary.update(keyMono, serializedUpdater, UpdateReturnMode.NOTHING);
return serializedUpdater.getResult(updateReturnMode);
}

@Override
public Mono<Delta<U>> updateValueAndGetDelta(T keySuffix, SerializationFunction<@Nullable U, @Nullable U> updater) {
var keyMono = Mono.fromCallable(() -> serializeKeySuffixToKey(keySuffix));
return dictionary
.updateAndGetDelta(keyMono, getSerializedUpdater(updater))
.transform(mono -> LLUtils.mapLLDelta(mono, serialized -> valueSerializer.deserialize(serialized)));
public Delta<U> updateValueAndGetDelta(T keySuffix, SerializationFunction<@Nullable U, @Nullable U> updater) {
var keyMono = serializeKeySuffixToKey(keySuffix);
var serializedUpdater = getSerializedUpdater(updater);
dictionary.update(keyMono, serializedUpdater, UpdateReturnMode.NOTHING);
return serializedUpdater.getDelta();
}

public BinarySerializationFunction getSerializedUpdater(SerializationFunction<@Nullable U, @Nullable U> updater) {
return oldSerialized -> {
public CachedSerializationFunction<U, Buf, Buf> getSerializedUpdater(SerializationFunction<@Nullable U, @Nullable U> updater) {
return new CachedSerializationFunction<>(updater, this::serializeValue, this::deserializeValue);
}

public KVSerializationFunction<@NotNull T, @Nullable Buf, @Nullable Buf> getSerializedUpdater(
KVSerializationFunction<@NotNull T, @Nullable U, @Nullable U> updater) {
return (key, oldSerialized) -> {
U result;
if (oldSerialized == null) {
result = updater.apply(null);
result = updater.apply(key, null);
} else {
try (oldSerialized) {
result = updater.apply(valueSerializer.deserialize(oldSerialized));
}
result = updater.apply(key, valueSerializer.deserialize(BufDataInput.create(oldSerialized)));
}
if (result == null) {
return null;
@@ -329,101 +309,67 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
};
}

public KVSerializationFunction<@NotNull T, @Nullable Buffer, @Nullable Buffer> getSerializedUpdater(
KVSerializationFunction<@NotNull T, @Nullable U, @Nullable U> updater) {
return (key, oldSerialized) -> {
try (oldSerialized) {
U result;
if (oldSerialized == null) {
result = updater.apply(key, null);
} else {
try (oldSerialized) {
result = updater.apply(key, valueSerializer.deserialize(oldSerialized));
}
}
if (result == null) {
return null;
} else {
return serializeValue(result);
}
}
};
@Override
public U putValueAndGetPrevious(T keySuffix, U value) {
var keyMono = serializeKeySuffixToKey(keySuffix);
var valueMono = serializeValue(value);
var valueBuf = dictionary.put(keyMono, valueMono, LLDictionaryResultType.PREVIOUS_VALUE);
if (valueBuf == null) {
return null;
}
return deserializeValue(keySuffix, BufDataInput.create(valueBuf));
}

@Override
public Mono<U> putValueAndGetPrevious(T keySuffix, U value) {
var keyMono = Mono.fromCallable(() -> serializeKeySuffixToKey(keySuffix));
var valueMono = Mono.fromCallable(() -> serializeValue(value));
return Mono.usingWhen(dictionary.put(keyMono, valueMono, LLDictionaryResultType.PREVIOUS_VALUE),
valueBuf -> Mono.fromCallable(() -> deserializeValue(keySuffix, valueBuf)),
LLUtils::finalizeResource
);
public boolean putValueAndGetChanged(T keySuffix, U value) {
var keyMono = serializeKeySuffixToKey(keySuffix);
var valueMono = serializeValue(value);
var oldValueBuf = dictionary.put(keyMono, valueMono, LLDictionaryResultType.PREVIOUS_VALUE);
var oldValue = oldValueBuf != null ? deserializeValue(keySuffix, BufDataInput.create(oldValueBuf)) : null;
if (oldValue == null) {
return value != null;
} else {
return !Objects.equals(oldValue, value);
}
}

@Override
public Mono<Boolean> putValueAndGetChanged(T keySuffix, U value) {
var keyMono = Mono.fromCallable(() -> serializeKeySuffixToKey(keySuffix));
var valueMono = Mono.fromCallable(() -> serializeValue(value));
return Mono
.usingWhen(dictionary.put(keyMono, valueMono, LLDictionaryResultType.PREVIOUS_VALUE),
valueBuf -> Mono.fromCallable(() -> deserializeValue(keySuffix, valueBuf)),
LLUtils::finalizeResource
)
.map(oldValue -> !Objects.equals(oldValue, value))
.defaultIfEmpty(value != null);
public void remove(T keySuffix) {
var keyMono = serializeKeySuffixToKey(keySuffix);
dictionary.remove(keyMono, LLDictionaryResultType.VOID);
}

@Override
public Mono<Void> remove(T keySuffix) {
var keyMono = Mono.fromCallable(() -> serializeKeySuffixToKey(keySuffix));
return dictionary
.remove(keyMono, LLDictionaryResultType.VOID)
.doOnNext(LLUtils::finalizeResourceNow)
.then();
public U removeAndGetPrevious(T keySuffix) {
var keyMono = serializeKeySuffixToKey(keySuffix);
var valueBuf = dictionary.remove(keyMono, LLDictionaryResultType.PREVIOUS_VALUE);
return valueBuf != null ? deserializeValue(keySuffix, BufDataInput.create(valueBuf)) : null;
}

@Override
public Mono<U> removeAndGetPrevious(T keySuffix) {
var keyMono = Mono.fromCallable(() -> serializeKeySuffixToKey(keySuffix));
return Mono.usingWhen(dictionary.remove(keyMono, LLDictionaryResultType.PREVIOUS_VALUE),
valueBuf -> Mono.fromCallable(() -> deserializeValue(keySuffix, valueBuf)),
LLUtils::finalizeResource
);
public boolean removeAndGetStatus(T keySuffix) {
var keyMono = serializeKeySuffixToKey(keySuffix);
return LLUtils.responseToBoolean(dictionary.remove(keyMono, LLDictionaryResultType.PREVIOUS_VALUE_EXISTENCE));
}

@Override
public Mono<Boolean> removeAndGetStatus(T keySuffix) {
var keyMono = Mono.fromCallable(() -> serializeKeySuffixToKey(keySuffix));
return dictionary
.remove(keyMono, LLDictionaryResultType.PREVIOUS_VALUE_EXISTENCE)
.map(response -> LLUtils.responseToBoolean(response));
}

@Override
public Flux<Optional<U>> getMulti(@Nullable CompositeSnapshot snapshot, Flux<T> keys) {
public Stream<Optional<U>> getMulti(@Nullable CompositeSnapshot snapshot, Stream<T> keys) {
var mappedKeys = keys.map(keySuffix -> serializeKeySuffixToKey(keySuffix));
return dictionary
.getMulti(resolveSnapshot(snapshot), mappedKeys)
.map(valueBufOpt -> {
try (valueBufOpt) {
if (valueBufOpt.isPresent()) {
return Optional.of(valueSerializer.deserialize(valueBufOpt.get()));
} else {
return Optional.empty();
}
if (valueBufOpt.isPresent()) {
return Optional.of(valueSerializer.deserialize(BufDataInput.create(valueBufOpt.get())));
} else {
return Optional.empty();
}
});
}

private LLEntry serializeEntry(T keySuffix, U value) throws SerializationException {
var key = serializeKeySuffixToKey(keySuffix);
try {
var serializedValue = serializeValue(value);
return LLEntry.of(key, serializedValue);
} catch (Throwable t) {
key.close();
throw t;
}
var serializedValue = serializeValue(value);
return LLEntry.of(key, serializedValue);
}

private LLEntry serializeEntry(Entry<T, U> entry) throws SerializationException {
@@ -431,59 +377,55 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
}

@Override
public Mono<Void> putMulti(Flux<Entry<T, U>> entries) {
var serializedEntries = entries.map(entry -> serializeEntry(entry));
return dictionary.putMulti(serializedEntries);
public void putMulti(Stream<Entry<T, U>> entries) {
try (var serializedEntries = entries.map(entry -> serializeEntry(entry))) {
dictionary.putMulti(serializedEntries);
}
}

@Override
public Flux<Boolean> updateMulti(Flux<T> keys,
public Stream<Boolean> updateMulti(Stream<T> keys,
KVSerializationFunction<T, @Nullable U, @Nullable U> updater) {
var sharedKeys = keys.publish().refCount(2);
var serializedKeys = sharedKeys.map(keySuffix -> serializeKeySuffixToKey(keySuffix));
var serializedKeys = keys.map(keySuffix -> new SerializedKey<>(keySuffix, serializeKeySuffixToKey(keySuffix)));
var serializedUpdater = getSerializedUpdater(updater);
return dictionary.updateMulti(sharedKeys, serializedKeys, serializedUpdater);
return dictionary.updateMulti(serializedKeys, serializedUpdater);
}
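
updateMulti also shows why SerializedKey was introduced: the reactive version consumed the key Flux twice via publish().refCount(2), which has no Java Stream equivalent, so the new code carries each key together with its serialized form in a single element. A hedged sketch of the idea (SerializedKey is assumed to be a simple key-plus-bytes pair; a Map.Entry stands in for it here):

```java
import java.util.AbstractMap.SimpleEntry;
import java.util.stream.Stream;

class PairingSketch {
    static byte[] serialize(String key) { // stand-in for the real key serialization
        return key.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        // Each key travels with its serialized form, so the stream is consumed exactly once.
        Stream.of("a", "b", "c")
                .map(k -> new SimpleEntry<>(k, serialize(k)))
                .forEach(e -> System.out.println(e.getKey() + " -> " + e.getValue().length + " bytes"));
    }
}
```
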
@Override
public Flux<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllStages(snapshot, rangeMono, false, smallRange);
public Stream<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllStages(snapshot, range, false, smallRange);
}

private LLRange getPatchedRange(@NotNull LLRange range, @Nullable T keyMin, @Nullable T keyMax)
throws SerializationException {
Buffer keyMinBuf = serializeSuffixForRange(keyMin);
Buf keyMinBuf = serializeSuffixForRange(keyMin);
if (keyMinBuf == null) {
keyMinBuf = range.getMinCopy();
keyMinBuf = range.getMin();
}
Buffer keyMaxBuf = serializeSuffixForRange(keyMax);
Buf keyMaxBuf = serializeSuffixForRange(keyMax);
if (keyMaxBuf == null) {
keyMaxBuf = range.getMaxCopy();
keyMaxBuf = range.getMax();
}
return LLRange.ofUnsafe(keyMinBuf, keyMaxBuf);
return LLRange.of(keyMinBuf, keyMaxBuf);
}

private Buffer serializeSuffixForRange(@Nullable T key) throws SerializationException {
private Buf serializeSuffixForRange(@Nullable T key) throws SerializationException {
if (key == null) {
return null;
}
var keyWithoutExtBuf =
keyPrefixSupplier == null ? alloc.allocate(keySuffixLength + keyExtLength) : keyPrefixSupplier.get();
try {
keyWithoutExtBuf.ensureWritable(keySuffixLength + keyExtLength);
serializeSuffix(key, keyWithoutExtBuf);
return keyWithoutExtBuf;
} catch (Throwable ex) {
keyWithoutExtBuf.close();
throw ex;
var keyWithoutExtBuf = BufDataOutput.createLimited(keyPrefixLength + keySuffixLength);
if (keyPrefix != null) {
keyWithoutExtBuf.writeBytes(keyPrefix);
}
serializeSuffixTo(key, keyWithoutExtBuf);
return keyWithoutExtBuf.asList();
}

/**
 * Get all stages
 * @param reverse if true, the results will go backwards from the specified key (inclusive)
 */
public Flux<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot,
public Stream<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot,
@Nullable T keyMin,
@Nullable T keyMax,
boolean reverse,
@@ -491,108 +433,187 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
if (keyMin == null && keyMax == null) {
return getAllStages(snapshot, smallRange);
} else {
Mono<LLRange> boundedRangeMono = rangeMono.map(range -> {
try (range) {
return getPatchedRange(range, keyMin, keyMax);
}
});
return getAllStages(snapshot, boundedRangeMono, reverse, smallRange);
LLRange boundedRange = getPatchedRange(range, keyMin, keyMax);
return getAllStages(snapshot, boundedRange, reverse, smallRange);
}
}

private Flux<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot,
Mono<LLRange> sliceRangeMono, boolean reverse, boolean smallRange) {
private Stream<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot,
LLRange sliceRange, boolean reverse, boolean smallRange) {
return dictionary
.getRangeKeys(resolveSnapshot(snapshot), sliceRangeMono, reverse, smallRange)
.getRangeKeys(resolveSnapshot(snapshot), sliceRange, reverse, smallRange)
.map(keyBuf -> {
try (keyBuf) {
assert keyBuf.readableBytes() == keyPrefixLength + keySuffixLength + keyExtLength;
// Remove prefix. Keep only the suffix and the ext
splitPrefix(keyBuf).close();
suffixKeyLengthConsistency(keyBuf.readableBytes());
var bufSupplier = BufSupplier.ofOwned(toKey(keyBuf.copy()));
try {
T keySuffix = deserializeSuffix(keyBuf);
var subStage = new DatabaseMapSingle<>(dictionary, bufSupplier, valueSerializer);
return new SubStageEntry<>(keySuffix, subStage);
} catch (Throwable ex) {
bufSupplier.close();
throw ex;
}
}
assert keyBuf.size() == keyPrefixLength + keySuffixLength + keyExtLength;
// Remove prefix. Keep only the suffix and the ext
var suffixAndExtIn = BufDataInput.create(keyBuf);
suffixAndExtIn.skipBytes(keyPrefixLength);

suffixKeyLengthConsistency(suffixAndExtIn.available());
T keySuffix = deserializeSuffix(suffixAndExtIn);
var subStage = new DatabaseMapSingle<>(dictionary, keyBuf, valueSerializer);
return new SubStageEntry<>(keySuffix, subStage);
});
}

private Stream<T> getAllKeys(@Nullable CompositeSnapshot snapshot,
LLRange sliceRange, boolean reverse, boolean smallRange) {
return dictionary
.getRangeKeys(resolveSnapshot(snapshot), sliceRange, reverse, smallRange)
.map(keyBuf -> {
assert keyBuf.size() == keyPrefixLength + keySuffixLength + keyExtLength;
// Remove prefix. Keep only the suffix and the ext
var suffixAndExtIn = BufDataInput.create(keyBuf);
suffixAndExtIn.skipBytes(keyPrefixLength);

suffixKeyLengthConsistency(suffixAndExtIn.available());
return deserializeSuffix(suffixAndExtIn);
});
}

@Override
public Flux<Entry<T, U>> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllValues(snapshot, rangeMono, false, smallRange);
public Stream<Entry<T, U>> getAllEntries(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllEntries(snapshot, smallRange, Map::entry);
}

@Override
public Stream<U> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllEntries(snapshot, range, false, smallRange, (k, v) -> v);
}

@Override
public Stream<T> getAllKeys(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllKeys(snapshot, range, false, smallRange);
}

/**
 * Get all values
 * @param reverse if true, the results will go backwards from the specified key (inclusive)
 */
public Flux<Entry<T, U>> getAllValues(@Nullable CompositeSnapshot snapshot,
public Stream<Entry<T, U>> getAllEntries(@Nullable CompositeSnapshot snapshot,
@Nullable T keyMin,
@Nullable T keyMax,
boolean reverse,
boolean smallRange) {
return getAllEntries(snapshot, keyMin, keyMax, reverse, smallRange, Map::entry);
}

/**
 * Get all values
 * @param reverse if true, the results will go backwards from the specified key (inclusive)
 */
public <X> Stream<X> getAllEntries(@Nullable CompositeSnapshot snapshot,
@Nullable T keyMin,
@Nullable T keyMax,
boolean reverse,
boolean smallRange,
BiFunction<T, U, X> mapper) {
if (keyMin == null && keyMax == null) {
return getAllValues(snapshot, smallRange);
return getAllEntries(snapshot, smallRange, mapper);
} else {
Mono<LLRange> boundedRangeMono = Mono.usingWhen(rangeMono,
range -> Mono.fromCallable(() -> getPatchedRange(range, keyMin, keyMax)),
LLUtils::finalizeResource);
return getAllValues(snapshot, boundedRangeMono, reverse, smallRange);
LLRange boundedRange = getPatchedRange(range, keyMin, keyMax);
return getAllEntries(snapshot, boundedRange, reverse, smallRange, mapper);
}
}

private Flux<Entry<T, U>> getAllValues(@Nullable CompositeSnapshot snapshot,
Mono<LLRange> sliceRangeMono,
boolean reverse, boolean smallRange) {
private <X> Stream<X> getAllEntries(@Nullable CompositeSnapshot snapshot, boolean smallRange, BiFunction<T, U, X> mapper) {
return getAllEntries(snapshot, range, false, smallRange, mapper);
}

private <X> Stream<X> getAllEntries(@Nullable CompositeSnapshot snapshot,
LLRange sliceRangeMono,
boolean reverse,
boolean smallRange,
BiFunction<T, U, X> mapper) {
return dictionary
.getRange(resolveSnapshot(snapshot), sliceRangeMono, reverse, smallRange)
.map((serializedEntry) -> {
Entry<T, U> entry;
try (serializedEntry) {
var keyBuf = serializedEntry.getKeyUnsafe();
assert keyBuf != null;
assert keyBuf.readableBytes() == keyPrefixLength + keySuffixLength + keyExtLength;
// Remove prefix. Keep only the suffix and the ext
splitPrefix(keyBuf).close();
assert suffixKeyLengthConsistency(keyBuf.readableBytes());
T keySuffix = deserializeSuffix(keyBuf);
X entry;
var keyBuf = serializedEntry.getKey();
assert keyBuf != null;
assert keyBuf.size() == keyPrefixLength + keySuffixLength + keyExtLength;

assert serializedEntry.getValueUnsafe() != null;
U value = valueSerializer.deserialize(serializedEntry.getValueUnsafe());
entry = Map.entry(keySuffix, value);
}
// Remove prefix. Keep only the suffix and the ext
var suffixAndExtIn = BufDataInput.create(keyBuf);
suffixAndExtIn.skipBytes(keyPrefixLength);

assert suffixKeyLengthConsistency(suffixAndExtIn.available());
T keySuffix = deserializeSuffix(suffixAndExtIn);

assert serializedEntry.getValue() != null;
U value = valueSerializer.deserialize(BufDataInput.create(serializedEntry.getValue()));
entry = mapper.apply(keySuffix, value);
return entry;
});
}

@Override
public Flux<Entry<T, U>> setAllValuesAndGetPrevious(Flux<Entry<T, U>> entries) {
return Flux.usingWhen(Mono.just(true),
b -> this.getAllValues(null, false),
b -> dictionary.setRange(rangeMono, entries.map(entry -> serializeEntry(entry)), false)
public Stream<Entry<T, U>> setAllEntriesAndGetPrevious(Stream<Entry<T, U>> entries) {
return resourceStream(
() -> getAllEntries(null, false),
() -> dictionary.setRange(range, entries.map(entry -> serializeEntry(entry)), false)
);
}

@Override
public Mono<Void> clear() {
return Mono.using(() -> rangeSupplier.get(), range -> {
if (range.isAll()) {
return dictionary.clear();
} else if (range.isSingle()) {
return dictionary
.remove(Mono.fromCallable(() -> range.getSingleUnsafe()), LLDictionaryResultType.VOID)
.doOnNext(LLUtils::finalizeResourceNow)
.then();
} else {
return dictionary.setRange(rangeMono, Flux.empty(), false);
}
}, LLUtils::finalizeResourceNow);
public void clear() {
if (range.isAll()) {
dictionary.clear();
} else if (range.isSingle()) {
dictionary.remove(range.getSingleUnsafe(), LLDictionaryResultType.VOID);
} else {
dictionary.setRange(range, Stream.empty(), false);
}
}

public static <T, U> List<Stream<UnsafeSSTEntry<T, U>>> getAllEntriesFastUnsafe(DatabaseMapDictionary<T, U> dict,
boolean disableRocksdbChecks,
BiConsumer<UnsafeRawSSTEntry<T, U>, Throwable> deserializationErrorHandler) {
try {
var liveFiles = ((LLLocalDictionary) dict.dictionary).getAllLiveFiles();
return Lists.transform(liveFiles, file -> file.iterate(new SSTRangeFull(), disableRocksdbChecks)
.map(state -> switch (state) {
case RocksDBFileIterationStateBegin rocksDBFileIterationStateBegin:
yield null;
case RocksDBFileIterationStateEnd rocksDBFileIterationStateEnd:
yield null;
case RocksDBFileIterationStateKey rocksDBFileIterationStateKey:
yield switch (rocksDBFileIterationStateKey.state()) {
case RocksDBFileIterationStateKeyError e -> null;
case RocksDBFileIterationStateKeyOk rocksDBFileIterationStateKeyOk -> {
var key = rocksDBFileIterationStateKey.key();
var value = rocksDBFileIterationStateKeyOk.value();
try {
var deserializedKey = dict.deserializeSuffix(BufDataInput.create(key));
var deserializedValue = dict.deserializeValue(value);
yield new UnsafeSSTEntry<>(file,
deserializedKey,
deserializedValue,
key,
value,
k -> dict.deserializeSuffix(BufDataInput.create(k)),
dict::deserializeValue
);
} catch (Throwable t) {
if (deserializationErrorHandler != null) {
deserializationErrorHandler.accept(new UnsafeRawSSTEntry<>(file,
key,
value,
k -> dict.deserializeSuffix(BufDataInput.create(k)),
dict::deserializeValue
), t);
yield null;
} else {
throw t;
}
}
}

};
})
.filter(Objects::nonNull));
} catch (RocksDBException e) {
throw new RuntimeException(e);
}
}

}
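
getAllEntriesFastUnsafe leans on sealed iteration-state types and pattern-matching switch to walk SST files. A minimal standalone sketch of that style, with hypothetical stand-in types rather than the real RocksDBFileIterationState hierarchy:

```java
// Hypothetical stand-ins illustrating the sealed-interface + switch pattern used above.
sealed interface IterationState permits Begin, End, KeyValue {}
record Begin() implements IterationState {}
record End() implements IterationState {}
record KeyValue(byte[] key, byte[] value) implements IterationState {}

class IterationSketch {
    static String describe(IterationState state) {
        return switch (state) { // exhaustive over the sealed hierarchy, no default needed
            case Begin b -> "begin";
            case End e -> "end";
            case KeyValue kv -> "entry with a " + kv.key().length + "-byte key";
        };
    }
}
```
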
@@ -1,137 +1,118 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Buffer;
import io.netty5.buffer.BufferAllocator;
import io.netty5.buffer.DefaultBufferAllocators;
import io.netty5.buffer.Drop;
import io.netty5.buffer.Owned;
import io.netty5.util.Resource;
import io.netty5.buffer.internal.ResourceSupport;
import it.cavallium.dbengine.client.BadBlock;
import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;

import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.BufSupplier;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.RangeSupplier;
import it.cavallium.dbengine.database.SubStageEntry;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.cavallium.dbengine.utils.InternalMonoUtils;
import it.cavallium.dbengine.utils.SimpleResource;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import java.util.List;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;
import org.apache.commons.lang3.function.TriFunction;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import org.jetbrains.annotations.VisibleForTesting;

// todo: implement optimized methods (which?)
public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extends SimpleResource implements
DatabaseStageMap<T, U, US> {
public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implements DatabaseStageMap<T, U, US> {

private static final Logger LOG = LogManager.getLogger(DatabaseMapDictionaryDeep.class);

protected final LLDictionary dictionary;
protected final BufferAllocator alloc;
private final AtomicLong totalZeroBytesErrors = new AtomicLong();
protected final SubStageGetter<U, US> subStageGetter;
protected final SerializerFixedBinaryLength<T> keySuffixSerializer;
protected final int keyPrefixLength;
protected final int keySuffixLength;
protected final int keyExtLength;
protected final Mono<LLRange> rangeMono;
protected final LLRange range;

protected RangeSupplier rangeSupplier;
protected BufSupplier keyPrefixSupplier;
protected Buf keyPrefix;

private static void incrementPrefix(Buffer prefix, int prefixLength) {
assert prefix.readableBytes() >= prefixLength;
assert prefix.readerOffset() == 0;
final var originalKeyLength = prefix.readableBytes();
private static void incrementPrefix(Buf modifiablePrefix, int prefixLength) {
assert modifiablePrefix.size() >= prefixLength;
final var originalKeyLength = modifiablePrefix.size();
boolean overflowed = true;
final int ff = 0xFF;
int writtenBytes = 0;
for (int i = prefixLength - 1; i >= 0; i--) {
int iByte = prefix.getUnsignedByte(i);
int iByte = Byte.toUnsignedInt(modifiablePrefix.getByte(i));
if (iByte != ff) {
prefix.setUnsignedByte(i, iByte + 1);
modifiablePrefix.set(i, (byte) (iByte + 1));
writtenBytes++;
overflowed = false;
break;
} else {
prefix.setUnsignedByte(i, 0x00);
modifiablePrefix.set(i, (byte) 0x00);
writtenBytes++;
}
}
assert prefixLength - writtenBytes >= 0;

if (overflowed) {
assert prefix.writerOffset() == originalKeyLength;
prefix.ensureWritable(1, 1, true);
prefix.writerOffset(originalKeyLength + 1);
modifiablePrefix.add((byte) 0);
for (int i = 0; i < originalKeyLength; i++) {
prefix.setUnsignedByte(i, 0xFF);
modifiablePrefix.set(i, (byte) 0xFF);
}
prefix.setUnsignedByte(originalKeyLength, (byte) 0x00);
modifiablePrefix.set(originalKeyLength, (byte) 0x00);
}
}
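
incrementPrefix computes the lexicographic successor of a key prefix by carry propagation from the last prefix byte, growing the key by one byte when every prefix byte is 0xFF. A worked sketch over a plain byte[] (simplified to increment the whole array; the real method touches only the first prefixLength bytes of a longer key):

```java
class PrefixSketch {
    // {0x00, 0x01} -> {0x00, 0x02};  {0x00, 0xFF} -> {0x01, 0x00};
    // {0xFF, 0xFF} -> {0xFF, 0xFF, 0x00}, matching the overflow branch above.
    static byte[] increment(byte[] prefix) {
        byte[] out = java.util.Arrays.copyOf(prefix, prefix.length);
        for (int i = out.length - 1; i >= 0; i--) {
            int b = Byte.toUnsignedInt(out[i]);
            if (b != 0xFF) {
                out[i] = (byte) (b + 1);
                return out; // carry stopped here
            }
            out[i] = 0x00; // 0xFF rolls over, carry continues
        }
        // every byte was 0xFF: grow by one byte, as the overflow branch does
        byte[] grown = new byte[out.length + 1];
        java.util.Arrays.fill(grown, 0, out.length, (byte) 0xFF);
        grown[out.length] = 0x00;
        return grown;
    }
}
```
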
static void firstRangeKey(Buffer prefixKey, int prefixLength, Buffer suffixAndExtZeroes) {
zeroFillKeySuffixAndExt(prefixKey, prefixLength, suffixAndExtZeroes);
@VisibleForTesting
public static Buf firstRangeKey(Buf prefixKey, int prefixLength, Buf suffixAndExtZeroes) {
return createFullKeyWithEmptySuffixAndExt(prefixKey, prefixLength, suffixAndExtZeroes);
}

static void nextRangeKey(Buffer prefixKey, int prefixLength, Buffer suffixAndExtZeroes) {
zeroFillKeySuffixAndExt(prefixKey, prefixLength, suffixAndExtZeroes);
incrementPrefix(prefixKey, prefixLength);
@VisibleForTesting
public static Buf nextRangeKey(Buf prefixKey, int prefixLength, Buf suffixAndExtZeroes) {
Buf modifiablePrefixKey = createFullKeyWithEmptySuffixAndExt(prefixKey, prefixLength, suffixAndExtZeroes);
incrementPrefix(modifiablePrefixKey, prefixLength);
return modifiablePrefixKey;
}

@Deprecated
static void firstRangeKey(Buffer prefixKey, int prefixLength, int suffixLength, int extLength) {
try (var zeroBuf = DefaultBufferAllocators.offHeapAllocator().allocate(suffixLength + extLength)) {
zeroBuf.fill((byte) 0);
zeroBuf.writerOffset(suffixLength + extLength);
zeroFillKeySuffixAndExt(prefixKey, prefixLength, zeroBuf);
private static Buf createFullKeyWithEmptySuffixAndExt(Buf prefixKey, int prefixLength, Buf suffixAndExtZeroes) {
var modifiablePrefixKey = Buf.create(prefixLength + suffixAndExtZeroes.size());
if (prefixKey != null) {
modifiablePrefixKey.addAll(prefixKey);
}
assert prefixKey != null || prefixLength == 0 : "Prefix length is " + prefixLength + " but the prefix key is null";
zeroFillKeySuffixAndExt(modifiablePrefixKey, prefixLength, suffixAndExtZeroes);
return modifiablePrefixKey;
}

@Deprecated
static void nextRangeKey(Buffer prefixKey, int prefixLength, int suffixLength, int extLength) {
try (var zeroBuf = DefaultBufferAllocators.offHeapAllocator().allocate(suffixLength + extLength)) {
zeroBuf.fill((byte) 0);
zeroBuf.writerOffset(suffixLength + extLength);
zeroFillKeySuffixAndExt(prefixKey, prefixLength, zeroBuf);
incrementPrefix(prefixKey, prefixLength);
}
}

protected static void zeroFillKeySuffixAndExt(@NotNull Buffer prefixKey,
int prefixLength, Buffer suffixAndExtZeroes) {
/**
* @param modifiablePrefixKey This field content will be modified
*/
protected static void zeroFillKeySuffixAndExt(@NotNull Buf modifiablePrefixKey, int prefixLength, Buf suffixAndExtZeroes) {
//noinspection UnnecessaryLocalVariable
var result = prefixKey;
var suffixLengthAndExtLength = suffixAndExtZeroes.readableBytes();
assert result.readableBytes() == prefixLength;
var result = modifiablePrefixKey;
var suffixLengthAndExtLength = suffixAndExtZeroes.size();
assert result.size() == prefixLength;
assert suffixLengthAndExtLength > 0 : "Suffix length + ext length is < 0: " + suffixLengthAndExtLength;
prefixKey.ensureWritable(suffixLengthAndExtLength);
suffixAndExtZeroes.copyInto(suffixAndExtZeroes.readerOffset(),
prefixKey,
prefixKey.writerOffset(),
suffixLengthAndExtLength
);
prefixKey.skipWritableBytes(suffixLengthAndExtLength);
result.size(prefixLength);
modifiablePrefixKey.addAll(suffixAndExtZeroes);
assert modifiablePrefixKey.size() == prefixLength + suffixAndExtZeroes.size() : "Result buffer size is wrong";
}

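Taken together, firstRangeKey and nextRangeKey bound a prefix scan: every key of the form prefix+suffix+ext sorts inside [firstRangeKey, nextRangeKey). A rough illustration on plain byte arrays, reusing the hypothetical nextPrefix helper sketched earlier and ignoring the all-0xFF overflow case:

byte[] prefix = {0x01, 0x02};
int suffixAndExtLength = 3;
// first = prefix followed by a zeroed suffix+ext: 01 02 00 00 00
byte[] first = java.util.Arrays.copyOf(prefix, prefix.length + suffixAndExtLength);
// next = the incremented prefix followed by zeroes: 01 03 00 00 00
byte[] next = java.util.Arrays.copyOf(nextPrefix(prefix), prefix.length + suffixAndExtLength);
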
/**
@@ -150,98 +131,63 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
}

public static <T, U, US extends DatabaseStage<U>> DatabaseMapDictionaryDeep<T, U, US> deepIntermediate(
LLDictionary dictionary, BufSupplier prefixKey, SerializerFixedBinaryLength<T> keySuffixSerializer,
LLDictionary dictionary, Buf prefixKey, SerializerFixedBinaryLength<T> keySuffixSerializer,
SubStageGetter<U, US> subStageGetter, int keyExtLength) {
return new DatabaseMapDictionaryDeep<>(dictionary, prefixKey, keySuffixSerializer, subStageGetter, keyExtLength);
}

@SuppressWarnings({"unchecked", "rawtypes"})
protected DatabaseMapDictionaryDeep(LLDictionary dictionary, @Nullable BufSupplier prefixKeySupplier,
protected DatabaseMapDictionaryDeep(LLDictionary dictionary, @Nullable Buf prefixKey,
SerializerFixedBinaryLength<T> keySuffixSerializer, SubStageGetter<U, US> subStageGetter, int keyExtLength) {
try (var prefixKey = prefixKeySupplier != null ? prefixKeySupplier.get() : null) {
this.dictionary = dictionary;
this.alloc = dictionary.getAllocator();
this.subStageGetter = subStageGetter;
this.keySuffixSerializer = keySuffixSerializer;
this.keyPrefixLength = prefixKey != null ? prefixKey.readableBytes() : 0;
this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength();
this.keyExtLength = keyExtLength;
try (var keySuffixAndExtZeroBuffer = alloc
.allocate(keySuffixLength + keyExtLength)
.fill((byte) 0)
.writerOffset(keySuffixLength + keyExtLength)
.makeReadOnly()) {
assert keySuffixAndExtZeroBuffer.readableBytes() == keySuffixLength + keyExtLength :
"Key suffix and ext zero buffer readable length is not equal"
+ " to the key suffix length + key ext length. keySuffixAndExtZeroBuffer="
+ keySuffixAndExtZeroBuffer.readableBytes() + " keySuffixLength=" + keySuffixLength + " keyExtLength="
+ keyExtLength;
assert keySuffixAndExtZeroBuffer.readableBytes() > 0;
var firstKey = prefixKey != null ? prefixKeySupplier.get()
: alloc.allocate(keyPrefixLength + keySuffixLength + keyExtLength);
try {
firstRangeKey(firstKey, keyPrefixLength, keySuffixAndExtZeroBuffer);
var nextRangeKey = prefixKey != null ? prefixKeySupplier.get()
: alloc.allocate(keyPrefixLength + keySuffixLength + keyExtLength);
try {
nextRangeKey(nextRangeKey, keyPrefixLength, keySuffixAndExtZeroBuffer);
assert prefixKey == null || prefixKey.isAccessible();
assert keyPrefixLength == 0 || !LLUtils.equals(firstKey, nextRangeKey);
if (keyPrefixLength == 0) {
this.rangeSupplier = RangeSupplier.ofOwned(LLRange.all());
firstKey.close();
nextRangeKey.close();
} else {
this.rangeSupplier = RangeSupplier.ofOwned(LLRange.ofUnsafe(firstKey, nextRangeKey));
}
this.rangeMono = Mono.fromSupplier(rangeSupplier);
assert subStageKeysConsistency(keyPrefixLength + keySuffixLength + keyExtLength);
} catch (Throwable t) {
nextRangeKey.close();
throw t;
}
} catch (Throwable t) {
firstKey.close();
throw t;
}

this.keyPrefixSupplier = prefixKeySupplier;
}
} catch (Throwable t) {
if (prefixKeySupplier != null) {
prefixKeySupplier.close();
}
throw t;
this.dictionary = dictionary;
this.subStageGetter = subStageGetter;
this.keySuffixSerializer = keySuffixSerializer;
this.keyPrefixLength = prefixKey != null ? prefixKey.size() : 0;
this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength();
this.keyExtLength = keyExtLength;
var keySuffixAndExtZeroBuffer = Buf.createZeroes(keySuffixLength + keyExtLength);
assert keySuffixAndExtZeroBuffer.size() == keySuffixLength + keyExtLength :
"Key suffix and ext zero buffer readable length is not equal"
+ " to the key suffix length + key ext length. keySuffixAndExtZeroBuffer="
+ keySuffixAndExtZeroBuffer.size() + " keySuffixLength=" + keySuffixLength + " keyExtLength="
+ keyExtLength;
assert keySuffixAndExtZeroBuffer.size() > 0;
var firstKey = firstRangeKey(prefixKey, keyPrefixLength, keySuffixAndExtZeroBuffer);
var nextRangeKey = nextRangeKey(prefixKey, keyPrefixLength, keySuffixAndExtZeroBuffer);
assert keyPrefixLength == 0 || !LLUtils.equals(firstKey, nextRangeKey);
if (keyPrefixLength == 0) {
this.range = LLRange.all();
} else {
this.range = LLRange.of(firstKey, nextRangeKey);
}
}
assert subStageKeysConsistency(keyPrefixLength + keySuffixLength + keyExtLength);

@SuppressWarnings({"unchecked", "rawtypes"})
this.keyPrefix = prefixKey;
}
private DatabaseMapDictionaryDeep(LLDictionary dictionary,
BufferAllocator alloc,
SubStageGetter<U, US> subStageGetter,
SerializerFixedBinaryLength<T> keySuffixSerializer,
int keyPrefixLength,
int keySuffixLength,
int keyExtLength,
Mono<LLRange> rangeMono,
RangeSupplier rangeSupplier,
BufSupplier keyPrefixSupplier,
Runnable onClose) {
LLRange range,
Buf keyPrefix) {
this.dictionary = dictionary;
this.alloc = alloc;
this.subStageGetter = subStageGetter;
this.keySuffixSerializer = keySuffixSerializer;
this.keyPrefixLength = keyPrefixLength;
this.keySuffixLength = keySuffixLength;
this.keyExtLength = keyExtLength;
this.rangeMono = rangeMono;
this.range = range;

this.rangeSupplier = rangeSupplier;
this.keyPrefixSupplier = keyPrefixSupplier;
this.keyPrefix = keyPrefix;
}

@SuppressWarnings("unused")
protected boolean suffixKeyLengthConsistency(int keySuffixLength) {
assert
this.keySuffixLength == keySuffixLength :
"Key suffix length is " + keySuffixLength + ", but it should be " + this.keySuffixLength + " bytes long";
//noinspection ConstantValue
return this.keySuffixLength == keySuffixLength;
}

@@ -256,16 +202,39 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
}

/**
* Removes the prefix from the key
* @return the prefix
*/
protected Buffer splitPrefix(Buffer key) {
assert key.readableBytes() == keyPrefixLength + keySuffixLength + keyExtLength
|| key.readableBytes() == keyPrefixLength + keySuffixLength;
var prefix = key.readSplit(this.keyPrefixLength);
assert key.readableBytes() == keySuffixLength + keyExtLength
|| key.readableBytes() == keySuffixLength;
return prefix;
protected Buf prefixSubList(Buf key) {
assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength
|| key.size() == keyPrefixLength + keySuffixLength;
return key.subList(0, this.keyPrefixLength);
}

/**
* @return the suffix
*/
protected Buf suffixSubList(Buf key) {
assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength
|| key.size() == keyPrefixLength + keySuffixLength;
return key.subList(this.keyPrefixLength, keyPrefixLength + keySuffixLength);
}

/**
* @return the suffix
*/
protected Buf suffixAndExtSubList(Buf key) {
assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength
|| key.size() == keyPrefixLength + keySuffixLength;
return key.subList(this.keyPrefixLength, key.size());
}

/**
* @return the ext
*/
protected Buf extSubList(Buf key) {
assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength
|| key.size() == keyPrefixLength + keySuffixLength;
return key.subList(this.keyPrefixLength + this.keySuffixLength, key.size());
}

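Composite keys in this class have a fixed layout: [prefix][suffix][ext]. The subList helpers above just slice those regions; the equivalent on a plain byte array (illustrative only, with made-up lengths):

byte[] key = {0x0A, 0x0B, 0x01, 0x02, 0x03, 0x04, 0x05}; // prefixLength=2, suffixLength=4, extLength=1
byte[] prefix = java.util.Arrays.copyOfRange(key, 0, 2);              // 0A 0B
byte[] suffix = java.util.Arrays.copyOfRange(key, 2, 2 + 4);          // 01 02 03 04
byte[] ext    = java.util.Arrays.copyOfRange(key, 2 + 4, key.length); // 05
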
protected LLSnapshot resolveSnapshot(@Nullable CompositeSnapshot snapshot) {
@@ -277,30 +246,23 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
}

@Override
public Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return dictionary.sizeRange(resolveSnapshot(snapshot), rangeMono, fast);
public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return dictionary.sizeRange(resolveSnapshot(snapshot), range, fast);
}

@Override
public Mono<Boolean> isEmpty(@Nullable CompositeSnapshot snapshot) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), rangeMono, false);
public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range, false);
}

@Override
public Mono<US> at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
var suffixKeyWithoutExt = Mono.fromCallable(() -> {
var keyWithoutExtBuf = keyPrefixSupplier == null
? alloc.allocate(keySuffixLength + keyExtLength) : keyPrefixSupplier.get();
try {
keyWithoutExtBuf.ensureWritable(keySuffixLength + keyExtLength);
serializeSuffix(keySuffix, keyWithoutExtBuf);
} catch (Throwable ex) {
keyWithoutExtBuf.close();
throw ex;
}
return keyWithoutExtBuf;
});
return this.subStageGetter.subStage(dictionary, snapshot, suffixKeyWithoutExt);
public @NotNull US at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
BufDataOutput bufOutput = BufDataOutput.createLimited(keyPrefixLength + keySuffixLength + keyExtLength);
if (keyPrefix != null) {
bufOutput.writeBytes(keyPrefix);
}
serializeSuffixTo(keySuffix, bufOutput);
return this.subStageGetter.subStage(dictionary, snapshot, bufOutput.asList());
}

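The rewritten at() shows the synchronous key-assembly pattern used throughout this changeset. A condensed sketch of the same calls, with comments spelling out each step (argument values assumed):

// Compose prefix + suffix into one composite key, as at() does above.
BufDataOutput bufOutput = BufDataOutput.createLimited(keyPrefixLength + keySuffixLength + keyExtLength);
if (keyPrefix != null) {
    bufOutput.writeBytes(keyPrefix); // shared prefix; absent at the top level of the tree
}
serializeSuffixTo(keySuffix, bufOutput); // must append exactly keySuffixLength bytes
Buf fullKeyWithoutExt = bufOutput.asList(); // the ext part is appended later by the sub-stage
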
@Override
@@ -309,39 +271,21 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
}

@Override
public Flux<BadBlock> badBlocks() {
return dictionary.badBlocks(rangeMono);
public Stream<DbProgress<SSTVerificationProgress>> verifyChecksum() {
return dictionary.verifyChecksum(range);
}

@Override
public Flux<SubStageEntry<T, US>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
public Stream<SubStageEntry<T, US>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return dictionary
.getRangeKeyPrefixes(resolveSnapshot(snapshot), rangeMono, keyPrefixLength + keySuffixLength, smallRange)
.flatMapSequential(groupKeyWithoutExt -> this.subStageGetter
.subStage(dictionary, snapshot, Mono.fromCallable(() -> groupKeyWithoutExt.copy()))
.map(us -> {
T deserializedSuffix;
try (var splittedGroupSuffix = splitGroupSuffix(groupKeyWithoutExt)) {
deserializedSuffix = this.deserializeSuffix(splittedGroupSuffix);
return new SubStageEntry<>(deserializedSuffix, us);
}
})
.doFinally(s -> groupKeyWithoutExt.close())
);
}

/**
* Split the input. The input will become the ext, the returned data will be the group suffix
* @param groupKey group key, will become ext
* @return group suffix
*/
private Buffer splitGroupSuffix(@NotNull Buffer groupKey) {
assert subStageKeysConsistency(groupKey.readableBytes())
|| subStageKeysConsistency(groupKey.readableBytes() + keyExtLength);
this.splitPrefix(groupKey).close();
assert subStageKeysConsistency(keyPrefixLength + groupKey.readableBytes())
|| subStageKeysConsistency(keyPrefixLength + groupKey.readableBytes() + keyExtLength);
return groupKey.readSplit(keySuffixLength);
.getRangeKeyPrefixes(resolveSnapshot(snapshot), range, keyPrefixLength + keySuffixLength, smallRange)
.map(groupKeyWithoutExt -> {
T deserializedSuffix;
var splittedGroupSuffix = suffixSubList(groupKeyWithoutExt);
deserializedSuffix = this.deserializeSuffix(BufDataInput.create(splittedGroupSuffix));
return new SubStageEntry<>(deserializedSuffix,
this.subStageGetter.subStage(dictionary, snapshot, groupKeyWithoutExt));
});
}

private boolean subStageKeysConsistency(int totalKeyLength) {
@@ -357,51 +301,58 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
}

@Override
public Flux<Entry<T, U>> setAllValuesAndGetPrevious(Flux<Entry<T, U>> entries) {
return this
.getAllValues(null, false)
.concatWith(this
.clear()
.then(this.putMulti(entries))
.as(InternalMonoUtils::toAny)
);
public void setAllEntries(Stream<Entry<T, U>> entries) {
this.clear();
this.putMulti(entries);
}

@Override
public Mono<Void> clear() {
return Mono.using(() -> rangeSupplier.get(), range -> {
if (range.isAll()) {
return dictionary.clear();
} else if (range.isSingle()) {
return dictionary
.remove(Mono.fromCallable(() -> range.getSingleUnsafe()), LLDictionaryResultType.VOID)
.doOnNext(resource -> LLUtils.finalizeResourceNow(resource))
.then();
} else {
return dictionary.setRange(rangeMono, Flux.empty(), false);
}
}, resource -> LLUtils.finalizeResourceNow(resource));
public Stream<Entry<T, U>> setAllEntriesAndGetPrevious(Stream<Entry<T, U>> entries) {
return resourceStream(() -> this.getAllEntries(null, false), () -> setAllEntries(entries));
}

protected T deserializeSuffix(@NotNull Buffer keySuffix) throws SerializationException {
assert suffixKeyLengthConsistency(keySuffix.readableBytes());
var result = keySuffixSerializer.deserialize(keySuffix);
return result;
@Override
public ForkJoinPool getDbReadPool() {
return dictionary.getDbReadPool();
}

protected void serializeSuffix(T keySuffix, Buffer output) throws SerializationException {
output.ensureWritable(keySuffixLength);
var beforeWriterOffset = output.writerOffset();
@Override
public ForkJoinPool getDbWritePool() {
return dictionary.getDbWritePool();
}

@Override
public void clear() {
if (range.isAll()) {
dictionary.clear();
} else if (range.isSingle()) {
dictionary.remove(range.getSingleUnsafe(), LLDictionaryResultType.VOID);
} else {
dictionary.setRange(range, Stream.empty(), false);
}
}

protected T deserializeSuffix(@NotNull BufDataInput keySuffix) throws SerializationException {
assert suffixKeyLengthConsistency(keySuffix.available());
return keySuffixSerializer.deserialize(keySuffix);
}

protected void serializeSuffixTo(T keySuffix, BufDataOutput output) throws SerializationException {
var beforeWriterOffset = output.size();
assert beforeWriterOffset == keyPrefixLength;
assert keySuffixSerializer.getSerializedBinaryLength() == keySuffixLength
: "Invalid key suffix serializer length: " + keySuffixSerializer.getSerializedBinaryLength()
+ ". Expected: " + keySuffixLength;
keySuffixSerializer.serialize(keySuffix, output);
var afterWriterOffset = output.writerOffset();
var afterWriterOffset = output.size();
assert suffixKeyLengthConsistency(afterWriterOffset - beforeWriterOffset)
: "Invalid key suffix length: " + (afterWriterOffset - beforeWriterOffset) + ". Expected: " + keySuffixLength;
}

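serializeSuffixTo and deserializeSuffix enforce a fixed-width contract: the suffix serializer must write exactly keySuffixLength bytes, or the range arithmetic on composite keys breaks. A hypothetical serializer that satisfies the contract (not part of this codebase), shown with java.io streams for brevity:

// A 4-byte big-endian int suffix: the serialized length is always 4,
// so getSerializedBinaryLength() == 4 would hold for it.
static void writeIntSuffix(int keySuffix, java.io.DataOutput output) throws java.io.IOException {
    output.writeInt(keySuffix);
}
static int readIntSuffix(java.io.DataInput input) throws java.io.IOException {
    return input.readInt();
}
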
public static <K1, K2, V, R> Flux<R> getAllLeaves2(DatabaseMapDictionaryDeep<K1, Object2ObjectSortedMap<K2, V>, ? extends DatabaseStageMap<K2, V, DatabaseStageEntry<V>>> deepMap,
public static <K1, K2, V, R> Stream<R> getAllLeaves2(DatabaseMapDictionaryDeep<K1, Object2ObjectSortedMap<K2, V>, ? extends DatabaseStageMap<K2, V, DatabaseStageEntry<V>>> deepMap,
CompositeSnapshot snapshot,
TriFunction<K1, K2, V, R> merger,
@NotNull Mono<K1> savedProgressKey1) {
@Nullable K1 savedProgressKey1) {
var keySuffix1Serializer = deepMap.keySuffixSerializer;
SerializerFixedBinaryLength<?> keySuffix2Serializer;
Serializer<?> valueSerializer;
@@ -434,64 +385,47 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
throw new IllegalArgumentException();
}

var savedProgressKey1Opt = savedProgressKey1.map(value1 -> Optional.of(value1)).defaultIfEmpty(Optional.empty());
var firstKey = Optional.ofNullable(savedProgressKey1);
var fullRange = deepMap.range;

return deepMap
.dictionary
.getRange(deepMap.resolveSnapshot(snapshot), Mono.zip(savedProgressKey1Opt, deepMap.rangeMono).handle((tuple, sink) -> {
var firstKey = tuple.getT1();
var fullRange = tuple.getT2();
try {
if (firstKey.isPresent()) {
try (fullRange) {
try (var key1Buf = deepMap.alloc.allocate(keySuffix1Serializer.getSerializedBinaryLength())) {
keySuffix1Serializer.serialize(firstKey.get(), key1Buf);
sink.next(LLRange.of(key1Buf.send(), fullRange.getMax()));
} catch (SerializationException e) {
sink.error(e);
}
}
} else {
sink.next(fullRange);
}
} catch (Throwable ex) {
try {
fullRange.close();
} catch (Throwable ex2) {
LOG.error(ex2);
}
sink.error(ex);
}
}), false, false)
.concatMapIterable(entry -> {

LLRange range;
if (firstKey.isPresent()) {
var key1Buf = BufDataOutput.create(keySuffix1Serializer.getSerializedBinaryLength());
keySuffix1Serializer.serialize(firstKey.get(), key1Buf);
range = LLRange.of(key1Buf.asList(), fullRange.getMax());
} else {
range = fullRange;
}

return deepMap.dictionary.getRange(deepMap.resolveSnapshot(snapshot), range, false, false)
.flatMap(entry -> {
K1 key1 = null;
Object key2 = null;
try (entry) {
var keyBuf = entry.getKeyUnsafe();
var valueBuf = entry.getValueUnsafe();
try {
var keyBuf = entry.getKey();
var valueBuf = entry.getValue();
try {
assert keyBuf != null;
keyBuf.skipReadableBytes(deepMap.keyPrefixLength);
try (var key1Buf = keyBuf.split(deepMap.keySuffixLength)) {
key1 = keySuffix1Serializer.deserialize(key1Buf);
}
key2 = keySuffix2Serializer.deserialize(keyBuf);
var suffix1And2 = BufDataInput.create(keyBuf.subList(deepMap.keyPrefixLength, deepMap.keyPrefixLength + deepMap.keySuffixLength + deepMap.keyExtLength));
key1 = keySuffix1Serializer.deserialize(suffix1And2);
key2 = keySuffix2Serializer.deserialize(suffix1And2);
assert valueBuf != null;
Object value = valueSerializer.deserialize(valueBuf);
Object value = valueSerializer.deserialize(BufDataInput.create(valueBuf));
if (isHashedSet) {
//noinspection unchecked
Set<K2> set = (Set<K2>) value;
K1 finalKey1 = key1;
//noinspection unchecked
return set.stream().map(e -> merger.apply(finalKey1, e, (V) Nothing.INSTANCE)).toList();
return set.stream().map(e -> merger.apply(finalKey1, e, (V) Nothing.INSTANCE));
} else if (isHashed) {
//noinspection unchecked
Set<Entry<K2, V>> set = (Set<Entry<K2, V>>) value;
K1 finalKey1 = key1;
return set.stream().map(e -> merger.apply(finalKey1, e.getKey(), e.getValue())).toList();
return set.stream().map(e -> merger.apply(finalKey1, e.getKey(), e.getValue()));
} else {
//noinspection unchecked
return List.of(merger.apply(key1, (K2) key2, (V) value));
return Stream.of(merger.apply(key1, (K2) key2, (V) value));
}
} catch (IndexOutOfBoundsException ex) {
var exMessage = ex.getMessage();
@@ -504,7 +438,7 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
+ ":" + key2
+ "](" + LLUtils.toStringSafe(keyBuf) + ") total=" + totalZeroBytesErrors);
}
return List.of();
return Stream.empty();
} else {
throw ex;
}
@@ -514,22 +448,4 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
}
});
}

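One detail worth noting in getAllLeaves2: when a progress key is supplied, iteration resumes by narrowing the scanned range to [serialize(savedKey1), fullRange.max). A standalone sketch of that resume logic, using a hypothetical range type:

// Resume a scan from a saved progress key: serialize it and use it as the
// new lower bound, keeping the original upper bound.
record ByteRange(byte[] min, byte[] max) {}

static ByteRange resumeFrom(byte[] savedKeySerialized, ByteRange fullRange) {
    if (savedKeySerialized == null) {
        return fullRange; // no saved progress: scan everything
    }
    return new ByteRange(savedKeySerialized, fullRange.max());
}
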
@Override
protected void onClose() {
try {
if (rangeSupplier != null) {
rangeSupplier.close();
}
} catch (Throwable ex) {
LOG.error("Failed to close range", ex);
}
try {
if (keyPrefixSupplier != null) {
keyPrefixSupplier.close();
}
} catch (Throwable ex) {
LOG.error("Failed to close keyPrefix", ex);
}
}
}

@@ -1,49 +1,43 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.BufferAllocator;
import io.netty5.buffer.Drop;
import io.netty5.buffer.Owned;
import io.netty5.util.Send;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.BufSupplier;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLUtils;
import io.netty5.buffer.internal.ResourceSupport;
import it.cavallium.dbengine.database.SubStageEntry;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.cavallium.dbengine.utils.SimpleResource;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import it.unimi.dsi.fastutil.objects.ObjectArraySet;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ForkJoinPool;
import java.util.function.Function;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

@SuppressWarnings("unused")
public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implements
DatabaseStageMap<T, U, DatabaseStageEntry<U>> {
public class DatabaseMapDictionaryHashed<T, U, TH> implements DatabaseStageMap<T, U, DatabaseStageEntry<U>> {

private static final Logger logger = LogManager.getLogger(DatabaseMapDictionaryHashed.class);

private final BufferAllocator alloc;
private final Function<T, TH> keySuffixHashFunction;

private final DatabaseMapDictionary<TH, ObjectArraySet<Entry<T, U>>> subDictionary;

protected DatabaseMapDictionaryHashed(LLDictionary dictionary,
@Nullable BufSupplier prefixKeySupplier,
@Nullable Buf prefixKeySupplier,
Serializer<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Function<T, TH> keySuffixHashFunction,
@@ -52,7 +46,6 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implem
if (updateMode != UpdateMode.ALLOW) {
throw new IllegalArgumentException("Hashed maps only works when UpdateMode is ALLOW");
}
this.alloc = dictionary.getAllocator();
ValueWithHashSerializer<T, U> valueWithHashSerializer
= new ValueWithHashSerializer<>(keySuffixSerializer, valueSerializer);
ValuesSetSerializer<Entry<T, U>> valuesSetSerializer
@@ -62,11 +55,8 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implem
this.keySuffixHashFunction = keySuffixHashFunction;
}

private DatabaseMapDictionaryHashed(BufferAllocator alloc,
Function<T, TH> keySuffixHashFunction,
DatabaseStage<Object2ObjectSortedMap<TH, ObjectArraySet<Entry<T, U>>>> subDictionary,
Drop<DatabaseMapDictionaryHashed<T, U, TH>> drop) {
this.alloc = alloc;
private DatabaseMapDictionaryHashed(Function<T, TH> keySuffixHashFunction,
DatabaseStage<Object2ObjectSortedMap<TH, ObjectArraySet<Entry<T, U>>>> subDictionary) {
this.keySuffixHashFunction = keySuffixHashFunction;

this.subDictionary = (DatabaseMapDictionary<TH, ObjectArraySet<Entry<T, U>>>) subDictionary;
@@ -88,7 +78,7 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implem
}

public static <T, U, UH> DatabaseMapDictionaryHashed<T, U, UH> tail(LLDictionary dictionary,
@Nullable BufSupplier prefixKeySupplier,
@Nullable Buf prefixKeySupplier,
Serializer<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Function<T, UH> keySuffixHashFunction,
@@ -121,36 +111,46 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implem
}

@Override
public Mono<Object2ObjectSortedMap<T, U>> get(@Nullable CompositeSnapshot snapshot) {
return subDictionary.get(snapshot).map(map -> deserializeMap(map));
public ForkJoinPool getDbReadPool() {
return subDictionary.getDbReadPool();
}

@Override
public Mono<Object2ObjectSortedMap<T, U>> getOrDefault(@Nullable CompositeSnapshot snapshot,
Mono<Object2ObjectSortedMap<T, U>> defaultValue) {
return this.get(snapshot).switchIfEmpty(defaultValue);
public ForkJoinPool getDbWritePool() {
return subDictionary.getDbWritePool();
}

@Override
public Mono<Void> set(Object2ObjectSortedMap<T, U> map) {
return Mono.fromSupplier(() -> this.serializeMap(map)).flatMap(value -> subDictionary.set(value));
public Object2ObjectSortedMap<T, U> get(@Nullable CompositeSnapshot snapshot) {
var v = subDictionary.get(snapshot);
var result = v != null ? deserializeMap(v) : null;
return result != null && result.isEmpty() ? null : result;
}

@Override
public Mono<Boolean> setAndGetChanged(Object2ObjectSortedMap<T, U> map) {
return Mono
.fromSupplier(() -> this.serializeMap(map))
.flatMap(value -> subDictionary.setAndGetChanged(value))
.single();
public Object2ObjectSortedMap<T, U> getOrDefault(@Nullable CompositeSnapshot snapshot,
Object2ObjectSortedMap<T, U> defaultValue) {
return Objects.requireNonNullElse(this.get(snapshot), defaultValue);
}

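Note the convention adopted by the synchronous API here: an empty deserialized map is normalized to null, so "absent" and "empty" collapse into one case and getOrDefault can rely on Objects.requireNonNullElse. A one-line sketch of that normalization (hypothetical helper):

// Treat an empty backing map as absent, mirroring get() above.
static <K, V> java.util.SortedMap<K, V> normalizeEmptyToNull(java.util.SortedMap<K, V> map) {
    return (map == null || map.isEmpty()) ? null : map;
}
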
@Override
public Mono<Boolean> clearAndGetStatus() {
public void set(Object2ObjectSortedMap<T, U> map) {
var value = this.serializeMap(map);
subDictionary.set(value);
}

@Override
public boolean setAndGetChanged(Object2ObjectSortedMap<T, U> map) {
return subDictionary.setAndGetChanged(this.serializeMap(map));
}

@Override
public boolean clearAndGetStatus() {
return subDictionary.clearAndGetStatus();
}

@Override
public Mono<Boolean> isEmpty(@Nullable CompositeSnapshot snapshot) {
public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
return subDictionary.isEmpty(snapshot);
}

@@ -160,20 +160,17 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implem
}

@Override
public Flux<BadBlock> badBlocks() {
return this.subDictionary.badBlocks();
public Stream<DbProgress<SSTVerificationProgress>> verifyChecksum() {
return this.subDictionary.verifyChecksum();
}

@Override
public Mono<DatabaseStageEntry<U>> at(@Nullable CompositeSnapshot snapshot, T key) {
return this
.atPrivate(snapshot, key, keySuffixHashFunction.apply(key))
.map(cast -> cast);
public @NotNull DatabaseStageEntry<U> at(@Nullable CompositeSnapshot snapshot, T key) {
return this.atPrivate(snapshot, key, keySuffixHashFunction.apply(key));
}

private Mono<DatabaseSingleBucket<T, U, TH>> atPrivate(@Nullable CompositeSnapshot snapshot, T key, TH hash) {
return subDictionary.at(snapshot, hash)
.map(entry -> new DatabaseSingleBucket<T, U, TH>(entry, key));
private DatabaseSingleBucket<T, U, TH> atPrivate(@Nullable CompositeSnapshot snapshot, T key, TH hash) {
return new DatabaseSingleBucket<T, U, TH>(subDictionary.at(snapshot, hash), key);
}

@Override
@@ -182,57 +179,69 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implem
}

@Override
public Flux<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot,
public Stream<SubStageEntry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot,
boolean smallRange) {
return subDictionary
.getAllValues(snapshot, smallRange)
.getAllEntries(snapshot, smallRange)
.map(Entry::getValue)
.map(Collections::unmodifiableSet)
.flatMap(bucket -> Flux
.fromIterable(bucket)
.flatMap(bucket -> bucket.stream()
.map(Entry::getKey)
.flatMap(key -> this.at(snapshot, key).map(stage -> new SubStageEntry<>(key, stage))));
.map(key -> new SubStageEntry<>(key, this.at(snapshot, key))));
}

@Override
public Flux<Entry<T, U>> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
public Stream<Entry<T, U>> getAllEntries(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return subDictionary
.getAllValues(snapshot, smallRange)
.getAllEntries(snapshot, smallRange)
.map(Entry::getValue)
.map(Collections::unmodifiableSet)
.concatMapIterable(list -> list);
.flatMap(Collection::stream);
}

@Override
public Flux<Entry<T, U>> setAllValuesAndGetPrevious(Flux<Entry<T, U>> entries) {
return entries.flatMap(entry -> Mono.usingWhen(this.at(null, entry.getKey()),
stage -> stage.setAndGetPrevious(entry.getValue()).map(prev -> Map.entry(entry.getKey(), prev)),
LLUtils::finalizeResource
));
public Stream<T> getAllKeys(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllEntries(snapshot, smallRange).map(Entry::getKey);
}

@Override
public Mono<Void> clear() {
return subDictionary.clear();
public Stream<U> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
return getAllEntries(snapshot, smallRange).map(Entry::getValue);
}

@Override
public Mono<Object2ObjectSortedMap<T, U>> setAndGetPrevious(Object2ObjectSortedMap<T, U> value) {
return Mono
.fromSupplier(() -> this.serializeMap(value))
.flatMap(value1 -> subDictionary.setAndGetPrevious(value1))
.map(map -> deserializeMap(map));
public Stream<Entry<T, U>> setAllEntriesAndGetPrevious(Stream<Entry<T, U>> entries) {
List<Entry<T, U>> prevList = entries.map(entry -> {
var prev = this.at(null, entry.getKey()).setAndGetPrevious(entry.getValue());
if (prev != null) {
return Map.entry(entry.getKey(), prev);
} else {
return null;
}
}).filter(Objects::nonNull).toList();
return prevList.stream();
}

@Override
public Mono<Object2ObjectSortedMap<T, U>> clearAndGetPrevious() {
return subDictionary
.clearAndGetPrevious()
.map(map -> deserializeMap(map));
public void clear() {
subDictionary.clear();
}

@Override
public Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
public Object2ObjectSortedMap<T, U> setAndGetPrevious(Object2ObjectSortedMap<T, U> value) {
var v = subDictionary.setAndGetPrevious(this.serializeMap(value));
var result = v != null ? deserializeMap(v) : null;
return result != null && result.isEmpty() ? null : result;
}

@Override
public Object2ObjectSortedMap<T, U> clearAndGetPrevious() {
var v = subDictionary.clearAndGetPrevious();
return v != null ? deserializeMap(v) : null;
}

@Override
public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return subDictionary.leavesCount(snapshot, fast);
}

@@ -245,13 +254,14 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implem
@Override
public ValueGetter<T, U> getAsyncDbValueGetter(@Nullable CompositeSnapshot snapshot) {
ValueGetter<TH, ObjectArraySet<Entry<T, U>>> getter = subDictionary.getAsyncDbValueGetter(snapshot);
return key -> getter
.get(keySuffixHashFunction.apply(key))
.flatMap(set -> this.extractValueTransformation(set, key));
}

private Mono<U> extractValueTransformation(ObjectArraySet<Entry<T, U>> entries, T key) {
return Mono.fromCallable(() -> extractValue(entries, key));
return key -> {
ObjectArraySet<Entry<T, U>> set = getter.get(keySuffixHashFunction.apply(key));
if (set != null) {
return this.extractValue(set, key);
} else {
return null;
}
};
}

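The value getter above resolves a key in two steps: hash the key to find its bucket, then scan the bucket's entries for the exact key. A conceptual sketch of that second step (a plain-Java stand-in for the class's extractValue):

// A bucket holds (key, value) entries whose keys share a hash; the real key
// disambiguates collisions with a linear scan.
static <K, V> V extractFromBucket(java.util.Set<java.util.Map.Entry<K, V>> bucket, K key) {
    if (bucket == null) {
        return null;
    }
    for (var entry : bucket) {
        if (java.util.Objects.equals(entry.getKey(), key)) {
            return entry.getValue();
        }
    }
    return null;
}
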
@Nullable
@@ -299,15 +309,4 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implem
return null;
}
}

@Override
protected void onClose() {
try {
if (subDictionary != null) {
subDictionary.close();
}
} catch (Throwable ex) {
logger.error("Failed to close subDictionary", ex);
}
}
}

@@ -1,12 +1,11 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Buffer;
import io.netty5.buffer.Drop;
import io.netty5.buffer.Owned;
import io.netty5.buffer.internal.ResourceSupport;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.BufSupplier;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
@@ -14,32 +13,27 @@ import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.disk.CachedSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.utils.SimpleResource;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class DatabaseMapSingle<U> extends SimpleResource implements DatabaseStageEntry<U> {
public final class DatabaseMapSingle<U> implements DatabaseStageEntry<U> {

private static final Logger LOG = LogManager.getLogger(DatabaseMapSingle.class);

private final AtomicLong totalZeroBytesErrors = new AtomicLong();

private final LLDictionary dictionary;
private final Mono<Buffer> keyMono;
private final Buf key;
private final Serializer<U> serializer;
private final BufSupplier keySupplier;

public DatabaseMapSingle(LLDictionary dictionary, BufSupplier keySupplier, Serializer<U> serializer) {
public DatabaseMapSingle(LLDictionary dictionary, Buf key, Serializer<U> serializer) {
this.dictionary = dictionary;
this.keySupplier = keySupplier;
this.keyMono = Mono.fromSupplier(() -> keySupplier.get());
this.key = key;
this.serializer = serializer;
}

@@ -51,127 +45,96 @@ public class DatabaseMapSingle<U> extends SimpleResource implements DatabaseStag
}
}

private U deserializeValue(Buffer value) {
private U deserializeValue(Buf value) {
try {
return serializer.deserialize(value);
return serializer.deserialize(BufDataInput.create(value));
} catch (IndexOutOfBoundsException ex) {
var exMessage = ex.getMessage();
if (exMessage != null && exMessage.contains("read 0 to 0, write 0 to ")) {
try (var key = keySupplier.get()) {
LOG.error("Unexpected zero-bytes value at "
+ dictionary.getDatabaseName() + ":" + dictionary.getColumnName() + ":" + LLUtils.toStringSafe(key));
}
LOG.error("Unexpected zero-bytes value at %s:%s:%s".formatted(dictionary.getDatabaseName(),
dictionary.getColumnName(),
LLUtils.toStringSafe(key)
));
return null;
} else {
throw ex;
}
} catch (SerializationException ex) {
throw ex;
}
}

private Buffer serializeValue(U value) throws SerializationException {
var valSizeHint = serializer.getSerializedSizeHint();
if (valSizeHint == -1) valSizeHint = 128;
var valBuf = dictionary.getAllocator().allocate(valSizeHint);
try {
serializer.serialize(value, valBuf);
return valBuf;
} catch (Throwable ex) {
valBuf.close();
throw ex;
private Buf serializeValue(U value) throws SerializationException {
BufDataOutput valBuf = BufDataOutput.create(serializer.getSerializedSizeHint());
serializer.serialize(value, valBuf);
return valBuf.asList();
}

@Override
public ForkJoinPool getDbReadPool() {
return dictionary.getDbReadPool();
}

@Override
public ForkJoinPool getDbWritePool() {
return dictionary.getDbWritePool();
}

@Override
public U get(@Nullable CompositeSnapshot snapshot) {
var result = dictionary.get(resolveSnapshot(snapshot), key);
if (result != null) {
return deserializeValue(result);
} else {
return null;
}
}

@Override
public Mono<U> get(@Nullable CompositeSnapshot snapshot) {
return Mono.usingWhen(dictionary.get(resolveSnapshot(snapshot), keyMono),
buf -> Mono.fromSupplier(() -> deserializeValue(buf)),
LLUtils::finalizeResource
);
public U setAndGetPrevious(U value) {
var serializedKey = value != null ? serializeValue(value) : null;
var result = dictionary.put(key, serializedKey, LLDictionaryResultType.PREVIOUS_VALUE);
if (result != null) {
return deserializeValue(result);
} else {
return null;
}
}

@Override
public Mono<U> setAndGetPrevious(U value) {
return Mono.usingWhen(dictionary
.put(keyMono, Mono.fromCallable(() -> serializeValue(value)), LLDictionaryResultType.PREVIOUS_VALUE),
buf -> Mono.fromSupplier(() -> deserializeValue(buf)),
LLUtils::finalizeResource);
public U update(SerializationFunction<@Nullable U, @Nullable U> updater, UpdateReturnMode updateReturnMode) {
var serializedUpdater = createUpdater(updater);
dictionary.update(key, serializedUpdater, UpdateReturnMode.NOTHING);
return serializedUpdater.getResult(updateReturnMode);
}

@Override
public Mono<U> update(SerializationFunction<@Nullable U, @Nullable U> updater,
UpdateReturnMode updateReturnMode) {
var resultMono = dictionary
.update(keyMono, (oldValueSer) -> {
try (oldValueSer) {
U result;
if (oldValueSer == null) {
result = updater.apply(null);
} else {
U deserializedValue = serializer.deserialize(oldValueSer);
result = updater.apply(deserializedValue);
}
if (result == null) {
return null;
} else {
return serializeValue(result);
}
}
}, updateReturnMode);
return Mono.usingWhen(resultMono,
result -> Mono.fromSupplier(() -> deserializeValue(result)),
LLUtils::finalizeResource
);
public Delta<U> updateAndGetDelta(SerializationFunction<@Nullable U, @Nullable U> updater) {
var serializedUpdater = createUpdater(updater);
dictionary.update(key, serializedUpdater, UpdateReturnMode.NOTHING);
return serializedUpdater.getDelta();
}

private CachedSerializationFunction<U, Buf, Buf> createUpdater(SerializationFunction<U, U> updater) {
return new CachedSerializationFunction<>(updater, this::serializeValue, this::deserializeValue);
}

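CachedSerializationFunction (imported above) is what lets the synchronous update() and updateAndGetDelta() share one database round trip: it adapts the value-level updater into a byte-level updater and remembers the values it saw. A sketch of the presumed mechanism, inferred only from the call sites (getResult/getDelta), so treat every detail as an assumption:

// Adapt a value-level updater into a byte-level one, caching the old and new
// values so the caller can read them back without re-deserializing.
static <U> java.util.function.UnaryOperator<byte[]> cachingUpdater(
        java.util.function.UnaryOperator<U> updater,
        java.util.function.Function<U, byte[]> serialize,
        java.util.function.Function<byte[], U> deserialize,
        java.util.concurrent.atomic.AtomicReference<U> previous,
        java.util.concurrent.atomic.AtomicReference<U> current) {
    return serialized -> {
        U old = serialized != null ? deserialize.apply(serialized) : null;
        U updated = updater.apply(old);
        previous.set(old); // later exposed as the delta's "previous"
        current.set(updated); // later exposed as the delta's "current"
        return updated != null ? serialize.apply(updated) : null;
    };
}
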
@Override
public Mono<Delta<U>> updateAndGetDelta(SerializationFunction<@Nullable U, @Nullable U> updater) {
return dictionary
.updateAndGetDelta(keyMono, (oldValueSer) -> {
U result;
if (oldValueSer == null) {
result = updater.apply(null);
} else {
U deserializedValue = serializer.deserialize(oldValueSer);
result = updater.apply(deserializedValue);
}
if (result == null) {
return null;
} else {
return serializeValue(result);
}
}).transform(mono -> LLUtils.mapLLDelta(mono, serialized -> serializer.deserialize(serialized)));
public U clearAndGetPrevious() {
return deserializeValue(dictionary.remove(key, LLDictionaryResultType.PREVIOUS_VALUE));
}

@Override
public Mono<U> clearAndGetPrevious() {
return Mono.usingWhen(dictionary.remove(keyMono, LLDictionaryResultType.PREVIOUS_VALUE),
result -> Mono.fromSupplier(() -> deserializeValue(result)),
LLUtils::finalizeResource
);
public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key), false) ? 0L : 1L;
}

@Override
public Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
return dictionary
.isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(single -> LLRange.singleUnsafe(single)), false)
.map(empty -> empty ? 0L : 1L);
public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
return dictionary.isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key), true);
}

@Override
public Mono<Boolean> isEmpty(@Nullable CompositeSnapshot snapshot) {
return dictionary
.isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(single -> LLRange.singleUnsafe(single)), true);
public Stream<DbProgress<SSTVerificationProgress>> verifyChecksum() {
return dictionary.verifyChecksum(LLRange.single(key));
}

@Override
public Flux<BadBlock> badBlocks() {
return dictionary.badBlocks(keyMono.map(single -> LLRange.singleUnsafe(single)));
}

@Override
protected void onClose() {
keySupplier.close();
}
}

@@ -1,32 +1,21 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Buffer;
import io.netty5.buffer.Drop;
import io.netty5.util.Send;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.BufSupplier;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;

@SuppressWarnings("unused")
public class DatabaseSetDictionary<T> extends DatabaseMapDictionary<T, Nothing> {

protected DatabaseSetDictionary(LLDictionary dictionary,
BufSupplier prefixKeySupplier,
Buf prefixKeySupplier,
SerializerFixedBinaryLength<T> keySuffixSerializer) {
super(dictionary,
prefixKeySupplier,
keySuffixSerializer,
DatabaseEmpty.nothingSerializer(dictionary.getAllocator())
);
super(dictionary, prefixKeySupplier, keySuffixSerializer, DatabaseEmpty.nothingSerializer());
}

public static <T> DatabaseSetDictionary<T> simple(LLDictionary dictionary,
@@ -35,24 +24,27 @@ public class DatabaseSetDictionary<T> extends DatabaseMapDictionary<T, Nothing>
}

public static <T> DatabaseSetDictionary<T> tail(LLDictionary dictionary,
BufSupplier prefixKeySupplier,
Buf prefixKeySupplier,
SerializerFixedBinaryLength<T> keySuffixSerializer) {
return new DatabaseSetDictionary<>(dictionary, prefixKeySupplier, keySuffixSerializer);
}

public Mono<Set<T>> getKeySet(@Nullable CompositeSnapshot snapshot) {
return get(snapshot).map(Map::keySet);
public Set<T> getKeySet(@Nullable CompositeSnapshot snapshot) {
var v = get(snapshot);
return v != null ? v.keySet() : null;
}

public Mono<Set<T>> setAndGetPreviousKeySet(Set<T> value) {
public Set<T> setAndGetPreviousKeySet(Set<T> value) {
var hm = new Object2ObjectLinkedOpenHashMap<T, Nothing>();
for (T t : value) {
hm.put(t, DatabaseEmpty.NOTHING);
}
return setAndGetPrevious(hm).map(Map::keySet);
var v = setAndGetPrevious(hm);
return v != null ? v.keySet() : null;
}

public Mono<Set<T>> clearAndGetPreviousKeySet() {
return clearAndGetPrevious().map(Map::keySet);
public Set<T> clearAndGetPreviousKeySet() {
var v = clearAndGetPrevious();
return v != null ? v.keySet() : null;
}
}

@@ -1,36 +1,28 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Buffer;
import io.netty5.buffer.Drop;
import io.netty5.util.Send;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.BufSupplier;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;

@SuppressWarnings("unused")
public class DatabaseSetDictionaryHashed<T, TH> extends DatabaseMapDictionaryHashed<T, Nothing, TH> {

protected DatabaseSetDictionaryHashed(LLDictionary dictionary,
@Nullable BufSupplier prefixKeySupplier,
@Nullable Buf prefixKeySupplier,
Serializer<T> keySuffixSerializer,
Function<T, TH> keySuffixHashFunction,
SerializerFixedBinaryLength<TH> keySuffixHashSerializer) {
super(dictionary,
prefixKeySupplier,
keySuffixSerializer,
DatabaseEmpty.nothingSerializer(dictionary.getAllocator()),
DatabaseEmpty.nothingSerializer(),
keySuffixHashFunction,
keySuffixHashSerializer
);
@@ -49,7 +41,7 @@ public class DatabaseSetDictionaryHashed<T, TH> extends DatabaseMapDictionaryHas
}

public static <T, TH> DatabaseSetDictionaryHashed<T, TH> tail(LLDictionary dictionary,
@Nullable BufSupplier prefixKeySupplier,
@Nullable Buf prefixKeySupplier,
Serializer<T> keySuffixSerializer,
Function<T, TH> keyHashFunction,
SerializerFixedBinaryLength<TH> keyHashSerializer) {
@@ -61,19 +53,22 @@ public class DatabaseSetDictionaryHashed<T, TH> extends DatabaseMapDictionaryHas
);
}

public Mono<Set<T>> getKeySet(@Nullable CompositeSnapshot snapshot) {
return get(snapshot).map(Map::keySet);
public Set<T> getKeySet(@Nullable CompositeSnapshot snapshot) {
var v = get(snapshot);
return v != null ? v.keySet() : null;
}

public Mono<Set<T>> setAndGetPreviousKeySet(Set<T> value) {
public Set<T> setAndGetPreviousKeySet(Set<T> value) {
var hm = new Object2ObjectLinkedOpenHashMap<T, Nothing>();
for (T t : value) {
hm.put(t, DatabaseEmpty.NOTHING);
}
return setAndGetPrevious(hm).map(Map::keySet);
var v = setAndGetPrevious(hm);
return v != null ? v.keySet() : null;
}

public Mono<Set<T>> clearAndGetPreviousKeySet() {
return clearAndGetPrevious().map(Map::keySet);
public Set<T> clearAndGetPreviousKeySet() {
var v = clearAndGetPrevious();
return v != null ? v.keySet() : null;
}
}

@@ -1,30 +1,26 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Drop;
import io.netty5.buffer.Owned;
import io.netty5.util.Send;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLUtils;
import io.netty5.buffer.internal.ResourceSupport;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.utils.SimpleResource;
import it.unimi.dsi.fastutil.objects.ObjectArraySet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

@SuppressWarnings("unused")
public class DatabaseSingleBucket<K, V, TH> extends SimpleResource implements DatabaseStageEntry<V> {
public class DatabaseSingleBucket<K, V, TH> implements DatabaseStageEntry<V> {

private static final Logger logger = LogManager.getLogger(DatabaseSingleBucket.class);

@@ -43,33 +39,45 @@ public class DatabaseSingleBucket<K, V, TH> extends SimpleResource implements Da
}

@Override
public Mono<V> get(@Nullable CompositeSnapshot snapshot) {
return bucketStage.get(snapshot).flatMap(entries -> extractValueTransformation(entries));
public ForkJoinPool getDbReadPool() {
return bucketStage.getDbReadPool();
}

@Override
public Mono<V> getOrDefault(@Nullable CompositeSnapshot snapshot, Mono<V> defaultValue) {
return bucketStage.get(snapshot).flatMap(entries -> extractValueTransformation(entries)).switchIfEmpty(defaultValue);
public ForkJoinPool getDbWritePool() {
return bucketStage.getDbWritePool();
}

@Override
public Mono<Void> set(V value) {
return this.update(prev -> value, UpdateReturnMode.NOTHING).then();
public V get(@Nullable CompositeSnapshot snapshot) {
var entries = bucketStage.get(snapshot);
return entries != null ? extractValue(entries) : null;
}

@Override
public Mono<V> setAndGetPrevious(V value) {
public V getOrDefault(@Nullable CompositeSnapshot snapshot, V defaultValue) {
var entries = bucketStage.get(snapshot);
return entries != null ? extractValue(entries) : defaultValue;
}

@Override
public void set(V value) {
this.update(prev -> value, UpdateReturnMode.NOTHING);
}

@Override
public V setAndGetPrevious(V value) {
return this.update(prev -> value, UpdateReturnMode.GET_OLD_VALUE);
}

@Override
public Mono<Boolean> setAndGetChanged(V value) {
return this.updateAndGetDelta(prev -> value).map(delta -> LLUtils.isDeltaChanged(delta));
public boolean setAndGetChanged(V value) {
return LLUtils.isDeltaChanged(this.updateAndGetDelta(prev -> value));
}

@Override
public Mono<V> update(SerializationFunction<@Nullable V, @Nullable V> updater, UpdateReturnMode updateReturnMode) {
return bucketStage
public V update(SerializationFunction<@Nullable V, @Nullable V> updater, UpdateReturnMode updateReturnMode) {
var result = bucketStage
.update(oldBucket -> {
V oldValue = extractValue(oldBucket);
V newValue = updater.apply(oldValue);
@@ -79,13 +87,13 @@ public class DatabaseSingleBucket<K, V, TH> extends SimpleResource implements Da
} else {
return this.insertValueOrCreate(oldBucket, newValue);
}
}, updateReturnMode)
.flatMap(entries -> extractValueTransformation(entries));
}, updateReturnMode);
return result != null ? extractValue(result) : null;
}

@Override
public Mono<Delta<V>> updateAndGetDelta(SerializationFunction<@Nullable V, @Nullable V> updater) {
return bucketStage.updateAndGetDelta(oldBucket -> {
public Delta<V> updateAndGetDelta(SerializationFunction<@Nullable V, @Nullable V> updater) {
var delta = bucketStage.updateAndGetDelta(oldBucket -> {
V oldValue = extractValue(oldBucket);
var result = updater.apply(oldValue);
if (result == null) {
@@ -93,32 +101,33 @@ public class DatabaseSingleBucket<K, V, TH> extends SimpleResource implements Da
} else {
return this.insertValueOrCreate(oldBucket, result);
}
}).transform(mono -> LLUtils.mapDelta(mono, entries -> extractValue(entries)));
});
return LLUtils.mapDelta(delta, this::extractValue);
}

@Override
|
||||
public Mono<Void> clear() {
|
||||
return this.update(prev -> null, UpdateReturnMode.NOTHING).then();
|
||||
public void clear() {
|
||||
this.update(prev -> null, UpdateReturnMode.NOTHING);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<V> clearAndGetPrevious() {
|
||||
public V clearAndGetPrevious() {
|
||||
return this.update(prev -> null, UpdateReturnMode.GET_OLD_VALUE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Boolean> clearAndGetStatus() {
|
||||
return this.updateAndGetDelta(prev -> null).map(delta -> LLUtils.isDeltaChanged(delta));
|
||||
public boolean clearAndGetStatus() {
|
||||
return LLUtils.isDeltaChanged(this.updateAndGetDelta(prev -> null));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
|
||||
return this.get(snapshot).map(prev -> 1L).defaultIfEmpty(0L);
|
||||
public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
|
||||
return this.get(snapshot) != null ? 1L : 0L;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Boolean> isEmpty(@Nullable CompositeSnapshot snapshot) {
|
||||
return this.get(snapshot).map(prev -> true).defaultIfEmpty(true);
|
||||
public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
|
||||
return this.get(snapshot) == null;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -127,12 +136,8 @@ public class DatabaseSingleBucket<K, V, TH> extends SimpleResource implements Da
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BadBlock> badBlocks() {
|
||||
return bucketStage.badBlocks();
|
||||
}
|
||||
|
||||
private Mono<V> extractValueTransformation(Set<Entry<K, V>> entries) {
|
||||
return Mono.fromCallable(() -> extractValue(entries));
|
||||
public Stream<DbProgress<SSTVerificationProgress>> verifyChecksum() {
|
||||
return bucketStage.verifyChecksum();
|
||||
}
|
||||
|
||||
@Nullable
|
||||
@ -188,15 +193,4 @@ public class DatabaseSingleBucket<K, V, TH> extends SimpleResource implements Da
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onClose() {
|
||||
try {
|
||||
if (bucketStage != null) {
|
||||
bucketStage.close();
|
||||
}
|
||||
} catch (Throwable ex) {
|
||||
logger.error("Failed to close bucketStage", ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
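The change above follows the pattern used across this whole changeset: every Mono<X>-returning method becomes a plain method returning a @Nullable X, Flux becomes java.util.stream.Stream, and the SimpleResource/onClose lifecycle disappears along with the reactive types. A minimal calling-code sketch, assuming a DatabaseStageEntry<String> named stage created elsewhere (the variable names and values are illustrative, not part of this diff):

// Hypothetical usage; a null snapshot reads the latest data.
String value = stage.get(null);                       // was: stage.get(null).block()
String orDefault = stage.getOrDefault(null, "none");  // was: get(...).switchIfEmpty(Mono.just("none"))
boolean changed = stage.setAndGetChanged("updated");  // plain boolean instead of Mono<Boolean>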
@@ -1,137 +1,115 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Drop;
import io.netty5.buffer.Owned;
import io.netty5.util.Send;
import io.netty5.buffer.internal.ResourceSupport;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.Mapper;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.disk.CachedSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.utils.SimpleResource;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SynchronousSink;

@SuppressWarnings("unused")
public class DatabaseSingleMapped<A, B> extends SimpleResource implements DatabaseStageEntry<A> {

    private static final Logger logger = LogManager.getLogger(DatabaseSingleMapped.class);
public class DatabaseSingleMapped<A, B> implements DatabaseStageEntry<A> {

    private final Mapper<A, B> mapper;

    private final DatabaseStageEntry<B> serializedSingle;

    @SuppressWarnings({"unchecked", "rawtypes"})
    public DatabaseSingleMapped(DatabaseStageEntry<B> serializedSingle, Mapper<A, B> mapper,
            Drop<DatabaseSingleMapped<A, B>> drop) {
    public DatabaseSingleMapped(DatabaseStageEntry<B> serializedSingle, Mapper<A, B> mapper) {
        this.serializedSingle = serializedSingle;
        this.mapper = mapper;
    }

    @SuppressWarnings({"unchecked", "rawtypes"})
    private DatabaseSingleMapped(DatabaseStage<B> serializedSingle, Mapper<A, B> mapper,
            Drop<DatabaseSingleMapped<A, B>> drop) {
    private DatabaseSingleMapped(DatabaseStage<B> serializedSingle, Mapper<A, B> mapper) {
        this.mapper = mapper;

        this.serializedSingle = (DatabaseStageEntry<B>) serializedSingle;
    }

    private void deserializeSink(B value, SynchronousSink<A> sink) {
        try {
            sink.next(this.unMap(value));
        } catch (SerializationException ex) {
            sink.error(ex);
        }
    @Override
    public ForkJoinPool getDbReadPool() {
        return serializedSingle.getDbReadPool();
    }

    @Override
    public Mono<A> get(@Nullable CompositeSnapshot snapshot) {
        return serializedSingle.get(snapshot).handle((value, sink) -> deserializeSink(value, sink));
    public ForkJoinPool getDbWritePool() {
        return serializedSingle.getDbWritePool();
    }

    @Override
    public Mono<A> getOrDefault(@Nullable CompositeSnapshot snapshot, Mono<A> defaultValue) {
        return serializedSingle.get(snapshot).handle((B value, SynchronousSink<A> sink) -> deserializeSink(value, sink)).switchIfEmpty(defaultValue);
    public A get(@Nullable CompositeSnapshot snapshot) {
        var data = serializedSingle.get(snapshot);
        if (data == null) return null;
        return this.unMap(data);
    }

    @Override
    public Mono<Void> set(A value) {
        return Mono
                .fromCallable(() -> map(value))
                .flatMap(value1 -> serializedSingle.set(value1));
    public A getOrDefault(@Nullable CompositeSnapshot snapshot, A defaultValue) {
        var value = serializedSingle.get(snapshot);
        if (value == null) return defaultValue;
        return this.unMap(value);
    }

    @Override
    public Mono<A> setAndGetPrevious(A value) {
        return Mono
                .fromCallable(() -> map(value))
                .flatMap(value2 -> serializedSingle.setAndGetPrevious(value2))
                .handle((value1, sink) -> deserializeSink(value1, sink));
    public void set(A value) {
        B mappedValue = value != null ? map(value) : null;
        serializedSingle.set(mappedValue);
    }

    @Override
    public Mono<Boolean> setAndGetChanged(A value) {
        return Mono
                .fromCallable(() -> map(value))
                .flatMap(value1 -> serializedSingle.setAndGetChanged(value1))
                .single();
    public A setAndGetPrevious(A value) {
        var mappedValue = value != null ? map(value) : null;
        var prev = serializedSingle.setAndGetPrevious(mappedValue);
        return prev != null ? unMap(prev) : null;
    }

    @Override
    public Mono<A> update(SerializationFunction<@Nullable A, @Nullable A> updater,
            UpdateReturnMode updateReturnMode) {
        return serializedSingle.update(oldValue -> {
            var result = updater.apply(oldValue == null ? null : this.unMap(oldValue));
            if (result == null) {
                return null;
            } else {
                return this.map(result);
            }
        }, updateReturnMode).handle((value, sink) -> deserializeSink(value, sink));
    public boolean setAndGetChanged(A value) {
        var mappedValue = value != null ? map(value) : null;
        return serializedSingle.setAndGetChanged(mappedValue);
    }

    @Override
    public Mono<Delta<A>> updateAndGetDelta(SerializationFunction<@Nullable A, @Nullable A> updater) {
        return serializedSingle.updateAndGetDelta(oldValue -> {
            var result = updater.apply(oldValue == null ? null : this.unMap(oldValue));
            if (result == null) {
                return null;
            } else {
                return this.map(result);
            }
        }).transform(mono -> LLUtils.mapDelta(mono, bytes -> unMap(bytes)));
    public A update(SerializationFunction<@Nullable A, @Nullable A> updater, UpdateReturnMode updateReturnMode) {
        var mappedUpdater = new CachedSerializationFunction<>(updater, this::map, this::unMap);
        serializedSingle.update(mappedUpdater, UpdateReturnMode.NOTHING);
        return mappedUpdater.getResult(updateReturnMode);
    }

    @Override
    public Mono<Void> clear() {
        return serializedSingle.clear();
    public Delta<A> updateAndGetDelta(SerializationFunction<@Nullable A, @Nullable A> updater) {
        var mappedUpdater = new CachedSerializationFunction<>(updater, this::map, this::unMap);
        serializedSingle.update(mappedUpdater, UpdateReturnMode.NOTHING);
        return mappedUpdater.getDelta();
    }

    @Override
    public Mono<A> clearAndGetPrevious() {
        return serializedSingle.clearAndGetPrevious().handle((value, sink) -> deserializeSink(value, sink));
    public void clear() {
        serializedSingle.clear();
    }

    @Override
    public Mono<Boolean> clearAndGetStatus() {
    public A clearAndGetPrevious() {
        var prev = serializedSingle.clearAndGetPrevious();
        return prev != null ? unMap(prev) : null;
    }

    @Override
    public boolean clearAndGetStatus() {
        return serializedSingle.clearAndGetStatus();
    }

    @Override
    public Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
    public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
        return serializedSingle.leavesCount(snapshot, fast);
    }

    @Override
    public Mono<Boolean> isEmpty(@Nullable CompositeSnapshot snapshot) {
    public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
        return serializedSingle.isEmpty(snapshot);
    }

@@ -141,8 +119,8 @@ public class DatabaseSingleMapped<A, B> extends SimpleResource implements Databa
    }

    @Override
    public Flux<BadBlock> badBlocks() {
        return this.serializedSingle.badBlocks();
    public Stream<DbProgress<SSTVerificationProgress>> verifyChecksum() {
        return this.serializedSingle.verifyChecksum();
    }

    private A unMap(B bytes) throws SerializationException {
@@ -152,9 +130,4 @@ public class DatabaseSingleMapped<A, B> extends SimpleResource implements Databa
    private B map(A bytes) throws SerializationException {
        return mapper.map(bytes);
    }

    @Override
    protected void onClose() {
        serializedSingle.close();
    }
}
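Both update methods now funnel through CachedSerializationFunction, which adapts the updater from the mapped type A to the stored type B and remembers the old and new values, so the caller can answer GET_OLD_VALUE or GET_NEW_VALUE (or build a Delta) without a second read. A rough self-contained sketch of that idea; the class below is illustrative only and is not the real it.cavallium.dbengine.database.disk.CachedSerializationFunction:

import java.util.function.Function;

// Adapts an updater over A into an updater over B and caches both A values.
final class CachingUpdater<A, B> implements Function<B, B> {
    private final Function<A, A> updater;
    private final Function<A, B> map;   // A -> stored form
    private final Function<B, A> unMap; // stored form -> A
    private A previous;
    private A current;

    CachingUpdater(Function<A, A> updater, Function<A, B> map, Function<B, A> unMap) {
        this.updater = updater;
        this.map = map;
        this.unMap = unMap;
    }

    @Override
    public B apply(B oldStored) {
        previous = oldStored == null ? null : unMap.apply(oldStored);
        current = updater.apply(previous);
        return current == null ? null : map.apply(current);
    }

    A previous() { return previous; } // supports GET_OLD_VALUE and the delta
    A current() { return current; }   // supports GET_NEW_VALUE
}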
@@ -1,36 +1,32 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Buffer;
import io.netty5.buffer.Drop;
import io.netty5.buffer.Owned;
import io.netty5.buffer.internal.ResourceSupport;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.buffer.Buf;
import it.cavallium.buffer.BufDataInput;
import it.cavallium.buffer.BufDataOutput;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLSingleton;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.disk.CachedSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.utils.InternalMonoUtils;
import it.cavallium.dbengine.utils.SimpleResource;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SynchronousSink;

public class DatabaseSingleton<U> extends SimpleResource implements DatabaseStageEntry<U> {
public class DatabaseSingleton<U> implements DatabaseStageEntry<U> {

    private static final Logger LOG = LogManager.getLogger(DatabaseSingleton.class);

    private final LLSingleton singleton;
    private final Serializer<U> serializer;

    @SuppressWarnings({"unchecked", "rawtypes"})
    public DatabaseSingleton(LLSingleton singleton, Serializer<U> serializer) {
        this.singleton = singleton;
        this.serializer = serializer;
@@ -44,13 +40,12 @@ public class DatabaseSingleton<U> extends SimpleResource implements DatabaseStag
        }
    }

    private U deserializeValue(Buffer value) {
    private U deserializeValue(Buf value) {
        if (value == null) {
            return null;
        }
        try {
            U deserializedValue;
            try (value) {
                deserializedValue = serializer.deserialize(value);
            }
            return deserializedValue;
            return serializer.deserialize(BufDataInput.create(value));
        } catch (IndexOutOfBoundsException ex) {
            var exMessage = ex.getMessage();
            if (exMessage != null && exMessage.contains("read 0 to 0, write 0 to ")) {
@@ -63,124 +58,80 @@ public class DatabaseSingleton<U> extends SimpleResource implements DatabaseStag
            }
        }
    }

    private Buffer serializeValue(U value) throws SerializationException {
    private Buf serializeValue(U value) throws SerializationException {
        var valSizeHint = serializer.getSerializedSizeHint();
        if (valSizeHint == -1) valSizeHint = 128;
        var valBuf = singleton.getAllocator().allocate(valSizeHint);
        try {
            serializer.serialize(value, valBuf);
            return valBuf;
        } catch (Throwable ex) {
            valBuf.close();
            throw ex;
        }
        var valBuf = BufDataOutput.create(valSizeHint);
        serializer.serialize(value, valBuf);
        return valBuf.asList();
    }

    @Override
    public Mono<U> get(@Nullable CompositeSnapshot snapshot) {
        var resultMono = singleton.get(resolveSnapshot(snapshot));
        return Mono.usingWhen(resultMono,
                result -> Mono.fromSupplier(() -> this.deserializeValue(result)),
                LLUtils::finalizeResource
        );
    public ForkJoinPool getDbReadPool() {
        return this.singleton.getDbReadPool();
    }

    @Override
    public Mono<Void> set(U value) {
        return singleton.set(Mono.fromCallable(() -> serializeValue(value)));
    public ForkJoinPool getDbWritePool() {
        return this.singleton.getDbWritePool();
    }

    @Override
    public Mono<U> setAndGetPrevious(U value) {
        var resultMono = Flux
                .concat(singleton.get(null),
                        singleton.set(Mono.fromCallable(() -> serializeValue(value))).as(InternalMonoUtils::toAny)
                )
                .last();
        return Mono.usingWhen(resultMono,
                result -> Mono.fromSupplier(() -> this.deserializeValue(result)),
                LLUtils::finalizeResource
        );
    public U get(@Nullable CompositeSnapshot snapshot) {
        Buf result = singleton.get(resolveSnapshot(snapshot));
        return this.deserializeValue(result);
    }

    @Override
    public Mono<U> update(SerializationFunction<@Nullable U, @Nullable U> updater,
            UpdateReturnMode updateReturnMode) {
        var resultMono = singleton
                .update((oldValueSer) -> {
                    try (oldValueSer) {
                        U result;
                        if (oldValueSer == null) {
                            result = updater.apply(null);
                        } else {
                            U deserializedValue = serializer.deserialize(oldValueSer);
                            result = updater.apply(deserializedValue);
                        }
                        if (result == null) {
                            return null;
                        } else {
                            return serializeValue(result);
                        }
                    }
                }, updateReturnMode);
        return Mono.usingWhen(resultMono,
                result -> Mono.fromSupplier(() -> this.deserializeValue(result)),
                LLUtils::finalizeResource
        );
    public void set(U value) {
        singleton.set(serializeValue(value));
    }

    @Override
    public Mono<Delta<U>> updateAndGetDelta(SerializationFunction<@Nullable U, @Nullable U> updater) {
        return singleton
                .updateAndGetDelta((oldValueSer) -> {
                    try (oldValueSer) {
                        U result;
                        if (oldValueSer == null) {
                            result = updater.apply(null);
                        } else {
                            U deserializedValue = serializer.deserialize(oldValueSer);
                            result = updater.apply(deserializedValue);
                        }
                        if (result == null) {
                            return null;
                        } else {
                            return serializeValue(result);
                        }
                    }
                }).transform(mono -> LLUtils.mapLLDelta(mono, serialized -> serializer.deserialize(serialized)));
    public U setAndGetPrevious(U value) {
        var prev = singleton.get(null);
        singleton.set(serializeValue(value));
        return this.deserializeValue(prev);
    }

    @Override
    public Mono<Void> clear() {
        return singleton.set(Mono.empty());
    public U update(SerializationFunction<@Nullable U, @Nullable U> updater, UpdateReturnMode updateReturnMode) {
        var serializedUpdater = new CachedSerializationFunction<>(updater, this::serializeValue, this::deserializeValue);
        singleton.update(serializedUpdater, UpdateReturnMode.NOTHING);
        return serializedUpdater.getResult(updateReturnMode);
    }

    @Override
    public Mono<U> clearAndGetPrevious() {
        var resultMono = Flux.concat(singleton.get(null), singleton.set(Mono.empty()).as(InternalMonoUtils::toAny)).last();
        return Mono.usingWhen(resultMono,
                result -> Mono.fromSupplier(() -> this.deserializeValue(result)),
                LLUtils::finalizeResource
        );
    public Delta<U> updateAndGetDelta(SerializationFunction<@Nullable U, @Nullable U> updater) {
        var serializedUpdater = new CachedSerializationFunction<>(updater, this::serializeValue, this::deserializeValue);
        singleton.update(serializedUpdater, UpdateReturnMode.NOTHING);
        return serializedUpdater.getDelta();
    }

    @Override
    public Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
        return singleton.get(null).map(unused -> 1L).defaultIfEmpty(0L);
    public void clear() {
        singleton.set(null);
    }

    @Override
    public Mono<Boolean> isEmpty(@Nullable CompositeSnapshot snapshot) {
        return singleton.get(null).map(t -> false).defaultIfEmpty(true);
    public U clearAndGetPrevious() {
        var result = singleton.get(null);
        singleton.set(null);
        return this.deserializeValue(result);
    }

    @Override
    public Flux<BadBlock> badBlocks() {
        return Flux.empty();
    public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
        return singleton.get(null) != null ? 1L : 0L;
    }

    @Override
    protected void onClose() {
    public boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
        return singleton.get(null) == null;
    }

    @Override
    public Stream<DbProgress<SSTVerificationProgress>> verifyChecksum() {
        return Stream.empty();
    }
}
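The serialization helpers above replace netty5 Buffer allocation with the BufDataOutput/BufDataInput pair. A minimal round-trip sketch following the same calls; the two helper methods and the String serializer are hypothetical:

static Buf write(Serializer<String> serializer, String value) {
    int sizeHint = serializer.getSerializedSizeHint();
    var out = BufDataOutput.create(sizeHint == -1 ? 128 : sizeHint); // same fallback as above
    serializer.serialize(value, out);
    return out.asList();
}

static String read(Serializer<String> serializer, Buf stored) {
    return stored == null ? null : serializer.deserialize(BufDataInput.create(stored));
}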
@@ -1,64 +1,61 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.util.Resource;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.dbengine.client.DbProgress;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.SafeCloseable;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.util.Objects;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public interface DatabaseStage<T> extends DatabaseStageWithEntry<T>, SafeCloseable {
public interface DatabaseStage<T> extends DatabaseStageWithEntry<T> {

    Mono<T> get(@Nullable CompositeSnapshot snapshot);
    ForkJoinPool getDbReadPool();
    ForkJoinPool getDbWritePool();

    default Mono<T> getOrDefault(@Nullable CompositeSnapshot snapshot,
            Mono<T> defaultValue,
            boolean existsAlmostCertainly) {
        return get(snapshot).switchIfEmpty(defaultValue).single();
    @Nullable T get(@Nullable CompositeSnapshot snapshot);

    default T getOrDefault(@Nullable CompositeSnapshot snapshot, T defaultValue, boolean existsAlmostCertainly) {
        return Objects.requireNonNullElse(get(snapshot), defaultValue);
    }

    default Mono<T> getOrDefault(@Nullable CompositeSnapshot snapshot, Mono<T> defaultValue) {
    default T getOrDefault(@Nullable CompositeSnapshot snapshot, T defaultValue) {
        return getOrDefault(snapshot, defaultValue, false);
    }

    default Mono<Void> set(T value) {
        return this
                .setAndGetChanged(value)
                .then();
    default void set(@Nullable T value) {
        this.setAndGetChanged(value);
    }

    Mono<T> setAndGetPrevious(T value);
    @Nullable T setAndGetPrevious(@Nullable T value);

    default Mono<Boolean> setAndGetChanged(T value) {
        return this
                .setAndGetPrevious(value)
                .map(oldValue -> !Objects.equals(oldValue, value))
                .switchIfEmpty(Mono.fromSupplier(() -> value != null));
    default boolean setAndGetChanged(@Nullable T value) {
        T oldValue = this.setAndGetPrevious(value);
        if (oldValue != null) {
            return !Objects.equals(oldValue, value);
        } else {
            return value != null;
        }
    }

    default Mono<T> update(SerializationFunction<@Nullable T, @Nullable T> updater,
            UpdateReturnMode updateReturnMode) {
        return this
                .updateAndGetDelta(updater)
                .transform(prev -> LLUtils.resolveDelta(prev, updateReturnMode));
    default @Nullable T update(SerializationFunction<@Nullable T, @Nullable T> updater, UpdateReturnMode updateReturnMode) {
        return LLUtils.resolveDelta(this.updateAndGetDelta(updater), updateReturnMode);
    }

    Mono<Delta<T>> updateAndGetDelta(SerializationFunction<@Nullable T, @Nullable T> updater);
    Delta<T> updateAndGetDelta(SerializationFunction<@Nullable T, @Nullable T> updater);

    default Mono<Void> clear() {
        return clearAndGetStatus().then();
    default void clear() {
        clearAndGetStatus();
    }

    Mono<T> clearAndGetPrevious();
    @Nullable T clearAndGetPrevious();

    default Mono<Boolean> clearAndGetStatus() {
        return clearAndGetPrevious().map(Objects::nonNull).defaultIfEmpty(false);
    default boolean clearAndGetStatus() {
        return clearAndGetPrevious() != null;
    }

    /**
@@ -66,11 +63,11 @@ public interface DatabaseStage<T> extends DatabaseStageWithEntry<T>, SafeCloseab
     * If it's a nested collection the count will include all the children recursively
     * @param fast true to return an approximate value
     */
    Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast);
    long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast);

    default Mono<Boolean> isEmpty(@Nullable CompositeSnapshot snapshot) {
        return leavesCount(snapshot, false).map(size -> size <= 0);
    default boolean isEmpty(@Nullable CompositeSnapshot snapshot) {
        return leavesCount(snapshot, false) <= 0;
    }

    Flux<BadBlock> badBlocks();
    Stream<DbProgress<SSTVerificationProgress>> verifyChecksum();
}
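The new default methods also make the change-detection semantics easy to see. The following standalone restatement of the setAndGetChanged logic is illustrative only; it mirrors the default implementation shown above rather than calling it:

import java.util.Objects;

final class SetAndGetChangedDemo {
    // "Changed" means: a value was created, removed, or replaced by a non-equal one.
    static boolean changed(Object oldValue, Object newValue) {
        if (oldValue != null) {
            return !Objects.equals(oldValue, newValue);
        }
        return newValue != null;
    }

    public static void main(String[] args) {
        System.out.println(changed(null, "a"));  // true: value created
        System.out.println(changed("a", "a"));   // false: unchanged
        System.out.println(changed("a", "b"));   // true: overwritten
        System.out.println(changed(null, null)); // false: still absent
    }
}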
@@ -1,9 +1,5 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.util.Resource;
import it.cavallium.dbengine.client.BadBlock;
import reactor.core.publisher.Flux;

public interface DatabaseStageEntry<U> extends DatabaseStage<U> {

    @Override
@@ -1,5 +1,9 @@
package it.cavallium.dbengine.database.collections;

import static it.cavallium.dbengine.utils.StreamUtils.collectOn;
import static it.cavallium.dbengine.utils.StreamUtils.count;
import static it.cavallium.dbengine.utils.StreamUtils.executing;

import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLUtils;
@@ -7,7 +11,6 @@ import it.cavallium.dbengine.database.SubStageEntry;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.KVSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
@@ -16,261 +19,234 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

@SuppressWarnings("unused")
public interface DatabaseStageMap<T, U, US extends DatabaseStage<U>> extends
        DatabaseStageEntry<Object2ObjectSortedMap<T, U>> {
public interface DatabaseStageMap<T, U, US extends DatabaseStage<U>> extends DatabaseStageEntry<Object2ObjectSortedMap<T, U>> {

    Mono<US> at(@Nullable CompositeSnapshot snapshot, T key);
    @NotNull US at(@Nullable CompositeSnapshot snapshot, T key);

    default Mono<Boolean> containsKey(@Nullable CompositeSnapshot snapshot, T key) {
        return Mono.usingWhen(this.at(snapshot, key),
                stage -> stage.isEmpty(snapshot).map(empty -> !empty),
                LLUtils::finalizeResource
        );
    default boolean containsKey(@Nullable CompositeSnapshot snapshot, T key) {
        return !this.at(snapshot, key).isEmpty(snapshot);
    }

    default Mono<U> getValue(@Nullable CompositeSnapshot snapshot, T key) {
        return Mono.usingWhen(this.at(snapshot, key),
                stage -> stage.get(snapshot),
                LLUtils::finalizeResource
        );
    default @Nullable U getValue(@Nullable CompositeSnapshot snapshot, T key) {
        return this.at(snapshot, key).get(snapshot);
    }

    default Mono<U> getValueOrDefault(@Nullable CompositeSnapshot snapshot, T key, Mono<U> defaultValue) {
        return getValue(snapshot, key).switchIfEmpty(defaultValue).single();
    default U getValueOrDefault(@Nullable CompositeSnapshot snapshot, T key, U defaultValue) {
        return Objects.requireNonNullElse(getValue(snapshot, key), defaultValue);
    }

    default Mono<Void> putValue(T key, U value) {
        return Mono.usingWhen(at(null, key).single(), stage -> stage.set(value), LLUtils::finalizeResource);
    default U getValueOrDefault(@Nullable CompositeSnapshot snapshot, T key, Supplier<U> defaultValue) {
        return Objects.requireNonNullElseGet(getValue(snapshot, key), defaultValue);
    }

    default void putValue(T key, U value) {
        at(null, key).set(value);
    }

    UpdateMode getUpdateMode();

    default Mono<U> updateValue(T key,
    default U updateValue(T key,
            UpdateReturnMode updateReturnMode,
            SerializationFunction<@Nullable U, @Nullable U> updater) {
        return Mono.usingWhen(at(null, key).single(),
                stage -> stage.update(updater, updateReturnMode),
                LLUtils::finalizeResource
        );
        return at(null, key).update(updater, updateReturnMode);
    }

    default Flux<Boolean> updateMulti(Flux<T> keys, KVSerializationFunction<T, @Nullable U, @Nullable U> updater) {
        return keys.flatMapSequential(key -> this.updateValue(key, prevValue -> updater.apply(key, prevValue)));
    default Stream<Boolean> updateMulti(Stream<T> keys, KVSerializationFunction<T, @Nullable U, @Nullable U> updater) {
        return keys.map(key -> this.updateValue(key, prevValue -> updater.apply(key, prevValue)));
    }

    default Mono<Boolean> updateValue(T key, SerializationFunction<@Nullable U, @Nullable U> updater) {
        return updateValueAndGetDelta(key, updater).map(delta -> LLUtils.isDeltaChanged(delta)).single();
    default boolean updateValue(T key, SerializationFunction<@Nullable U, @Nullable U> updater) {
        return LLUtils.isDeltaChanged(updateValueAndGetDelta(key, updater));
    }

    default Mono<Delta<U>> updateValueAndGetDelta(T key,
            SerializationFunction<@Nullable U, @Nullable U> updater) {
        var stageMono = this.at(null, key).single();
        return stageMono.flatMap(stage -> stage
                .updateAndGetDelta(updater)
                .doFinally(s -> stage.close()));
    default Delta<U> updateValueAndGetDelta(T key, SerializationFunction<@Nullable U, @Nullable U> updater) {
        return this.at(null, key).updateAndGetDelta(updater);
    }

    default Mono<U> putValueAndGetPrevious(T key, U value) {
        return Mono.usingWhen(at(null, key).single(),
                stage -> stage.setAndGetPrevious(value),
                LLUtils::finalizeResource
        );
    default @Nullable U putValueAndGetPrevious(T key, @Nullable U value) {
        return at(null, key).setAndGetPrevious(value);
    }

    /**
     * @return true if the key was associated with any value, false if the key didn't exist.
     */
    default Mono<Boolean> putValueAndGetChanged(T key, U value) {
        return Mono
                .usingWhen(at(null, key).single(), stage -> stage.setAndGetChanged(value), LLUtils::finalizeResource)
                .single();
    default boolean putValueAndGetChanged(T key, @Nullable U value) {
        return at(null, key).setAndGetChanged(value);
    }

    default Mono<Void> remove(T key) {
        return removeAndGetStatus(key).then();
    default void remove(T key) {
        removeAndGetStatus(key);
    }

    default Mono<U> removeAndGetPrevious(T key) {
        return Mono.usingWhen(at(null, key), us -> us.clearAndGetPrevious(), LLUtils::finalizeResource);
    default @Nullable U removeAndGetPrevious(T key) {
        return at(null, key).clearAndGetPrevious();
    }

    default Mono<Boolean> removeAndGetStatus(T key) {
        return removeAndGetPrevious(key).map(o -> true).defaultIfEmpty(false);
    default boolean removeAndGetStatus(T key) {
        return removeAndGetPrevious(key) != null;
    }

    /**
     * GetMulti must return the elements in sequence!
     */
    default Flux<Optional<U>> getMulti(@Nullable CompositeSnapshot snapshot, Flux<T> keys) {
        return keys.flatMapSequential(key -> this
                .getValue(snapshot, key)
                .map(Optional::of)
                .defaultIfEmpty(Optional.empty())
        );
    default Stream<Optional<U>> getMulti(@Nullable CompositeSnapshot snapshot, Stream<T> keys) {
        return keys.map(key -> Optional.ofNullable(this.getValue(snapshot, key)));
    }

    default Mono<Void> putMulti(Flux<Entry<T, U>> entries) {
        return entries.flatMap(entry -> this.putValue(entry.getKey(), entry.getValue())).then();
    default void putMulti(Stream<Entry<T, U>> entries) {
        collectOn(getDbWritePool(), entries, executing(entry -> this.putValue(entry.getKey(), entry.getValue())));
    }

    Flux<SubStageEntry<T, US>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange);
    Stream<SubStageEntry<T, US>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange);

    default Flux<Entry<T, U>> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
    default Stream<Entry<T, U>> getAllEntries(@Nullable CompositeSnapshot snapshot,
            boolean smallRange) {
        return this.getAllStages(snapshot, smallRange).map(stage -> {
            var val = stage.getValue().get(snapshot);
            return val != null ? Map.entry(stage.getKey(), val) : null;
        }).filter(Objects::nonNull);
    }

    default Stream<T> getAllKeys(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
        return this
                .getAllStages(snapshot, smallRange)
                .flatMapSequential(stage -> stage
                        .getValue()
                        .get(snapshot)
                        .map(value -> Map.entry(stage.getKey(), value))
                        .doFinally(s -> stage.getValue().close())
                );
                .map(SubStageEntry::getKey)
                .filter(Objects::nonNull);
    }

    default Mono<Void> setAllValues(Flux<Entry<T, U>> entries) {
        return setAllValuesAndGetPrevious(entries).then();
    default Stream<U> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) {
        return this
                .getAllEntries(snapshot, smallRange)
                .map(Entry::getValue)
                .filter(Objects::nonNull);
    }

    Flux<Entry<T, U>> setAllValuesAndGetPrevious(Flux<Entry<T, U>> entries);

    default Mono<Void> clear() {
        return setAllValues(Flux.empty());
    default void setAllEntries(Stream<Entry<T, U>> entries) {
        setAllEntriesAndGetPrevious(entries).close();
    }

    default Mono<Void> replaceAllValues(boolean canKeysChange,
            Function<Entry<T, U>, Mono<Entry<T, U>>> entriesReplacer,
    Stream<Entry<T, U>> setAllEntriesAndGetPrevious(Stream<Entry<T, U>> entries);

    default void clear() {
        setAllEntries(Stream.empty());
    }

    default void replaceAllEntries(boolean canKeysChange,
            Function<Entry<T, U>, @NotNull Entry<T, U>> entriesReplacer,
            boolean smallRange) {
        if (canKeysChange) {
            return this.setAllValues(this.getAllValues(null, smallRange).flatMap(entriesReplacer)).then();
            try (var entries = this.getAllEntries(null, smallRange)) {
                this.setAllEntries(entries.map(entriesReplacer));
            }
        } else {
            return this
                    .getAllValues(null, smallRange)
                    .flatMap(entriesReplacer)
                    .flatMap(replacedEntry -> this
                            .at(null, replacedEntry.getKey())
                            .flatMap(stage -> stage
                                    .set(replacedEntry.getValue())
                                    .doFinally(s -> stage.close())
                            )
                    )
                    .then();
            collectOn(getDbWritePool(),
                    this.getAllEntries(null, smallRange).map(entriesReplacer),
                    executing(replacedEntry -> this.at(null, replacedEntry.getKey()).set(replacedEntry.getValue()))
            );
        }
    }

    default Mono<Void> replaceAll(Function<Entry<T, US>, Mono<Void>> entriesReplacer) {
        return this
                .getAllStages(null, false)
                .flatMap(stage -> entriesReplacer.apply(stage)
                        .doFinally(s -> stage.getValue().close())
                )
                .then();
    default void replaceAll(Consumer<Entry<T, US>> entriesReplacer) {
        collectOn(getDbWritePool(), this.getAllStages(null, false), executing(entriesReplacer));
    }

    @Override
    default Mono<Object2ObjectSortedMap<T, U>> setAndGetPrevious(Object2ObjectSortedMap<T, U> value) {
        return this
                .setAllValuesAndGetPrevious(Flux.fromIterable(value.entrySet()))
                .collectMap(Entry::getKey, Entry::getValue, Object2ObjectLinkedOpenHashMap::new)
                .map(map -> (Object2ObjectSortedMap<T, U>) map)
                .filter(map -> !map.isEmpty());
    default Object2ObjectSortedMap<T, U> setAndGetPrevious(Object2ObjectSortedMap<T, U> value) {
        Object2ObjectSortedMap<T, U> map;
        if (value == null) {
            map = this.clearAndGetPrevious();
        } else {
            try (var stream = this.setAllEntriesAndGetPrevious(value.entrySet().stream())) {
                map = stream.collect(Collectors.toMap(Entry::getKey,
                        Entry::getValue,
                        (a, b) -> a,
                        Object2ObjectLinkedOpenHashMap::new
                ));
            }
        }
        return map != null && map.isEmpty() ? null : map;
    }

    @Override
    default Mono<Boolean> setAndGetChanged(Object2ObjectSortedMap<T, U> value) {
        return this
                .setAndGetPrevious(value)
                .map(oldValue -> !Objects.equals(oldValue, value.isEmpty() ? null : value))
                .switchIfEmpty(Mono.fromSupplier(() -> !value.isEmpty()));
    default boolean setAndGetChanged(@Nullable Object2ObjectSortedMap<T, U> value) {
        if (value != null && value.isEmpty()) {
            value = null;
        }
        var prev = this.setAndGetPrevious(value);
        if (prev == null) {
            return value != null;
        } else {
            return !Objects.equals(prev, value);
        }
    }

    @Override
    default Mono<Delta<Object2ObjectSortedMap<T, U>>> updateAndGetDelta(SerializationFunction<@Nullable Object2ObjectSortedMap<T, U>, @Nullable Object2ObjectSortedMap<T, U>> updater) {
    default Delta<Object2ObjectSortedMap<T, U>> updateAndGetDelta(
            SerializationFunction<@Nullable Object2ObjectSortedMap<T, U>, @Nullable Object2ObjectSortedMap<T, U>> updater) {
        var updateMode = this.getUpdateMode();
        if (updateMode == UpdateMode.ALLOW_UNSAFE) {
            return this
                    .getAllValues(null, true)
                    .collectMap(Entry::getKey, Entry::getValue, Object2ObjectLinkedOpenHashMap::new)
                    .map(map -> (Object2ObjectSortedMap<T, U>) map)
                    .single()
                    .<Tuple2<Optional<Object2ObjectSortedMap<T, U>>, Optional<Object2ObjectSortedMap<T, U>>>>handle((v, sink) -> {
                        if (v.isEmpty()) {
                            v = null;
                        }
                        try {
                            var result = updater.apply(v);
                            if (result != null && result.isEmpty()) {
                                result = null;
                            }
                            sink.next(Tuples.of(Optional.ofNullable(v), Optional.ofNullable(result)));
                        } catch (SerializationException ex) {
                            sink.error(ex);
                        }
                    })
                    .flatMap(result -> Mono
                            .justOrEmpty(result.getT2())
                            .flatMap(values -> this.setAllValues(Flux.fromIterable(values.entrySet())))
                            .thenReturn(new Delta<>(result.getT1().orElse(null), result.getT2().orElse(null)))
                    );
            Object2ObjectSortedMap<T, U> v;

            try (var stream = this.getAllEntries(null, true)) {
                v = stream
                        .collect(Collectors.toMap(Entry::getKey, Entry::getValue, (a, b) -> a, Object2ObjectLinkedOpenHashMap::new));
            }

            if (v.isEmpty()) {
                v = null;
            }

            var result = updater.apply(v);
            if (result != null && result.isEmpty()) {
                result = null;
            }
            this.setAllEntries(result != null ? result.entrySet().stream() : null);
            return new Delta<>(v, result);
        } else if (updateMode == UpdateMode.ALLOW) {
            return Mono.fromCallable(() -> {
                throw new UnsupportedOperationException("Maps can't be updated atomically");
            });
            throw new UnsupportedOperationException("Maps can't be updated atomically");
        } else if (updateMode == UpdateMode.DISALLOW) {
            return Mono.fromCallable(() -> {
                throw new UnsupportedOperationException("Map can't be updated because updates are disabled");
            });
            throw new UnsupportedOperationException("Map can't be updated because updates are disabled");
        } else {
            return Mono.fromCallable(() -> {
                throw new UnsupportedOperationException("Unknown update mode: " + updateMode);
            });
            throw new UnsupportedOperationException("Unknown update mode: " + updateMode);
        }
    }

    @Override
    default Mono<Object2ObjectSortedMap<T, U>> clearAndGetPrevious() {
    default Object2ObjectSortedMap<T, U> clearAndGetPrevious() {
        return this.setAndGetPrevious(Object2ObjectSortedMaps.emptyMap());
    }

    @Override
    default Mono<Object2ObjectSortedMap<T, U>> get(@Nullable CompositeSnapshot snapshot) {
        return this
                .getAllValues(snapshot, true)
                .collectMap(Entry::getKey, Entry::getValue, Object2ObjectLinkedOpenHashMap::new)
                .map(map -> (Object2ObjectSortedMap<T, U>) map)
                .filter(map -> !map.isEmpty());
    default Object2ObjectSortedMap<T, U> get(@Nullable CompositeSnapshot snapshot) {
        try (var stream = this.getAllEntries(snapshot, true)) {
            Object2ObjectSortedMap<T, U> map = stream
                    .collect(Collectors.toMap(Entry::getKey, Entry::getValue, (a, b) -> a, Object2ObjectLinkedOpenHashMap::new));
            return map.isEmpty() ? null : map;
        }
    }

    @Override
    default Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
        return this
                .getAllStages(snapshot, false)
                .doOnNext(stage -> stage.getValue().close())
                .count();
    default long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
        return count(this.getAllStages(snapshot, false));
    }

    /**
     * Value getter doesn't lock data. Please make sure to lock before getting data.
     */
    default ValueGetterBlocking<T, U> getDbValueGetter(@Nullable CompositeSnapshot snapshot) {
        return k -> getValue(snapshot, k).transform(LLUtils::handleDiscard).block();
        return k -> getValue(snapshot, k);
    }

    default ValueGetter<T, U> getAsyncDbValueGetter(@Nullable CompositeSnapshot snapshot) {
        return k -> getValue(snapshot, k);
    }

    default ValueTransformer<T, U> getAsyncDbValueTransformer(@Nullable CompositeSnapshot snapshot) {
        return keys -> {
            var sharedKeys = keys.publish().refCount(2);
            var values = DatabaseStageMap.this.getMulti(snapshot, sharedKeys);
            return Flux.zip(sharedKeys, values, Map::entry);
        };
    }
}
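Taken together, the rewritten DatabaseStageMap is an ordinary synchronous map API over streams. A short usage sketch, assuming a DatabaseStageMap<String, String, ?> named map built elsewhere (names and values are illustrative, not part of this diff):

map.putValue("k1", "v1");                               // was: putValue(...).then()
String v = map.getValueOrDefault(null, "k2", "absent"); // missing values become the default
boolean changed = map.updateValue("k1", old -> old == null ? "v1" : old + "!");
try (var entries = map.getAllEntries(null, false)) {    // entry streams should be closed
    entries.forEach(e -> System.out.println(e.getKey() + "=" + e.getValue()));
}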
@@ -1,9 +1,5 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.util.Resource;
import it.cavallium.dbengine.client.BadBlock;
import reactor.core.publisher.Mono;

public interface DatabaseStageWithEntry<T> {

    DatabaseStageEntry<T> entry();
@@ -1,16 +1,12 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Buffer;
import io.netty5.util.Send;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.LLDictionary;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;

public interface SubStageGetter<U, US extends DatabaseStage<U>> {

    Mono<US> subStage(LLDictionary dictionary,
            @Nullable CompositeSnapshot snapshot,
            Mono<Buffer> prefixKey);
    US subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, Buf prefixKey);

}
@@ -1,19 +1,13 @@
package it.cavallium.dbengine.database.collections;

import io.netty5.buffer.Buffer;
import io.netty5.util.Resource;
import io.netty5.util.Send;
import it.cavallium.buffer.Buf;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.BufSupplier;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import java.util.Map;
import java.util.function.Function;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Mono;

@SuppressWarnings("unused")
public class SubStageGetterHashMap<T, U, TH> implements
@@ -35,16 +29,16 @@ public class SubStageGetterHashMap<T, U, TH> implements
    }

    @Override
    public Mono<DatabaseMapDictionaryHashed<T, U, TH>> subStage(LLDictionary dictionary,
    public DatabaseMapDictionaryHashed<T, U, TH> subStage(LLDictionary dictionary,
            @Nullable CompositeSnapshot snapshot,
            Mono<Buffer> prefixKeyMono) {
        return prefixKeyMono.map(prefixKey -> DatabaseMapDictionaryHashed.tail(dictionary,
                BufSupplier.ofOwned(prefixKey),
            Buf prefixKey) {
        return DatabaseMapDictionaryHashed.tail(dictionary,
                prefixKey,
                keySerializer,
                valueSerializer,
                keyHashFunction,
                keyHashSerializer
        ));
        );
    }

    public int getKeyHashBinaryLength() {
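The same simplification reaches the sub-stage factories: subStage now takes the prefix key as a plain Buf and returns the sub-stage directly, instead of mapping a Mono<Buffer> into a Mono of the stage. A one-line sketch with hypothetical names for the getter, dictionary, and prefix key:

// Direct call, no subscription; a null snapshot reads the latest data.
var hashedStage = subStageGetterHashMap.subStage(dictionary, null, prefixKey);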
Some files were not shown because too many files have changed in this diff.