Compare commits

ae20421aae...11b1d800dc — 37 commits

Commits in this range (newest first):

11b1d800dc
bee5f5f189
4932c152f7
150bff9f09
47c3e31f75
310f94d473
a6320722a1
9ba2026265
b7172db2f9
60474f2a75
b4c610be08
1afa4c183a
b2bfd0f23a
1621bb73ca
df98b40884
b6d788d00a
54a239d369
e580f80384
34ff4bf9a2
b8b4b3c320
215ba242fd
4d46a32bb4
4e14ae77db
77f3e012ce
d4f6f1097b
a11ae8cebc
bcd18e72a9
d8f9907ed8
1ca62c5813
f30945fa70
b9faeb020e
974d8b774a
3594cb6b76
e7a201aab3
caefc8a2d3
90b968cff4
165e90636a

.gitignore (vendored) — 1 addition

@@ -5,3 +5,4 @@ tmp-rockserver-db/
 *.iml
 hs_err_pid*.log
 /dependency-reduced-pom.xml
+/debug/

pom.xml — 30 changed lines

@@ -10,16 +10,17 @@
 
     <properties>
         <maven.compiler.source>22</maven.compiler.source>
-        <maven.compiler.target>22</maven.compiler.target>
+        <maven.compiler.targe>22</maven.compiler.targe>
         <native.maven.plugin.version>0.9.28</native.maven.plugin.version>
-        <gestalt.version>0.25.3</gestalt.version>
-        <rocksdb.version>9.0.0</rocksdb.version>
+        <gestalt.version>0.32.2</gestalt.version>
+        <rocksdb.version>9.6.1</rocksdb.version>
         <slf4j.version>2.0.12</slf4j.version>
         <imageName>rockserver-core</imageName>
         <mainClass>it.cavallium.rockserver.core.Main</mainClass>
        <protobuf-plugin.version>0.6.1</protobuf-plugin.version>
         <protobuf.version>3.25.3</protobuf.version>
         <grpc.version>1.65.1</grpc.version>
+        <netty.version>4.1.100.Final</netty.version>
         <rainbowgum.version>0.7.0</rainbowgum.version>
     </properties>
 
@@ -35,6 +36,11 @@
             </snapshots>
             <url>https://oss.sonatype.org/content/repositories/snapshots</url>
         </repository>
+        <repository>
+            <id>maven_central</id>
+            <name>Maven Central</name>
+            <url>https://repo.maven.apache.org/maven2/</url>
+        </repository>
     </repositories>
 
     <distributionManagement>
@@ -81,7 +87,7 @@
         <dependency>
             <groupId>org.jetbrains</groupId>
             <artifactId>annotations</artifactId>
-            <version>24.0.1</version>
+            <version>24.1.0</version>
             <scope>compile</scope>
         </dependency>
         <dependency>
@@ -114,6 +120,17 @@
             <artifactId>grpc-netty</artifactId>
             <version>${grpc.version}</version>
         </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-transport-native-epoll</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-transport-native-epoll</artifactId>
+            <classifier>linux-x86_64</classifier>
+            <version>${netty.version}</version>
+        </dependency>
         <dependency>
             <groupId>io.netty</groupId>
             <artifactId>netty-tcnative-boringssl-static</artifactId>
@@ -129,6 +146,11 @@
             <artifactId>grpc-stub</artifactId>
             <version>${grpc.version}</version>
         </dependency>
+        <dependency>
+            <groupId>org.reactivestreams</groupId>
+            <artifactId>reactive-streams</artifactId>
+            <version>1.0.4</version>
+        </dependency>
 
         <dependency>
             <groupId>org.lz4</groupId>

module-info.java (first of two changed copies)

@@ -26,6 +26,10 @@ module rockserver.core {
     requires io.netty.codec.http;
     requires io.netty.codec;
     requires io.netty.codec.http2;
+    requires jdk.unsupported;
+    requires io.netty.transport.classes.epoll;
+    requires org.reactivestreams;
+    requires io.netty.transport.unix.common;
 
     exports it.cavallium.rockserver.core.client;
     exports it.cavallium.rockserver.core.common;

module-info.java (second of two changed copies)

@@ -24,6 +24,10 @@ module rockserver.core {
     requires io.netty.codec.http;
     requires io.netty.codec;
     requires io.netty.codec.http2;
+    requires jdk.unsupported;
+    requires io.netty.transport.classes.epoll;
+    requires org.reactivestreams;
+    requires io.netty.transport.unix.common;
 
     exports it.cavallium.rockserver.core.client;
     exports it.cavallium.rockserver.core.common;

src/main/java/it/cavallium/rockserver/core/Main.java

@@ -120,7 +120,9 @@ public class Main {
         CountDownLatch shutdownLatch = new CountDownLatch(1);
         Runtime.getRuntime().addShutdownHook(new Thread(shutdownLatch::countDown));
 
-        try (var _ = thriftServerBuilder.build(); var _ = grpcServerBuilder.build()) {
+        try (var thrift = thriftServerBuilder.build(); var grpc = grpcServerBuilder.build()) {
+            thrift.start();
+            grpc.start();
             shutdownLatch.await();
             LOG.info("Shutting down...");
         }
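
A note on this fix: `var _` is Java's unnamed variable (finalized in Java 22, which this project targets), so the old code built both servers as try-with-resources but kept no reference on which start() could be called; the process then blocked on the latch with two servers that were never serving. A minimal sketch of the corrected shape, with the builder and latch names taken from this diff:

    // Minimal sketch (names from this diff): binding the resources to real
    // names is what makes the start() calls possible before blocking.
    try (var thrift = thriftServerBuilder.build(); var grpc = grpcServerBuilder.build()) {
        thrift.start();        // with `var _` there was no reference to call this on
        grpc.start();
        shutdownLatch.await(); // wait for the shutdown hook to fire
        LOG.info("Shutting down...");
    }                          // both servers are closed here, after the latch releases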

src/main/java/it/cavallium/rockserver/core/TestGrpcLoop.java (new file, 51 lines)

@@ -0,0 +1,51 @@
package it.cavallium.rockserver.core;

import it.cavallium.rockserver.core.client.ClientBuilder;
import it.cavallium.rockserver.core.client.EmbeddedConnection;
import it.cavallium.rockserver.core.client.RocksDBConnection;
import it.cavallium.rockserver.core.common.ColumnSchema;
import it.cavallium.rockserver.core.common.Keys;
import it.cavallium.rockserver.core.common.RequestType;
import it.cavallium.rockserver.core.common.Utils;
import it.cavallium.rockserver.core.impl.EmbeddedDB;
import it.cavallium.rockserver.core.server.GrpcServer;
import it.unimi.dsi.fastutil.ints.IntList;
import it.unimi.dsi.fastutil.objects.ObjectList;

import java.io.IOException;
import java.lang.foreign.Arena;
import java.lang.foreign.MemorySegment;
import java.net.InetSocketAddress;

public class TestGrpcLoop {
    public static void main(String[] args) throws IOException, InterruptedException {
        var embeddedDB = new EmbeddedConnection(null, "main", null);
        var server = new GrpcServer(embeddedDB, new InetSocketAddress("localhost", 12345));
        server.start();
        var clientB = new ClientBuilder();
        clientB.setHttpAddress(new Utils.HostAndPort("localhost", 12345));
        clientB.setName("local");
        clientB.setUseThrift(false);
        var client = clientB.build();
        var col = client.getSyncApi().createColumn("test", ColumnSchema.of(IntList.of(15), ObjectList.of(), true));
        var parallelism = 4;
        for (int i = 0; i < parallelism; i++) {
            var t = new Thread(() -> {
                while (true) {
                    try (var arena = Arena.ofConfined()) {
                        var delta = client.getSyncApi().put(arena, 0, col,
                                new Keys(new MemorySegment[]{MemorySegment.ofArray(new byte[15])}),
                                MemorySegment.ofArray(new byte[15]),
                                RequestType.delta());
                    }
                }
            });
            t.setDaemon(true);
            t.setName("test-requests-thread-" + i);
            t.start();
            if (i + 1 == parallelism) {
                t.join();
            }
        }
    }
}

src/main/java/it/cavallium/rockserver/core/client/ClientBuilder.java

@@ -54,12 +54,12 @@ public class ClientBuilder {
         } else if (embeddedPath != null) {
             return new EmbeddedConnection(embeddedPath, name, embeddedConfig);
         } else if (unixAddress != null) {
-            throw new UnsupportedOperationException("Not implemented: unix socket");
+            return GrpcConnection.forPath(name, unixAddress.getPath());
         } else if (httpAddress != null) {
             if (useThrift) {
                 throw new UnsupportedOperationException("Not implemented: thrift http2 address");
             } else {
-                return new GrpcConnection(name, httpAddress);
+                return GrpcConnection.forHostAndPort(name, httpAddress);
             }
         } else if (iNetAddress != null) {
             throw new UnsupportedOperationException("Not implemented: inet address");
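
With both constructor calls replaced by factories, wiring a connection over TCP or over a unix socket becomes symmetric. A hedged usage sketch — the factory names and Utils.HostAndPort come from this diff, while the socket path is an invented example:

    import it.cavallium.rockserver.core.client.GrpcConnection;
    import it.cavallium.rockserver.core.common.Utils;
    import java.nio.file.Path;

    class ConnectionExamples {
        static void sketch() {
            // TCP: plaintext HTTP/2 channel over NIO (the constructor's else-branch).
            var overTcp = GrpcConnection.forHostAndPort("local",
                    new Utils.HostAndPort("localhost", 12345));

            // Unix domain socket: epoll-backed channel (needs the native epoll
            // transport added to pom.xml above). The path is hypothetical.
            var overUnix = GrpcConnection.forPath("local",
                    Path.of("/tmp/rockserver.sock"));
        }
    }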

src/main/java/it/cavallium/rockserver/core/client/EmbeddedConnection.java

@@ -1,15 +1,8 @@
 package it.cavallium.rockserver.core.client;
 
-import it.cavallium.rockserver.core.common.Keys;
-import it.cavallium.rockserver.core.common.RequestType;
+import it.cavallium.rockserver.core.common.*;
 import it.cavallium.rockserver.core.common.RequestType.RequestGet;
 import it.cavallium.rockserver.core.common.RequestType.RequestPut;
-import it.cavallium.rockserver.core.common.ColumnSchema;
-import it.cavallium.rockserver.core.common.RocksDBAPI;
-import it.cavallium.rockserver.core.common.RocksDBAPICommand;
-import it.cavallium.rockserver.core.common.RocksDBAsyncAPI;
-import it.cavallium.rockserver.core.common.RocksDBException;
-import it.cavallium.rockserver.core.common.RocksDBSyncAPI;
 import it.cavallium.rockserver.core.impl.EmbeddedDB;
 import java.io.IOException;
 import java.lang.foreign.Arena;
@@ -24,6 +17,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
+import org.reactivestreams.Publisher;
 
 public class EmbeddedConnection extends BaseConnection implements RocksDBAPI {
 
@@ -95,6 +89,10 @@ public class EmbeddedConnection extends BaseConnection implements RocksDBAPI {
 
     @Override
     public <R> CompletableFuture<R> requestAsync(RocksDBAPICommand<R> req) {
+        if (req instanceof RocksDBAPICommand.PutBatch putBatch) {
+            //noinspection unchecked
+            return (CompletableFuture<R>) this.putBatchAsync(putBatch.columnId(), putBatch.batchPublisher(), putBatch.mode());
+        }
         return CompletableFuture.supplyAsync(() -> req.handleSync(this), exeuctor);
     }
 
@@ -118,6 +116,20 @@ public class EmbeddedConnection extends BaseConnection implements RocksDBAPI {
         return db.putMulti(arena, transactionOrUpdateId, columnId, keys, values, requestType);
     }
 
+    @Override
+    public CompletableFuture<Void> putBatchAsync(long columnId,
+            @NotNull Publisher<@NotNull KVBatch> batchPublisher,
+            @NotNull PutBatchMode mode) throws RocksDBException {
+        return db.putBatchInternal(columnId, batchPublisher, mode);
+    }
+
+    @Override
+    public void putBatch(long columnId,
+            @NotNull Publisher<@NotNull KVBatch> batchPublisher,
+            @NotNull PutBatchMode mode) throws RocksDBException {
+        db.putBatch(columnId, batchPublisher, mode);
+    }
+
     @Override
     public <T> T get(Arena arena,
             long transactionOrUpdateId,

src/main/java/it/cavallium/rockserver/core/client/GrpcConnection.java

@@ -10,66 +10,46 @@ import com.google.protobuf.Empty;
 import com.google.protobuf.UnsafeByteOperations;
 import io.grpc.ManagedChannel;
-import io.grpc.ManagedChannelBuilder;
 import io.grpc.Status;
-import io.grpc.stub.StreamObserver;
+import io.grpc.netty.NettyChannelBuilder;
+import io.grpc.stub.*;
+import io.netty.channel.epoll.EpollDomainSocketChannel;
+import io.netty.channel.epoll.EpollEventLoopGroup;
+import io.netty.channel.epoll.EpollServerDomainSocketChannel;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.socket.nio.NioServerSocketChannel;
+import io.netty.channel.socket.nio.NioSocketChannel;
+import io.netty.channel.unix.DomainSocketAddress;
+import it.cavallium.rockserver.core.common.*;
-import it.cavallium.rockserver.core.common.ColumnSchema;
-import it.cavallium.rockserver.core.common.Keys;
-import it.cavallium.rockserver.core.common.RequestType;
+import it.cavallium.rockserver.core.common.KVBatch;
+import it.cavallium.rockserver.core.common.PutBatchMode;
 import it.cavallium.rockserver.core.common.RequestType.RequestChanged;
 import it.cavallium.rockserver.core.common.RequestType.RequestCurrent;
 import it.cavallium.rockserver.core.common.RequestType.RequestDelta;
 import it.cavallium.rockserver.core.common.RequestType.RequestExists;
 import it.cavallium.rockserver.core.common.RequestType.RequestForUpdate;
 import it.cavallium.rockserver.core.common.RequestType.RequestGet;
 import it.cavallium.rockserver.core.common.RequestType.RequestMulti;
 import it.cavallium.rockserver.core.common.RequestType.RequestNothing;
 import it.cavallium.rockserver.core.common.RequestType.RequestPrevious;
 import it.cavallium.rockserver.core.common.RequestType.RequestPreviousPresence;
 import it.cavallium.rockserver.core.common.RequestType.RequestPut;
-import it.cavallium.rockserver.core.common.RocksDBAPI;
-import it.cavallium.rockserver.core.common.RocksDBAPICommand;
-import it.cavallium.rockserver.core.common.RocksDBAsyncAPI;
-import it.cavallium.rockserver.core.common.RocksDBException;
-import it.cavallium.rockserver.core.common.RocksDBSyncAPI;
-import it.cavallium.rockserver.core.common.UpdateContext;
 import it.cavallium.rockserver.core.common.Utils.HostAndPort;
-import it.cavallium.rockserver.core.common.api.proto.Changed;
-import it.cavallium.rockserver.core.common.api.proto.CloseFailedUpdateRequest;
-import it.cavallium.rockserver.core.common.api.proto.CloseIteratorRequest;
-import it.cavallium.rockserver.core.common.api.proto.CloseTransactionRequest;
-import it.cavallium.rockserver.core.common.api.proto.CloseTransactionResponse;
+import it.cavallium.rockserver.core.common.api.proto.*;
-import it.cavallium.rockserver.core.common.api.proto.ColumnHashType;
-import it.cavallium.rockserver.core.common.api.proto.CreateColumnRequest;
-import it.cavallium.rockserver.core.common.api.proto.CreateColumnResponse;
-import it.cavallium.rockserver.core.common.api.proto.DeleteColumnRequest;
-import it.cavallium.rockserver.core.common.api.proto.Delta;
-import it.cavallium.rockserver.core.common.api.proto.GetColumnIdRequest;
-import it.cavallium.rockserver.core.common.api.proto.GetColumnIdResponse;
-import it.cavallium.rockserver.core.common.api.proto.GetRequest;
-import it.cavallium.rockserver.core.common.api.proto.GetResponse;
-import it.cavallium.rockserver.core.common.api.proto.KV;
-import it.cavallium.rockserver.core.common.api.proto.OpenIteratorRequest;
-import it.cavallium.rockserver.core.common.api.proto.OpenIteratorResponse;
-import it.cavallium.rockserver.core.common.api.proto.OpenTransactionRequest;
-import it.cavallium.rockserver.core.common.api.proto.OpenTransactionResponse;
-import it.cavallium.rockserver.core.common.api.proto.Previous;
-import it.cavallium.rockserver.core.common.api.proto.PreviousPresence;
-import it.cavallium.rockserver.core.common.api.proto.PutMultiInitialRequest;
-import it.cavallium.rockserver.core.common.api.proto.PutMultiRequest;
-import it.cavallium.rockserver.core.common.api.proto.PutRequest;
 import it.cavallium.rockserver.core.common.api.proto.RocksDBServiceGrpc;
 import it.cavallium.rockserver.core.common.api.proto.RocksDBServiceGrpc.RocksDBServiceBlockingStub;
 import it.cavallium.rockserver.core.common.api.proto.RocksDBServiceGrpc.RocksDBServiceFutureStub;
 import it.cavallium.rockserver.core.common.api.proto.RocksDBServiceGrpc.RocksDBServiceStub;
-import it.cavallium.rockserver.core.common.api.proto.SeekToRequest;
-import it.cavallium.rockserver.core.common.api.proto.SubsequentRequest;
 import it.unimi.dsi.fastutil.ints.IntArrayList;
 import java.io.IOException;
 import java.lang.foreign.Arena;
 import java.lang.foreign.MemorySegment;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
 import java.net.URI;
+import java.net.UnixDomainSocketAddress;
+import java.nio.file.Path;
 import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.Executor;
@@ -77,6 +57,9 @@ import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscriber;
+import org.reactivestreams.Subscription;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -90,16 +73,48 @@ public class GrpcConnection extends BaseConnection implements RocksDBAPI {
     private final RocksDBServiceFutureStub futureStub;
     private final URI address;
 
-    public GrpcConnection(String name, HostAndPort address) {
+    private GrpcConnection(String name, SocketAddress socketAddress, URI address) {
         super(name);
-        var channelBuilder = ManagedChannelBuilder
-                .forAddress(address.host(), address.port())
+        NettyChannelBuilder channelBuilder;
+        if (socketAddress instanceof InetSocketAddress inetSocketAddress) {
+            channelBuilder = NettyChannelBuilder
+                    .forAddress(inetSocketAddress.getHostString(), inetSocketAddress.getPort());
+        } else {
+            channelBuilder = NettyChannelBuilder
+                    .forAddress(socketAddress);
+        }
+
+        channelBuilder
                 .directExecutor()
                 .usePlaintext();
+        if (socketAddress instanceof DomainSocketAddress _) {
+            channelBuilder
+                    .eventLoopGroup(new EpollEventLoopGroup(Runtime.getRuntime().availableProcessors() * 2))
+                    .channelType(EpollDomainSocketChannel.class);
+        } else {
+            channelBuilder
+                    .eventLoopGroup(new NioEventLoopGroup(Runtime.getRuntime().availableProcessors() * 2))
+                    .channelType(NioSocketChannel.class);
+        }
         this.channel = channelBuilder.build();
         this.blockingStub = RocksDBServiceGrpc.newBlockingStub(channel);
         this.asyncStub = RocksDBServiceGrpc.newStub(channel);
         this.futureStub = RocksDBServiceGrpc.newFutureStub(channel);
-        this.address = URI.create("http://" + address.host() + ":" + address.port());
+        this.address = address;
     }
 
+    public static GrpcConnection forHostAndPort(String name, HostAndPort address) {
+        return new GrpcConnection(name,
+                new InetSocketAddress(address.host(), address.port()),
+                URI.create("http://" + address.host() + ":" + address.port())
+        );
+    }
+
+    public static GrpcConnection forPath(String name, Path unixSocketPath) {
+        return new GrpcConnection(name,
+                new DomainSocketAddress(unixSocketPath.toFile()),
+                URI.create("unix://" + unixSocketPath)
+        );
+    }
+
     @Override
@@ -119,7 +134,7 @@ public class GrpcConnection extends BaseConnection implements RocksDBAPI {
 
     @Override
     public <R> R requestSync(RocksDBAPICommand<R> req) {
-        var asyncResponse = requestAsync(req);
+        var asyncResponse = req.handleAsync(this);
         return asyncResponse
                 .toCompletableFuture()
                 .join();
@@ -214,6 +229,25 @@ public class GrpcConnection extends BaseConnection implements RocksDBAPI {
                     + count + " != " + allValues.size());
         }
 
+        CompletableFuture<List<T>> responseObserver;
+
+        if (requestType instanceof RequestType.RequestNothing<?> && transactionOrUpdateId == 0L) {
+            return putBatchAsync(columnId, subscriber -> {
+                var sub = new Subscription() {
+                    @Override
+                    public void request(long l) {
+                    }
+
+                    @Override
+                    public void cancel() {
+
+                    }
+                };
+                subscriber.onSubscribe(sub);
+                subscriber.onNext(new KVBatch(allKeys, allValues));
+            }, PutBatchMode.WRITE_BATCH).thenApply(_ -> List.of());
+        }
+
         var initialRequest = PutMultiRequest.newBuilder()
                 .setInitialRequest(PutMultiInitialRequest.newBuilder()
                         .setTransactionOrUpdateId(transactionOrUpdateId)
@@ -221,8 +255,6 @@ public class GrpcConnection extends BaseConnection implements RocksDBAPI {
                         .build())
                 .build();
 
-        CompletableFuture<List<T>> responseObserver;
-
         StreamObserver<PutMultiRequest> requestPublisher = switch (requestType) {
             case RequestNothing<?> _ -> {
                 var thisResponseObserver = new CollectListStreamObserver<Empty>(0);
@@ -273,6 +305,118 @@ public class GrpcConnection extends BaseConnection implements RocksDBAPI {
         return responseObserver;
     }
 
+    @Override
+    public CompletableFuture<Void> putBatchAsync(long columnId,
+            @NotNull Publisher<@NotNull KVBatch> batchPublisher,
+            @NotNull PutBatchMode mode) throws RocksDBException {
+        var cf = new CompletableFuture<Void>();
+        var responseobserver = new ClientResponseObserver<PutBatchRequest, Empty>() {
+            private ClientCallStreamObserver<PutBatchRequest> requestStream;
+            private Subscription subscription;
+            private int sendingRequests = 0;
+
+            @Override
+            public void beforeStart(ClientCallStreamObserver<PutBatchRequest> requestStream) {
+                this.requestStream = requestStream;
+                // Set up manual flow control for the response stream. It feels backwards to configure the response
+                // stream's flow control using the request stream's observer, but this is the way it is.
+                requestStream.disableAutoRequestWithInitial(1);
+
+                var subscriber = new Subscriber<KVBatch>() {
+                    private volatile boolean finalized;
+
+                    @Override
+                    public void onSubscribe(Subscription subscription2) {
+                        subscription = subscription2;
+                    }
+
+                    @Override
+                    public void onNext(KVBatch batch) {
+                        sendingRequests--;
+                        var request = PutBatchRequest.newBuilder();
+                        request.setData(mapKVBatch(batch));
+                        requestStream.onNext(request.build());
+                        request = null;
+                        batch = null;
+                        if (requestStream.isReady()) {
+                            if (sendingRequests == 0) {
+                                sendingRequests++;
+                                subscription.request(1);
+                            }
+                        }
+                    }
+
+                    @Override
+                    public void onError(Throwable throwable) {
+                        this.finalized = true;
+                        requestStream.onError(throwable);
+                    }
+
+                    @Override
+                    public void onComplete() {
+                        this.finalized = true;
+                        requestStream.onCompleted();
+                    }
+                };
+
+
+                batchPublisher.subscribe(subscriber);
+
+                // Set up a back-pressure-aware producer for the request stream. The onReadyHandler will be invoked
+                // when the consuming side has enough buffer space to receive more messages.
+                //
+                // Messages are serialized into a transport-specific transmit buffer. Depending on the size of this buffer,
+                // MANY messages may be buffered, however, they haven't yet been sent to the server. The server must call
+                // request() to pull a buffered message from the client.
+                //
+                // Note: the onReadyHandler's invocation is serialized on the same thread pool as the incoming
+                // StreamObserver's onNext(), onError(), and onComplete() handlers. Blocking the onReadyHandler will prevent
+                // additional messages from being processed by the incoming StreamObserver. The onReadyHandler must return
+                // in a timely manner or else message processing throughput will suffer.
+                requestStream.setOnReadyHandler(new Runnable() {
+
+                    @Override
+                    public void run() {
+                        if (sendingRequests == 0) {
+                            // Start generating values from where we left off on a non-gRPC thread.
+                            sendingRequests++;
+                            subscription.request(1);
+                        }
+                    }
+                });
+            }
+
+            @Override
+            public void onNext(Empty empty) {}
+
+            @Override
+            public void onError(Throwable throwable) {
+                cf.completeExceptionally(throwable);
+            }
+
+            @Override
+            public void onCompleted() {
+                cf.complete(null);
+            }
+        };
+
+        var requestStream = asyncStub.putBatch(responseobserver);
+
+        requestStream.onNext(PutBatchRequest.newBuilder()
+                .setInitialRequest(PutBatchInitialRequest.newBuilder()
+                        .setColumnId(columnId)
+                        .setMode(switch (mode) {
+                            case WRITE_BATCH -> it.cavallium.rockserver.core.common.api.proto.PutBatchMode.WRITE_BATCH;
+                            case WRITE_BATCH_NO_WAL -> it.cavallium.rockserver.core.common.api.proto.PutBatchMode.WRITE_BATCH_NO_WAL;
+                            case SST_INGESTION -> it.cavallium.rockserver.core.common.api.proto.PutBatchMode.SST_INGESTION;
+                            case SST_INGEST_BEHIND -> it.cavallium.rockserver.core.common.api.proto.PutBatchMode.SST_INGEST_BEHIND;
+                        })
+                        .build())
+                .build());
+
+        return cf;
+    }
+
     @SuppressWarnings("unchecked")
     @Override
     public <T> CompletableFuture<T> getAsync(Arena arena,
@@ -377,6 +521,33 @@ public class GrpcConnection extends BaseConnection implements RocksDBAPI {
         return data != null ? MemorySegment.ofBuffer(data.asReadOnlyByteBuffer()) : null;
     }
 
+    private static it.cavallium.rockserver.core.common.api.proto.KVBatch mapKVBatch(@NotNull KVBatch kvBatch) {
+        return it.cavallium.rockserver.core.common.api.proto.KVBatch.newBuilder()
+                .addAllEntries(mapKVList(kvBatch.keys(), kvBatch.values()))
+                .build();
+    }
+
+    private static Iterable<KV> mapKVList(@NotNull List<Keys> keys, @NotNull List<MemorySegment> values) {
+        return new Iterable<>() {
+            @Override
+            public @NotNull Iterator<KV> iterator() {
+                var it1 = keys.iterator();
+                var it2 = values.iterator();
+                return new Iterator<>() {
+                    @Override
+                    public boolean hasNext() {
+                        return it1.hasNext();
+                    }
+
+                    @Override
+                    public KV next() {
+                        return mapKV(it1.next(), it2.next());
+                    }
+                };
+            }
+        };
+    }
+
     private static KV mapKV(@NotNull Keys keys, @NotNull MemorySegment value) {
         return KV.newBuilder()
                 .addAllKeys(mapKeys(keys))
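
A note on the requestSync change above: every RocksDBAPICommand carries its own dispatch (handleSync/handleAsync), so the synchronous path can be expressed as "run the async handler and join" without going back through the connection's requestAsync override. The pattern, reduced to its core (a sketch, not the full class):

    // Sketch of the command-pattern dispatch this PR leans on: the command
    // knows how to apply itself to either API flavor, so sync = async + join.
    public <R> R requestSync(RocksDBAPICommand<R> req) {
        return req.handleAsync(this)   // dispatch on the command, not the connection
                .toCompletableFuture()
                .join();               // block the caller until the gRPC call finishes
    }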

src/main/java/it/cavallium/rockserver/core/client/LoggingClient.java

@@ -56,6 +56,7 @@ public class LoggingClient implements RocksDBConnection {
 
     @Override
     public <R> R requestSync(RocksDBAPICommand<R> req) {
+        logger.trace("Request input (sync): {}", req);
         R result;
         try {
             result = syncApi.requestSync(req);
@@ -78,6 +79,7 @@ public class LoggingClient implements RocksDBConnection {
 
     @Override
     public <R> CompletableFuture<R> requestAsync(RocksDBAPICommand<R> req) {
+        logger.trace("Request input (async): {}", req);
         return asyncApi.requestAsync(req).whenComplete((result, e) -> {
             if (e != null) {
                 logger.trace("Request failed: {} Error: {}", req, e.getMessage());

src/main/java/it/cavallium/rockserver/core/common/KVBatch.java (new file, 10 lines)

@@ -0,0 +1,10 @@
package it.cavallium.rockserver.core.common;

import org.jetbrains.annotations.NotNull;

import java.lang.foreign.Arena;
import java.lang.foreign.MemorySegment;
import java.util.List;

public record KVBatch(@NotNull List<@NotNull Keys> keys, @NotNull List<@NotNull MemorySegment> values) {
}
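
KVBatch pairs each composite key with its value by position: keys.get(i) belongs to values.get(i). A small construction sketch using only types visible in this PR — a single entry whose key has one 15-byte part, mirroring the ColumnSchema.of(IntList.of(15), ...) column used in TestGrpcLoop:

    import it.cavallium.rockserver.core.common.KVBatch;
    import it.cavallium.rockserver.core.common.Keys;
    import java.lang.foreign.MemorySegment;
    import java.util.List;

    class KVBatchExample {
        static KVBatch oneEntry() {
            // One composite key with a single 15-byte segment.
            var keys = List.of(new Keys(new MemorySegment[]{MemorySegment.ofArray(new byte[15])}));
            var values = List.of(MemorySegment.ofArray(new byte[15]));
            return new KVBatch(keys, values); // the two lists must be index-aligned
        }
    }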

src/main/java/it/cavallium/rockserver/core/common/PutBatchMode.java (new file, 12 lines)

@@ -0,0 +1,12 @@
package it.cavallium.rockserver.core.common;

public enum PutBatchMode {
    WRITE_BATCH,
    WRITE_BATCH_NO_WAL,
    SST_INGESTION,
    /**
     * Ingest an SST behind, skipping duplicate keys
     * and ingesting everything in the bottommost level
     */
    SST_INGEST_BEHIND
}
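
To see the enum in use, here is a hedged sketch of a one-shot putBatch call, modeled on the WRITE_BATCH shortcut this PR adds to GrpcConnection.putMultiAsync. The `client` handle and `columnId` are assumed to exist; unlike the inline shortcut (which relies on the server ending the call), this sketch also signals onComplete:

    import it.cavallium.rockserver.core.common.KVBatch;
    import it.cavallium.rockserver.core.common.PutBatchMode;
    import org.reactivestreams.Publisher;
    import org.reactivestreams.Subscription;

    class PutBatchExample {
        // Emits exactly one KVBatch and completes; it ignores back-pressure
        // because the single element is produced eagerly.
        static Publisher<KVBatch> oneShot(KVBatch batch) {
            return subscriber -> {
                subscriber.onSubscribe(new Subscription() {
                    @Override public void request(long n) {}
                    @Override public void cancel() {}
                });
                subscriber.onNext(batch);
                subscriber.onComplete();
            };
        }
    }
    // Usage (client and columnId are assumptions):
    //   client.getSyncApi().putBatch(columnId, oneShot(batch), PutBatchMode.WRITE_BATCH);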

src/main/java/it/cavallium/rockserver/core/common/RocksDBAPICommand.java

@@ -17,8 +17,10 @@ public sealed interface RocksDBAPICommand<R> {
 
     /**
      * Open a transaction
+     * <p>
      * Returns the transaction id
      *
      * @param timeoutMs timeout in milliseconds
+     * @return transaction id
      */
     record OpenTransaction(long timeoutMs) implements RocksDBAPICommand<Long> {
 
@@ -35,10 +37,11 @@ public sealed interface RocksDBAPICommand<R> {
     }
     /**
      * Close a transaction
+     * <p>
      * Returns true if committed, if false, you should try again
      *
      * @param transactionId transaction id to close
      * @param commit true to commit the transaction, false to rollback it
      * @return true if committed, if false, you should try again
      */
     record CloseTransaction(long transactionId, boolean commit) implements RocksDBAPICommand<Boolean> {
 
@@ -74,9 +77,11 @@ public sealed interface RocksDBAPICommand<R> {
     }
     /**
      * Create a column
+     * <p>
      * Returns the column id
      *
      * @param name column name
      * @param schema column key-value schema
+     * @return column id
      */
     record CreateColumn(String name, @NotNull ColumnSchema schema) implements RocksDBAPICommand<Long> {
 
@@ -111,8 +116,10 @@ public sealed interface RocksDBAPICommand<R> {
     }
     /**
      * Get column id by name
+     * <p>
      * Returns the column id
      *
      * @param name column name
+     * @return column id
      */
     record GetColumnId(@NotNull String name) implements RocksDBAPICommand<Long> {
 
@@ -176,9 +183,9 @@ public sealed interface RocksDBAPICommand<R> {
      * @param requestType the request type determines which type of data will be returned.
      */
     record PutMulti<T>(Arena arena, long transactionOrUpdateId, long columnId,
-                    @NotNull List<Keys> keys,
-                    @NotNull List<@NotNull MemorySegment> values,
-                    RequestPut<? super MemorySegment, T> requestType) implements RocksDBAPICommand<List<T>> {
+            @NotNull List<Keys> keys,
+            @NotNull List<@NotNull MemorySegment> values,
+            RequestPut<? super MemorySegment, T> requestType) implements RocksDBAPICommand<List<T>> {
 
         @Override
         public List<T> handleSync(RocksDBSyncAPI api) {
@@ -208,6 +215,36 @@ public sealed interface RocksDBAPICommand<R> {
             return sb.toString();
         }
     }
+    /**
+     * Put multiple elements into the specified positions
+     * @param columnId column id
+     * @param batchPublisher publisher of batches of keys and values
+     * @param mode put batch mode
+     */
+    record PutBatch(long columnId,
+            @NotNull org.reactivestreams.Publisher<@NotNull KVBatch> batchPublisher,
+            @NotNull PutBatchMode mode) implements RocksDBAPICommand<Void> {
+
+        @Override
+        public Void handleSync(RocksDBSyncAPI api) {
+            api.putBatch(columnId, batchPublisher, mode);
+            return null;
+        }
+
+        @Override
+        public CompletionStage<Void> handleAsync(RocksDBAsyncAPI api) {
+            return api.putBatchAsync(columnId, batchPublisher, mode);
+        }
+
+        @Override
+        public String toString() {
+            var sb = new StringBuilder("PUT_BATCH");
+            sb.append(" column:").append(columnId);
+            sb.append(" mode:").append(mode);
+            sb.append(" batch:[...]");
+            return sb.toString();
+        }
+    }
     /**
      * Get an element from the specified position
      * @param arena arena
@@ -246,6 +283,9 @@ public sealed interface RocksDBAPICommand<R> {
     }
     /**
      * Open an iterator
+     * <p>
+     * Returns the iterator id
+     *
      * @param arena arena
      * @param transactionId transaction id, or 0
     * @param columnId column id
@@ -253,7 +293,6 @@ public sealed interface RocksDBAPICommand<R> {
      * @param endKeysExclusive end keys, exclusive. Null means "the end"
      * @param reverse if true, seek in reverse direction
      * @param timeoutMs timeout in milliseconds
-     * @return iterator id
      */
     record OpenIterator(Arena arena,
             long transactionId,

src/main/java/it/cavallium/rockserver/core/common/RocksDBAsyncAPI.java

@@ -13,6 +13,7 @@ import it.cavallium.rockserver.core.common.RocksDBAPICommand.OpenIterator;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.OpenTransaction;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.Put;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.PutMulti;
+import it.cavallium.rockserver.core.common.RocksDBAPICommand.PutBatch;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.SeekTo;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.Subsequent;
 import java.lang.foreign.Arena;
@@ -74,6 +75,13 @@ public interface RocksDBAsyncAPI extends RocksDBAsyncAPIRequestHandler {
         return requestAsync(new PutMulti<>(arena, transactionOrUpdateId, columnId, keys, values, requestType));
     }
 
+    /** See: {@link PutBatch}. */
+    default CompletableFuture<Void> putBatchAsync(long columnId,
+            @NotNull org.reactivestreams.Publisher<@NotNull KVBatch> batchPublisher,
+            @NotNull PutBatchMode mode) throws RocksDBException {
+        return requestAsync(new PutBatch(columnId, batchPublisher, mode));
+    }
+
     /** See: {@link Get}. */
     default <T> CompletableFuture<T> getAsync(Arena arena,
             long transactionOrUpdateId,

src/main/java/it/cavallium/rockserver/core/common/RocksDBException.java

@@ -25,7 +25,13 @@ public class RocksDBException extends RuntimeException {
         COMMIT_FAILED_TRY_AGAIN,
         COMMIT_FAILED,
         TX_NOT_FOUND,
-        KEY_HASH_SIZE_MISMATCH, RESTRICTED_TRANSACTION, PUT_INVALID_REQUEST, UPDATE_RETRY, ROCKSDB_LOAD_ERROR
+        KEY_HASH_SIZE_MISMATCH, RESTRICTED_TRANSACTION, PUT_INVALID_REQUEST, UPDATE_RETRY, ROCKSDB_LOAD_ERROR,
+        WRITE_BATCH_1,
+        SST_WRITE_1,
+        SST_WRITE_2,
+        SST_WRITE_3,
+        SST_WRITE_4,
+        SST_GET_SIZE_FAILED
     }
 
     public static RocksDBException of(RocksDBErrorType errorUniqueId, String message) {
@@ -33,7 +39,7 @@ public class RocksDBException extends RuntimeException {
     }
 
     public static RocksDBException of(RocksDBErrorType errorUniqueId, Throwable ex) {
-        if (ex instanceof RocksDBException e) {
+        if (ex instanceof org.rocksdb.RocksDBException e) {
             return new RocksDBException(errorUniqueId, e);
         } else {
             return new RocksDBException(errorUniqueId, ex);
@@ -41,7 +47,7 @@ public class RocksDBException extends RuntimeException {
     }
 
     public static RocksDBException of(RocksDBErrorType errorUniqueId, String message, Throwable ex) {
-        if (ex instanceof RocksDBException e) {
+        if (ex instanceof org.rocksdb.RocksDBException e) {
             return new RocksDBException(errorUniqueId, message, e);
         } else {
             return new RocksDBException(errorUniqueId, message, ex);
@@ -65,6 +71,7 @@ public class RocksDBException extends RuntimeException {
 
     protected RocksDBException(RocksDBErrorType errorUniqueId, org.rocksdb.RocksDBException ex) {
         this(errorUniqueId, ex.getMessage());
+        super.initCause(ex);
     }
 
     protected RocksDBException(RocksDBErrorType errorUniqueId, String message, org.rocksdb.RocksDBException ex) {
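
Two details in this file are easy to miss. Inside this class, the unqualified name RocksDBException resolves to the wrapper itself, so the old instanceof checks could never match RocksDB's own checked exception; qualifying it as org.rocksdb.RocksDBException routes low-level errors to the constructor that now also preserves them via initCause. A hedged sketch of the resulting behavior, assuming the RocksDBErrorType enum is a public nested type (the message string is invented):

    class RocksDBExceptionExample {
        static void sketch() {
            // Wrapping a low-level RocksDB error now keeps it as the cause.
            var lowLevel = new org.rocksdb.RocksDBException("sst write failed"); // invented message
            var wrapped = RocksDBException.of(RocksDBException.RocksDBErrorType.SST_WRITE_1, lowLevel);
            assert wrapped.getCause() == lowLevel; // preserved by the added super.initCause(ex)
        }
    }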

src/main/java/it/cavallium/rockserver/core/common/RocksDBSyncAPI.java

@@ -13,6 +13,7 @@ import it.cavallium.rockserver.core.common.RocksDBAPICommand.OpenIterator;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.OpenTransaction;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.Put;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.PutMulti;
+import it.cavallium.rockserver.core.common.RocksDBAPICommand.PutBatch;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.SeekTo;
 import it.cavallium.rockserver.core.common.RocksDBAPICommand.Subsequent;
 import java.lang.foreign.Arena;
@@ -73,6 +74,13 @@ public interface RocksDBSyncAPI extends RocksDBSyncAPIRequestHandler {
         return requestSync(new PutMulti<>(arena, transactionOrUpdateId, columnId, keys, values, requestType));
     }
 
+    /** See: {@link PutBatch}. */
+    default void putBatch(long columnId,
+            @NotNull org.reactivestreams.Publisher<@NotNull KVBatch> batchPublisher,
+            @NotNull PutBatchMode mode) throws RocksDBException {
+        requestSync(new PutBatch(columnId, batchPublisher, mode));
+    }
+
     /** See: {@link Get}. */
     default <T> T get(Arena arena,
             long transactionOrUpdateId,

src/main/java/it/cavallium/rockserver/core/common/api/ColumnHashType.java (deleted, 46 lines; Thrift-generated, all lines removed)

@@ -1,46 +0,0 @@
/**
 * Autogenerated by Thrift Compiler (0.20.0)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 * @generated
 */
package it.cavallium.rockserver.core.common.api;


public enum ColumnHashType implements org.apache.thrift.TEnum {
  XXHASH32(1),
  XXHASH8(2),
  ALLSAME8(3);

  private final int value;

  private ColumnHashType(int value) {
    this.value = value;
  }

  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  @Override
  public int getValue() {
    return value;
  }

  /**
   * Find a the enum type by its integer value, as defined in the Thrift IDL.
   * @return null if the value is not found.
   */
  @org.apache.thrift.annotation.Nullable
  public static ColumnHashType findByValue(int value) {
    switch (value) {
      case 1:
        return XXHASH32;
      case 2:
        return XXHASH8;
      case 3:
        return ALLSAME8;
      default:
        return null;
    }
  }
}

src/main/java/it/cavallium/rockserver/core/common/api/ColumnSchema.java (deleted, 701 lines; Thrift-generated, all lines removed; the capture ends partway through this file)

@@ -1,701 +0,0 @@
/**
 * Autogenerated by Thrift Compiler (0.20.0)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 * @generated
 */
package it.cavallium.rockserver.core.common.api;

@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
public class ColumnSchema implements org.apache.thrift.TBase<ColumnSchema, ColumnSchema._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnSchema> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnSchema");

  private static final org.apache.thrift.protocol.TField FIXED_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("fixedKeys", org.apache.thrift.protocol.TType.LIST, (short)1);
  private static final org.apache.thrift.protocol.TField VARIABLE_TAIL_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("variableTailKeys", org.apache.thrift.protocol.TType.LIST, (short)2);
  private static final org.apache.thrift.protocol.TField HAS_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("hasValue", org.apache.thrift.protocol.TType.BOOL, (short)3);

  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new ColumnSchemaStandardSchemeFactory();
  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new ColumnSchemaTupleSchemeFactory();

  public @org.apache.thrift.annotation.Nullable java.util.List<java.lang.Integer> fixedKeys; // required
  public @org.apache.thrift.annotation.Nullable java.util.List<ColumnHashType> variableTailKeys; // required
  public boolean hasValue; // required

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    FIXED_KEYS((short)1, "fixedKeys"),
    VARIABLE_TAIL_KEYS((short)2, "variableTailKeys"),
    HAS_VALUE((short)3, "hasValue");

    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();

    static {
      for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    @org.apache.thrift.annotation.Nullable
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // FIXED_KEYS
          return FIXED_KEYS;
        case 2: // VARIABLE_TAIL_KEYS
          return VARIABLE_TAIL_KEYS;
        case 3: // HAS_VALUE
          return HAS_VALUE;
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    @org.apache.thrift.annotation.Nullable
    public static _Fields findByName(java.lang.String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final java.lang.String _fieldName;

    _Fields(short thriftId, java.lang.String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    @Override
    public short getThriftFieldId() {
      return _thriftId;
    }

    @Override
    public java.lang.String getFieldName() {
      return _fieldName;
    }
  }

  // isset id assignments
  private static final int __HASVALUE_ISSET_ID = 0;
  private byte __isset_bitfield = 0;
  public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.FIXED_KEYS, new org.apache.thrift.meta_data.FieldMetaData("fixedKeys", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))));
    tmpMap.put(_Fields.VARIABLE_TAIL_KEYS, new org.apache.thrift.meta_data.FieldMetaData("variableTailKeys", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
            new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ColumnHashType.class))));
    tmpMap.put(_Fields.HAS_VALUE, new org.apache.thrift.meta_data.FieldMetaData("hasValue", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
    metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnSchema.class, metaDataMap);
  }

  public ColumnSchema() {
  }

  public ColumnSchema(
    java.util.List<java.lang.Integer> fixedKeys,
    java.util.List<ColumnHashType> variableTailKeys,
    boolean hasValue)
  {
    this();
    this.fixedKeys = fixedKeys;
    this.variableTailKeys = variableTailKeys;
    this.hasValue = hasValue;
    setHasValueIsSet(true);
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public ColumnSchema(ColumnSchema other) {
    __isset_bitfield = other.__isset_bitfield;
    if (other.isSetFixedKeys()) {
      java.util.List<java.lang.Integer> __this__fixedKeys = new java.util.ArrayList<java.lang.Integer>(other.fixedKeys);
      this.fixedKeys = __this__fixedKeys;
    }
    if (other.isSetVariableTailKeys()) {
      java.util.List<ColumnHashType> __this__variableTailKeys = new java.util.ArrayList<ColumnHashType>(other.variableTailKeys.size());
      for (ColumnHashType other_element : other.variableTailKeys) {
        __this__variableTailKeys.add(other_element);
      }
      this.variableTailKeys = __this__variableTailKeys;
    }
    this.hasValue = other.hasValue;
  }

  @Override
  public ColumnSchema deepCopy() {
    return new ColumnSchema(this);
  }

  @Override
  public void clear() {
    this.fixedKeys = null;
    this.variableTailKeys = null;
    setHasValueIsSet(false);
    this.hasValue = false;
  }

  public int getFixedKeysSize() {
    return (this.fixedKeys == null) ? 0 : this.fixedKeys.size();
  }

  @org.apache.thrift.annotation.Nullable
  public java.util.Iterator<java.lang.Integer> getFixedKeysIterator() {
    return (this.fixedKeys == null) ? null : this.fixedKeys.iterator();
  }

  public void addToFixedKeys(int elem) {
    if (this.fixedKeys == null) {
      this.fixedKeys = new java.util.ArrayList<java.lang.Integer>();
    }
    this.fixedKeys.add(elem);
  }

  @org.apache.thrift.annotation.Nullable
  public java.util.List<java.lang.Integer> getFixedKeys() {
    return this.fixedKeys;
  }

  public ColumnSchema setFixedKeys(@org.apache.thrift.annotation.Nullable java.util.List<java.lang.Integer> fixedKeys) {
    this.fixedKeys = fixedKeys;
    return this;
  }

  public void unsetFixedKeys() {
    this.fixedKeys = null;
  }

  /** Returns true if field fixedKeys is set (has been assigned a value) and false otherwise */
  public boolean isSetFixedKeys() {
    return this.fixedKeys != null;
  }

  public void setFixedKeysIsSet(boolean value) {
    if (!value) {
      this.fixedKeys = null;
    }
  }

  public int getVariableTailKeysSize() {
    return (this.variableTailKeys == null) ? 0 : this.variableTailKeys.size();
  }

  @org.apache.thrift.annotation.Nullable
  public java.util.Iterator<ColumnHashType> getVariableTailKeysIterator() {
    return (this.variableTailKeys == null) ? null : this.variableTailKeys.iterator();
  }

  public void addToVariableTailKeys(ColumnHashType elem) {
    if (this.variableTailKeys == null) {
      this.variableTailKeys = new java.util.ArrayList<ColumnHashType>();
    }
    this.variableTailKeys.add(elem);
  }

  @org.apache.thrift.annotation.Nullable
  public java.util.List<ColumnHashType> getVariableTailKeys() {
    return this.variableTailKeys;
  }

  public ColumnSchema setVariableTailKeys(@org.apache.thrift.annotation.Nullable java.util.List<ColumnHashType> variableTailKeys) {
    this.variableTailKeys = variableTailKeys;
    return this;
  }

  public void unsetVariableTailKeys() {
    this.variableTailKeys = null;
  }

  /** Returns true if field variableTailKeys is set (has been assigned a value) and false otherwise */
  public boolean isSetVariableTailKeys() {
    return this.variableTailKeys != null;
  }

  public void setVariableTailKeysIsSet(boolean value) {
    if (!value) {
      this.variableTailKeys = null;
    }
  }

  public boolean isHasValue() {
    return this.hasValue;
  }

  public ColumnSchema setHasValue(boolean hasValue) {
    this.hasValue = hasValue;
    setHasValueIsSet(true);
    return this;
  }

  public void unsetHasValue() {
    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __HASVALUE_ISSET_ID);
  }

  /** Returns true if field hasValue is set (has been assigned a value) and false otherwise */
  public boolean isSetHasValue() {
    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __HASVALUE_ISSET_ID);
  }

  public void setHasValueIsSet(boolean value) {
    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __HASVALUE_ISSET_ID, value);
  }

  @Override
  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
    switch (field) {
    case FIXED_KEYS:
      if (value == null) {
        unsetFixedKeys();
      } else {
        setFixedKeys((java.util.List<java.lang.Integer>)value);
      }
      break;

    case VARIABLE_TAIL_KEYS:
      if (value == null) {
        unsetVariableTailKeys();
      } else {
        setVariableTailKeys((java.util.List<ColumnHashType>)value);
      }
      break;

    case HAS_VALUE:
      if (value == null) {
        unsetHasValue();
      } else {
        setHasValue((java.lang.Boolean)value);
      }
      break;

    }
  }

  @org.apache.thrift.annotation.Nullable
  @Override
  public java.lang.Object getFieldValue(_Fields field) {
    switch (field) {
    case FIXED_KEYS:
      return getFixedKeys();

    case VARIABLE_TAIL_KEYS:
      return getVariableTailKeys();

    case HAS_VALUE:
      return isHasValue();

    }
    throw new java.lang.IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  @Override
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new java.lang.IllegalArgumentException();
    }

    switch (field) {
    case FIXED_KEYS:
      return isSetFixedKeys();
    case VARIABLE_TAIL_KEYS:
      return isSetVariableTailKeys();
    case HAS_VALUE:
      return isSetHasValue();
    }
    throw new java.lang.IllegalStateException();
  }

  @Override
  public boolean equals(java.lang.Object that) {
    if (that instanceof ColumnSchema)
      return this.equals((ColumnSchema)that);
    return false;
  }

  public boolean equals(ColumnSchema that) {
    if (that == null)
      return false;
    if (this == that)
      return true;

    boolean this_present_fixedKeys = true && this.isSetFixedKeys();
    boolean that_present_fixedKeys = true && that.isSetFixedKeys();
    if (this_present_fixedKeys || that_present_fixedKeys) {
      if (!(this_present_fixedKeys && that_present_fixedKeys))
        return false;
      if (!this.fixedKeys.equals(that.fixedKeys))
        return false;
    }

    boolean this_present_variableTailKeys = true && this.isSetVariableTailKeys();
    boolean that_present_variableTailKeys = true && that.isSetVariableTailKeys();
    if (this_present_variableTailKeys || that_present_variableTailKeys) {
      if (!(this_present_variableTailKeys && that_present_variableTailKeys))
        return false;
      if (!this.variableTailKeys.equals(that.variableTailKeys))
        return false;
    }

    boolean this_present_hasValue = true;
    boolean that_present_hasValue = true;
    if (this_present_hasValue || that_present_hasValue) {
      if (!(this_present_hasValue && that_present_hasValue))
        return false;
      if (this.hasValue != that.hasValue)
        return false;
    }

    return true;
  }

  @Override
  public int hashCode() {
    int hashCode = 1;

    hashCode = hashCode * 8191 + ((isSetFixedKeys()) ? 131071 : 524287);
    if (isSetFixedKeys())
      hashCode = hashCode * 8191 + fixedKeys.hashCode();

    hashCode = hashCode * 8191 + ((isSetVariableTailKeys()) ? 131071 : 524287);
    if (isSetVariableTailKeys())
      hashCode = hashCode * 8191 + variableTailKeys.hashCode();

    hashCode = hashCode * 8191 + ((hasValue) ? 131071 : 524287);

    return hashCode;
  }

  @Override
  public int compareTo(ColumnSchema other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;

    lastComparison = java.lang.Boolean.compare(isSetFixedKeys(), other.isSetFixedKeys());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetFixedKeys()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fixedKeys, other.fixedKeys);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = java.lang.Boolean.compare(isSetVariableTailKeys(), other.isSetVariableTailKeys());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetVariableTailKeys()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.variableTailKeys, other.variableTailKeys);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = java.lang.Boolean.compare(isSetHasValue(), other.isSetHasValue());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetHasValue()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hasValue, other.hasValue);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }

  @org.apache.thrift.annotation.Nullable
  @Override
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  @Override
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    scheme(iprot).read(iprot, this);
  }

  @Override
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    scheme(oprot).write(oprot, this);
  }

  @Override
  public java.lang.String toString() {
    java.lang.StringBuilder sb = new java.lang.StringBuilder("ColumnSchema(");
    boolean first = true;

    sb.append("fixedKeys:");
    if (this.fixedKeys == null) {
      sb.append("null");
    } else {
      sb.append(this.fixedKeys);
    }
    first = false;
    if (!first) sb.append(", ");
    sb.append("variableTailKeys:");
    if (this.variableTailKeys == null) {
      sb.append("null");
    } else {
      sb.append(this.variableTailKeys);
    }
    first = false;
    if (!first) sb.append(", ");
    sb.append("hasValue:");
    sb.append(this.hasValue);
    first = false;
    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }

  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
    try {
      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
      __isset_bitfield = 0;
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private static class ColumnSchemaStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
    @Override
    public ColumnSchemaStandardScheme getScheme() {
      return new ColumnSchemaStandardScheme();
    }
  }

  private static class ColumnSchemaStandardScheme extends org.apache.thrift.scheme.StandardScheme<ColumnSchema> {

    @Override
    public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnSchema struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // FIXED_KEYS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
                org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
                struct.fixedKeys = new java.util.ArrayList<java.lang.Integer>(_list0.size);
                int _elem1;
                for (int _i2 = 0; _i2 < _list0.size; ++_i2)
                {
                  _elem1 = iprot.readI32();
                  struct.fixedKeys.add(_elem1);
                }
                iprot.readListEnd();
              }
              struct.setFixedKeysIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // VARIABLE_TAIL_KEYS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
                org.apache.thrift.protocol.TList _list3 = iprot.readListBegin();
                struct.variableTailKeys = new java.util.ArrayList<ColumnHashType>(_list3.size);
                @org.apache.thrift.annotation.Nullable ColumnHashType _elem4;
                for (int _i5 = 0; _i5 < _list3.size; ++_i5)
                {
                  _elem4 = it.cavallium.rockserver.core.common.api.ColumnHashType.findByValue(iprot.readI32());
                  if (_elem4 != null)
                  {
                    struct.variableTailKeys.add(_elem4);
                  }
                }
                iprot.readListEnd();
              }
              struct.setVariableTailKeysIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 3: // HAS_VALUE
            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
              struct.hasValue = iprot.readBool();
              struct.setHasValueIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();

      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }

    @Override
    public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnSchema struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.fixedKeys != null) {
        oprot.writeFieldBegin(FIXED_KEYS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.fixedKeys.size()));
          for (int _iter6 : struct.fixedKeys)
          {
            oprot.writeI32(_iter6);
          }
          oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
      }
      if (struct.variableTailKeys != null) {
        oprot.writeFieldBegin(VARIABLE_TAIL_KEYS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.variableTailKeys.size()));
          for (ColumnHashType _iter7 : struct.variableTailKeys)
          {
            oprot.writeI32(_iter7.getValue());
          }
          oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
      }
      oprot.writeFieldBegin(HAS_VALUE_FIELD_DESC);
      oprot.writeBool(struct.hasValue);
      oprot.writeFieldEnd();
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }

  private static class ColumnSchemaTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
    @Override
    public ColumnSchemaTupleScheme getScheme() {
      return new ColumnSchemaTupleScheme();
    }
  }

  private static class ColumnSchemaTupleScheme extends org.apache.thrift.scheme.TupleScheme<ColumnSchema> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, ColumnSchema struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
|
||||
java.util.BitSet optionals = new java.util.BitSet();
|
||||
if (struct.isSetFixedKeys()) {
|
||||
optionals.set(0);
|
||||
}
|
||||
if (struct.isSetVariableTailKeys()) {
|
||||
optionals.set(1);
|
||||
}
|
||||
if (struct.isSetHasValue()) {
|
||||
optionals.set(2);
|
||||
}
|
||||
oprot.writeBitSet(optionals, 3);
|
||||
if (struct.isSetFixedKeys()) {
|
||||
{
|
||||
oprot.writeI32(struct.fixedKeys.size());
|
||||
for (int _iter8 : struct.fixedKeys)
|
||||
{
|
||||
oprot.writeI32(_iter8);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (struct.isSetVariableTailKeys()) {
|
||||
{
|
||||
oprot.writeI32(struct.variableTailKeys.size());
|
||||
for (ColumnHashType _iter9 : struct.variableTailKeys)
|
||||
{
|
||||
oprot.writeI32(_iter9.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
if (struct.isSetHasValue()) {
|
||||
oprot.writeBool(struct.hasValue);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void read(org.apache.thrift.protocol.TProtocol prot, ColumnSchema struct) throws org.apache.thrift.TException {
|
||||
org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
|
||||
java.util.BitSet incoming = iprot.readBitSet(3);
|
||||
if (incoming.get(0)) {
|
||||
{
|
||||
org.apache.thrift.protocol.TList _list10 = iprot.readListBegin(org.apache.thrift.protocol.TType.I32);
|
||||
struct.fixedKeys = new java.util.ArrayList<java.lang.Integer>(_list10.size);
|
||||
int _elem11;
|
||||
for (int _i12 = 0; _i12 < _list10.size; ++_i12)
|
||||
{
|
||||
_elem11 = iprot.readI32();
|
||||
struct.fixedKeys.add(_elem11);
|
||||
}
|
||||
}
|
||||
struct.setFixedKeysIsSet(true);
|
||||
}
|
||||
if (incoming.get(1)) {
|
||||
{
|
||||
org.apache.thrift.protocol.TList _list13 = iprot.readListBegin(org.apache.thrift.protocol.TType.I32);
|
||||
struct.variableTailKeys = new java.util.ArrayList<ColumnHashType>(_list13.size);
|
||||
@org.apache.thrift.annotation.Nullable ColumnHashType _elem14;
|
||||
for (int _i15 = 0; _i15 < _list13.size; ++_i15)
|
||||
{
|
||||
_elem14 = it.cavallium.rockserver.core.common.api.ColumnHashType.findByValue(iprot.readI32());
|
||||
if (_elem14 != null)
|
||||
{
|
||||
struct.variableTailKeys.add(_elem14);
|
||||
}
|
||||
}
|
||||
}
|
||||
struct.setVariableTailKeysIsSet(true);
|
||||
}
|
||||
if (incoming.get(2)) {
|
||||
struct.hasValue = iprot.readBool();
|
||||
struct.setHasValueIsSet(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
|
||||
return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
|
||||
}
|
||||
}
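For orientation, here is a minimal sketch of how a generated struct such as ColumnSchema round-trips through the compact protocol; the fluent setters (setFixedKeys, setHasValue) are the standard Thrift-generated ones and are assumed from the parts of the class not shown in this hunk:

import java.util.List;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TMemoryBuffer;
import it.cavallium.rockserver.core.common.api.ColumnSchema;

public class ColumnSchemaRoundTrip {
  public static void main(String[] args) throws Exception {
    // Hypothetical field values, only for illustration.
    ColumnSchema schema = new ColumnSchema()
        .setFixedKeys(List.of(4, 8)) // e.g. two fixed-size key parts
        .setHasValue(true);

    // Serialize into an in-memory transport, then read it back.
    TMemoryBuffer buffer = new TMemoryBuffer(128);
    schema.write(new TCompactProtocol(buffer));

    ColumnSchema copy = new ColumnSchema();
    copy.read(new TCompactProtocol(buffer));
    System.out.println(schema.equals(copy)); // expected: true
  }
}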
@@ -1,518 +0,0 @@
/**
 * Autogenerated by Thrift Compiler (0.20.0)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 * @generated
 */
package it.cavallium.rockserver.core.common.api;

@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
public class Delta implements org.apache.thrift.TBase<Delta, Delta._Fields>, java.io.Serializable, Cloneable, Comparable<Delta> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Delta");

  private static final org.apache.thrift.protocol.TField PREVIOUS_FIELD_DESC = new org.apache.thrift.protocol.TField("previous", org.apache.thrift.protocol.TType.STRING, (short)1);
  private static final org.apache.thrift.protocol.TField CURRENT_FIELD_DESC = new org.apache.thrift.protocol.TField("current", org.apache.thrift.protocol.TType.STRING, (short)2);

  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new DeltaStandardSchemeFactory();
  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new DeltaTupleSchemeFactory();

  public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer previous; // optional
  public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer current; // optional

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    PREVIOUS((short)1, "previous"),
    CURRENT((short)2, "current");

    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();

    static {
      for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    @org.apache.thrift.annotation.Nullable
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // PREVIOUS
          return PREVIOUS;
        case 2: // CURRENT
          return CURRENT;
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    @org.apache.thrift.annotation.Nullable
    public static _Fields findByName(java.lang.String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final java.lang.String _fieldName;

    _Fields(short thriftId, java.lang.String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    @Override
    public short getThriftFieldId() {
      return _thriftId;
    }

    @Override
    public java.lang.String getFieldName() {
      return _fieldName;
    }
  }

  // isset id assignments
  private static final _Fields optionals[] = {_Fields.PREVIOUS,_Fields.CURRENT};
  public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.PREVIOUS, new org.apache.thrift.meta_data.FieldMetaData("previous", org.apache.thrift.TFieldRequirementType.OPTIONAL,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)));
    tmpMap.put(_Fields.CURRENT, new org.apache.thrift.meta_data.FieldMetaData("current", org.apache.thrift.TFieldRequirementType.OPTIONAL,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)));
    metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Delta.class, metaDataMap);
  }

  public Delta() {
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public Delta(Delta other) {
    if (other.isSetPrevious()) {
      this.previous = org.apache.thrift.TBaseHelper.copyBinary(other.previous);
    }
    if (other.isSetCurrent()) {
      this.current = org.apache.thrift.TBaseHelper.copyBinary(other.current);
    }
  }

  @Override
  public Delta deepCopy() {
    return new Delta(this);
  }

  @Override
  public void clear() {
    this.previous = null;
    this.current = null;
  }

  public byte[] getPrevious() {
    setPrevious(org.apache.thrift.TBaseHelper.rightSize(previous));
    return previous == null ? null : previous.array();
  }

  public java.nio.ByteBuffer bufferForPrevious() {
    return org.apache.thrift.TBaseHelper.copyBinary(previous);
  }

  public Delta setPrevious(byte[] previous) {
    this.previous = previous == null ? (java.nio.ByteBuffer)null : java.nio.ByteBuffer.wrap(previous.clone());
    return this;
  }

  public Delta setPrevious(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer previous) {
    this.previous = org.apache.thrift.TBaseHelper.copyBinary(previous);
    return this;
  }

  public void unsetPrevious() {
    this.previous = null;
  }

  /** Returns true if field previous is set (has been assigned a value) and false otherwise */
  public boolean isSetPrevious() {
    return this.previous != null;
  }

  public void setPreviousIsSet(boolean value) {
    if (!value) {
      this.previous = null;
    }
  }

  public byte[] getCurrent() {
    setCurrent(org.apache.thrift.TBaseHelper.rightSize(current));
    return current == null ? null : current.array();
  }

  public java.nio.ByteBuffer bufferForCurrent() {
    return org.apache.thrift.TBaseHelper.copyBinary(current);
  }

  public Delta setCurrent(byte[] current) {
    this.current = current == null ? (java.nio.ByteBuffer)null : java.nio.ByteBuffer.wrap(current.clone());
    return this;
  }

  public Delta setCurrent(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer current) {
    this.current = org.apache.thrift.TBaseHelper.copyBinary(current);
    return this;
  }

  public void unsetCurrent() {
    this.current = null;
  }

  /** Returns true if field current is set (has been assigned a value) and false otherwise */
  public boolean isSetCurrent() {
    return this.current != null;
  }

  public void setCurrentIsSet(boolean value) {
    if (!value) {
      this.current = null;
    }
  }

  @Override
  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
    switch (field) {
    case PREVIOUS:
      if (value == null) {
        unsetPrevious();
      } else {
        if (value instanceof byte[]) {
          setPrevious((byte[])value);
        } else {
          setPrevious((java.nio.ByteBuffer)value);
        }
      }
      break;

    case CURRENT:
      if (value == null) {
        unsetCurrent();
      } else {
        if (value instanceof byte[]) {
          setCurrent((byte[])value);
        } else {
          setCurrent((java.nio.ByteBuffer)value);
        }
      }
      break;

    }
  }

  @org.apache.thrift.annotation.Nullable
  @Override
  public java.lang.Object getFieldValue(_Fields field) {
    switch (field) {
    case PREVIOUS:
      return getPrevious();

    case CURRENT:
      return getCurrent();

    }
    throw new java.lang.IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  @Override
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new java.lang.IllegalArgumentException();
    }

    switch (field) {
    case PREVIOUS:
      return isSetPrevious();
    case CURRENT:
      return isSetCurrent();
    }
    throw new java.lang.IllegalStateException();
  }

  @Override
  public boolean equals(java.lang.Object that) {
    if (that instanceof Delta)
      return this.equals((Delta)that);
    return false;
  }

  public boolean equals(Delta that) {
    if (that == null)
      return false;
    if (this == that)
      return true;

    boolean this_present_previous = true && this.isSetPrevious();
    boolean that_present_previous = true && that.isSetPrevious();
    if (this_present_previous || that_present_previous) {
      if (!(this_present_previous && that_present_previous))
        return false;
      if (!this.previous.equals(that.previous))
        return false;
    }

    boolean this_present_current = true && this.isSetCurrent();
    boolean that_present_current = true && that.isSetCurrent();
    if (this_present_current || that_present_current) {
      if (!(this_present_current && that_present_current))
        return false;
      if (!this.current.equals(that.current))
        return false;
    }

    return true;
  }

  @Override
  public int hashCode() {
    int hashCode = 1;

    hashCode = hashCode * 8191 + ((isSetPrevious()) ? 131071 : 524287);
    if (isSetPrevious())
      hashCode = hashCode * 8191 + previous.hashCode();

    hashCode = hashCode * 8191 + ((isSetCurrent()) ? 131071 : 524287);
    if (isSetCurrent())
      hashCode = hashCode * 8191 + current.hashCode();

    return hashCode;
  }

  @Override
  public int compareTo(Delta other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;

    lastComparison = java.lang.Boolean.compare(isSetPrevious(), other.isSetPrevious());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetPrevious()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.previous, other.previous);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = java.lang.Boolean.compare(isSetCurrent(), other.isSetCurrent());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetCurrent()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.current, other.current);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }

  @org.apache.thrift.annotation.Nullable
  @Override
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  @Override
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    scheme(iprot).read(iprot, this);
  }

  @Override
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    scheme(oprot).write(oprot, this);
  }

  @Override
  public java.lang.String toString() {
    java.lang.StringBuilder sb = new java.lang.StringBuilder("Delta(");
    boolean first = true;

    if (isSetPrevious()) {
      sb.append("previous:");
      if (this.previous == null) {
        sb.append("null");
      } else {
        org.apache.thrift.TBaseHelper.toString(this.previous, sb);
      }
      first = false;
    }
    if (isSetCurrent()) {
      if (!first) sb.append(", ");
      sb.append("current:");
      if (this.current == null) {
        sb.append("null");
      } else {
        org.apache.thrift.TBaseHelper.toString(this.current, sb);
      }
      first = false;
    }
    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }

  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private static class DeltaStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
    @Override
    public DeltaStandardScheme getScheme() {
      return new DeltaStandardScheme();
    }
  }

  private static class DeltaStandardScheme extends org.apache.thrift.scheme.StandardScheme<Delta> {

    @Override
    public void read(org.apache.thrift.protocol.TProtocol iprot, Delta struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // PREVIOUS
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.previous = iprot.readBinary();
              struct.setPreviousIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // CURRENT
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.current = iprot.readBinary();
              struct.setCurrentIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();

      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }

    @Override
    public void write(org.apache.thrift.protocol.TProtocol oprot, Delta struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.previous != null) {
        if (struct.isSetPrevious()) {
          oprot.writeFieldBegin(PREVIOUS_FIELD_DESC);
          oprot.writeBinary(struct.previous);
          oprot.writeFieldEnd();
        }
      }
      if (struct.current != null) {
        if (struct.isSetCurrent()) {
          oprot.writeFieldBegin(CURRENT_FIELD_DESC);
          oprot.writeBinary(struct.current);
          oprot.writeFieldEnd();
        }
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }

  private static class DeltaTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
    @Override
    public DeltaTupleScheme getScheme() {
      return new DeltaTupleScheme();
    }
  }

  private static class DeltaTupleScheme extends org.apache.thrift.scheme.TupleScheme<Delta> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, Delta struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
      java.util.BitSet optionals = new java.util.BitSet();
      if (struct.isSetPrevious()) {
        optionals.set(0);
      }
      if (struct.isSetCurrent()) {
        optionals.set(1);
      }
      oprot.writeBitSet(optionals, 2);
      if (struct.isSetPrevious()) {
        oprot.writeBinary(struct.previous);
      }
      if (struct.isSetCurrent()) {
        oprot.writeBinary(struct.current);
      }
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, Delta struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
      java.util.BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        struct.previous = iprot.readBinary();
        struct.setPreviousIsSet(true);
      }
      if (incoming.get(1)) {
        struct.current = iprot.readBinary();
        struct.setCurrentIsSet(true);
      }
    }
  }

  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
  }
}
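The byte[] convenience setters above store a defensive ByteBuffer copy, and optional fields that were never assigned stay off the wire; a small sketch using only the accessors shown in this hunk:

import it.cavallium.rockserver.core.common.api.Delta;

public class DeltaExample {
  public static void main(String[] args) {
    // Only `current` is assigned; `previous` stays unset.
    Delta delta = new Delta().setCurrent(new byte[] {1, 2, 3});

    System.out.println(delta.isSetPrevious());     // false
    System.out.println(delta.isSetCurrent());      // true
    System.out.println(delta.getCurrent().length); // 3 (defensive copy of the input)
  }
}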
@@ -1,490 +0,0 @@
/**
 * Autogenerated by Thrift Compiler (0.19.0)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 * @generated
 */
package it.cavallium.rockserver.core.common.api;

/**
 * Structs can also be exceptions, if they are nasty.
 */
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
public class InvalidOperation extends org.apache.thrift.TException implements org.apache.thrift.TBase<InvalidOperation, InvalidOperation._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidOperation> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidOperation");

  private static final org.apache.thrift.protocol.TField WHAT_OP_FIELD_DESC = new org.apache.thrift.protocol.TField("whatOp", org.apache.thrift.protocol.TType.I32, (short)1);
  private static final org.apache.thrift.protocol.TField WHY_FIELD_DESC = new org.apache.thrift.protocol.TField("why", org.apache.thrift.protocol.TType.STRING, (short)2);

  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new InvalidOperationStandardSchemeFactory();
  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new InvalidOperationTupleSchemeFactory();

  public int whatOp; // required
  public @org.apache.thrift.annotation.Nullable java.lang.String why; // required

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    WHAT_OP((short)1, "whatOp"),
    WHY((short)2, "why");

    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();

    static {
      for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    @org.apache.thrift.annotation.Nullable
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // WHAT_OP
          return WHAT_OP;
        case 2: // WHY
          return WHY;
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    @org.apache.thrift.annotation.Nullable
    public static _Fields findByName(java.lang.String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final java.lang.String _fieldName;

    _Fields(short thriftId, java.lang.String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    @Override
    public short getThriftFieldId() {
      return _thriftId;
    }

    @Override
    public java.lang.String getFieldName() {
      return _fieldName;
    }
  }

  // isset id assignments
  private static final int __WHATOP_ISSET_ID = 0;
  private byte __isset_bitfield = 0;
  public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.WHAT_OP, new org.apache.thrift.meta_data.FieldMetaData("whatOp", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
    tmpMap.put(_Fields.WHY, new org.apache.thrift.meta_data.FieldMetaData("why", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InvalidOperation.class, metaDataMap);
  }

  public InvalidOperation() {
  }

  public InvalidOperation(
    int whatOp,
    java.lang.String why)
  {
    this();
    this.whatOp = whatOp;
    setWhatOpIsSet(true);
    this.why = why;
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public InvalidOperation(InvalidOperation other) {
    __isset_bitfield = other.__isset_bitfield;
    this.whatOp = other.whatOp;
    if (other.isSetWhy()) {
      this.why = other.why;
    }
  }

  @Override
  public InvalidOperation deepCopy() {
    return new InvalidOperation(this);
  }

  @Override
  public void clear() {
    setWhatOpIsSet(false);
    this.whatOp = 0;
    this.why = null;
  }

  public int getWhatOp() {
    return this.whatOp;
  }

  public InvalidOperation setWhatOp(int whatOp) {
    this.whatOp = whatOp;
    setWhatOpIsSet(true);
    return this;
  }

  public void unsetWhatOp() {
    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __WHATOP_ISSET_ID);
  }

  /** Returns true if field whatOp is set (has been assigned a value) and false otherwise */
  public boolean isSetWhatOp() {
    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __WHATOP_ISSET_ID);
  }

  public void setWhatOpIsSet(boolean value) {
    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __WHATOP_ISSET_ID, value);
  }

  @org.apache.thrift.annotation.Nullable
  public java.lang.String getWhy() {
    return this.why;
  }

  public InvalidOperation setWhy(@org.apache.thrift.annotation.Nullable java.lang.String why) {
    this.why = why;
    return this;
  }

  public void unsetWhy() {
    this.why = null;
  }

  /** Returns true if field why is set (has been assigned a value) and false otherwise */
  public boolean isSetWhy() {
    return this.why != null;
  }

  public void setWhyIsSet(boolean value) {
    if (!value) {
      this.why = null;
    }
  }

  @Override
  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
    switch (field) {
    case WHAT_OP:
      if (value == null) {
        unsetWhatOp();
      } else {
        setWhatOp((java.lang.Integer)value);
      }
      break;

    case WHY:
      if (value == null) {
        unsetWhy();
      } else {
        setWhy((java.lang.String)value);
      }
      break;

    }
  }

  @org.apache.thrift.annotation.Nullable
  @Override
  public java.lang.Object getFieldValue(_Fields field) {
    switch (field) {
    case WHAT_OP:
      return getWhatOp();

    case WHY:
      return getWhy();

    }
    throw new java.lang.IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  @Override
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new java.lang.IllegalArgumentException();
    }

    switch (field) {
    case WHAT_OP:
      return isSetWhatOp();
    case WHY:
      return isSetWhy();
    }
    throw new java.lang.IllegalStateException();
  }

  @Override
  public boolean equals(java.lang.Object that) {
    if (that instanceof InvalidOperation)
      return this.equals((InvalidOperation)that);
    return false;
  }

  public boolean equals(InvalidOperation that) {
    if (that == null)
      return false;
    if (this == that)
      return true;

    boolean this_present_whatOp = true;
    boolean that_present_whatOp = true;
    if (this_present_whatOp || that_present_whatOp) {
      if (!(this_present_whatOp && that_present_whatOp))
        return false;
      if (this.whatOp != that.whatOp)
        return false;
    }

    boolean this_present_why = true && this.isSetWhy();
    boolean that_present_why = true && that.isSetWhy();
    if (this_present_why || that_present_why) {
      if (!(this_present_why && that_present_why))
        return false;
      if (!this.why.equals(that.why))
        return false;
    }

    return true;
  }

  @Override
  public int hashCode() {
    int hashCode = 1;

    hashCode = hashCode * 8191 + whatOp;

    hashCode = hashCode * 8191 + ((isSetWhy()) ? 131071 : 524287);
    if (isSetWhy())
      hashCode = hashCode * 8191 + why.hashCode();

    return hashCode;
  }

  @Override
  public int compareTo(InvalidOperation other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;

    lastComparison = java.lang.Boolean.compare(isSetWhatOp(), other.isSetWhatOp());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetWhatOp()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.whatOp, other.whatOp);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = java.lang.Boolean.compare(isSetWhy(), other.isSetWhy());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetWhy()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.why, other.why);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }

  @org.apache.thrift.annotation.Nullable
  @Override
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  @Override
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    scheme(iprot).read(iprot, this);
  }

  @Override
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    scheme(oprot).write(oprot, this);
  }

  @Override
  public java.lang.String toString() {
    java.lang.StringBuilder sb = new java.lang.StringBuilder("InvalidOperation(");
    boolean first = true;

    sb.append("whatOp:");
    sb.append(this.whatOp);
    first = false;
    if (!first) sb.append(", ");
    sb.append("why:");
    if (this.why == null) {
      sb.append("null");
    } else {
      sb.append(this.why);
    }
    first = false;
    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }

  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
    try {
      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
      __isset_bitfield = 0;
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private static class InvalidOperationStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
    @Override
    public InvalidOperationStandardScheme getScheme() {
      return new InvalidOperationStandardScheme();
    }
  }

  private static class InvalidOperationStandardScheme extends org.apache.thrift.scheme.StandardScheme<InvalidOperation> {

    @Override
    public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidOperation struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // WHAT_OP
            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
              struct.whatOp = iprot.readI32();
              struct.setWhatOpIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // WHY
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.why = iprot.readString();
              struct.setWhyIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();

      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }

    @Override
    public void write(org.apache.thrift.protocol.TProtocol oprot, InvalidOperation struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      oprot.writeFieldBegin(WHAT_OP_FIELD_DESC);
      oprot.writeI32(struct.whatOp);
      oprot.writeFieldEnd();
      if (struct.why != null) {
        oprot.writeFieldBegin(WHY_FIELD_DESC);
        oprot.writeString(struct.why);
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }

  private static class InvalidOperationTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
    @Override
    public InvalidOperationTupleScheme getScheme() {
      return new InvalidOperationTupleScheme();
    }
  }

  private static class InvalidOperationTupleScheme extends org.apache.thrift.scheme.TupleScheme<InvalidOperation> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, InvalidOperation struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
      java.util.BitSet optionals = new java.util.BitSet();
      if (struct.isSetWhatOp()) {
        optionals.set(0);
      }
      if (struct.isSetWhy()) {
        optionals.set(1);
      }
      oprot.writeBitSet(optionals, 2);
      if (struct.isSetWhatOp()) {
        oprot.writeI32(struct.whatOp);
      }
      if (struct.isSetWhy()) {
        oprot.writeString(struct.why);
      }
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, InvalidOperation struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
      java.util.BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        struct.whatOp = iprot.readI32();
        struct.setWhatOpIsSet(true);
      }
      if (incoming.get(1)) {
        struct.why = iprot.readString();
        struct.setWhyIsSet(true);
      }
    }
  }

  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
  }
}
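Since InvalidOperation extends org.apache.thrift.TException, service implementations can throw it directly and clients catch it as a checked exception; a brief sketch (the operation code 4 and the message are hypothetical):

import it.cavallium.rockserver.core.common.api.InvalidOperation;

public class InvalidOperationExample {
  static void failingCall() throws InvalidOperation {
    // Hypothetical failure: report which operation failed and why.
    throw new InvalidOperation(4, "update id does not exist");
  }

  public static void main(String[] args) {
    try {
      failingCall();
    } catch (InvalidOperation e) {
      System.out.println(e.getWhatOp() + ": " + e.getWhy());
    }
  }
}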
@@ -1,64 +0,0 @@
/**
 * Autogenerated by Thrift Compiler (0.20.0)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 * @generated
 */
package it.cavallium.rockserver.core.common.api;


public enum Operation implements org.apache.thrift.TEnum {
  NOTHING(1),
  PREVIOUS(2),
  CURRENT(3),
  FOR_UPDATE(4),
  EXISTS(5),
  DELTA(6),
  MULTI(7),
  CHANGED(8),
  PREVIOUS_PRESENCE(9);

  private final int value;

  private Operation(int value) {
    this.value = value;
  }

  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  @Override
  public int getValue() {
    return value;
  }

  /**
   * Find a the enum type by its integer value, as defined in the Thrift IDL.
   * @return null if the value is not found.
   */
  @org.apache.thrift.annotation.Nullable
  public static Operation findByValue(int value) {
    switch (value) {
      case 1:
        return NOTHING;
      case 2:
        return PREVIOUS;
      case 3:
        return CURRENT;
      case 4:
        return FOR_UPDATE;
      case 5:
        return EXISTS;
      case 6:
        return DELTA;
      case 7:
        return MULTI;
      case 8:
        return CHANGED;
      case 9:
        return PREVIOUS_PRESENCE;
      default:
        return null;
    }
  }
}
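Only the integer code travels on the wire, and findByValue returns null for codes a given build does not know, so callers should handle that case; for example:

import it.cavallium.rockserver.core.common.api.Operation;

public class OperationExample {
  public static void main(String[] args) {
    Operation op = Operation.findByValue(6);
    System.out.println(op);                        // DELTA
    System.out.println(op.getValue());             // 6
    System.out.println(Operation.findByValue(42)); // null: unknown wire value
  }
}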
@@ -1,398 +0,0 @@
/**
 * Autogenerated by Thrift Compiler (0.20.0)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 * @generated
 */
package it.cavallium.rockserver.core.common.api;

@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
public class OptionalBinary implements org.apache.thrift.TBase<OptionalBinary, OptionalBinary._Fields>, java.io.Serializable, Cloneable, Comparable<OptionalBinary> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OptionalBinary");

  private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)1);

  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new OptionalBinaryStandardSchemeFactory();
  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new OptionalBinaryTupleSchemeFactory();

  public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value; // optional

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    VALUE((short)1, "value");

    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();

    static {
      for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    @org.apache.thrift.annotation.Nullable
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // VALUE
          return VALUE;
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    @org.apache.thrift.annotation.Nullable
    public static _Fields findByName(java.lang.String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final java.lang.String _fieldName;

    _Fields(short thriftId, java.lang.String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    @Override
    public short getThriftFieldId() {
      return _thriftId;
    }

    @Override
    public java.lang.String getFieldName() {
      return _fieldName;
    }
  }

  // isset id assignments
  private static final _Fields optionals[] = {_Fields.VALUE};
  public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)));
    metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(OptionalBinary.class, metaDataMap);
  }

  public OptionalBinary() {
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public OptionalBinary(OptionalBinary other) {
    if (other.isSetValue()) {
      this.value = org.apache.thrift.TBaseHelper.copyBinary(other.value);
    }
  }

  @Override
  public OptionalBinary deepCopy() {
    return new OptionalBinary(this);
  }

  @Override
  public void clear() {
    this.value = null;
  }

  public byte[] getValue() {
    setValue(org.apache.thrift.TBaseHelper.rightSize(value));
    return value == null ? null : value.array();
  }

  public java.nio.ByteBuffer bufferForValue() {
    return org.apache.thrift.TBaseHelper.copyBinary(value);
  }

  public OptionalBinary setValue(byte[] value) {
    this.value = value == null ? (java.nio.ByteBuffer)null : java.nio.ByteBuffer.wrap(value.clone());
    return this;
  }

  public OptionalBinary setValue(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value) {
    this.value = org.apache.thrift.TBaseHelper.copyBinary(value);
    return this;
  }

  public void unsetValue() {
    this.value = null;
  }

  /** Returns true if field value is set (has been assigned a value) and false otherwise */
  public boolean isSetValue() {
    return this.value != null;
  }

  public void setValueIsSet(boolean value) {
    if (!value) {
      this.value = null;
    }
  }

  @Override
  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
    switch (field) {
    case VALUE:
      if (value == null) {
        unsetValue();
      } else {
        if (value instanceof byte[]) {
          setValue((byte[])value);
        } else {
          setValue((java.nio.ByteBuffer)value);
        }
      }
      break;

    }
  }

  @org.apache.thrift.annotation.Nullable
  @Override
  public java.lang.Object getFieldValue(_Fields field) {
    switch (field) {
    case VALUE:
      return getValue();

    }
    throw new java.lang.IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  @Override
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new java.lang.IllegalArgumentException();
    }

    switch (field) {
    case VALUE:
      return isSetValue();
    }
    throw new java.lang.IllegalStateException();
  }

  @Override
  public boolean equals(java.lang.Object that) {
    if (that instanceof OptionalBinary)
      return this.equals((OptionalBinary)that);
    return false;
  }

  public boolean equals(OptionalBinary that) {
    if (that == null)
      return false;
    if (this == that)
      return true;

    boolean this_present_value = true && this.isSetValue();
    boolean that_present_value = true && that.isSetValue();
    if (this_present_value || that_present_value) {
      if (!(this_present_value && that_present_value))
        return false;
      if (!this.value.equals(that.value))
        return false;
    }

    return true;
  }

  @Override
  public int hashCode() {
    int hashCode = 1;

    hashCode = hashCode * 8191 + ((isSetValue()) ? 131071 : 524287);
    if (isSetValue())
      hashCode = hashCode * 8191 + value.hashCode();

    return hashCode;
  }

  @Override
  public int compareTo(OptionalBinary other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;

    lastComparison = java.lang.Boolean.compare(isSetValue(), other.isSetValue());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetValue()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }

  @org.apache.thrift.annotation.Nullable
  @Override
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  @Override
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    scheme(iprot).read(iprot, this);
  }

  @Override
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    scheme(oprot).write(oprot, this);
  }

  @Override
  public java.lang.String toString() {
    java.lang.StringBuilder sb = new java.lang.StringBuilder("OptionalBinary(");
    boolean first = true;

    if (isSetValue()) {
      sb.append("value:");
      if (this.value == null) {
        sb.append("null");
      } else {
        org.apache.thrift.TBaseHelper.toString(this.value, sb);
      }
      first = false;
    }
    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }

  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private static class OptionalBinaryStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
    @Override
    public OptionalBinaryStandardScheme getScheme() {
      return new OptionalBinaryStandardScheme();
    }
  }

  private static class OptionalBinaryStandardScheme extends org.apache.thrift.scheme.StandardScheme<OptionalBinary> {

    @Override
    public void read(org.apache.thrift.protocol.TProtocol iprot, OptionalBinary struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // VALUE
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.value = iprot.readBinary();
              struct.setValueIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();

      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }

    @Override
    public void write(org.apache.thrift.protocol.TProtocol oprot, OptionalBinary struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.value != null) {
        if (struct.isSetValue()) {
          oprot.writeFieldBegin(VALUE_FIELD_DESC);
          oprot.writeBinary(struct.value);
          oprot.writeFieldEnd();
        }
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }

  private static class OptionalBinaryTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
    @Override
    public OptionalBinaryTupleScheme getScheme() {
      return new OptionalBinaryTupleScheme();
    }
  }

  private static class OptionalBinaryTupleScheme extends org.apache.thrift.scheme.TupleScheme<OptionalBinary> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, OptionalBinary struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
      java.util.BitSet optionals = new java.util.BitSet();
      if (struct.isSetValue()) {
        optionals.set(0);
      }
      oprot.writeBitSet(optionals, 1);
      if (struct.isSetValue()) {
        oprot.writeBinary(struct.value);
      }
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, OptionalBinary struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
      java.util.BitSet incoming = iprot.readBitSet(1);
      if (incoming.get(0)) {
        struct.value = iprot.readBinary();
        struct.setValueIsSet(true);
      }
    }
  }

  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
  }
}
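A wrapper like OptionalBinary presumably exists so the API can tell an absent value apart from a present-but-empty one; a minimal sketch using only the accessors shown above:

import it.cavallium.rockserver.core.common.api.OptionalBinary;

public class OptionalBinaryExample {
  public static void main(String[] args) {
    OptionalBinary absent = new OptionalBinary(); // value never assigned
    OptionalBinary empty = new OptionalBinary().setValue(new byte[0]);

    System.out.println(absent.isSetValue());     // false
    System.out.println(empty.isSetValue());      // true
    System.out.println(empty.getValue().length); // 0
  }
}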
File diff suppressed because it is too large
@@ -1,501 +0,0 @@
|
||||
/**
* Autogenerated by Thrift Compiler (0.20.0)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package it.cavallium.rockserver.core.common.api;

@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
public class UpdateBegin implements org.apache.thrift.TBase<UpdateBegin, UpdateBegin._Fields>, java.io.Serializable, Cloneable, Comparable<UpdateBegin> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UpdateBegin");

private static final org.apache.thrift.protocol.TField PREVIOUS_FIELD_DESC = new org.apache.thrift.protocol.TField("previous", org.apache.thrift.protocol.TType.STRING, (short)1);
private static final org.apache.thrift.protocol.TField UPDATE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("updateId", org.apache.thrift.protocol.TType.I64, (short)2);

private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new UpdateBeginStandardSchemeFactory();
private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new UpdateBeginTupleSchemeFactory();

public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer previous; // optional
public long updateId; // optional

/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
PREVIOUS((short)1, "previous"),
UPDATE_ID((short)2, "updateId");

private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();

static {
for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}

/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
@org.apache.thrift.annotation.Nullable
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // PREVIOUS
return PREVIOUS;
case 2: // UPDATE_ID
return UPDATE_ID;
default:
return null;
}
}

/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}

/**
* Find the _Fields constant that matches name, or null if its not found.
*/
@org.apache.thrift.annotation.Nullable
public static _Fields findByName(java.lang.String name) {
return byName.get(name);
}

private final short _thriftId;
private final java.lang.String _fieldName;

_Fields(short thriftId, java.lang.String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}

@Override
public short getThriftFieldId() {
return _thriftId;
}

@Override
public java.lang.String getFieldName() {
return _fieldName;
}
}

// isset id assignments
private static final int __UPDATEID_ISSET_ID = 0;
private byte __isset_bitfield = 0;
private static final _Fields optionals[] = {_Fields.PREVIOUS,_Fields.UPDATE_ID};
public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.PREVIOUS, new org.apache.thrift.meta_data.FieldMetaData("previous", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)));
tmpMap.put(_Fields.UPDATE_ID, new org.apache.thrift.meta_data.FieldMetaData("updateId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(UpdateBegin.class, metaDataMap);
}

public UpdateBegin() {
}

/**
* Performs a deep copy on <i>other</i>.
*/
public UpdateBegin(UpdateBegin other) {
__isset_bitfield = other.__isset_bitfield;
if (other.isSetPrevious()) {
this.previous = org.apache.thrift.TBaseHelper.copyBinary(other.previous);
}
this.updateId = other.updateId;
}

@Override
public UpdateBegin deepCopy() {
return new UpdateBegin(this);
}

@Override
public void clear() {
this.previous = null;
setUpdateIdIsSet(false);
this.updateId = 0;
}

public byte[] getPrevious() {
setPrevious(org.apache.thrift.TBaseHelper.rightSize(previous));
return previous == null ? null : previous.array();
}

public java.nio.ByteBuffer bufferForPrevious() {
return org.apache.thrift.TBaseHelper.copyBinary(previous);
}

public UpdateBegin setPrevious(byte[] previous) {
this.previous = previous == null ? (java.nio.ByteBuffer)null : java.nio.ByteBuffer.wrap(previous.clone());
return this;
}

public UpdateBegin setPrevious(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer previous) {
this.previous = org.apache.thrift.TBaseHelper.copyBinary(previous);
return this;
}

public void unsetPrevious() {
this.previous = null;
}

/** Returns true if field previous is set (has been assigned a value) and false otherwise */
public boolean isSetPrevious() {
return this.previous != null;
}

public void setPreviousIsSet(boolean value) {
if (!value) {
this.previous = null;
}
}

public long getUpdateId() {
return this.updateId;
}

public UpdateBegin setUpdateId(long updateId) {
this.updateId = updateId;
setUpdateIdIsSet(true);
return this;
}

public void unsetUpdateId() {
__isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __UPDATEID_ISSET_ID);
}

/** Returns true if field updateId is set (has been assigned a value) and false otherwise */
public boolean isSetUpdateId() {
return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __UPDATEID_ISSET_ID);
}

public void setUpdateIdIsSet(boolean value) {
__isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __UPDATEID_ISSET_ID, value);
}

@Override
public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
switch (field) {
case PREVIOUS:
if (value == null) {
unsetPrevious();
} else {
if (value instanceof byte[]) {
setPrevious((byte[])value);
} else {
setPrevious((java.nio.ByteBuffer)value);
}
}
break;

case UPDATE_ID:
if (value == null) {
unsetUpdateId();
} else {
setUpdateId((java.lang.Long)value);
}
break;

}
}

@org.apache.thrift.annotation.Nullable
@Override
public java.lang.Object getFieldValue(_Fields field) {
switch (field) {
case PREVIOUS:
return getPrevious();

case UPDATE_ID:
return getUpdateId();

}
throw new java.lang.IllegalStateException();
}

/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
@Override
public boolean isSet(_Fields field) {
if (field == null) {
throw new java.lang.IllegalArgumentException();
}

switch (field) {
case PREVIOUS:
return isSetPrevious();
case UPDATE_ID:
return isSetUpdateId();
}
throw new java.lang.IllegalStateException();
}

@Override
public boolean equals(java.lang.Object that) {
if (that instanceof UpdateBegin)
return this.equals((UpdateBegin)that);
return false;
}

public boolean equals(UpdateBegin that) {
if (that == null)
return false;
if (this == that)
return true;

boolean this_present_previous = true && this.isSetPrevious();
boolean that_present_previous = true && that.isSetPrevious();
if (this_present_previous || that_present_previous) {
if (!(this_present_previous && that_present_previous))
return false;
if (!this.previous.equals(that.previous))
return false;
}

boolean this_present_updateId = true && this.isSetUpdateId();
boolean that_present_updateId = true && that.isSetUpdateId();
if (this_present_updateId || that_present_updateId) {
if (!(this_present_updateId && that_present_updateId))
return false;
if (this.updateId != that.updateId)
return false;
}

return true;
}

@Override
public int hashCode() {
int hashCode = 1;

hashCode = hashCode * 8191 + ((isSetPrevious()) ? 131071 : 524287);
if (isSetPrevious())
hashCode = hashCode * 8191 + previous.hashCode();

hashCode = hashCode * 8191 + ((isSetUpdateId()) ? 131071 : 524287);
if (isSetUpdateId())
hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(updateId);

return hashCode;
}

@Override
public int compareTo(UpdateBegin other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}

int lastComparison = 0;

lastComparison = java.lang.Boolean.compare(isSetPrevious(), other.isSetPrevious());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetPrevious()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.previous, other.previous);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = java.lang.Boolean.compare(isSetUpdateId(), other.isSetUpdateId());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetUpdateId()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.updateId, other.updateId);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}

@org.apache.thrift.annotation.Nullable
@Override
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}

@Override
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
scheme(iprot).read(iprot, this);
}

@Override
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
scheme(oprot).write(oprot, this);
}

@Override
public java.lang.String toString() {
java.lang.StringBuilder sb = new java.lang.StringBuilder("UpdateBegin(");
boolean first = true;

if (isSetPrevious()) {
sb.append("previous:");
if (this.previous == null) {
sb.append("null");
} else {
org.apache.thrift.TBaseHelper.toString(this.previous, sb);
}
first = false;
}
if (isSetUpdateId()) {
if (!first) sb.append(", ");
sb.append("updateId:");
sb.append(this.updateId);
first = false;
}
sb.append(")");
return sb.toString();
}

public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}

private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}

private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
__isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}

private static class UpdateBeginStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
@Override
public UpdateBeginStandardScheme getScheme() {
return new UpdateBeginStandardScheme();
}
}

private static class UpdateBeginStandardScheme extends org.apache.thrift.scheme.StandardScheme<UpdateBegin> {

@Override
public void read(org.apache.thrift.protocol.TProtocol iprot, UpdateBegin struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // PREVIOUS
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.previous = iprot.readBinary();
struct.setPreviousIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 2: // UPDATE_ID
if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
struct.updateId = iprot.readI64();
struct.setUpdateIdIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();

// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}

@Override
public void write(org.apache.thrift.protocol.TProtocol oprot, UpdateBegin struct) throws org.apache.thrift.TException {
struct.validate();

oprot.writeStructBegin(STRUCT_DESC);
if (struct.previous != null) {
if (struct.isSetPrevious()) {
oprot.writeFieldBegin(PREVIOUS_FIELD_DESC);
oprot.writeBinary(struct.previous);
oprot.writeFieldEnd();
}
}
if (struct.isSetUpdateId()) {
oprot.writeFieldBegin(UPDATE_ID_FIELD_DESC);
oprot.writeI64(struct.updateId);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}

}

private static class UpdateBeginTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
@Override
public UpdateBeginTupleScheme getScheme() {
return new UpdateBeginTupleScheme();
}
}

private static class UpdateBeginTupleScheme extends org.apache.thrift.scheme.TupleScheme<UpdateBegin> {

@Override
public void write(org.apache.thrift.protocol.TProtocol prot, UpdateBegin struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
java.util.BitSet optionals = new java.util.BitSet();
if (struct.isSetPrevious()) {
optionals.set(0);
}
if (struct.isSetUpdateId()) {
optionals.set(1);
}
oprot.writeBitSet(optionals, 2);
if (struct.isSetPrevious()) {
oprot.writeBinary(struct.previous);
}
if (struct.isSetUpdateId()) {
oprot.writeI64(struct.updateId);
}
}

@Override
public void read(org.apache.thrift.protocol.TProtocol prot, UpdateBegin struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
java.util.BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
struct.previous = iprot.readBinary();
struct.setPreviousIsSet(true);
}
if (incoming.get(1)) {
struct.updateId = iprot.readI64();
struct.setUpdateIdIsSet(true);
}
}
}

private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
}
}
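The deleted UpdateBegin struct used the two standard Thrift optional-field mechanisms: previous is tracked by nullness, while the primitive updateId needs the __isset_bitfield. A short sketch of how this API was used before its removal (illustrative only; every method shown appears in the deleted file above):

class UpdateBeginUsage {
    static void demo() {
        var begin = new UpdateBegin();
        assert !begin.isSetUpdateId(); // primitive optionals start unset via the isset bitfield

        begin.setUpdateId(42L);            // also flips the isset bit
        begin.setPrevious(new byte[] {7}); // stored as a defensively copied ByteBuffer

        assert begin.isSetPrevious();
        assert begin.bufferForPrevious().remaining() == 1; // bufferForPrevious() returns a copy
    }
}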
@ -4,15 +4,25 @@ import it.cavallium.rockserver.core.common.RocksDBException;
import it.cavallium.rockserver.core.common.RocksDBException.RocksDBErrorType;
import it.cavallium.rockserver.core.impl.DataSizeDecoder;
import it.cavallium.rockserver.core.impl.DbCompressionDecoder;
import it.cavallium.rockserver.core.resources.DefaultConfig;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.github.gestalt.config.builder.GestaltBuilder;
import org.github.gestalt.config.builder.SourceBuilder;
import org.github.gestalt.config.decoder.ProxyDecoderMode;
import org.github.gestalt.config.exceptions.GestaltException;
import org.github.gestalt.config.reload.ConfigReloadStrategy;
import org.github.gestalt.config.source.ClassPathConfigSourceBuilder;
import org.github.gestalt.config.source.ConfigSource;
import org.github.gestalt.config.source.FileConfigSourceBuilder;
import org.github.gestalt.config.source.InputStreamConfigSourceBuilder;
import org.github.gestalt.config.tag.Tags;
import org.github.gestalt.config.utils.Pair;

public class ConfigParser {

@ -33,7 +43,9 @@ public class ConfigParser {

public static DatabaseConfig parse(Path configPath) {
var parser = new ConfigParser();
parser.addSource(configPath);
if (configPath != null) {
parser.addSource(configPath);
}
return parser.parse();
}

@ -51,8 +63,11 @@ public class ConfigParser {

public DatabaseConfig parse() {
try {
gsb.addSource(ClassPathConfigSourceBuilder
.builder().setResource("it/cavallium/rockserver/core/resources/default.conf").build());
gsb.addSource(InputStreamConfigSourceBuilder
.builder()
.setConfig(DefaultConfig.getDefaultConfig())
.setFormat("conf")
.build());
for (SourceBuilder<?, ?> sourceBuilder : sourceBuilders) {
gsb.addSource(sourceBuilder.build());
}
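With the new null guard in parse(Path), a missing config file now falls through to the embedded defaults loaded from DefaultConfig.getDefaultConfig(). Both call paths, sketched (the file path is a hypothetical example):

import java.nio.file.Path;

class ConfigParserUsage {
    static void demo() {
        // No file: only the built-in defaults are loaded.
        DatabaseConfig defaults = ConfigParser.parse(null);

        // With a file: defaults are loaded first, then overridden by the file source.
        DatabaseConfig custom = ConfigParser.parse(Path.of("/etc/rockserver/config.conf")); // hypothetical path
    }
}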
@ -69,6 +69,7 @@ public class ConfigPrinter {
"log-path": "%s",
"wal-path": "%s",
"absolute-consistency": %b,
"ingest-behind": %b,
"volumes": %s,
"fallback-column-options": %s,
"column-options": %s
@ -84,6 +85,7 @@ public class ConfigPrinter {
o.logPath(),
o.walPath(),
o.absoluteConsistency(),
o.ingestBehind(),
result.toString(),
stringifyFallbackColumn(o.fallbackColumnOptions()),
joiner.toString()
@ -33,11 +33,18 @@ public interface GlobalDatabaseConfig {
@Nullable
Path walPath() throws GestaltException;

@Nullable
Path tempSstPath() throws GestaltException;

@Nullable
Duration delayWalFlushDuration() throws GestaltException;

boolean absoluteConsistency() throws GestaltException;

boolean ingestBehind() throws GestaltException;

boolean unorderedWrite() throws GestaltException;

VolumeConfig[] volumes() throws GestaltException;

FallbackColumnConfig fallbackColumnOptions() throws GestaltException;
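tempSstPath() and delayWalFlushDuration() are both @Nullable and may throw GestaltException, so callers need two guards. A defensive-access sketch (the fallback directory is a made-up example):

import java.nio.file.Path;
import org.github.gestalt.config.exceptions.GestaltException;

class GlobalConfigAccess {
    static Path tempSstPathOrDefault(GlobalDatabaseConfig cfg) {
        try {
            Path p = cfg.tempSstPath();
            return p != null ? p : Path.of("/tmp/rockserver-ssts"); // hypothetical fallback
        } catch (GestaltException e) {
            throw new IllegalStateException("Can't read temp SST path from config", e);
        }
    }
}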
@ -40,7 +40,7 @@ public class DataSizeDecoder implements Decoder<DataSize> {
return GResultOf.errors(new ValidationError.DecodingNumberFormatException(path,
node,
name(),
decoderContext.getSecretConcealer()
decoderContext
));
}
}
@ -41,7 +41,7 @@ public class DbCompressionDecoder implements Decoder<CompressionType> {
return GResultOf.errors(new ValidationError.DecodingNumberFormatException(path,
node,
name(),
decoderContext.getSecretConcealer()
decoderContext
));
}
}
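Both decoders make the same mechanical change: the newer Gestalt ValidationError constructor takes the whole DecoderContext instead of just its secret concealer. The error path in isolation, as a sketch; the constructor arity comes from the hunks above, while the import locations are assumptions about the Gestalt 0.32.x package layout:

import org.github.gestalt.config.decoder.DecoderContext;
import org.github.gestalt.config.entity.ValidationError;
import org.github.gestalt.config.node.ConfigNode;
import org.github.gestalt.config.utils.GResultOf;

class DecoderErrorSketch {
    static GResultOf<Long> numberFormatError(String path, ConfigNode node, String decoderName, DecoderContext ctx) {
        // Same shape as the decoders above: pass the full context, not ctx.getSecretConcealer().
        return GResultOf.errors(new ValidationError.DecodingNumberFormatException(path, node, decoderName, ctx));
    }
}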
@ -5,27 +5,13 @@ import static it.cavallium.rockserver.core.impl.ColumnInstance.BIG_ENDIAN_BYTES;
import static org.rocksdb.KeyMayExist.KeyMayExistEnum.kExistsWithValue;
import static org.rocksdb.KeyMayExist.KeyMayExistEnum.kExistsWithoutValue;

import it.cavallium.rockserver.core.common.ColumnHashType;
import it.cavallium.rockserver.core.common.Keys;
import it.cavallium.rockserver.core.common.RequestType;
import it.cavallium.rockserver.core.common.*;
import it.cavallium.rockserver.core.common.RequestType.RequestGet;
import it.cavallium.rockserver.core.common.RequestType.RequestPut;
import it.cavallium.rockserver.core.common.ColumnSchema;
import it.cavallium.rockserver.core.common.Delta;
import it.cavallium.rockserver.core.common.RocksDBSyncAPI;
import it.cavallium.rockserver.core.common.RocksDBException.RocksDBErrorType;
import it.cavallium.rockserver.core.common.RocksDBRetryException;
import it.cavallium.rockserver.core.common.UpdateContext;
import it.cavallium.rockserver.core.common.Utils;
import it.cavallium.rockserver.core.config.ConfigParser;
import it.cavallium.rockserver.core.config.ConfigPrinter;
import it.cavallium.rockserver.core.config.DatabaseConfig;
import it.cavallium.rockserver.core.impl.rocksdb.REntry;
import it.cavallium.rockserver.core.impl.rocksdb.RocksDBLoader;
import it.cavallium.rockserver.core.impl.rocksdb.RocksDBObjects;
import it.cavallium.rockserver.core.impl.rocksdb.TransactionalDB;
import it.cavallium.rockserver.core.config.*;
import it.cavallium.rockserver.core.impl.rocksdb.*;
import it.cavallium.rockserver.core.impl.rocksdb.TransactionalDB.TransactionalOptions;
import it.cavallium.rockserver.core.impl.rocksdb.Tx;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
import java.io.ByteArrayInputStream;
@ -38,27 +24,25 @@ import java.lang.foreign.Arena;
import java.lang.foreign.MemorySegment;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;

import org.cliffc.high_scale_lib.NonBlockingHashMapLong;
import org.github.gestalt.config.exceptions.GestaltException;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.jetbrains.annotations.VisibleForTesting;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import org.rocksdb.*;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;
import org.rocksdb.Status.Code;
import org.rocksdb.WriteOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -71,13 +55,19 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
private final Logger logger;
private final @Nullable Path path;
private final TransactionalDB db;
private final DBOptions dbOptions;
private final ColumnFamilyHandle columnSchemasColumnDescriptorHandle;
private final NonBlockingHashMapLong<ColumnInstance> columns;
private final Map<String, ColumnFamilyOptions> columnsConifg;
private final ConcurrentMap<String, Long> columnNamesIndex;
private final NonBlockingHashMapLong<Tx> txs;
private final NonBlockingHashMapLong<REntry<RocksIterator>> its;
private final SafeShutdown ops;
private final Object columnEditLock = new Object();
private final DatabaseConfig config;
private final RocksDBObjects refs;
private final @Nullable Cache cache;
private Path tempSSTsPath;

public EmbeddedDB(@Nullable Path path, String name, @Nullable Path embeddedConfigPath) throws IOException {
this.path = path;
@ -88,8 +78,19 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
this.columnNamesIndex = new ConcurrentHashMap<>();
this.ops = new SafeShutdown();
DatabaseConfig config = ConfigParser.parse(embeddedConfigPath);
this.db = RocksDBLoader.load(path, config, logger);
var existingColumnSchemasColumnDescriptorOptional = db
this.config = config;
var loadedDb = RocksDBLoader.load(path, config, logger);
this.db = loadedDb.db();
this.dbOptions = loadedDb.dbOptions();
this.refs = loadedDb.refs();
this.cache = loadedDb.cache();
this.columnsConifg = loadedDb.definitiveColumnFamilyOptionsMap();
try {
this.tempSSTsPath = config.global().tempSstPath();
} catch (GestaltException e) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.CONFIG_ERROR, "Can't get temp SST path");
}
var existingColumnSchemasColumnDescriptorOptional = db
.getStartupColumns()
.entrySet()
.stream()
@ -207,6 +208,18 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
if (retries >= 5000) {
throw new IllegalStateException("Can't find column in column names index: " + name);
}

ColumnFamilyOptions columnConfig;
while ((columnConfig = this.columnsConifg.remove(name)) == null && retries++ < 5_000) {
Thread.yield();
}
if (columnConfig != null) {
columnConfig.close();
}
if (retries >= 5000) {
throw new IllegalStateException("Can't find column in column names index: " + name);
}

return col;
}

@ -217,6 +230,7 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
ops.closeAndWait(MAX_TRANSACTION_DURATION_MS);
columnSchemasColumnDescriptorHandle.close();
db.close();
refs.close();
if (path == null) {
Utils.deleteDirectory(db.getPath());
}
@ -335,12 +349,19 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
}
} else {
try {
var options = RocksDBLoader.getColumnOptions(name, this.config.global(),
logger, this.refs, path == null, cache);
var prev = columnsConifg.put(name, options);
if (prev != null) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.COLUMN_CREATE_FAIL,
"ColumnsConfig already exists with name \"" + name + "\"");
}
byte[] key = name.getBytes(StandardCharsets.UTF_8);
var cf = db.get().createColumnFamily(new ColumnFamilyDescriptor(key));
var cf = db.get().createColumnFamily(new ColumnFamilyDescriptor(key, options));
byte[] value = encodeColumnSchema(schema);
db.get().put(columnSchemasColumnDescriptorHandle, key, value);
return registerColumn(new ColumnInstance(cf, schema));
} catch (RocksDBException e) {
} catch (RocksDBException | GestaltException e) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.COLUMN_CREATE_FAIL, e);
}
}
@ -437,11 +458,173 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
return responses != null ? responses : List.of();
}

public CompletableFuture<Void> putBatchInternal(long columnId,
@NotNull Publisher<@NotNull KVBatch> batchPublisher,
@NotNull PutBatchMode mode) throws it.cavallium.rockserver.core.common.RocksDBException {
try {
var cf = new CompletableFuture<Void>();
batchPublisher.subscribe(new Subscriber<>() {
private boolean stopped;
private Subscription subscription;
private ColumnInstance col;
private ArrayList<AutoCloseable> refs;
private DBWriter writer;

@Override
public void onSubscribe(Subscription subscription) {
ops.beginOp();

try {
// Column id
col = getColumn(columnId);
refs = new ArrayList<>();

writer = switch (mode) {
case WRITE_BATCH, WRITE_BATCH_NO_WAL -> {
var wb = new WB(db.get(), new WriteBatch(), mode == PutBatchMode.WRITE_BATCH_NO_WAL);
refs.add(wb);
yield wb;
}
case SST_INGESTION, SST_INGEST_BEHIND -> {
var sstWriter = getSSTWriter(columnId, null, false, mode == PutBatchMode.SST_INGEST_BEHIND);
refs.add(sstWriter);
yield sstWriter;
}
};
} catch (Throwable ex) {
doFinally();
throw ex;
}
this.subscription = subscription;
subscription.request(1);
}

@Override
public void onNext(KVBatch kvBatch) {
if (stopped) {
return;
}
var keyIt = kvBatch.keys().iterator();
var valueIt = kvBatch.values().iterator();
try (var arena = Arena.ofConfined()) {
while (keyIt.hasNext()) {
var key = keyIt.next();
var value = valueIt.next();
put(arena, writer, col, 0, key, value, RequestType.none());
}
} catch (it.cavallium.rockserver.core.common.RocksDBException ex) {
doFinally();
throw ex;
} catch (Exception ex) {
doFinally();
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.PUT_UNKNOWN_ERROR, ex);
}
subscription.request(1);
}

@Override
public void onError(Throwable throwable) {
cf.completeExceptionally(throwable);
doFinally();
}

@Override
public void onComplete() {
try {
try {
writer.writePending();
} catch (Throwable ex) {
cf.completeExceptionally(ex);
return;
}
cf.complete(null);
} finally {
doFinally();
}
}

private void doFinally() {
stopped = true;
for (int i = refs.size() - 1; i >= 0; i--) {
try {
var c = refs.get(i);
if (c instanceof AbstractImmutableNativeReference fr) {
if (fr.isOwningHandle()) {
c.close();
}
} else {
c.close();
}
} catch (Exception ex) {
logger.error("Failed to close reference during batch write", ex);
}
}
ops.endOp();
}
});
return cf;
} catch (it.cavallium.rockserver.core.common.RocksDBException ex) {
throw ex;
} catch (Exception ex) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.PUT_UNKNOWN_ERROR, ex);
}
}

@VisibleForTesting
public SSTWriter getSSTWriter(long colId,
@Nullable GlobalDatabaseConfig globalDatabaseConfigOverride,
boolean forceNoOptions,
boolean ingestBehind) throws it.cavallium.rockserver.core.common.RocksDBException {
try {
var col = getColumn(colId);
ColumnFamilyOptions columnConifg;
RocksDBObjects refs;
if (!forceNoOptions) {
var name = new String(col.cfh().getName(), StandardCharsets.UTF_8);
refs = new RocksDBObjects();
if (globalDatabaseConfigOverride != null) {
columnConifg = RocksDBLoader.getColumnOptions(name, globalDatabaseConfigOverride, logger, refs, false, null);
} else {
try {
columnConifg = RocksDBLoader.getColumnOptions(name, this.config.global(), logger, refs, false, null);
} catch (GestaltException e) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.CONFIG_ERROR, e);
}
refs = null;
}
} else {
columnConifg = null;
refs = null;
}
if (Files.notExists(tempSSTsPath)) {
Files.createDirectories(tempSSTsPath);
}
return SSTWriter.open(tempSSTsPath, db, col, columnConifg, forceNoOptions, ingestBehind, refs);
} catch (IOException ex) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.SST_WRITE_2, ex);
} catch (RocksDBException ex) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.SST_WRITE_3, ex);
}
}

@Override
public void putBatch(long columnId,
@NotNull Publisher<@NotNull KVBatch> batchPublisher,
@NotNull PutBatchMode mode) throws it.cavallium.rockserver.core.common.RocksDBException {
try {
putBatchInternal(columnId, batchPublisher, mode).get();
} catch (it.cavallium.rockserver.core.common.RocksDBException ex) {
throw ex;
} catch (Exception ex) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.PUT_UNKNOWN_ERROR, ex);
}
}

/**
* @param txConsumer this can be called multiple times, if the optimistic transaction failed
*/
public <R> R wrapWithTransactionIfNeeded(@Nullable Tx tx, boolean needTransaction,
ExFunction<@Nullable Tx, R> txConsumer) throws Exception {
public <T extends DBWriter, R> R wrapWithTransactionIfNeeded(@Nullable T tx, boolean needTransaction,
ExFunction<@Nullable T, R> txConsumer) throws Exception {
if (needTransaction) {
return ensureWrapWithTransaction(tx, txConsumer);
} else {
@ -453,8 +636,8 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
/**
* @param txConsumer this can be called multiple times, if the optimistic transaction failed
*/
public <R> R ensureWrapWithTransaction(@Nullable Tx tx,
ExFunction<@NotNull Tx, R> txConsumer) throws Exception {
public <T extends DBWriter, R> R ensureWrapWithTransaction(@Nullable T tx,
ExFunction<@NotNull T, R> txConsumer) throws Exception {
R result;
if (tx == null) {
// Retry using a transaction: transactions are required to handle this kind of data
@ -462,7 +645,8 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
try {
boolean committed;
do {
result = txConsumer.apply(newTx);
//noinspection unchecked
result = txConsumer.apply((T) newTx);
committed = this.closeTransaction(newTx, true);
if (!committed) {
Thread.yield();
@ -478,7 +662,7 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
}

private <U> U put(Arena arena,
@Nullable Tx optionalTxOrUpdate,
@Nullable DBWriter optionalDbWriter,
ColumnInstance col,
long updateId,
@NotNull Keys keys,
@ -492,49 +676,55 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
boolean needsTx = col.hasBuckets()
|| requirePreviousValue
|| requirePreviousPresence;
if (optionalTxOrUpdate != null && optionalTxOrUpdate.isFromGetForUpdate() && (requirePreviousValue || requirePreviousPresence)) {
if (optionalDbWriter instanceof Tx tx && tx.isFromGetForUpdate() && (requirePreviousValue || requirePreviousPresence)) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.PUT_INVALID_REQUEST,
"You can't get the previous value or delta, when you are already updating that value");
}
if (updateId != 0L && optionalTxOrUpdate == null) {
if (updateId != 0L && !(optionalDbWriter instanceof Tx)) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.PUT_INVALID_REQUEST,
"Update id must be accompanied with a valid transaction");
}
return wrapWithTransactionIfNeeded(optionalTxOrUpdate, needsTx, tx -> {
if (col.hasBuckets() && (optionalDbWriter != null && !(optionalDbWriter instanceof Tx))) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.PUT_INVALID_REQUEST,
"Column with buckets don't support write batches");
}
return wrapWithTransactionIfNeeded(optionalDbWriter, needsTx, dbWriter -> {
MemorySegment previousValue;
MemorySegment calculatedKey = col.calculateKey(arena, keys.keys());
if (updateId != 0L) {
assert tx != null;
tx.val().setSavePoint();
assert dbWriter instanceof Tx;
((Tx) dbWriter).val().setSavePoint();
}
if (col.hasBuckets()) {
assert tx != null;
assert dbWriter instanceof Tx;
var bucketElementKeys = col.getBucketElementKeys(keys.keys());
try (var readOptions = new ReadOptions()) {
var previousRawBucketByteArray = tx.val().getForUpdate(readOptions, col.cfh(), calculatedKey.toArray(BIG_ENDIAN_BYTES), true);
var previousRawBucketByteArray = ((Tx) dbWriter).val().getForUpdate(readOptions, col.cfh(), calculatedKey.toArray(BIG_ENDIAN_BYTES), true);
MemorySegment previousRawBucket = toMemorySegment(arena, previousRawBucketByteArray);
var bucket = previousRawBucket != null ? new Bucket(col, previousRawBucket) : new Bucket(col);
previousValue = transformResultValue(col, bucket.addElement(bucketElementKeys, value));
tx.val().put(col.cfh(), Utils.toByteArray(calculatedKey), Utils.toByteArray(bucket.toSegment(arena)));
var k = Utils.toByteArray(calculatedKey);
var v = Utils.toByteArray(bucket.toSegment(arena));
((Tx) dbWriter).val().put(col.cfh(), k, v);
} catch (RocksDBException e) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.PUT_1, e);
}
} else {
if (RequestType.requiresGettingPreviousValue(callback)) {
assert tx != null;
assert dbWriter instanceof Tx;
try (var readOptions = new ReadOptions()) {
byte[] previousValueByteArray;
previousValueByteArray = tx.val().getForUpdate(readOptions, col.cfh(), calculatedKey.toArray(BIG_ENDIAN_BYTES), true);
previousValueByteArray = ((Tx) dbWriter).val().getForUpdate(readOptions, col.cfh(), calculatedKey.toArray(BIG_ENDIAN_BYTES), true);
previousValue = transformResultValue(col, toMemorySegment(arena, previousValueByteArray));
} catch (RocksDBException e) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.PUT_2, e);
}
} else if (RequestType.requiresGettingPreviousPresence(callback)) {
// todo: in the future this should be replaced with just keyExists
assert tx != null;
assert dbWriter instanceof Tx;
try (var readOptions = new ReadOptions()) {
byte[] previousValueByteArray;
previousValueByteArray = tx.val().getForUpdate(readOptions, col.cfh(), calculatedKey.toArray(BIG_ENDIAN_BYTES), true);
previousValueByteArray = ((Tx) dbWriter).val().getForUpdate(readOptions, col.cfh(), calculatedKey.toArray(BIG_ENDIAN_BYTES), true);
previousValue = previousValueByteArray != null ? MemorySegment.NULL : null;
} catch (RocksDBException e) {
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.PUT_2, e);
@ -542,13 +732,20 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
} else {
previousValue = null;
}
if (tx != null) {
tx.val().put(col.cfh(), Utils.toByteArray(calculatedKey), Utils.toByteArray(value));
} else {
try (var w = new WriteOptions()) {
switch (dbWriter) {
case WB wb -> wb.wb().put(col.cfh(), Utils.toByteArray(calculatedKey), Utils.toByteArray(value));
case SSTWriter sstWriter -> {
var keyBB = calculatedKey.asByteBuffer();
ByteBuffer valueBB = (col.schema().hasValue() ? value : Utils.dummyEmptyValue()).asByteBuffer();
db.get().put(col.cfh(), w, keyBB, valueBB);
sstWriter.put(keyBB, valueBB);
}
case Tx t -> t.val().put(col.cfh(), Utils.toByteArray(calculatedKey), Utils.toByteArray(value));
case null -> {
try (var w = new WriteOptions()) {
var keyBB = calculatedKey.asByteBuffer();
ByteBuffer valueBB = (col.schema().hasValue() ? value : Utils.dummyEmptyValue()).asByteBuffer();
db.get().put(col.cfh(), w, keyBB, valueBB);
}
}
}
}
@ -562,8 +759,8 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {

if (updateId != 0L) {
if (!closeTransaction(updateId, true)) {
tx.val().rollbackToSavePoint();
tx.val().undoGetForUpdate(col.cfh(), Utils.toByteArray(calculatedKey));
((Tx) dbWriter).val().rollbackToSavePoint();
((Tx) dbWriter).val().undoGetForUpdate(col.cfh(), Utils.toByteArray(calculatedKey));
throw new RocksDBRetryException();
}
}
@ -833,4 +1030,13 @@ public class EmbeddedDB implements RocksDBSyncAPI, Closeable {
return path;
}

@VisibleForTesting
public TransactionalDB getDb() {
return db;
}

@VisibleForTesting
public DatabaseConfig getConfig() {
return config;
}
}
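putBatch drives the subscriber in putBatchInternal with plain Reactive Streams, so any Publisher works and batches are requested one at a time. A minimal single-batch publisher, sketched; KVBatch construction is left abstract because its factory is not shown in this diff:

import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

class SingleBatchPublisher implements Publisher<KVBatch> {
    private final KVBatch batch;

    SingleBatchPublisher(KVBatch batch) {
        this.batch = batch;
    }

    @Override
    public void subscribe(Subscriber<? super KVBatch> s) {
        s.onSubscribe(new Subscription() {
            private boolean done;

            @Override
            public void request(long n) {
                if (done || n <= 0) {
                    return;
                }
                done = true;
                s.onNext(batch); // EmbeddedDB requests exactly one batch at a time
                s.onComplete();  // triggers writer.writePending() and completes the future
            }

            @Override
            public void cancel() {
                done = true;
            }
        });
    }
}

// Usage sketch: db.putBatch(columnId, new SingleBatchPublisher(batch), PutBatchMode.WRITE_BATCH);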
@ -0,0 +1,10 @@
package it.cavallium.rockserver.core.impl.rocksdb;

import org.rocksdb.RocksDB;

public sealed interface DBWriter permits SSTWriter, Tx, WB {
/**
* Writes any pending KV pairs to the DB
*/
void writePending() throws it.cavallium.rockserver.core.common.RocksDBException;
}
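Because DBWriter is sealed over exactly SSTWriter, Tx, and WB, call sites can pattern-match exhaustively; the reworked put(...) in EmbeddedDB relies on this. A standalone sketch of the dispatch shape (the description strings are illustrative):

class DBWriterDispatch {
    static String describe(DBWriter writer) {
        // No default branch needed: the compiler checks exhaustiveness for sealed types.
        return switch (writer) {
            case WB wb -> "buffered WriteBatch";
            case SSTWriter sst -> "direct SST file writer";
            case Tx tx -> "optimistic transaction";
        };
    }
}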
@ -5,14 +5,8 @@ import it.cavallium.rockserver.core.config.*;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.SequencedMap;
|
||||
import java.util.logging.Level;
|
||||
import java.util.*;
|
||||
|
||||
import org.github.gestalt.config.exceptions.GestaltException;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
@ -49,6 +43,21 @@ public class RocksDBLoader {
|
||||
private static final String bugFallbackJniLibraryFileName = Environment.getFallbackJniLibraryFileName("rocksdbjni");
|
||||
|
||||
public static void loadLibrary() {
|
||||
for (final CompressionType compressionType : CompressionType.values()) {
|
||||
try {
|
||||
if (compressionType.getLibraryName() != null) {
|
||||
System.loadLibrary(compressionType.getLibraryName());
|
||||
}
|
||||
} catch (final UnsatisfiedLinkError e) {
|
||||
if (compressionType == CompressionType.LZ4_COMPRESSION) {
|
||||
throw new IllegalStateException("Can't load LZ4", e);
|
||||
}
|
||||
if (compressionType == CompressionType.ZSTD_COMPRESSION) {
|
||||
throw new IllegalStateException("Can't load ZSTD", e);
|
||||
}
|
||||
// since it may be optional, we ignore its loading failure here.
|
||||
}
|
||||
}
|
||||
try {
|
||||
String currentUsersHomeDir = System.getProperty("user.home");
|
||||
var jniPath = Path.of(currentUsersHomeDir).resolve(".jni").resolve("rocksdb").resolve(RocksDBMetadata.getRocksDBVersionHash());
|
||||
@ -90,8 +99,223 @@ public class RocksDBLoader {
|
||||
throw new RuntimeException("rocksdb was not found inside JAR.");
|
||||
}
|
||||
|
||||
public record LoadedDb(TransactionalDB db, DBOptions dbOptions,
|
||||
Map<String, ColumnFamilyOptions> definitiveColumnFamilyOptionsMap, RocksDBObjects refs,
|
||||
@Nullable Cache cache) {}
|
||||
|
||||
public static TransactionalDB load(@Nullable Path path, DatabaseConfig config, Logger logger) {
|
||||
public static ColumnFamilyOptions getColumnOptions(String name,
|
||||
GlobalDatabaseConfig globalDatabaseConfig,
|
||||
Logger logger,
|
||||
RocksDBObjects refs,
|
||||
boolean inMemory,
|
||||
@Nullable Cache cache) {
|
||||
try {
|
||||
var columnFamilyOptions = new ColumnFamilyOptions();
|
||||
refs.add(columnFamilyOptions);
|
||||
|
||||
FallbackColumnConfig columnOptions = null;
|
||||
for (NamedColumnConfig namedColumnConfig : globalDatabaseConfig.columnOptions()) {
|
||||
if (namedColumnConfig.name().equals(name)) {
|
||||
columnOptions = namedColumnConfig;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (columnOptions == null) {
|
||||
columnOptions = globalDatabaseConfig.fallbackColumnOptions();
|
||||
}
|
||||
|
||||
//noinspection ConstantConditions
|
||||
if (columnOptions.memtableMemoryBudgetBytes() != null) {
|
||||
// about 512MB of ram will be used for level style compaction
|
||||
columnFamilyOptions.optimizeLevelStyleCompaction(Optional.ofNullable(columnOptions.memtableMemoryBudgetBytes())
|
||||
.map(DataSize::longValue)
|
||||
.orElse(DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET));
|
||||
}
|
||||
|
||||
if (isDisableAutoCompactions()) {
|
||||
columnFamilyOptions.setDisableAutoCompactions(true);
|
||||
}
|
||||
try {
|
||||
columnFamilyOptions.setPrepopulateBlobCache(PrepopulateBlobCache.PREPOPULATE_BLOB_FLUSH_ONLY);
|
||||
} catch (Throwable ex) {
|
||||
logger.error("Failed to set prepopulate blob cache", ex);
|
||||
}
|
||||
|
||||
// This option is not supported with multiple db paths
|
||||
// https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
|
||||
// https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
|
||||
boolean dynamicLevelBytes = (globalDatabaseConfig.volumes() == null || globalDatabaseConfig.volumes().length <= 1)
|
||||
&& !globalDatabaseConfig.ingestBehind();
|
||||
if (dynamicLevelBytes) {
|
||||
columnFamilyOptions.setLevelCompactionDynamicLevelBytes(true);
|
||||
}
|
||||
|
||||
// https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
|
||||
columnFamilyOptions
|
||||
.setTargetFileSizeBase(256 * SizeUnit.MB)
|
||||
.setMaxBytesForLevelBase(SizeUnit.GB);
|
||||
|
||||
if (isDisableAutoCompactions()) {
|
||||
columnFamilyOptions.setLevel0FileNumCompactionTrigger(-1);
|
||||
} else if (!FOLLOW_ROCKSDB_OPTIMIZATIONS) {
|
||||
// ArangoDB uses a value of 2: https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
|
||||
// Higher values speed up writes, but slow down reads
|
||||
columnFamilyOptions.setLevel0FileNumCompactionTrigger(2);
|
||||
}
|
||||
if (isDisableSlowdown()) {
|
||||
columnFamilyOptions.setLevel0SlowdownWritesTrigger(-1);
|
||||
columnFamilyOptions.setLevel0StopWritesTrigger(Integer.MAX_VALUE);
|
||||
columnFamilyOptions.setHardPendingCompactionBytesLimit(Long.MAX_VALUE);
|
||||
columnFamilyOptions.setSoftPendingCompactionBytesLimit(Long.MAX_VALUE);
|
||||
}
|
||||
{
|
||||
// https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
|
||||
columnFamilyOptions.setLevel0SlowdownWritesTrigger(20);
|
||||
// https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
|
||||
columnFamilyOptions.setLevel0StopWritesTrigger(36);
|
||||
}
|
||||
|
||||
if (columnOptions.levels().length > 0) {
|
||||
columnFamilyOptions.setNumLevels(columnOptions.levels().length);
|
||||
|
||||
List<CompressionType> compressionPerLevel = new ArrayList<>();
|
||||
for (ColumnLevelConfig columnLevelConfig : columnOptions.levels()) {
|
||||
CompressionType compression = columnLevelConfig.compression();
|
||||
compressionPerLevel.add(compression);
|
||||
}
|
||||
if (compressionPerLevel.size() != columnOptions.levels().length) {
|
||||
throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.CONFIG_ERROR, "Database column levels and compression per level count is different! %s != %s".formatted(compressionPerLevel.size(), columnOptions.levels().length));
|
||||
}
|
||||
columnFamilyOptions.setCompressionPerLevel(compressionPerLevel);
|
||||
|
||||
var firstLevelOptions = getRocksLevelOptions(columnOptions.levels()[0], refs);
|
||||
columnFamilyOptions.setCompressionType(firstLevelOptions.compressionType);
|
||||
columnFamilyOptions.setCompressionOptions(firstLevelOptions.compressionOptions);
|
||||
|
||||
var lastLevelOptions = getRocksLevelOptions(columnOptions
|
||||
.levels()[columnOptions.levels().length - 1], refs);
|
||||
columnFamilyOptions.setBottommostCompressionType(lastLevelOptions.compressionType);
|
||||
columnFamilyOptions.setBottommostCompressionOptions(lastLevelOptions.compressionOptions);
|
||||
} else {
|
||||
columnFamilyOptions.setNumLevels(7);
|
||||
List<CompressionType> compressionTypes = new ArrayList<>(7);
|
||||
for (int i = 0; i < 7; i++) {
|
||||
if (i < 2) {
|
||||
compressionTypes.add(CompressionType.NO_COMPRESSION);
|
||||
} else {
|
||||
compressionTypes.add(CompressionType.LZ4_COMPRESSION);
|
||||
}
|
||||
}
|
||||
columnFamilyOptions.setBottommostCompressionType(CompressionType.LZ4HC_COMPRESSION);
|
||||
var compressionOptions = new CompressionOptions()
|
||||
.setEnabled(true)
|
||||
.setMaxDictBytes(Math.toIntExact(32 * SizeUnit.KB));
|
||||
refs.add(compressionOptions);
|
||||
setZstdCompressionOptions(compressionOptions);
|
||||
columnFamilyOptions.setBottommostCompressionOptions(compressionOptions);
|
||||
columnFamilyOptions.setCompressionPerLevel(compressionTypes);
|
||||
}
|
||||
|
||||
final BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
|
||||
|
||||
if (!FOLLOW_ROCKSDB_OPTIMIZATIONS) {
|
||||
columnFamilyOptions.setWriteBufferSize(256 * SizeUnit.MB);
|
||||
}
|
||||
Optional.ofNullable(columnOptions.writeBufferSize())
|
||||
.map(DataSize::longValue)
|
||||
.ifPresent(columnFamilyOptions::setWriteBufferSize);
|
||||
|
||||
columnFamilyOptions.setMaxWriteBufferNumberToMaintain(1);
|
||||
if (tableOptions instanceof BlockBasedTableConfig blockBasedTableConfig) {
|
||||
blockBasedTableConfig.setVerifyCompression(false);
|
||||
}
|
||||
// If OptimizeFiltersForHits == true: memory size = bitsPerKey * (totalKeys * 0.1)
|
||||
// If OptimizeFiltersForHits == false: memory size = bitsPerKey * totalKeys
|
||||
BloomFilterConfig filter = null;
|
||||
BloomFilterConfig bloomFilterConfig = columnOptions.bloomFilter();
|
||||
if (bloomFilterConfig != null) filter = bloomFilterConfig;
|
||||
if (filter == null) {
|
||||
if (inMemory) {
|
||||
throw it.cavallium.rockserver.core.common.RocksDBException.of(it.cavallium.rockserver.core.common.RocksDBException.RocksDBErrorType.CONFIG_ERROR, "Please set a bloom filter. It's required for in-memory databases");
|
||||
}
|
||||
if (tableOptions instanceof BlockBasedTableConfig blockBasedTableConfig) {
|
||||
blockBasedTableConfig.setFilterPolicy(null);
|
||||
}
|
||||
} else {
|
||||
final BloomFilter bloomFilter = new BloomFilter(filter.bitsPerKey());
|
||||
refs.add(bloomFilter);
|
||||
if (tableOptions instanceof BlockBasedTableConfig blockBasedTableConfig) {
|
||||
blockBasedTableConfig.setFilterPolicy(bloomFilter);
|
||||
}
|
||||
}
|
||||
boolean cacheIndexAndFilterBlocks = !inMemory && Optional.ofNullable(columnOptions.cacheIndexAndFilterBlocks())
|
||||
					// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
					.orElse(true);
			if (globalDatabaseConfig.spinning()) {
				// https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
				columnFamilyOptions.setMinWriteBufferNumberToMerge(3);
				// https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
				columnFamilyOptions.setMaxWriteBufferNumber(4);
			}
			if (tableOptions instanceof BlockBasedTableConfig blockBasedTableConfig) {
				blockBasedTableConfig
						// http://rocksdb.org/blog/2018/08/23/data-block-hash-index.html
						.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinaryAndHash)
						// http://rocksdb.org/blog/2018/08/23/data-block-hash-index.html
						.setDataBlockHashTableUtilRatio(0.75)
						// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
						.setPinTopLevelIndexAndFilter(true)
						// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
						.setPinL0FilterAndIndexBlocksInCache(!inMemory)
						// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
						.setCacheIndexAndFilterBlocksWithHighPriority(true)
						.setCacheIndexAndFilterBlocks(cacheIndexAndFilterBlocks)
						// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
						// Enabling partition filters increases the reads by 2x
						.setPartitionFilters(Optional.ofNullable(columnOptions.partitionFilters()).orElse(false))
						// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
						.setIndexType(inMemory ? IndexType.kHashSearch : Optional.ofNullable(columnOptions.partitionFilters()).orElse(false) ? IndexType.kTwoLevelIndexSearch : IndexType.kBinarySearch)
						.setChecksumType(inMemory ? ChecksumType.kNoChecksum : ChecksumType.kXXH3)
						// Spinning disks: 64KiB to 256KiB (also 512KiB). SSDs: 16KiB
						// https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
						// https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
						.setBlockSize(inMemory ? 4 * SizeUnit.KB : Optional.ofNullable(columnOptions.blockSize())
								.map(DataSize::longValue)
								.orElse((globalDatabaseConfig.spinning() ? 128 : 16) * SizeUnit.KB))
						.setBlockCache(cache)
						.setNoBlockCache(cache == null);
			}
			if (inMemory) {
				columnFamilyOptions.useCappedPrefixExtractor(4);
				tableOptions.setBlockRestartInterval(4);
			}

			columnFamilyOptions.setTableFormatConfig(tableOptions);
			columnFamilyOptions.setCompactionPriority(CompactionPriority.MinOverlappingRatio);
			// https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
			// https://github.com/EighteenZi/rocksdb_wiki/blob/master/RocksDB-Tuning-Guide.md#throughput-gap-between-random-read-vs-sequential-read-is-much-higher-in-spinning-disks-suggestions=
			BloomFilterConfig bloomFilterOptions = columnOptions.bloomFilter();
			if (bloomFilterOptions != null) {
				// https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
				// https://github.com/EighteenZi/rocksdb_wiki/blob/master/RocksDB-Tuning-Guide.md#throughput-gap-between-random-read-vs-sequential-read-is-much-higher-in-spinning-disks-suggestions=
				boolean optimizeForHits = globalDatabaseConfig.spinning();
				Boolean value = bloomFilterOptions.optimizeForHits();
				if (value != null) optimizeForHits = value;
				columnFamilyOptions.setOptimizeFiltersForHits(optimizeForHits);
			}
			return columnFamilyOptions;
		} catch (GestaltException ex) {
			throw it.cavallium.rockserver.core.common.RocksDBException.of(it.cavallium.rockserver.core.common.RocksDBException.RocksDBErrorType.ROCKSDB_CONFIG_ERROR, ex);
		}
	}
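A note on the Partitioned-Index-Filters wiki links that recur above: the cited setup boils down to a handful of plain RocksJava calls. The sketch below is illustrative only and not part of this patch; `setMetadataBlockSize` (the index/filter partition size) is an assumption taken from the wiki's example, not from this repository.

	// Minimal sketch of the wiki's partitioned index/filter recipe (assumed values).
	BlockBasedTableConfig sketchTable = new BlockBasedTableConfig()
			.setIndexType(IndexType.kTwoLevelIndexSearch)  // partitioned index
			.setPartitionFilters(true)                     // partitioned bloom filters
			.setMetadataBlockSize(4096)                    // partition block size, wiki default
			.setPinTopLevelIndexAndFilter(true)
			.setCacheIndexAndFilterBlocksWithHighPriority(true)
			.setCacheIndexAndFilterBlocks(true);
	ColumnFamilyOptions sketchOptions = new ColumnFamilyOptions()
			.setTableFormatConfig(sketchTable);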

	private static void setZstdCompressionOptions(CompressionOptions compressionOptions) {
		// https://rocksdb.org/blog/2021/05/31/dictionary-compression.html#:~:text=(zstd%20only,it%20to%20100x
		compressionOptions
				.setZStdMaxTrainBytes(compressionOptions.maxDictBytes() * 100);
	}
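For context, a hedged sketch of how `setZstdCompressionOptions` combines with the dictionary size: the 100x training ratio comes from the dictionary-compression post linked above, while the 32 KiB dictionary here is an arbitrary illustrative value, not a setting from this patch.

	// Sketch only: zstd bottommost compression with a 32 KiB dictionary,
	// trained on ~100x the dictionary size as the blog post suggests.
	CompressionOptions zstdOptions = new CompressionOptions()
			.setEnabled(true)
			.setMaxDictBytes(32 * 1024);
	zstdOptions.setZStdMaxTrainBytes(zstdOptions.maxDictBytes() * 100); // 3.2 MiB of training data
	ColumnFamilyOptions zstdCfOptions = new ColumnFamilyOptions()
			.setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION)
			.setBottommostCompressionOptions(zstdOptions);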

	public static LoadedDb load(@Nullable Path path, DatabaseConfig config, Logger logger) {
		var refs = new RocksDBObjects();
		// Get databases directory path
		Path definitiveDbPath;
@@ -132,7 +356,9 @@ public class RocksDBLoader {
			refs.add(options);
			options.setParanoidChecks(PARANOID_CHECKS);
			options.setSkipCheckingSstFileSizesOnDbOpen(true);
			options.setEnablePipelinedWrite(true);
			if (!databaseOptions.global().unorderedWrite()) {
				options.setEnablePipelinedWrite(true);
			}
			var maxSubCompactions = Integer.parseInt(System.getProperty("it.cavallium.dbengine.compactions.max.sub", "-1"));
			if (maxSubCompactions > 0) {
				options.setMaxSubcompactions(maxSubCompactions);
@@ -260,6 +486,10 @@ public class RocksDBLoader {
				options.setUseDirectIoForFlushAndCompaction(true);
			}

			options
					.setAllowIngestBehind(databaseOptions.global().ingestBehind())
					.setUnorderedWrite(databaseOptions.global().unorderedWrite());

			return new OptionsWithCache(options, blockCache);
		} catch (GestaltException e) {
			throw it.cavallium.rockserver.core.common.RocksDBException.of(it.cavallium.rockserver.core.common.RocksDBException.RocksDBErrorType.ROCKSDB_CONFIG_ERROR, e);
@@ -289,19 +519,22 @@ public class RocksDBLoader {
				.stream()
				.map(volumeConfig -> {
					try {
						return new DbPathRecord(definitiveDbPath.resolve(volumeConfig.volumePath()), volumeConfig.targetSize().longValue());
					} catch (GestaltException e) {
						var volumePath = volumeConfig.volumePath();
						Objects.requireNonNull(volumePath, "volumePath is null");
						return new DbPathRecord(definitiveDbPath.resolve(volumePath), volumeConfig.targetSize().longValue());
					} catch (NullPointerException | GestaltException e) {
						throw it.cavallium.rockserver.core.common.RocksDBException.of(RocksDBErrorType.CONFIG_ERROR, "Failed to load volume configurations", e);
					}
				})
				.toList();
	}

	private static TransactionalDB loadDb(@Nullable Path path,
	private static LoadedDb loadDb(@Nullable Path path,
			@NotNull Path definitiveDbPath,
			DatabaseConfig databaseOptions, OptionsWithCache optionsWithCache, RocksDBObjects refs, Logger logger) {
		var inMemory = path == null;
		var rocksdbOptions = optionsWithCache.options();
		Map<String, ColumnFamilyOptions> definitiveColumnFamilyOptionsMap = new HashMap<>();
		try {
			List<DbPathRecord> volumeConfigs = getVolumeConfigs(definitiveDbPath, databaseOptions);
			List<ColumnFamilyDescriptor> descriptors = new ArrayList<>();
@@ -350,204 +583,12 @@ public class RocksDBLoader {

			for (Map.Entry<String, FallbackColumnConfig> entry : columnConfigMap.entrySet()) {
				String name = entry.getKey();
				FallbackColumnConfig columnOptions = entry.getValue();
				if (columnOptions instanceof NamedColumnConfig namedColumnConfig && !namedColumnConfig.name().equals(name)) {
					throw it.cavallium.rockserver.core.common.RocksDBException.of(it.cavallium.rockserver.core.common.RocksDBException.RocksDBErrorType.CONFIG_ERROR, "Wrong column config name: " + name);
				}

				var columnFamilyOptions = new ColumnFamilyOptions();
				var columnFamilyOptions = getColumnOptions(name, databaseOptions.global(),
						logger, refs, path == null, optionsWithCache.standardCache());
				refs.add(columnFamilyOptions);

				//noinspection ConstantConditions
				if (columnOptions.memtableMemoryBudgetBytes() != null) {
					// about 512MB of RAM will be used for level style compaction
					columnFamilyOptions.optimizeLevelStyleCompaction(Optional.ofNullable(columnOptions.memtableMemoryBudgetBytes())
							.map(DataSize::longValue)
							.orElse(DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET));
				}

				if (isDisableAutoCompactions()) {
					columnFamilyOptions.setDisableAutoCompactions(true);
				}
				try {
					columnFamilyOptions.setPrepopulateBlobCache(PrepopulateBlobCache.PREPOPULATE_BLOB_FLUSH_ONLY);
				} catch (Throwable ex) {
					logger.error("Failed to set prepopulate blob cache", ex);
				}

				// This option is not supported with multiple db paths
				// https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
				// https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
				boolean dynamicLevelBytes = volumeConfigs.size() <= 1;
				if (dynamicLevelBytes) {
					columnFamilyOptions.setLevelCompactionDynamicLevelBytes(true);
					columnFamilyOptions.setMaxBytesForLevelBase(10 * SizeUnit.GB);
					columnFamilyOptions.setMaxBytesForLevelMultiplier(10);
				} else {
					// https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
					// https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
					columnFamilyOptions.setMaxBytesForLevelBase(256 * SizeUnit.MB);
					// https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
					columnFamilyOptions.setMaxBytesForLevelMultiplier(10);
				}
				if (isDisableAutoCompactions()) {
					columnFamilyOptions.setLevel0FileNumCompactionTrigger(-1);
				} else if (!FOLLOW_ROCKSDB_OPTIMIZATIONS) {
					// ArangoDB uses a value of 2: https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
					// Higher values speed up writes, but slow down reads
					columnFamilyOptions.setLevel0FileNumCompactionTrigger(2);
				}
				if (isDisableSlowdown()) {
					columnFamilyOptions.setLevel0SlowdownWritesTrigger(-1);
					columnFamilyOptions.setLevel0StopWritesTrigger(Integer.MAX_VALUE);
					columnFamilyOptions.setHardPendingCompactionBytesLimit(Long.MAX_VALUE);
					columnFamilyOptions.setSoftPendingCompactionBytesLimit(Long.MAX_VALUE);
				}
				{
					// https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
					columnFamilyOptions.setLevel0SlowdownWritesTrigger(20);
					// https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
					columnFamilyOptions.setLevel0StopWritesTrigger(36);
				}

				if (columnOptions.levels().length > 0) {
					columnFamilyOptions.setNumLevels(columnOptions.levels().length);
					var firstLevelOptions = getRocksLevelOptions(columnOptions.levels()[0], refs);
					columnFamilyOptions.setCompressionType(firstLevelOptions.compressionType);
					columnFamilyOptions.setCompressionOptions(firstLevelOptions.compressionOptions);

					var lastLevelOptions = getRocksLevelOptions(columnOptions
							.levels()[columnOptions.levels().length - 1], refs);
					columnFamilyOptions.setBottommostCompressionType(lastLevelOptions.compressionType);
					columnFamilyOptions.setBottommostCompressionOptions(lastLevelOptions.compressionOptions);

					List<CompressionType> compressionPerLevel = new ArrayList<>();
					for (ColumnLevelConfig columnLevelConfig : columnOptions.levels()) {
						CompressionType compression = columnLevelConfig.compression();
						compressionPerLevel.add(compression);
					}
					columnFamilyOptions.setCompressionPerLevel(compressionPerLevel);
				} else {
					columnFamilyOptions.setNumLevels(7);
					List<CompressionType> compressionTypes = new ArrayList<>(7);
					for (int i = 0; i < 7; i++) {
						if (i < 2) {
							compressionTypes.add(CompressionType.NO_COMPRESSION);
						} else {
							compressionTypes.add(CompressionType.LZ4_COMPRESSION);
						}
					}
					columnFamilyOptions.setBottommostCompressionType(CompressionType.LZ4HC_COMPRESSION);
					var compressionOptions = new CompressionOptions()
							.setEnabled(true)
							.setMaxDictBytes(32768);
					refs.add(compressionOptions);
					columnFamilyOptions.setBottommostCompressionOptions(compressionOptions);
					columnFamilyOptions.setCompressionPerLevel(compressionTypes);
				}

				final BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();

				if (!FOLLOW_ROCKSDB_OPTIMIZATIONS) {
					columnFamilyOptions.setWriteBufferSize(256 * SizeUnit.MB);
				}
				Optional.ofNullable(columnOptions.writeBufferSize())
						.map(DataSize::longValue)
						.ifPresent(columnFamilyOptions::setWriteBufferSize);

				columnFamilyOptions.setMaxWriteBufferNumberToMaintain(1);
				if (tableOptions instanceof BlockBasedTableConfig blockBasedTableConfig) {
					blockBasedTableConfig.setVerifyCompression(false);
				}
				// If OptimizeFiltersForHits == true: memory size = bitsPerKey * (totalKeys * 0.1)
				// If OptimizeFiltersForHits == false: memory size = bitsPerKey * totalKeys
				BloomFilterConfig filter = null;
				BloomFilterConfig bloomFilterConfig = columnOptions.bloomFilter();
				if (bloomFilterConfig != null) filter = bloomFilterConfig;
				if (filter == null) {
					if (path == null) {
						throw it.cavallium.rockserver.core.common.RocksDBException.of(it.cavallium.rockserver.core.common.RocksDBException.RocksDBErrorType.CONFIG_ERROR, "Please set a bloom filter. It's required for in-memory databases");
					}
					if (tableOptions instanceof BlockBasedTableConfig blockBasedTableConfig) {
						blockBasedTableConfig.setFilterPolicy(null);
					}
				} else {
					final BloomFilter bloomFilter = new BloomFilter(filter.bitsPerKey());
					refs.add(bloomFilter);
					if (tableOptions instanceof BlockBasedTableConfig blockBasedTableConfig) {
						blockBasedTableConfig.setFilterPolicy(bloomFilter);
					}
				}
				boolean cacheIndexAndFilterBlocks = path != null && Optional.ofNullable(columnOptions.cacheIndexAndFilterBlocks())
						// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
						.orElse(true);
				if (databaseOptions.global().spinning()) {
					if (!FOLLOW_ROCKSDB_OPTIMIZATIONS) {
						// https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
						// cacheIndexAndFilterBlocks = true;
						// https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
						columnFamilyOptions.setMinWriteBufferNumberToMerge(3);
						// https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
						columnFamilyOptions.setMaxWriteBufferNumber(4);
					}
				}
				if (tableOptions instanceof BlockBasedTableConfig blockBasedTableConfig) {
					blockBasedTableConfig
							// http://rocksdb.org/blog/2018/08/23/data-block-hash-index.html
							.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinaryAndHash)
							// http://rocksdb.org/blog/2018/08/23/data-block-hash-index.html
							.setDataBlockHashTableUtilRatio(0.75)
							// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
							.setPinTopLevelIndexAndFilter(true)
							// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
							.setPinL0FilterAndIndexBlocksInCache(path != null)
							// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
							.setCacheIndexAndFilterBlocksWithHighPriority(true)
							.setCacheIndexAndFilterBlocks(cacheIndexAndFilterBlocks)
							// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
							// Enabling partition filters increases the reads by 2x
							.setPartitionFilters(Optional.ofNullable(columnOptions.partitionFilters()).orElse(false))
							// https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
							.setIndexType(path == null ? IndexType.kHashSearch : Optional.ofNullable(columnOptions.partitionFilters()).orElse(false) ? IndexType.kTwoLevelIndexSearch : IndexType.kBinarySearch)
							.setChecksumType(path == null ? ChecksumType.kNoChecksum : ChecksumType.kXXH3)
							// Spinning disks: 64KiB to 256KiB (also 512KiB). SSDs: 16KiB
							// https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
							// https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
							.setBlockSize(path == null ? 4096 : Optional.ofNullable(columnOptions.blockSize()).map(DataSize::longValue).orElse((databaseOptions.global().spinning() ? 128 : 16) * 1024L))
							.setBlockCache(optionsWithCache.standardCache())
							.setNoBlockCache(optionsWithCache.standardCache() == null);
				}
				if (path == null) {
					columnFamilyOptions.useCappedPrefixExtractor(4);
					tableOptions.setBlockRestartInterval(4);
				}

				columnFamilyOptions.setTableFormatConfig(tableOptions);
				columnFamilyOptions.setCompactionPriority(CompactionPriority.MinOverlappingRatio);
				// https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
				// https://github.com/EighteenZi/rocksdb_wiki/blob/master/RocksDB-Tuning-Guide.md#throughput-gap-between-random-read-vs-sequential-read-is-much-higher-in-spinning-disks-suggestions=
				BloomFilterConfig bloomFilterOptions = columnOptions.bloomFilter();
				if (bloomFilterOptions != null) {
					// https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
					// https://github.com/EighteenZi/rocksdb_wiki/blob/master/RocksDB-Tuning-Guide.md#throughput-gap-between-random-read-vs-sequential-read-is-much-higher-in-spinning-disks-suggestions=
					boolean optimizeForHits = databaseOptions.global().spinning();
					Boolean value = bloomFilterOptions.optimizeForHits();
					if (value != null) optimizeForHits = value;
					columnFamilyOptions.setOptimizeFiltersForHits(optimizeForHits);
				}

				if (!FOLLOW_ROCKSDB_OPTIMIZATIONS) {
					// // Increasing this value can reduce the frequency of compaction and reduce write amplification,
					// // but it will also cause old data to be unable to be cleaned up in time, thus increasing read amplification.
					// // This parameter is not easy to adjust. It is generally not recommended to set it above 256MB.
					// https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
					columnFamilyOptions.setTargetFileSizeBase(64 * SizeUnit.MB);
					// // For each level up, the threshold is multiplied by the factor target_file_size_multiplier
					// // (but the default value is 1, which means that the maximum sstable of each level is the same).
					columnFamilyOptions.setTargetFileSizeMultiplier(2);
				}

				descriptors.add(new ColumnFamilyDescriptor(name.getBytes(StandardCharsets.US_ASCII), columnFamilyOptions));
				definitiveColumnFamilyOptionsMap.put(name, columnFamilyOptions);
			}

			var handles = new ArrayList<ColumnFamilyHandle>();
@@ -583,7 +624,7 @@ public class RocksDBLoader {

			var delayWalFlushConfig = getWalFlushDelayConfig(databaseOptions);
			var dbTasks = new DatabaseTasks(db, inMemory, delayWalFlushConfig);
			return TransactionalDB.create(definitiveDbPath.toString(), db, descriptors, handles, dbTasks);
			return new LoadedDb(TransactionalDB.create(definitiveDbPath.toString(), db, descriptors, handles, dbTasks), rocksdbOptions, definitiveColumnFamilyOptionsMap, refs, optionsWithCache.standardCache());
		} catch (IOException | RocksDBException ex) {
			throw it.cavallium.rockserver.core.common.RocksDBException.of(it.cavallium.rockserver.core.common.RocksDBException.RocksDBErrorType.ROCKSDB_LOAD_ERROR, "Failed to load rocksdb", ex);
		} catch (GestaltException e) {
@@ -608,8 +649,9 @@
		var compressionOptions = new CompressionOptions();
		refs.add(compressionOptions);
		if (compressionType != CompressionType.NO_COMPRESSION) {
			compressionOptions.setEnabled(true);
			compressionOptions.setMaxDictBytes(Math.toIntExact(levelOptions.maxDictBytes().longValue()));
			compressionOptions.setEnabled(true)
					.setMaxDictBytes(Math.toIntExact(levelOptions.maxDictBytes().longValue()));
			setZstdCompressionOptions(compressionOptions);
		} else {
			compressionOptions.setEnabled(false);
		}
@@ -0,0 +1,161 @@
package it.cavallium.rockserver.core.impl.rocksdb;

import it.cavallium.rockserver.core.common.RocksDBException;
import it.cavallium.rockserver.core.impl.ColumnInstance;
import org.rocksdb.*;
import org.rocksdb.util.SizeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.UUID;

public record SSTWriter(RocksDB db, ColumnInstance col, Path path, SstFileWriter sstFileWriter, boolean ingestBehind,
		RocksDBObjects refs) implements Closeable, DBWriter {

	private static final Logger LOG = LoggerFactory.getLogger(SSTWriter.class);

	public static SSTWriter open(Path tempSSTsPath, TransactionalDB db, ColumnInstance col, ColumnFamilyOptions columnConfig, boolean forceNoOptions, boolean ingestBehind, RocksDBObjects refs) throws IOException, org.rocksdb.RocksDBException {
		if (refs == null) {
			refs = new RocksDBObjects();
		}
		var envOptions = new EnvOptions();
		if (!forceNoOptions) {
			envOptions
					.setAllowFallocate(true)
					.setWritableFileMaxBufferSize(10 * SizeUnit.MB)
					.setRandomAccessMaxBufferSize(10 * SizeUnit.MB)
					.setCompactionReadaheadSize(2 * SizeUnit.MB)
					.setBytesPerSync(10 * SizeUnit.MB);
		}
		refs.add(envOptions);

		var options = new Options();
		refs.add(options);
		if (!forceNoOptions) {
			options
					.setDisableAutoCompactions(true)
					.setManualWalFlush(true)
					.setUseDirectIoForFlushAndCompaction(true)
					.setBytesPerSync(5 * SizeUnit.MB)
					.setParanoidChecks(false)
					.setSkipCheckingSstFileSizesOnDbOpen(true)
					.setForceConsistencyChecks(false)
					.setParanoidFileChecks(false);
			if (columnConfig != null) {
				options
						.setNumLevels(columnConfig.numLevels())
						.setTableFormatConfig(columnConfig.tableFormatConfig())
						.setTargetFileSizeBase(columnConfig.targetFileSizeBase())
						.setTargetFileSizeMultiplier(columnConfig.targetFileSizeMultiplier())
						.setMaxOpenFiles(-1)
						.setCompressionPerLevel(columnConfig.compressionPerLevel())
						.setCompressionType(columnConfig.compressionType())
						.setCompressionOptions(cloneCompressionOptions(columnConfig.compressionOptions()))
						.setBottommostCompressionType(columnConfig.bottommostCompressionType())
						.setBottommostCompressionOptions(cloneCompressionOptions(columnConfig.bottommostCompressionOptions()));
				if (columnConfig.memTableConfig() != null) {
					options.setMemTableConfig(columnConfig.memTableConfig());
				}
			}
		}
		Path tempFile;
		try {
			var tempDir = tempSSTsPath;
			if (Files.notExists(tempDir)) {
				Files.createDirectories(tempDir);
			}
			tempFile = tempDir.resolve(UUID.randomUUID() + ".sst");
		} catch (IOException ex) {
			refs.close();
			throw ex;
		}
		var sstFileWriter = new SstFileWriter(envOptions, options);
		var sstWriter = new SSTWriter(db.get(), col, tempFile, sstFileWriter, ingestBehind, refs);
		sstFileWriter.open(tempFile.toString());
		return sstWriter;
	}

	private static CompressionOptions cloneCompressionOptions(CompressionOptions compressionOptions) {
		return new CompressionOptions()
				.setEnabled(compressionOptions.enabled())
				.setMaxDictBytes(compressionOptions.maxDictBytes())
				.setLevel(compressionOptions.level())
				.setStrategy(compressionOptions.strategy())
				.setZStdMaxTrainBytes(compressionOptions.zstdMaxTrainBytes())
				.setWindowBits(compressionOptions.windowBits());
	}

	public void put(byte[] key, byte[] value) throws RocksDBException {
		try {
			sstFileWriter.put(key, value);
		} catch (org.rocksdb.RocksDBException e) {
			throw RocksDBException.of(RocksDBException.RocksDBErrorType.PUT_UNKNOWN_ERROR, e);
		}
	}

	public void put(ByteBuffer key, ByteBuffer value) throws RocksDBException {
		try {
			sstFileWriter.put(key, value);
		} catch (org.rocksdb.RocksDBException e) {
			throw RocksDBException.of(RocksDBException.RocksDBErrorType.PUT_UNKNOWN_ERROR, e);
		}
	}

	@Override
	public void writePending() throws it.cavallium.rockserver.core.common.RocksDBException {
		try {
			checkOwningHandle();
			try (this) {
				sstFileWriter.finish();
				try (var ingestOptions = new IngestExternalFileOptions()) {
					ingestOptions
							.setIngestBehind(ingestBehind)
							.setAllowBlockingFlush(true)
							.setMoveFiles(true)
							.setAllowGlobalSeqNo(true)
							.setWriteGlobalSeqno(false)
							.setSnapshotConsistency(false);
					db.ingestExternalFile(col.cfh(), List.of(path.toString()), ingestOptions);
				}
			}
		} catch (org.rocksdb.RocksDBException e) {
			throw RocksDBException.of(RocksDBException.RocksDBErrorType.SST_WRITE_1, e);
		}
	}

	private void checkOwningHandle() {
		if (!sstFileWriter.isOwningHandle()) {
			throw RocksDBException.of(RocksDBException.RocksDBErrorType.SST_WRITE_4, "SST writer is closed");
		}
	}

	@Override
	public void close() {
		if (sstFileWriter.isOwningHandle()) {
			sstFileWriter.close();
			try {
				Files.deleteIfExists(path);
			} catch (IOException e) {
				LOG.error("Failed to delete a file: {}", path, e);
			}
		}
		refs.close();
	}

	public long fileSize() {
		if (!sstFileWriter.isOwningHandle()) {
			throw RocksDBException.of(RocksDBException.RocksDBErrorType.SST_GET_SIZE_FAILED, "The SSTWriter is closed");
		}
		try {
			return sstFileWriter.fileSize();
		} catch (org.rocksdb.RocksDBException e) {
			throw RocksDBException.of(RocksDBException.RocksDBErrorType.SST_GET_SIZE_FAILED, e);
		}
	}
}
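Taken together, a rough usage sketch of the new writer (not part of the patch; `tempPath`, `db`, `column`, `columnOptions`, and the key/value arrays are assumed to come from the caller, as they do in RocksDBLoader/EmbeddedDB):

	// Sketch: build an SST out-of-band, then ingest it atomically.
	try (SSTWriter writer = SSTWriter.open(tempPath, db, column, columnOptions,
			false /* forceNoOptions */, false /* ingestBehind */, null)) {
		writer.put(key1, value1); // SstFileWriter requires keys in ascending order
		writer.put(key2, value2);
		writer.writePending();    // finish() + ingestExternalFile(); the writer closes itself
	}                             // close() is then a no-op (guarded by isOwningHandle())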
@@ -1,15 +1,21 @@
package it.cavallium.rockserver.core.impl.rocksdb;

import java.io.Closeable;
import org.rocksdb.AbstractNativeReference;

import it.cavallium.rockserver.core.common.RocksDBException;
import org.rocksdb.Transaction;

public record Tx(Transaction val, boolean isFromGetForUpdate, RocksDBObjects objs)
		implements Closeable {
		implements Closeable, DBWriter {

	@Override
	public void close() {
		val.close();
		objs.close();
	}

	@Override
	public void writePending() throws RocksDBException {

	}
}
@@ -0,0 +1,28 @@
package it.cavallium.rockserver.core.impl.rocksdb;

import it.cavallium.rockserver.core.common.RocksDBException;
import org.jetbrains.annotations.NotNull;
import org.rocksdb.RocksDB;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

import java.io.Closeable;

public record WB(RocksDB rocksDB, @NotNull WriteBatch wb, boolean disableWal) implements Closeable, DBWriter {

	private static final boolean MIGRATE = Boolean.parseBoolean(System.getProperty("rocksdb.migrate", "false"));

	@Override
	public void close() {
		wb.close();
	}

	@Override
	public void writePending() throws RocksDBException {
		try (var w = new WriteOptions()) {
			if (disableWal || MIGRATE) {
				w.setDisableWAL(true);
			}
			rocksDB.write(w, wb);
		} catch (org.rocksdb.RocksDBException e) {
			throw RocksDBException.of(RocksDBException.RocksDBErrorType.WRITE_BATCH_1, e);
		}
	}
}
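For comparison with SSTWriter, the same DBWriter contract applied to a plain write batch (illustrative only; `rocksDB`, `cfh`, and the byte arrays are assumed handles and data from the caller):

	// Sketch: buffer writes in a WriteBatch, then apply them in one write() call.
	try (WriteBatch batch = new WriteBatch()) {
		batch.put(cfh, keyBytes, valueBytes);
		new WB(rocksDB, batch, false /* disableWal */).writePending();
	}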
@@ -0,0 +1,18 @@
package it.cavallium.rockserver.core.resources;

import it.cavallium.rockserver.core.common.RocksDBException;
import it.cavallium.rockserver.core.common.RocksDBException.RocksDBErrorType;
import java.io.InputStream;
import org.jetbrains.annotations.NotNull;

public class DefaultConfig {

	@NotNull
	public static InputStream getDefaultConfig() {
		var stream = DefaultConfig.class.getResourceAsStream("default.conf");
		if (stream == null) {
			throw RocksDBException.of(RocksDBErrorType.CONFIG_ERROR, "Missing default config resource: default.conf");
		}
		return stream;
	}
}
File diff suppressed because it is too large
@@ -5,7 +5,7 @@ import it.cavallium.rockserver.core.client.RocksDBConnection;
import java.io.Closeable;
import java.io.IOException;

public class Server implements Closeable {
public abstract class Server implements Closeable {

	private final RocksDBConnection client;

@@ -17,6 +17,8 @@ public class Server implements Closeable {
		return client;
	}

	public abstract void start() throws IOException;

	@Override
	public void close() throws IOException {
		client.close();
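With Server now abstract, a concrete transport only has to supply start(). A sketch of the contract (the constructor signature is an assumption inferred from the `client` field above, not shown in this hunk):

	// Sketch of what a transport implementation now fulfills.
	public class NoopServer extends Server {
		public NoopServer(RocksDBConnection client) {
			super(client);
		}

		@Override
		public void start() throws IOException {
			// bind sockets, register handlers, etc.
		}
	}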
@@ -1,13 +1,11 @@
package it.cavallium.rockserver.core.server;

import it.cavallium.rockserver.core.client.ClientBuilder;
import it.cavallium.rockserver.core.client.EmbeddedConnection;
import io.netty.channel.unix.DomainSocketAddress;
import it.cavallium.rockserver.core.client.RocksDBConnection;
import it.cavallium.rockserver.core.common.Utils.HostAndPort;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.UnixDomainSocketAddress;
import java.nio.file.Path;

public class ServerBuilder {

@@ -42,10 +40,10 @@ public class ServerBuilder {
		if (useThrift) {
			return new ThriftServer(client, http2Address.host(), http2Address.port());
		} else {
			return new GrpcServer(client, http2Address.host(), http2Address.port());
			return new GrpcServer(client, new InetSocketAddress(http2Address.host(), http2Address.port()));
		}
	} else if (unixAddress != null) {
		throw new UnsupportedOperationException("Not implemented: unix socket");
		return new GrpcServer(client, new DomainSocketAddress(unixAddress.getPath().toFile()));
	} else if (iNetAddress != null) {
		throw new UnsupportedOperationException("Not implemented: inet address");
	} else {
@@ -57,6 +57,10 @@ public class ThriftServer extends Server {
		}
	}

	public void start() {
		thriftThread.start();
	}

	private static @NotNull List<@NotNull Keys> keysToRecords(Arena arena, @NotNull List<@NotNull List<@NotNull ByteBuffer>> keysMulti) {
		return keysMulti.stream().map(keys -> keysToRecord(arena, keys)).toList();
	}
@@ -29,6 +29,13 @@ enum Operation {
	PREVIOUS_PRESENCE = 8;
}

enum PutBatchMode {
	WRITE_BATCH = 0;
	WRITE_BATCH_NO_WAL = 1;
	SST_INGESTION = 2;
	SST_INGEST_BEHIND = 3;
}

message Delta {
	optional bytes previous = 1;
	optional bytes current = 2;
@@ -47,6 +54,10 @@ message KV {
	bytes value = 2;
}

message KVBatch {
	repeated KV entries = 1;
}

message OpenTransactionRequest {int64 timeoutMs = 1;}
message OpenTransactionResponse {int64 transactionId = 1;}

@@ -65,6 +76,9 @@ message GetColumnIdResponse {int64 columnId = 1;}

message PutRequest {int64 transactionOrUpdateId = 1; int64 columnId = 2; KV data = 3;}

message PutBatchInitialRequest {int64 columnId = 1; PutBatchMode mode = 2;}
message PutBatchRequest {oneof putBatchRequestType {PutBatchInitialRequest initialRequest = 1; KVBatch data = 2;}}

message PutMultiInitialRequest {int64 transactionOrUpdateId = 1; int64 columnId = 2;}
message PutMultiRequest {oneof putMultiRequestType {PutMultiInitialRequest initialRequest = 1; KV data = 2;}}

@@ -88,6 +102,7 @@ service RocksDBService {
	rpc deleteColumn(DeleteColumnRequest) returns (google.protobuf.Empty);
	rpc getColumnId(GetColumnIdRequest) returns (GetColumnIdResponse);
	rpc put(PutRequest) returns (google.protobuf.Empty);
	rpc putBatch(stream PutBatchRequest) returns (google.protobuf.Empty);
	rpc putMulti(stream PutMultiRequest) returns (google.protobuf.Empty);
	rpc putGetPrevious(PutRequest) returns (Previous);
	rpc putMultiGetPrevious(stream PutMultiRequest) returns (stream Previous);
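The new putBatch rpc is client-streaming: the first message carries a PutBatchInitialRequest (column id and mode), and every following message carries a KVBatch payload. A hedged client-side sketch against the stubs that protoc-gen-grpc-java would generate from this file (the generated class names, and the `channel`, `responseObserver`, `columnId`, and `kvBatch` values, are assumptions based on standard codegen, not files in this diff):

	// Sketch: stream one initial request, then data batches, then complete.
	StreamObserver<PutBatchRequest> req = RocksDBServiceGrpc.newStub(channel)
			.putBatch(responseObserver); // responseObserver receives Empty or an error
	req.onNext(PutBatchRequest.newBuilder()
			.setInitialRequest(PutBatchInitialRequest.newBuilder()
					.setColumnId(columnId)
					.setMode(PutBatchMode.SST_INGESTION))
			.build());
	req.onNext(PutBatchRequest.newBuilder().setData(kvBatch).build());
	req.onCompleted();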
@@ -6,6 +6,40 @@ database: {
  spinning: false
  # Enable to require absolute consistency after a crash. False to use the PointInTime recovery strategy
  absolute-consistency: true
  # Set this option to true during creation of the database if you want to be able
  # to ingest behind (call IngestExternalFile() skipping keys that already
  # exist, rather than overwriting matching keys).
  # Setting this option to true has the following effects:
  # 1) Disable some internal optimizations around SST file compression
  # 2) Reserve the bottom-most level for ingested files only.
  # 3) Note that num_levels should be >= 3 if this option is turned on.
  # DEFAULT: false
  ingest-behind: false
  # ENABLE THIS ONLY WHEN DOING BULK WRITES, THIS IS UNSAFE TO USE IN NORMAL SCENARIOS
  # Setting unorderedWrite() to true trades higher write throughput
  # for a relaxed snapshot immutability guarantee.
  # This violates the repeatability one expects from ::Get from a snapshot,
  # as well as ::MultiGet and Iterator's consistent-point-in-time view property.
  # If the application cannot tolerate the relaxed guarantees,
  # it can implement its own mechanisms to work around
  # that and yet benefit from the higher throughput.
  # Using TransactionDB with WRITE_PREPARED write policy and twoWriteQueues() true
  # is one way to achieve immutable snapshots despite unordered_write.
  # By default, i.e., when it is false, rocksdb does not advance the sequence
  # number for new snapshots unless all the writes with
  # lower sequence numbers are already finished.
  # This provides the immutability that we expect from snapshots.
  # Moreover, since Iterator and MultiGet internally depend on snapshots,
  # the snapshot immutability results in Iterator
  # and MultiGet offering a consistent-point-in-time view.
  # If set to true, although the Read-Your-Own-Write property is still provided,
  # the snapshot immutability property is relaxed: the writes issued after
  # the snapshot is obtained (with larger sequence numbers) will still not be
  # visible to the reads from that snapshot; however, there still might be pending
  # writes (with lower sequence numbers) that will change the state visible
  # to the snapshot after they are landed to the memtable.
  # DEFAULT: false
  unordered-write: false
  # Error checking
  checksum: true
  # Use direct I/O in RocksDB databases (higher I/O read throughput, but the OS cache is not used: less swapping, less memory pressure)
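The TransactionDB workaround mentioned in the unordered-write comment looks roughly like this in RocksJava (a sketch under the comment's own assumptions; `dbPath`, `columnDescriptors`, and `columnHandles` are placeholders, not values from this repository):

	// Sketch: unordered_write for throughput, WRITE_PREPARED + two write queues
	// to keep snapshots immutable despite it.
	DBOptions dbOptions = new DBOptions()
			.setCreateIfMissing(true)
			.setUnorderedWrite(true)
			.setTwoWriteQueues(true);
	TransactionDBOptions txOptions = new TransactionDBOptions()
			.setWritePolicy(TxnDBWritePolicy.WRITE_PREPARED);
	TransactionDB txDb = TransactionDB.open(dbOptions, txOptions, dbPath, columnDescriptors, columnHandles);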
@@ -31,11 +65,13 @@ database: {
  block-cache: 512MiB
  # Database write buffer manager size
  # You should enable this option if you are using direct I/O or spinning disks
  write-buffer-manager: 64MiB
  write-buffer-manager: 128MiB
  # Log data path
  log-path: ./logs
  # Write-Ahead-Log data path
  wal-path: ./wal
  # Temporary SST files path
  temp-sst-path: ./temp_sst
  # If set and greater than zero, the WAL will not be flushed on every write, but every x seconds
  delay-wal-flush-duration: PT5S
  fallback-column-options: {
@@ -98,7 +134,7 @@ database: {
    # This should be kept to null if write-buffer-manager is set,
    # or if you want to use the "memtable-memory-budget-size" logic.
    # Remember that there are "max-write-buffer-number" in memory, 2 by default
    write-buffer-size: 200MiB
    write-buffer-size: 64MiB
    # Enable blob files
    blob-files: false
  }
@@ -27,6 +27,9 @@ module rockserver.core {
	requires io.netty.codec.http;
	requires io.netty.codec;
	requires io.netty.codec.http2;
	requires io.netty.transport.classes.epoll;
	requires org.reactivestreams;
	requires io.netty.transport.unix.common;

	exports it.cavallium.rockserver.core.client;
	exports it.cavallium.rockserver.core.common;
@@ -0,0 +1,22 @@
package it.cavallium.rockserver.core.impl.test;

import it.cavallium.rockserver.core.config.ConfigParser;
import it.cavallium.rockserver.core.config.DataSize;
import java.nio.file.Path;
import org.github.gestalt.config.exceptions.GestaltException;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

class DefaultConfigTest {

	@Test
	public void test() throws GestaltException {
		var def = ConfigParser.parseDefault();
		var checksum = def.global().checksum();
		Assertions.assertTrue(checksum);
		var ingestBehind = def.global().ingestBehind();
		Assertions.assertFalse(ingestBehind);
		Assertions.assertEquals(Path.of("./volume"), def.global().volumes()[0].volumePath());
		Assertions.assertEquals(new DataSize("32KiB"), def.global().fallbackColumnOptions().levels()[6].maxDictBytes());
	}
}
@@ -3,24 +3,25 @@ package it.cavallium.rockserver.core.impl.test;
import static it.cavallium.rockserver.core.common.Utils.toMemorySegmentSimple;

import it.cavallium.rockserver.core.client.EmbeddedConnection;
import it.cavallium.rockserver.core.common.Keys;
import it.cavallium.rockserver.core.common.RequestType;
import it.cavallium.rockserver.core.common.ColumnHashType;
import it.cavallium.rockserver.core.common.ColumnSchema;
import it.cavallium.rockserver.core.common.Delta;
import it.cavallium.rockserver.core.common.RocksDBException;
import it.cavallium.rockserver.core.common.RocksDBRetryException;
import it.cavallium.rockserver.core.common.Utils;
import it.cavallium.rockserver.core.common.*;
import it.unimi.dsi.fastutil.ints.IntList;
import it.unimi.dsi.fastutil.objects.ObjectList;
import java.lang.foreign.Arena;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

import org.jetbrains.annotations.NotNull;
import org.junit.jupiter.api.Assertions;

import java.io.IOException;
import java.lang.foreign.MemorySegment;
import org.junit.jupiter.api.Test;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

abstract class EmbeddedDBTest {

@@ -162,11 +163,29 @@ abstract class EmbeddedDBTest {
			Assertions.assertThrows(RocksDBException.class, () -> db.put(arena, 0, colId, key, toMemorySegmentSimple(arena, 123), RequestType.delta()));
		} else {
			Assertions.assertThrows(RocksDBException.class, () -> db.put(arena, 0, colId, key, MemorySegment.NULL, RequestType.delta()));
			Assertions.assertThrows(RocksDBException.class, () -> db.put(arena, 0, colId, key, null, RequestType.delta()));
			Assertions.assertThrows(RocksDBException.class, () -> {
				try {
					db.put(arena, 0, colId, key, null, RequestType.delta());
				} catch (IllegalArgumentException ex) {
					throw RocksDBException.of(RocksDBException.RocksDBErrorType.UNEXPECTED_NULL_VALUE, ex);
				}
			});
		}

		Assertions.assertThrows(RocksDBException.class, () -> db.put(arena, 0, colId, null, value1, RequestType.delta()));
		Assertions.assertThrows(RocksDBException.class, () -> db.put(arena, 0, colId, null, null, RequestType.delta()));
		Assertions.assertThrows(RocksDBException.class, () -> {
			try {
				db.put(arena, 0, colId, null, value1, RequestType.delta());
			} catch (IllegalArgumentException ex) {
				throw RocksDBException.of(RocksDBException.RocksDBErrorType.UNEXPECTED_NULL_VALUE, ex);
			}
		});
		Assertions.assertThrows(RocksDBException.class, () -> {
			try {
				db.put(arena, 0, colId, null, null, RequestType.delta());
			} catch (IllegalArgumentException ex) {
				throw RocksDBException.of(RocksDBException.RocksDBErrorType.UNEXPECTED_NULL_VALUE, ex);
			}
		});
		Assertions.assertThrows(RocksDBException.class, () -> db.put(arena, 0, colId, key, value1, null));
		Assertions.assertThrows(RocksDBException.class, () -> db.put(arena, 1, colId, key, value1, RequestType.delta()));
		Assertions.assertThrows(RocksDBException.class, () -> db.put(arena, 0, 21203, key, value1, RequestType.delta()));
@@ -344,6 +363,73 @@ abstract class EmbeddedDBTest {
		}
	}

	@Test
	void putBatchSST() {
		@NotNull Publisher<@NotNull KVBatch> batchPublisher = new Publisher<KVBatch>() {
			@Override
			public void subscribe(Subscriber<? super KVBatch> subscriber) {
				subscriber.onSubscribe(new Subscription() {
					Iterator<KVBatch> it;
					{
						ArrayList<KVBatch> items = new ArrayList<>();
						ArrayList<Keys> keys = new ArrayList<>();
						ArrayList<MemorySegment> values = new ArrayList<>();
						for (int i = 0; i < 2; i++) {
							var keyI = getKeyI(i);
							var valueI = getValueI(i);
							keys.add(keyI);
							values.add(valueI);
						}
						items.add(new KVBatch(keys, values));
						keys = new ArrayList<>();
						values = new ArrayList<>();
						for (int i = 2; i < 4; i++) {
							var keyI = getKeyI(i);
							var valueI = getValueI(i);
							keys.add(keyI);
							values.add(valueI);
						}
						items.add(new KVBatch(keys, values));
						it = items.iterator();
					}

					@Override
					public void request(long l) {
						while (l-- > 0) {
							if (it.hasNext()) {
								subscriber.onNext(it.next());
							} else {
								subscriber.onComplete();
								return;
							}
						}
					}

					@Override
					public void cancel() {

					}
				});
			}
		};
		if (this.getSchema().variableLengthKeysCount() <= 0) {
			db.putBatch(colId, batchPublisher, PutBatchMode.SST_INGESTION);

			if (getHasValues()) {
				for (int i = 0; i < 4; i++) {
					assertSegmentEquals(getValueI(i), db.get(arena, 0, colId, getKeyI(i), RequestType.current()));
				}
			}
			for (int i = 0; i < 4; i++) {
				Assertions.assertTrue(db.get(arena, 0, colId, getKeyI(i), RequestType.exists()));
			}
		} else {
			Assertions.assertThrows(RocksDBException.class, () -> {
				db.putBatch(colId, batchPublisher, PutBatchMode.SST_INGESTION);
			});
		}
	}

	@Test
	void concurrentUpdate() {
		if (getHasValues()) {
@@ -0,0 +1,111 @@
package it.cavallium.rockserver.core.impl.test;

import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import it.cavallium.rockserver.core.common.ColumnSchema;
import it.cavallium.rockserver.core.config.*;
import it.cavallium.rockserver.core.impl.EmbeddedDB;
import it.unimi.dsi.fastutil.ints.IntList;
import it.unimi.dsi.fastutil.objects.ObjectList;
import org.github.gestalt.config.exceptions.GestaltException;
import org.jetbrains.annotations.Nullable;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.rocksdb.CompressionType;
import org.rocksdb.Options;
import org.rocksdb.RocksDBException;
import org.rocksdb.SstFileReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.time.Duration;
import java.util.concurrent.ThreadLocalRandom;

public class TestSSTWriter {

	private static final Logger LOG = LoggerFactory.getLogger(TestSSTWriter.class);

	private EmbeddedDB db;
	private long colId;
	private Path tempSstPath;

	@BeforeEach
	public void setUp() throws IOException {
		db = new EmbeddedDB(null, "test", null);
		this.colId = db.createColumn("test", ColumnSchema.of(IntList.of(Long.BYTES), ObjectList.of(), true));
		this.tempSstPath = Files.createTempDirectory("tempssts");
	}

	@Test
	public void test() throws IOException {
		LOG.info("Obtaining sst writer");
		try (var sstWriter = db.getSSTWriter(colId, null, true, false)) {
			LOG.info("Creating sst");
			var tl = ThreadLocalRandom.current();
			var bytes = new byte[1024];
			long i = 0;
			while (i < 10_000) {
				var ib = Longs.toByteArray(i++);
				tl.nextBytes(bytes);
				sstWriter.put(ib, bytes);
			}
			LOG.info("Writing pending sst data");
			sstWriter.writePending();
			LOG.info("Done, closing");
		}
		LOG.info("Done");
	}

	@Test
	public void testCompression() throws IOException, RocksDBException, GestaltException {
		LOG.info("Obtaining sst writer");
		try (var sstWriter = db.getSSTWriter(colId, null, false, false)) {
			LOG.info("Creating sst");
			var tl = ThreadLocalRandom.current();
			var bytes = new byte[1024];
			long i = 0;
			while (i < 1_000) {
				var ib = Longs.toByteArray(i++);
				tl.nextBytes(bytes);
				sstWriter.put(ib, bytes);
			}
			LOG.info("Writing pending sst data");
			sstWriter.writePending();
			LOG.info("Done, closing");
		}
		var transactionalDB = db.getDb();
		var rocksDB = transactionalDB.get();
		var metadata = rocksDB.getLiveFilesMetaData();
		Assertions.assertEquals(1, metadata.size(), "There is more than one SST file");
		var sstMetadata = metadata.getFirst();
		var sstPath = Path.of(sstMetadata.path(), sstMetadata.fileName());
		Assertions.assertTrue(Files.exists(sstPath), "The SST file does not exist");
		try (var options = new Options(); var sstReader = new SstFileReader(options)) {
			sstReader.open(sstPath.toString());
			var p = sstReader.getTableProperties();
			Assertions.assertNotEquals("snappy", p.getCompressionName().toLowerCase());
		}
		LOG.info("Done");
	}

	@AfterEach
	public void tearDown() throws IOException {
		db.close();
		Files.walkFileTree(tempSstPath, new SimpleFileVisitor<Path>() {
			@Override
			public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
				Files.deleteIfExists(file);
				return FileVisitResult.CONTINUE;
			}
		});
		Files.deleteIfExists(tempSstPath);
	}
}
@@ -3,6 +3,12 @@ module rockserver.core.test {
	requires rockserver.core;
	requires org.junit.jupiter.api;
	requires it.unimi.dsi.fastutil;
	requires com.google.common;
	requires org.slf4j;
	requires org.github.gestalt.core;
	requires org.jetbrains.annotations;
	requires rocksdbjni;
	requires org.reactivestreams;
	opens it.cavallium.rockserver.core.test;
	opens it.cavallium.rockserver.core.impl.test;
}