diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h
index 29ba32070..aa41eff33 100644
--- a/java/rocksjni/portal.h
+++ b/java/rocksjni/portal.h
@@ -4292,25 +4292,12 @@ class JniUtil {
      * @param bytes The bytes to copy
      *
      * @return the Java byte[] or nullptr if an exception occurs
+     *
+     * @throws RocksDBException thrown
+     *     if memory size to copy exceeds general java specific array size limitation.
      */
     static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
-      const jsize jlen = static_cast<jsize>(bytes.size());
-
-      jbyteArray jbytes = env->NewByteArray(jlen);
-      if(jbytes == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      env->SetByteArrayRegion(jbytes, 0, jlen,
-        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes.c_str())));
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jbytes);
-        return nullptr;
-      }
-
-      return jbytes;
+      return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size());
     }

     /**
@@ -4473,6 +4460,47 @@ class JniUtil {
       return jbyte_strings;
     }

+    /**
+     * Copies bytes to a new jByteArray with the check of java array size limitation.
+     *
+     * @param bytes pointer to memory to copy to a new jByteArray
+     * @param size number of bytes to copy
+     *
+     * @return the Java byte[] or nullptr if an exception occurs
+     *
+     * @throws RocksDBException thrown
+     *     if memory size to copy exceeds general java array size limitation to avoid overflow.
+     */
+    static jbyteArray createJavaByteArrayWithSizeCheck(JNIEnv* env, const char* bytes, const size_t size) {
+      // Limitation for java array size is vm specific
+      // In general it cannot exceed Integer.MAX_VALUE (2^31 - 1)
+      // Current HotSpot VM limitation for array size is Integer.MAX_VALUE - 5 (2^31 - 1 - 5)
+      // It means that the next call to env->NewByteArray can still end with
+      // OutOfMemoryError("Requested array size exceeds VM limit") coming from VM
+      static const size_t MAX_JARRAY_SIZE = (static_cast<size_t>(1)) << 31;
+      if(size > MAX_JARRAY_SIZE) {
+        rocksdb::RocksDBExceptionJni::ThrowNew(env, "Requested array size exceeds VM limit");
+        return nullptr;
+      }
+
+      const jsize jlen = static_cast<jsize>(size);
+      jbyteArray jbytes = env->NewByteArray(jlen);
+      if(jbytes == nullptr) {
+        // exception thrown: OutOfMemoryError
+        return nullptr;
+      }
+
+      env->SetByteArrayRegion(jbytes, 0, jlen,
+        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes)));
+      if(env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        env->DeleteLocalRef(jbytes);
+        return nullptr;
+      }
+
+      return jbytes;
+    }
+
     /**
      * Copies bytes from a rocksdb::Slice to a jByteArray
@@ -4481,25 +4509,12 @@
      * @param bytes The bytes to copy
      *
      * @return the Java byte[] or nullptr if an exception occurs
+     *
+     * @throws RocksDBException thrown
+     *     if memory size to copy exceeds general java specific array size limitation.
      */
     static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) {
-      const jsize jlen = static_cast<jsize>(bytes.size());
-
-      jbyteArray jbytes = env->NewByteArray(jlen);
-      if(jbytes == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      env->SetByteArrayRegion(jbytes, 0, jlen,
-        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes.data())));
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jbytes);
-        return nullptr;
-      }
-
-      return jbytes;
+      return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size());
     }

     /*
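The VM ceiling that `createJavaByteArrayWithSizeCheck` guards against can be reproduced from plain Java, independent of RocksDB. A minimal sketch (not part of the patch; the class name is hypothetical, and the exact error message is HotSpot-specific):

```java
public class ArrayLimitDemo {
  public static void main(final String[] args) {
    try {
      // Java arrays are indexed by int, and HotSpot additionally reserves a
      // few header words, so its practical ceiling is roughly
      // Integer.MAX_VALUE - 5 elements.
      final byte[] tooLarge = new byte[Integer.MAX_VALUE];
      System.out.println("allocated " + tooLarge.length + " bytes");
    } catch (final OutOfMemoryError e) {
      // With a large heap (e.g. -Xmx4g) this typically prints
      // "Requested array size exceeds VM limit"; with a small heap the
      // allocation may fail earlier with "Java heap space" instead.
      System.out.println("allocation failed: " + e.getMessage());
    }
  }
}
```

This is also why the helper still checks `NewByteArray` for `nullptr` after its explicit size check: a request below 2^31 bytes can pass the guard yet still exceed the VM's own limit.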
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
index 16b9c609b..9cda136b7 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
@@ -28,8 +28,10 @@ public class ColumnFamilyHandle extends RocksObject {
    * Gets the name of the Column Family.
    *
    * @return The name of the Column Family.
+   *
+   * @throws RocksDBException if an error occurs whilst retrieving the name.
    */
-  public byte[] getName() {
+  public byte[] getName() throws RocksDBException {
     return getName(nativeHandle_);
   }

@@ -71,14 +73,22 @@ public class ColumnFamilyHandle extends RocksObject {
     }
     final ColumnFamilyHandle that = (ColumnFamilyHandle) o;
-    return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
-        getID() == that.getID() &&
-        Arrays.equals(getName(), that.getName());
+    try {
+      return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
+          getID() == that.getID() &&
+          Arrays.equals(getName(), that.getName());
+    } catch (RocksDBException e) {
+      throw new RuntimeException("Cannot compare column family handles", e);
+    }
   }

   @Override
   public int hashCode() {
-    return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_);
+    try {
+      return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_);
+    } catch (RocksDBException e) {
+      throw new RuntimeException("Cannot calculate hash code of column family handle", e);
+    }
   }

   /**
@@ -96,7 +106,7 @@ public class ColumnFamilyHandle extends RocksObject {
     }
   }

-  private native byte[] getName(final long handle);
+  private native byte[] getName(final long handle) throws RocksDBException;
   private native int getID(final long handle);
   private native ColumnFamilyDescriptor getDescriptor(final long handle) throws RocksDBException;
   @Override protected final native void disposeInternal(final long handle);
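Since `getName()` is now a checked call, call sites must either propagate or wrap `RocksDBException`. A hypothetical caller sketch (the class and method names are illustrative, not from the patch):

```java
import java.nio.charset.StandardCharsets;

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class ColumnFamilyNameDemo {
  // Reads the default column family's name, wrapping the now-checked exception.
  static void printDefaultName(final RocksDB db) {
    try {
      final ColumnFamilyHandle cf = db.getDefaultColumnFamily();
      System.out.println(new String(cf.getName(), StandardCharsets.UTF_8));
    } catch (final RocksDBException e) {
      // Surfaced from the JNI layer, e.g. if the name cannot be copied into a byte[].
      System.err.println("could not read column family name: " + e.getMessage());
    }
  }
}
```

Inside `equals()` and `hashCode()` the patch instead wraps the checked exception in a `RuntimeException`, since the `Object` contract leaves no room for checked exceptions there.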
diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java
index fc95ee594..5673a25ef 100644
--- a/java/src/main/java/org/rocksdb/WriteBatch.java
+++ b/java/src/main/java/org/rocksdb/WriteBatch.java
@@ -65,8 +65,11 @@ public class WriteBatch extends AbstractWriteBatch {
    * Retrieve the serialized version of this batch.
    *
    * @return the serialized representation of this write batch.
+   *
+   * @throws RocksDBException if an error occurs whilst retrieving
+   *   the serialized batch data.
    */
-  public byte[] data() {
+  public byte[] data() throws RocksDBException {
     return data(nativeHandle_);
   }

@@ -253,7 +256,7 @@ public class WriteBatch extends AbstractWriteBatch {
       final int serializedLength);
   private native void iterate(final long handle, final long handlerHandle)
       throws RocksDBException;
-  private native byte[] data(final long nativeHandle);
+  private native byte[] data(final long nativeHandle) throws RocksDBException;
   private native long getDataSize(final long nativeHandle);
   private native boolean hasPut(final long nativeHandle);
   private native boolean hasDelete(final long nativeHandle);
diff --git a/java/src/test/java/org/rocksdb/RocksDBTest.java b/java/src/test/java/org/rocksdb/RocksDBTest.java
index 01f56d9b5..158b8d56a 100644
--- a/java/src/test/java/org/rocksdb/RocksDBTest.java
+++ b/java/src/test/java/org/rocksdb/RocksDBTest.java
@@ -4,9 +4,11 @@
 // (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

+import org.junit.Assume;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 import org.junit.rules.TemporaryFolder;

 import java.util.*;
@@ -143,6 +145,39 @@ public class RocksDBTest {
     }
   }

+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test
+  public void getOutOfArrayMaxSizeValue() throws RocksDBException {
+    final int numberOfValueSplits = 10;
+    final int splitSize = Integer.MAX_VALUE / numberOfValueSplits;
+
+    Runtime runtime = Runtime.getRuntime();
+    long neededMemory = ((long)(splitSize)) * (((long)numberOfValueSplits) + 3);
+    boolean isEnoughMemory = runtime.maxMemory() - runtime.totalMemory() > neededMemory;
+    Assume.assumeTrue(isEnoughMemory);
+
+    final byte[] valueSplit = new byte[splitSize];
+    final byte[] key = "key".getBytes();
+
+    thrown.expect(RocksDBException.class);
+    thrown.expectMessage("Requested array size exceeds VM limit");
+
+    // merge (numberOfValueSplits + 1) valueSplit's to get value size exceeding Integer.MAX_VALUE
+    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+         final Options opt = new Options()
+             .setCreateIfMissing(true)
+             .setMergeOperator(stringAppendOperator);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      db.put(key, valueSplit);
+      for (int i = 0; i < numberOfValueSplits; i++) {
+        db.merge(key, valueSplit);
+      }
+      db.get(key);
+    }
+  }
+
   @Test
   public void multiGet() throws RocksDBException, InterruptedException {
     try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
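On the caller side, `WriteBatch.data()` now surfaces the same failure mode that the test above provokes through `db.get(key)`. A minimal usage sketch (class name hypothetical; whether `WriteBatch.put` itself throws depends on the RocksDB version):

```java
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;

public class WriteBatchDataDemo {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final WriteBatch batch = new WriteBatch()) {
      batch.put("key".getBytes(), "value".getBytes());
      // data() is now declared to throw: a batch whose serialized form exceeds
      // the VM array limit yields "Requested array size exceeds VM limit"
      // instead of a silent jsize overflow in the JNI layer.
      final byte[] serialized = batch.data();
      System.out.println("serialized batch is " + serialized.length + " bytes");
    } catch (final RocksDBException e) {
      System.err.println("could not serialize batch: " + e.getMessage());
    }
  }
}
```

Note that the test uses `Assume.assumeTrue` to skip itself rather than fail when the heap cannot hold the roughly 2.8 GB of value splits it needs.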