Check if data size exceeds Java array VM limit when it is copied in JNI (#3850)

Summary:
Before copying bytes into a Java byte[] in JNI, check whether the size exceeds the JVM's array size limit, and throw a RocksDBException instead of letting the cast to jsize overflow. Addresses issue #3849.
Closes https://github.com/facebook/rocksdb/pull/3850

Differential Revision: D8695487

Pulled By: sagar0

fbshipit-source-id: 04baeb2127663934ed1321fe6d9a9ec23c86e16b
Authored by Andrey Zagrebin on 2018-06-29 16:03:28 -07:00; committed by Sagar Vemuri
parent 6633bd0caa
commit 0ac22f2921
4 changed files with 105 additions and 42 deletions

File: java/rocksjni/portal.h

@@ -4292,25 +4292,12 @@ class JniUtil {
    * @param bytes The bytes to copy
    *
    * @return the Java byte[] or nullptr if an exception occurs
+   *
+   * @throws RocksDBException if the size of the data to copy
+   *     exceeds the Java array size limit
    */
   static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
-    const jsize jlen = static_cast<jsize>(bytes.size());
-    jbyteArray jbytes = env->NewByteArray(jlen);
-    if(jbytes == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-    env->SetByteArrayRegion(jbytes, 0, jlen,
-        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes.c_str())));
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jbytes);
-      return nullptr;
-    }
-    return jbytes;
+    return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size());
   }
/**
@@ -4473,6 +4460,47 @@ class JniUtil {
     return jbyte_strings;
   }

+  /**
+   * Copies bytes to a new jbyteArray, checking the Java array size limit.
+   *
+   * @param bytes pointer to the memory to copy into a new jbyteArray
+   * @param size number of bytes to copy
+   *
+   * @return the Java byte[] or nullptr if an exception occurs
+   *
+   * @throws RocksDBException if the number of bytes to copy
+   *     exceeds the Java array size limit
+   */
+  static jbyteArray createJavaByteArrayWithSizeCheck(
+      JNIEnv* env, const char* bytes, const size_t size) {
+    // The maximum Java array size is VM specific. It can never exceed
+    // Integer.MAX_VALUE (2^31 - 1), and the current HotSpot VM caps it at
+    // Integer.MAX_VALUE - 5 (2^31 - 1 - 5). So even when this check passes,
+    // the env->NewByteArray call below can still fail with
+    // OutOfMemoryError("Requested array size exceeds VM limit") from the VM.
+    static const size_t MAX_JARRAY_SIZE = (static_cast<size_t>(1)) << 31;
+    if(size > MAX_JARRAY_SIZE) {
+      rocksdb::RocksDBExceptionJni::ThrowNew(
+          env, "Requested array size exceeds VM limit");
+      return nullptr;
+    }
+
+    const jsize jlen = static_cast<jsize>(size);
+    jbyteArray jbytes = env->NewByteArray(jlen);
+    if(jbytes == nullptr) {
+      // exception thrown: OutOfMemoryError
+      return nullptr;
+    }
+    env->SetByteArrayRegion(jbytes, 0, jlen,
+        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes)));
+    if(env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      env->DeleteLocalRef(jbytes);
+      return nullptr;
+    }
+    return jbytes;
+  }
+
/**
 * Copies bytes from a rocksdb::Slice to a jbyteArray
@@ -4481,25 +4509,12 @@ class JniUtil {
    * @param bytes The bytes to copy
    *
    * @return the Java byte[] or nullptr if an exception occurs
+   *
+   * @throws RocksDBException if the size of the data to copy
+   *     exceeds the Java array size limit
    */
   static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) {
-    const jsize jlen = static_cast<jsize>(bytes.size());
-    jbyteArray jbytes = env->NewByteArray(jlen);
-    if(jbytes == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-    env->SetByteArrayRegion(jbytes, 0, jlen,
-        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes.data())));
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jbytes);
-      return nullptr;
-    }
-    return jbytes;
+    return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size());
   }
/*

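As an aside, the VM limit that createJavaByteArrayWithSizeCheck guards against is easy to observe from plain Java. Below is a minimal standalone sketch (not part of the commit; the class name is illustrative):

// Probes the VM's array size cap. On HotSpot this allocation fails even
// when the heap has room, because the cap sits a few elements below
// Integer.MAX_VALUE.
public class ArrayLimitProbe {
  public static void main(final String[] args) {
    try {
      final byte[] bytes = new byte[Integer.MAX_VALUE];
      System.out.println("allocated " + bytes.length + " bytes");
    } catch (final OutOfMemoryError e) {
      // "Requested array size exceeds VM limit" when the size (rather than
      // free heap) is the problem; "Java heap space" otherwise.
      System.out.println("OutOfMemoryError: " + e.getMessage());
    }
  }
}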
File: java/src/main/java/org/rocksdb/ColumnFamilyHandle.java

@@ -28,8 +28,10 @@ public class ColumnFamilyHandle extends RocksObject {
    * Gets the name of the Column Family.
    *
    * @return The name of the Column Family.
+   *
+   * @throws RocksDBException if an error occurs whilst retrieving the name.
    */
-  public byte[] getName() {
+  public byte[] getName() throws RocksDBException {
     return getName(nativeHandle_);
   }
@@ -71,14 +73,22 @@ public class ColumnFamilyHandle extends RocksObject {
     }
     final ColumnFamilyHandle that = (ColumnFamilyHandle) o;
-    return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
-        getID() == that.getID() &&
-        Arrays.equals(getName(), that.getName());
+    try {
+      return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
+          getID() == that.getID() &&
+          Arrays.equals(getName(), that.getName());
+    } catch (RocksDBException e) {
+      throw new RuntimeException("Cannot compare column family handles", e);
+    }
   }

   @Override
   public int hashCode() {
-    return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_);
+    try {
+      return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_);
+    } catch (RocksDBException e) {
+      throw new RuntimeException("Cannot calculate hash code of column family handle", e);
+    }
   }
/**
@@ -96,7 +106,7 @@ public class ColumnFamilyHandle extends RocksObject {
     }
   }

-  private native byte[] getName(final long handle);
+  private native byte[] getName(final long handle) throws RocksDBException;
   private native int getID(final long handle);
   private native ColumnFamilyDescriptor getDescriptor(final long handle) throws RocksDBException;

   @Override protected final native void disposeInternal(final long handle);

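Because equals and hashCode cannot declare checked exceptions, ColumnFamilyHandle wraps the RocksDBException from getName in a RuntimeException inside those two methods; direct callers of getName now handle the checked exception themselves. A minimal caller sketch (the helper method is hypothetical, assuming an already-open RocksDB instance):

// Hypothetical helper: prints the name of the default column family.
static void printDefaultColumnFamilyName(final RocksDB db) {
  final ColumnFamilyHandle cf = db.getDefaultColumnFamily();
  try {
    // getName() now declares RocksDBException: the JNI side throws if the
    // name cannot be copied into a Java byte[].
    final byte[] name = cf.getName();
    System.out.println("column family: "
        + new String(name, java.nio.charset.StandardCharsets.UTF_8));
  } catch (final RocksDBException e) {
    System.err.println("could not read column family name: " + e.getMessage());
  }
}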
File: java/src/main/java/org/rocksdb/WriteBatch.java

@@ -65,8 +65,11 @@ public class WriteBatch extends AbstractWriteBatch {
    * Retrieve the serialized version of this batch.
    *
    * @return the serialized representation of this write batch.
+   *
+   * @throws RocksDBException if an error occurs whilst retrieving
+   *     the serialized batch data.
    */
-  public byte[] data() {
+  public byte[] data() throws RocksDBException {
     return data(nativeHandle_);
   }
@@ -253,7 +256,7 @@ public class WriteBatch extends AbstractWriteBatch {
       final int serializedLength);
   private native void iterate(final long handle, final long handlerHandle)
       throws RocksDBException;
-  private native byte[] data(final long nativeHandle);
+  private native byte[] data(final long nativeHandle) throws RocksDBException;
   private native long getDataSize(final long nativeHandle);
   private native boolean hasPut(final long nativeHandle);
   private native boolean hasDelete(final long nativeHandle);

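The same pattern applies to WriteBatch.data(): code that serializes a batch now has to deal with the checked exception. A small sketch (the wrapper method is illustrative, not from the commit):

// Illustrative wrapper: serializes a WriteBatch, propagating the exception.
static byte[] serializeBatch() throws RocksDBException {
  try (final WriteBatch batch = new WriteBatch()) {
    batch.put("key".getBytes(), "value".getBytes());
    // data() throws RocksDBException if the serialized representation
    // cannot be copied into a Java byte[], e.g. when it exceeds the VM limit.
    return batch.data();
  }
}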
File: java/src/test/java/org/rocksdb/RocksDBTest.java

@@ -4,9 +4,11 @@
 // (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

+import org.junit.Assume;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 import org.junit.rules.TemporaryFolder;

 import java.util.*;
@@ -143,6 +145,39 @@ public class RocksDBTest {
     }
   }

+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test
+  public void getOutOfArrayMaxSizeValue() throws RocksDBException {
+    final int numberOfValueSplits = 10;
+    final int splitSize = Integer.MAX_VALUE / numberOfValueSplits;
+
+    // Skip the test unless the heap has room for the splits plus some slack.
+    final Runtime runtime = Runtime.getRuntime();
+    final long neededMemory = ((long) splitSize) * (((long) numberOfValueSplits) + 3);
+    final boolean isEnoughMemory = runtime.maxMemory() - runtime.totalMemory() > neededMemory;
+    Assume.assumeTrue(isEnoughMemory);
+
+    final byte[] valueSplit = new byte[splitSize];
+    final byte[] key = "key".getBytes();
+
+    thrown.expect(RocksDBException.class);
+    thrown.expectMessage("Requested array size exceeds VM limit");
+
+    // Merge (numberOfValueSplits + 1) valueSplits so that the total value
+    // size exceeds Integer.MAX_VALUE.
+    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+         final Options opt = new Options()
+             .setCreateIfMissing(true)
+             .setMergeOperator(stringAppendOperator);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      db.put(key, valueSplit);
+      for (int i = 0; i < numberOfValueSplits; i++) {
+        db.merge(key, valueSplit);
+      }
+      db.get(key);
+    }
+  }
+
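For scale, a quick back-of-envelope check of the test's arithmetic (an editorial aside, assuming StringAppendOperator's default one-byte ',' delimiter):

// Not part of the test: shows why get(key) must trip the new size check.
public class MergedValueSizeMath {
  public static void main(final String[] args) {
    final long splitSize = Integer.MAX_VALUE / 10;  // 214,748,364 bytes
    // 1 put + 10 merges = 11 splits, joined by 10 one-byte delimiters.
    final long mergedSize = 11L * splitSize + 10L;  // 2,362,232,014 bytes
    System.out.println("merged value size: " + mergedSize);
    System.out.println("exceeds Integer.MAX_VALUE: "
        + (mergedSize > Integer.MAX_VALUE));  // true
  }
}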
@Test
public void multiGet() throws RocksDBException, InterruptedException {
try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());