check if data size exceeds java array vm limit when it is copied in jni (#3850)
Summary: to address issue #3849

Closes https://github.com/facebook/rocksdb/pull/3850

Differential Revision: D8695487

Pulled By: sagar0

fbshipit-source-id: 04baeb2127663934ed1321fe6d9a9ec23c86e16b
parent 36fa49ceb5
commit e099c2dd55
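
Before the diff, a hedged sketch of how this change surfaces to a Java caller; the database path, key, and class name below are illustrative and not part of the patch. A value whose size no longer fits a Java byte[] now fails with a RocksDBException carrying the message "Requested array size exceeds VM limit", instead of overflowing the jsize cast inside the JNI copy.

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class OversizedValueExample {
      public static void main(final String[] args) {
        RocksDB.loadLibrary();
        try (final RocksDB db = RocksDB.open("/tmp/example-db")) {
          // If the stored value is too large to copy into a Java byte[],
          // the JNI layer now raises RocksDBException instead of failing
          // unpredictably inside the VM.
          final byte[] value = db.get("huge-key".getBytes());
          System.out.println(value == null ? "not found" : "length: " + value.length);
        } catch (final RocksDBException e) {
          System.err.println("get failed: " + e.getMessage());
        }
      }
    }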
@@ -4292,25 +4292,12 @@ class JniUtil {
      * @param bytes The bytes to copy
      *
      * @return the Java byte[] or nullptr if an exception occurs
+     *
+     * @throws RocksDBException thrown
+     *   if memory size to copy exceeds general java specific array size limitation.
      */
     static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
-      const jsize jlen = static_cast<jsize>(bytes.size());
-
-      jbyteArray jbytes = env->NewByteArray(jlen);
-      if(jbytes == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      env->SetByteArrayRegion(jbytes, 0, jlen,
-        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes.c_str())));
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jbytes);
-        return nullptr;
-      }
-
-      return jbytes;
+      return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size());
     }

     /**
@@ -4473,6 +4460,47 @@ class JniUtil {

       return jbyte_strings;
     }

+    /**
+     * Copies bytes to a new jByteArray with the check of java array size limitation.
+     *
+     * @param bytes pointer to memory to copy to a new jByteArray
+     * @param size number of bytes to copy
+     *
+     * @return the Java byte[] or nullptr if an exception occurs
+     *
+     * @throws RocksDBException thrown
+     *   if memory size to copy exceeds general java array size limitation to avoid overflow.
+     */
+    static jbyteArray createJavaByteArrayWithSizeCheck(JNIEnv* env, const char* bytes, const size_t size) {
+      // Limitation for java array size is vm specific
+      // In general it cannot exceed Integer.MAX_VALUE (2^31 - 1)
+      // Current HotSpot VM limitation for array size is Integer.MAX_VALUE - 5 (2^31 - 1 - 5)
+      // It means that the next call to env->NewByteArray can still end with
+      // OutOfMemoryError("Requested array size exceeds VM limit") coming from VM
+      static const size_t MAX_JARRAY_SIZE = (static_cast<size_t>(1)) << 31;
+      if(size > MAX_JARRAY_SIZE) {
+        rocksdb::RocksDBExceptionJni::ThrowNew(env, "Requested array size exceeds VM limit");
+        return nullptr;
+      }
+
+      const jsize jlen = static_cast<jsize>(size);
+      jbyteArray jbytes = env->NewByteArray(jlen);
+      if(jbytes == nullptr) {
+        // exception thrown: OutOfMemoryError
+        return nullptr;
+      }
+
+      env->SetByteArrayRegion(jbytes, 0, jlen,
+        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes)));
+      if(env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        env->DeleteLocalRef(jbytes);
+        return nullptr;
+      }
+
+      return jbytes;
+    }
+
     /**
      * Copies bytes from a rocksdb::Slice to a jByteArray
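
As the comments in the new helper note, the exact array-size ceiling is VM specific; the 2^31 guard only rules out sizes no JVM can accept, and env->NewByteArray may still fail below that bound. A minimal stand-alone Java sketch of the underlying limit (behavior and message depend on the JVM and heap settings, so this is illustrative rather than guaranteed):

    public class ArrayLimitDemo {
      public static void main(final String[] args) {
        try {
          // On HotSpot this typically fails even with a large heap, because the
          // VM caps array length a few elements below Integer.MAX_VALUE.
          final byte[] tooBig = new byte[Integer.MAX_VALUE];
          System.out.println("allocated " + tooBig.length + " bytes");
        } catch (final OutOfMemoryError e) {
          // Either "Requested array size exceeds VM limit" or a plain
          // "Java heap space" error, depending on the VM and -Xmx.
          System.err.println("allocation failed: " + e.getMessage());
        }
      }
    }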
@@ -4481,25 +4509,12 @@ class JniUtil {
      * @param bytes The bytes to copy
      *
      * @return the Java byte[] or nullptr if an exception occurs
+     *
+     * @throws RocksDBException thrown
+     *   if memory size to copy exceeds general java specific array size limitation.
      */
     static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) {
-      const jsize jlen = static_cast<jsize>(bytes.size());
-
-      jbyteArray jbytes = env->NewByteArray(jlen);
-      if(jbytes == nullptr) {
-        // exception thrown: OutOfMemoryError
-        return nullptr;
-      }
-
-      env->SetByteArrayRegion(jbytes, 0, jlen,
-        const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes.data())));
-      if(env->ExceptionCheck()) {
-        // exception thrown: ArrayIndexOutOfBoundsException
-        env->DeleteLocalRef(jbytes);
-        return nullptr;
-      }
-
-      return jbytes;
+      return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size());
     }

     /*
@@ -28,8 +28,10 @@ public class ColumnFamilyHandle extends RocksObject {
    * Gets the name of the Column Family.
    *
    * @return The name of the Column Family.
+   *
+   * @throws RocksDBException if an error occurs whilst retrieving the name.
    */
-  public byte[] getName() {
+  public byte[] getName() throws RocksDBException {
     return getName(nativeHandle_);
   }

@@ -71,14 +73,22 @@ public class ColumnFamilyHandle extends RocksObject {
     }

     final ColumnFamilyHandle that = (ColumnFamilyHandle) o;
-    return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
-        getID() == that.getID() &&
-        Arrays.equals(getName(), that.getName());
+    try {
+      return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
+          getID() == that.getID() &&
+          Arrays.equals(getName(), that.getName());
+    } catch (RocksDBException e) {
+      throw new RuntimeException("Cannot compare column family handles", e);
+    }
   }

   @Override
   public int hashCode() {
-    return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_);
+    try {
+      return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_);
+    } catch (RocksDBException e) {
+      throw new RuntimeException("Cannot calculate hash code of column family handle", e);
+    }
   }

   /**
@@ -96,7 +106,7 @@ public class ColumnFamilyHandle extends RocksObject {
     }
   }

-  private native byte[] getName(final long handle);
+  private native byte[] getName(final long handle) throws RocksDBException;
   private native int getID(final long handle);
   private native ColumnFamilyDescriptor getDescriptor(final long handle) throws RocksDBException;
   @Override protected final native void disposeInternal(final long handle);
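
Because getName() is now declared to throw the checked RocksDBException (its native implementation goes through the size-checked JNI copy above), existing callers must either propagate or wrap it, exactly as equals() and hashCode() do in this patch. A small illustrative helper, not part of the change and with a hypothetical class name:

    import java.util.Arrays;
    import org.rocksdb.ColumnFamilyHandle;
    import org.rocksdb.RocksDBException;

    public final class ColumnFamilyNames {
      private ColumnFamilyNames() {}

      // Propagates the checked exception to the caller rather than wrapping it.
      public static boolean isDefault(final ColumnFamilyHandle handle)
          throws RocksDBException {
        return Arrays.equals(handle.getName(), "default".getBytes());
      }
    }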
@@ -65,8 +65,11 @@ public class WriteBatch extends AbstractWriteBatch {
    * Retrieve the serialized version of this batch.
    *
    * @return the serialized representation of this write batch.
+   *
+   * @throws RocksDBException if an error occurs whilst retrieving
+   *   the serialized batch data.
    */
-  public byte[] data() {
+  public byte[] data() throws RocksDBException {
     return data(nativeHandle_);
   }

@@ -253,7 +256,7 @@ public class WriteBatch extends AbstractWriteBatch {
       final int serializedLength);
   private native void iterate(final long handle, final long handlerHandle)
       throws RocksDBException;
-  private native byte[] data(final long nativeHandle);
+  private native byte[] data(final long nativeHandle) throws RocksDBException;
   private native long getDataSize(final long nativeHandle);
   private native boolean hasPut(final long nativeHandle);
   private native boolean hasDelete(final long nativeHandle);
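
Similarly, WriteBatch.data() now declares RocksDBException, since the serialized batch is copied back to Java through the same size-checked JNI path. A hedged usage sketch; the class name and key/value bytes are illustrative:

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.WriteBatch;

    public class WriteBatchDataExample {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final WriteBatch batch = new WriteBatch()) {
          batch.put("k".getBytes(), "v".getBytes());
          // data() can now fail with RocksDBException if the serialized batch
          // would exceed the JVM's array size limit.
          final byte[] serialized = batch.data();
          System.out.println("serialized batch: " + serialized.length + " bytes");
        }
      }
    }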
@@ -4,9 +4,11 @@
 // (found in the LICENSE.Apache file in the root directory).
 package org.rocksdb;

+import org.junit.Assume;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 import org.junit.rules.TemporaryFolder;

 import java.util.*;
@@ -143,6 +145,39 @@ public class RocksDBTest {
     }
   }

+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test
+  public void getOutOfArrayMaxSizeValue() throws RocksDBException {
+    final int numberOfValueSplits = 10;
+    final int splitSize = Integer.MAX_VALUE / numberOfValueSplits;
+
+    Runtime runtime = Runtime.getRuntime();
+    long neededMemory = ((long)(splitSize)) * (((long)numberOfValueSplits) + 3);
+    boolean isEnoughMemory = runtime.maxMemory() - runtime.totalMemory() > neededMemory;
+    Assume.assumeTrue(isEnoughMemory);
+
+    final byte[] valueSplit = new byte[splitSize];
+    final byte[] key = "key".getBytes();
+
+    thrown.expect(RocksDBException.class);
+    thrown.expectMessage("Requested array size exceeds VM limit");
+
+    // merge (numberOfValueSplits + 1) valueSplit's to get value size exceeding Integer.MAX_VALUE
+    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+         final Options opt = new Options()
+             .setCreateIfMissing(true)
+             .setMergeOperator(stringAppendOperator);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      db.put(key, valueSplit);
+      for (int i = 0; i < numberOfValueSplits; i++) {
+        db.merge(key, valueSplit);
+      }
+      db.get(key);
+    }
+  }
+
   @Test
   public void multiGet() throws RocksDBException, InterruptedException {
     try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());