From e099c2dd55a191c9ee63b781dd6f5260d66abd52 Mon Sep 17 00:00:00 2001
From: Andrey Zagrebin
Date: Fri, 29 Jun 2018 16:03:28 -0700
Subject: [PATCH] check if data size exceeds java array vm limit when it is
 copied in jni (#3850)

Summary:
to address issue #3849
Closes https://github.com/facebook/rocksdb/pull/3850

Differential Revision: D8695487

Pulled By: sagar0

fbshipit-source-id: 04baeb2127663934ed1321fe6d9a9ec23c86e16b
---
 java/rocksjni/portal.h                         | 83 +++++++++++--------
 .../java/org/rocksdb/ColumnFamilyHandle.java   | 22 +++--
 .../src/main/java/org/rocksdb/WriteBatch.java  |  7 +-
 .../test/java/org/rocksdb/RocksDBTest.java     | 35 ++++++++
 4 files changed, 105 insertions(+), 42 deletions(-)

diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h
index 29ba320701..aa41eff336 100644
--- a/java/rocksjni/portal.h
+++ b/java/rocksjni/portal.h
@@ -4292,25 +4292,12 @@ class JniUtil {
    * @param bytes The bytes to copy
    *
    * @return the Java byte[] or nullptr if an exception occurs
+   *
+   * @throws RocksDBException thrown
+   *   if memory size to copy exceeds general java specific array size limitation.
    */
   static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
-    const jsize jlen = static_cast<jsize>(bytes.size());
-
-    jbyteArray jbytes = env->NewByteArray(jlen);
-    if(jbytes == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-
-    env->SetByteArrayRegion(jbytes, 0, jlen,
-      const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes.c_str())));
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jbytes);
-      return nullptr;
-    }
-
-    return jbytes;
+    return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size());
   }
 
   /**
@@ -4473,6 +4460,47 @@ class JniUtil {
     return jbyte_strings;
   }
 
+  /**
+   * Copies bytes to a new jByteArray with the check of java array size limitation.
+   *
+   * @param bytes pointer to memory to copy to a new jByteArray
+   * @param size number of bytes to copy
+   *
+   * @return the Java byte[] or nullptr if an exception occurs
+   *
+   * @throws RocksDBException thrown
+   *   if memory size to copy exceeds general java array size limitation to avoid overflow.
+   */
+  static jbyteArray createJavaByteArrayWithSizeCheck(JNIEnv* env, const char* bytes, const size_t size) {
+    // Limitation for java array size is vm specific
+    // In general it cannot exceed Integer.MAX_VALUE (2^31 - 1)
+    // Current HotSpot VM limitation for array size is Integer.MAX_VALUE - 5 (2^31 - 1 - 5)
+    // It means that the next call to env->NewByteArray can still end with
+    // OutOfMemoryError("Requested array size exceeds VM limit") coming from VM
+    static const size_t MAX_JARRAY_SIZE = (static_cast<size_t>(1)) << 31;
+    if(size > MAX_JARRAY_SIZE) {
+      rocksdb::RocksDBExceptionJni::ThrowNew(env, "Requested array size exceeds VM limit");
+      return nullptr;
+    }
+
+    const jsize jlen = static_cast<jsize>(size);
+    jbyteArray jbytes = env->NewByteArray(jlen);
+    if(jbytes == nullptr) {
+      // exception thrown: OutOfMemoryError
+      return nullptr;
+    }
+
+    env->SetByteArrayRegion(jbytes, 0, jlen,
+      const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes)));
+    if(env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      env->DeleteLocalRef(jbytes);
+      return nullptr;
+    }
+
+    return jbytes;
+  }
+
   /**
    * Copies bytes from a rocksdb::Slice to a jByteArray
    *
@@ -4481,25 +4509,12 @@ class JniUtil {
    * @param bytes The bytes to copy
    *
    * @return the Java byte[] or nullptr if an exception occurs
+   *
+   * @throws RocksDBException thrown
+   *   if memory size to copy exceeds general java specific array size limitation.
    */
   static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) {
-    const jsize jlen = static_cast<jsize>(bytes.size());
-
-    jbyteArray jbytes = env->NewByteArray(jlen);
-    if(jbytes == nullptr) {
-      // exception thrown: OutOfMemoryError
-      return nullptr;
-    }
-
-    env->SetByteArrayRegion(jbytes, 0, jlen,
-      const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes.data())));
-    if(env->ExceptionCheck()) {
-      // exception thrown: ArrayIndexOutOfBoundsException
-      env->DeleteLocalRef(jbytes);
-      return nullptr;
-    }
-
-    return jbytes;
+    return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size());
   }
 
   /*
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
index 16b9c609b9..9cda136b79 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
@@ -28,8 +28,10 @@ public class ColumnFamilyHandle extends RocksObject {
    * Gets the name of the Column Family.
    *
    * @return The name of the Column Family.
+   *
+   * @throws RocksDBException if an error occurs whilst retrieving the name.
    */
-  public byte[] getName() {
+  public byte[] getName() throws RocksDBException {
     return getName(nativeHandle_);
   }
 
@@ -71,14 +73,22 @@ public class ColumnFamilyHandle extends RocksObject {
     }
 
     final ColumnFamilyHandle that = (ColumnFamilyHandle) o;
-    return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
-        getID() == that.getID() &&
-        Arrays.equals(getName(), that.getName());
+    try {
+      return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ &&
+          getID() == that.getID() &&
+          Arrays.equals(getName(), that.getName());
+    } catch (RocksDBException e) {
+      throw new RuntimeException("Cannot compare column family handles", e);
+    }
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_);
+    try {
+      return Objects.hash(getName(), getID(), rocksDB_.nativeHandle_);
+    } catch (RocksDBException e) {
+      throw new RuntimeException("Cannot calculate hash code of column family handle", e);
+    }
   }
 
   /**
@@ -96,7 +106,7 @@ public class ColumnFamilyHandle extends RocksObject {
     }
   }
 
-  private native byte[] getName(final long handle);
+  private native byte[] getName(final long handle) throws RocksDBException;
   private native int getID(final long handle);
   private native ColumnFamilyDescriptor getDescriptor(final long handle) throws RocksDBException;
   @Override protected final native void disposeInternal(final long handle);
diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java
index fc95ee5944..5673a25efb 100644
--- a/java/src/main/java/org/rocksdb/WriteBatch.java
+++ b/java/src/main/java/org/rocksdb/WriteBatch.java
@@ -65,8 +65,11 @@ public class WriteBatch extends AbstractWriteBatch {
    * Retrieve the serialized version of this batch.
    *
    * @return the serialized representation of this write batch.
+   *
+   * @throws RocksDBException if an error occurs whilst retrieving
+   *     the serialized batch data.
    */
-  public byte[] data() {
+  public byte[] data() throws RocksDBException {
     return data(nativeHandle_);
   }
 
@@ -253,7 +256,7 @@ public class WriteBatch extends AbstractWriteBatch {
       final int serializedLength);
   private native void iterate(final long handle, final long handlerHandle)
       throws RocksDBException;
-  private native byte[] data(final long nativeHandle);
+  private native byte[] data(final long nativeHandle) throws RocksDBException;
   private native long getDataSize(final long nativeHandle);
   private native boolean hasPut(final long nativeHandle);
   private native boolean hasDelete(final long nativeHandle);
diff --git a/java/src/test/java/org/rocksdb/RocksDBTest.java b/java/src/test/java/org/rocksdb/RocksDBTest.java
index 01f56d9b5b..158b8d56a8 100644
--- a/java/src/test/java/org/rocksdb/RocksDBTest.java
+++ b/java/src/test/java/org/rocksdb/RocksDBTest.java
@@ -4,9 +4,11 @@
 // (found in the LICENSE.Apache file in the root directory).
 
 package org.rocksdb;
 
+import org.junit.Assume;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 import org.junit.rules.TemporaryFolder;
 import java.util.*;
 
@@ -143,6 +145,39 @@ public class RocksDBTest {
     }
   }
 
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test
+  public void getOutOfArrayMaxSizeValue() throws RocksDBException {
+    final int numberOfValueSplits = 10;
+    final int splitSize = Integer.MAX_VALUE / numberOfValueSplits;
+
+    Runtime runtime = Runtime.getRuntime();
+    long neededMemory = ((long)(splitSize)) * (((long)numberOfValueSplits) + 3);
+    boolean isEnoughMemory = runtime.maxMemory() - runtime.totalMemory() > neededMemory;
+    Assume.assumeTrue(isEnoughMemory);
+
+    final byte[] valueSplit = new byte[splitSize];
+    final byte[] key = "key".getBytes();
+
+    thrown.expect(RocksDBException.class);
+    thrown.expectMessage("Requested array size exceeds VM limit");
+
+    // merge (numberOfValueSplits + 1) valueSplit's to get value size exceeding Integer.MAX_VALUE
+    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+         final Options opt = new Options()
+             .setCreateIfMissing(true)
+             .setMergeOperator(stringAppendOperator);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      db.put(key, valueSplit);
+      for (int i = 0; i < numberOfValueSplits; i++) {
+        db.merge(key, valueSplit);
+      }
+      db.get(key);
+    }
+  }
+
   @Test
   public void multiGet() throws RocksDBException, InterruptedException {
     try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
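
With this change, ColumnFamilyHandle.getName() and WriteBatch.data() declare the checked RocksDBException, because the returned byte[] is now copied through the size-checked JNI helper and can fail with "Requested array size exceeds VM limit". A minimal caller-side sketch of how code adapts to the new signatures; the database path, class name, and key/value literals are illustrative assumptions and not part of the patch:

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class SizeCheckedCopyExample {
  public static void main(final String[] args) {
    try (final Options opt = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(opt, "/tmp/size-check-example")) {  // hypothetical path
      // getName() is now declared to throw RocksDBException, since the name
      // bytes are copied through createJavaByteArrayWithSizeCheck in JNI.
      final ColumnFamilyHandle defaultCf = db.getDefaultColumnFamily();
      System.out.println("column family: " + new String(defaultCf.getName()));

      try (final WriteBatch batch = new WriteBatch();
           final WriteOptions writeOpts = new WriteOptions()) {
        batch.put("key".getBytes(), "value".getBytes());
        // data() is checked as well; a serialized batch larger than the VM
        // array limit would surface here as a RocksDBException.
        final byte[] serialized = batch.data();
        System.out.println("serialized batch bytes: " + serialized.length);
        db.write(writeOpts, batch);
      }
    } catch (final RocksDBException e) {
      // Covers ordinary RocksDB errors as well as the new
      // "Requested array size exceeds VM limit" failure.
      System.err.println("RocksDB error: " + e.getMessage());
    }
  }
}

Code that compares or hashes ColumnFamilyHandle instances does not need to change: equals() and hashCode() catch the checked exception internally and rethrow it as a RuntimeException, as shown in the ColumnFamilyHandle.java hunk above.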