Merge pull request #545 from fyrz/RocksJava-Level-Compression

[RocksJava] Add compression per level to API
Yueh-Hsuan Chiang 2015-03-24 12:14:09 -07:00
commit 6284eef4c0
8 changed files with 373 additions and 6 deletions
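In short, both Options and ColumnFamilyOptions gain a setCompressionPerLevel(List&lt;CompressionType&gt;) setter and a compressionPerLevel() getter; the Java list is converted to the native compression_per_level vector through new JNI helpers. A minimal usage sketch, assuming the native library loads via RocksDB.loadLibrary() (the class name CompressionPerLevelExample is illustrative only, not part of this change):

import java.util.Arrays;
import java.util.List;

import org.rocksdb.CompressionType;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class CompressionPerLevelExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    final Options options = new Options();
    try {
      options.setNumLevels(3);
      // One entry per level; this overrides the single compression setting.
      final List<CompressionType> perLevel = Arrays.asList(
          CompressionType.NO_COMPRESSION,      // L0
          CompressionType.SNAPPY_COMPRESSION,  // L1
          CompressionType.LZ4_COMPRESSION);    // L2
      options.setCompressionPerLevel(perLevel);
      // Read back as a List<CompressionType> rebuilt from the native vector.
      System.out.println(options.compressionPerLevel());
    } finally {
      options.dispose();
    }
  }
}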

@@ -1058,6 +1058,80 @@ jbyte Java_org_rocksdb_Options_compressionType(
return reinterpret_cast<rocksdb::Options*>(jhandle)->compression;
}
/*
* Helper method to convert a Java list to a CompressionType
* vector.
*/
std::vector<rocksdb::CompressionType> rocksdb_compression_vector_helper(
JNIEnv* env, jobject jcompressionLevels) {
std::vector<rocksdb::CompressionType> compressionLevels;
// iterate over compressionLevels
jobject iteratorObj = env->CallObjectMethod(
jcompressionLevels, rocksdb::ListJni::getIteratorMethod(env));
while (env->CallBooleanMethod(
iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) {
// get compression
jobject jcompression_obj = env->CallObjectMethod(iteratorObj,
rocksdb::ListJni::getNextMethod(env));
jbyte jcompression = env->CallByteMethod(jcompression_obj,
rocksdb::ByteJni::getByteValueMethod(env));
compressionLevels.push_back(static_cast<rocksdb::CompressionType>(
jcompression));
}
return compressionLevels;
}
/*
* Helper method to convert a CompressionType vector to a Java
* List.
*/
jobject rocksdb_compression_list_helper(JNIEnv* env,
std::vector<rocksdb::CompressionType> compressionLevels) {
jclass jListClazz = env->FindClass("java/util/ArrayList");
jmethodID midList = rocksdb::ListJni::getArrayListConstructorMethodId(
env, jListClazz);
jobject jcompressionLevels = env->NewObject(jListClazz,
midList, compressionLevels.size());
// insert in java list
for (std::vector<rocksdb::CompressionType>::size_type i = 0;
i != compressionLevels.size(); i++) {
jclass jByteClazz = env->FindClass("java/lang/Byte");
jmethodID midByte = env->GetMethodID(jByteClazz, "<init>", "(B)V");
jobject obj = env->NewObject(jByteClazz, midByte,
compressionLevels[i]);
env->CallBooleanMethod(jcompressionLevels,
rocksdb::ListJni::getListAddMethodId(env), obj);
}
return jcompressionLevels;
}
/*
* Class: org_rocksdb_Options
* Method: setCompressionPerLevel
* Signature: (JLjava/util/List;)V
*/
void Java_org_rocksdb_Options_setCompressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle,
jobject jcompressionLevels) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
std::vector<rocksdb::CompressionType> compressionLevels =
rocksdb_compression_vector_helper(env, jcompressionLevels);
options->compression_per_level = compressionLevels;
}
/*
* Class: org_rocksdb_Options
* Method: compressionPerLevel
* Signature: (J)Ljava/util/List;
*/
jobject Java_org_rocksdb_Options_compressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
return rocksdb_compression_list_helper(env,
options->compression_per_level);
}
/*
* Class: org_rocksdb_Options
* Method: setCompactionStyle
@@ -2144,6 +2218,32 @@ jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(
compression;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setCompressionPerLevel
* Signature: (JLjava/util/List;)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle,
jobject jcompressionLevels) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
std::vector<rocksdb::CompressionType> compressionLevels =
rocksdb_compression_vector_helper(env, jcompressionLevels);
options->compression_per_level = compressionLevels;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: compressionPerLevel
* Signature: (J)Ljava/util/List;
*/
jobject Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return rocksdb_compression_list_helper(env,
options->compression_per_level);
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setCompactionStyle

@@ -498,6 +498,24 @@ class ListJni {
}
};
class ByteJni {
public:
// Get the java class id of java.lang.Byte.
static jclass getByteClass(JNIEnv* env) {
jclass jclazz = env->FindClass("java/lang/Byte");
assert(jclazz != nullptr);
return jclazz;
}
// Get the java method id of java.lang.Byte.byteValue.
static jmethodID getByteValueMethod(JNIEnv* env) {
static jmethodID mid = env->GetMethodID(
getByteClass(env), "byteValue", "()B");
assert(mid != nullptr);
return mid;
}
};
class BackupInfoJni {
public:
// Get the java class id of org.rocksdb.BackupInfo.

@@ -5,6 +5,8 @@
package org.rocksdb;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
/**
@@ -200,6 +202,30 @@ public class ColumnFamilyOptions extends RocksObject
return CompressionType.values()[compressionType(nativeHandle_)];
}
@Override
public ColumnFamilyOptions setCompressionPerLevel(
final List<CompressionType> compressionLevels) {
final List<Byte> byteCompressionTypes = new ArrayList<>(
compressionLevels.size());
for (final CompressionType compressionLevel : compressionLevels) {
byteCompressionTypes.add(compressionLevel.getValue());
}
setCompressionPerLevel(nativeHandle_, byteCompressionTypes);
return this;
}
@Override
public List<CompressionType> compressionPerLevel() {
final List<Byte> byteCompressionTypes =
compressionPerLevel(nativeHandle_);
final List<CompressionType> compressionLevels = new ArrayList<>();
for (final Byte byteCompressionType : byteCompressionTypes) {
compressionLevels.add(CompressionType.getCompressionType(
byteCompressionType));
}
return compressionLevels;
}
@Override
public ColumnFamilyOptions setNumLevels(final int numLevels) {
setNumLevels(nativeHandle_, numLevels);
@@ -651,6 +677,9 @@ public class ColumnFamilyOptions extends RocksObject
private native int minWriteBufferNumberToMerge(long handle);
private native void setCompressionType(long handle, byte compressionType);
private native byte compressionType(long handle);
private native void setCompressionPerLevel(long handle,
List<Byte> compressionLevels);
private native List<Byte> compressionPerLevel(long handle);
private native void useFixedLengthPrefixExtractor(
long handle, int prefixLength);
private native void setNumLevels(

@@ -5,6 +5,8 @@
package org.rocksdb;
import java.util.List;
public interface ColumnFamilyOptionsInterface {
/**
@@ -248,6 +250,62 @@ public interface ColumnFamilyOptionsInterface {
*/
CompressionType compressionType();
/**
* <p>Different levels can have different compression
* policies. There are cases where most lower levels
* would like to use quick compression algorithms while
* the higher levels (which have more data) use
* compression algorithms that have better compression
* but could be slower. This array, if non-empty, should
* have an entry for each level of the database;
* these override the value specified in the previous
* field 'compression'.</p>
*
* <strong>NOTICE</strong>
* <p>If {@code level_compaction_dynamic_level_bytes=true},
* {@code compression_per_level[0]} still determines {@code L0},
* but the other elements of the array are based on the base
* level (the level that {@code L0} files are merged to), and
* may not match the levels users see in the info log metadata.
* </p>
* <p>If {@code L0} files are merged to {@code level-n},
* then, for {@code i > 0}, {@code compression_per_level[i]}
* determines the compression type for level {@code n+i-1}.</p>
*
* <strong>Example</strong>
* <p>For example, if we have 5 levels, and we determine to
* merge {@code L0} data to {@code L4} (which means {@code L1..L3}
* will be empty), then the new files that go to {@code L4} use
* compression type {@code compression_per_level[1]}.</p>
*
* <p>If {@code L0} is instead merged to {@code L2}, data that
* goes to {@code L2} will be compressed according to
* {@code compression_per_level[1]}, {@code L3} using
* {@code compression_per_level[2]} and {@code L4} using
* {@code compression_per_level[3]}. Compaction for each
* level can change as the data grows.</p>
*
* <p><strong>Default:</strong> empty</p>
*
* @param compressionLevels list of
* {@link org.rocksdb.CompressionType} instances.
*
* @return the reference to the current option.
*/
Object setCompressionPerLevel(
List<CompressionType> compressionLevels);
/**
* <p>Return the currently set {@link org.rocksdb.CompressionType}
* instances per level.</p>
*
* <p>See: {@link #setCompressionPerLevel(java.util.List)}</p>
*
* @return list of {@link org.rocksdb.CompressionType}
* instances.
*/
List<CompressionType> compressionPerLevel();
/**
* Set the number of levels for this database
* If level-styled compaction is used, then this number determines

@@ -45,6 +45,26 @@ public enum CompressionType {
return CompressionType.NO_COMPRESSION;
}
/**
* <p>Get the CompressionType enumeration value by
* passing the byte identifier to this method.</p>
*
* <p>If the byte identifier cannot be matched to a
* {@code CompressionType}, the enumeration value
* {@code NO_COMPRESSION} will be returned.</p>
*
* @param byteIdentifier byte representation of a CompressionType.
*
* @return CompressionType instance.
*/
public static CompressionType getCompressionType(byte byteIdentifier) {
for (CompressionType compressionType : CompressionType.values()) {
if (compressionType.getValue() == byteIdentifier) {
return compressionType;
}
}
return CompressionType.NO_COMPRESSION;
}
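Because the JNI layer passes compression settings across the boundary as plain bytes (see the List&lt;Byte&gt; native signatures and the ByteJni helper above), getCompressionType(byte) is what turns those bytes back into enum constants. A short round-trip sketch, assuming 0x7F is not a defined identifier (the class name is illustrative only):

import org.rocksdb.CompressionType;

class CompressionTypeRoundTrip {
  public static void main(final String[] args) {
    // A byte produced by getValue() maps back to the same constant.
    final byte id = CompressionType.LZ4_COMPRESSION.getValue();
    System.out.println(CompressionType.getCompressionType(id));  // prints the matching constant
    // An unmatched identifier falls back to NO_COMPRESSION.
    System.out.println(CompressionType.getCompressionType((byte) 0x7F));
  }
}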
/**
* <p>Returns the byte value of the enumerations value.</p>
*

@@ -5,6 +5,9 @@
package org.rocksdb;
import java.util.ArrayList;
import java.util.List;
/**
* Options to control the behavior of a database. It will be used
* during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
@@ -684,6 +687,29 @@ public class Options extends RocksObject
return CompressionType.values()[compressionType(nativeHandle_)];
}
@Override
public Options setCompressionPerLevel(final List<CompressionType> compressionLevels) {
final List<Byte> byteCompressionTypes = new ArrayList<>(
compressionLevels.size());
for (final CompressionType compressionLevel : compressionLevels) {
byteCompressionTypes.add(compressionLevel.getValue());
}
setCompressionPerLevel(nativeHandle_, byteCompressionTypes);
return this;
}
@Override
public List<CompressionType> compressionPerLevel() {
final List<Byte> byteCompressionTypes =
compressionPerLevel(nativeHandle_);
final List<CompressionType> compressionLevels = new ArrayList<>();
for (final Byte byteCompressionType : byteCompressionTypes) {
compressionLevels.add(CompressionType.getCompressionType(
byteCompressionType));
}
return compressionLevels;
}
@Override
public Options setCompressionType(CompressionType compressionType) {
setCompressionType(nativeHandle_, compressionType.getValue());
@@ -1202,6 +1228,9 @@ public class Options extends RocksObject
private native int minWriteBufferNumberToMerge(long handle);
private native void setCompressionType(long handle, byte compressionType);
private native byte compressionType(long handle);
private native void setCompressionPerLevel(long handle,
List<Byte> compressionLevels);
private native List<Byte> compressionPerLevel(long handle);
private native void useFixedLengthPrefixExtractor(
long handle, int prefixLength);
private native void setNumLevels(

@@ -8,6 +8,8 @@ package org.rocksdb;
import org.junit.ClassRule;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.Random;
@@ -630,20 +632,74 @@ public class ColumnFamilyOptionsTest {
@Test
public void compressionTypes() {
ColumnFamilyOptions columnFamilyOptions = null;
try {
columnFamilyOptions = new ColumnFamilyOptions();
for (CompressionType compressionType :
CompressionType.values()) {
columnFamilyOptions.setCompressionType(compressionType);
assertThat(columnFamilyOptions.compressionType()).
isEqualTo(compressionType);
assertThat(CompressionType.valueOf("NO_COMPRESSION")).
isEqualTo(CompressionType.NO_COMPRESSION);
}
} finally {
if (columnFamilyOptions != null) {
columnFamilyOptions.dispose();
}
}
}
@Test
public void compressionPerLevel() {
ColumnFamilyOptions columnFamilyOptions = null;
try {
columnFamilyOptions = new ColumnFamilyOptions();
assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
List<CompressionType> compressionTypeList = new ArrayList<>();
for (int i=0; i < columnFamilyOptions.numLevels(); i++) {
compressionTypeList.add(CompressionType.NO_COMPRESSION);
}
columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
compressionTypeList = columnFamilyOptions.compressionPerLevel();
for (CompressionType compressionType : compressionTypeList) {
assertThat(compressionType).isEqualTo(
CompressionType.NO_COMPRESSION);
}
} finally {
if (columnFamilyOptions != null) {
columnFamilyOptions.dispose();
}
}
}
@Test
public void differentCompressionsPerLevel() {
ColumnFamilyOptions columnFamilyOptions = null;
try {
columnFamilyOptions = new ColumnFamilyOptions();
columnFamilyOptions.setNumLevels(3);
assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
List<CompressionType> compressionTypeList = new ArrayList<>();
compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
compressionTypeList = columnFamilyOptions.compressionPerLevel();
assertThat(compressionTypeList.size()).isEqualTo(3);
assertThat(compressionTypeList).
containsExactly(
CompressionType.BZLIB2_COMPRESSION,
CompressionType.SNAPPY_COMPRESSION,
CompressionType.LZ4_COMPRESSION);
} finally {
if (columnFamilyOptions != null) {
columnFamilyOptions.dispose();
}
}
}

@@ -5,6 +5,8 @@
package org.rocksdb;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.junit.ClassRule;
import org.junit.Test;
@@ -1046,6 +1048,61 @@ public class OptionsTest {
}
}
@Test
public void compressionPerLevel() {
ColumnFamilyOptions columnFamilyOptions = null;
try {
columnFamilyOptions = new ColumnFamilyOptions();
assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
List<CompressionType> compressionTypeList =
new ArrayList<>();
for (int i=0; i < columnFamilyOptions.numLevels(); i++) {
compressionTypeList.add(CompressionType.NO_COMPRESSION);
}
columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
compressionTypeList = columnFamilyOptions.compressionPerLevel();
for (final CompressionType compressionType : compressionTypeList) {
assertThat(compressionType).isEqualTo(
CompressionType.NO_COMPRESSION);
}
} finally {
if (columnFamilyOptions != null) {
columnFamilyOptions.dispose();
}
}
}
@Test
public void differentCompressionsPerLevel() {
ColumnFamilyOptions columnFamilyOptions = null;
try {
columnFamilyOptions = new ColumnFamilyOptions();
columnFamilyOptions.setNumLevels(3);
assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
List<CompressionType> compressionTypeList = new ArrayList<>();
compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
compressionTypeList = columnFamilyOptions.compressionPerLevel();
assertThat(compressionTypeList.size()).isEqualTo(3);
assertThat(compressionTypeList).
containsExactly(
CompressionType.BZLIB2_COMPRESSION,
CompressionType.SNAPPY_COMPRESSION,
CompressionType.LZ4_COMPRESSION);
} finally {
if (columnFamilyOptions != null) {
columnFamilyOptions.dispose();
}
}
}
@Test
public void compactionStyles() {
Options options = null;