[RocksJava] Quality improvements

Summary:
- Addressed some FindBugs issues.
- Removed obsolete dbFolder cleanup
- Added comparator tests for column families (CF)
 - Added AbstractComparatorTest.
 - Fixed a bug in the JNI part concerning Java comparators
- Minor test improvements

Test Plan:
make rocksdbjava
make jtest
mvn -f rocksjni.pom package

Reviewers: adamretter, yhchiang, ankgup87

Subscribers: dhruba

Differential Revision: https://reviews.facebook.net/D29571
This commit is contained in:
fyrz 2014-11-25 22:13:23 +01:00
parent e002a6122f
commit b7f9e644cc
6 changed files with 155 additions and 118 deletions

View file

@ -215,14 +215,26 @@ public class BackupableDBTest {
bdb.createNewBackup(true); bdb.createNewBackup(true);
bdb.createNewBackup(true); bdb.createNewBackup(true);
bdb.createNewBackup(true); bdb.createNewBackup(true);
verifyNumberOfValidBackups(bdb, 4); List<BackupInfo> infos = verifyNumberOfValidBackups(bdb, 4);
assertThat(infos.get(1).size()).
isEqualTo(infos.get(2).size());
assertThat(infos.get(1).numberFiles()).
isEqualTo(infos.get(2).numberFiles());
long maxTimeBeforePurge = Long.MIN_VALUE;
for (BackupInfo backupInfo : infos) {
if (maxTimeBeforePurge < backupInfo.timestamp()) {
maxTimeBeforePurge = backupInfo.timestamp();
}
}
// init RestoreBackupableDB // init RestoreBackupableDB
rdb = new RestoreBackupableDB(bopt); rdb = new RestoreBackupableDB(bopt);
// the same number of backups must // the same number of backups must
// exist using RestoreBackupableDB. // exist using RestoreBackupableDB.
verifyNumberOfValidBackups(rdb, 4); verifyNumberOfValidBackups(rdb, 4);
rdb.purgeOldBackups(1); rdb.purgeOldBackups(1);
verifyNumberOfValidBackups(rdb, 1); infos = verifyNumberOfValidBackups(rdb, 1);
assertThat(infos.get(0).timestamp()).
isEqualTo(maxTimeBeforePurge);
} finally { } finally {
if (bdb != null) { if (bdb != null) {
bdb.close(); bdb.close();

View file

@ -84,6 +84,9 @@ public class BlockBasedTableConfigTest {
@Test @Test
public void checksumType() { public void checksumType() {
BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
assertThat(ChecksumType.values().length).isEqualTo(3);
assertThat(ChecksumType.valueOf("kxxHash")).
isEqualTo(ChecksumType.kxxHash);
blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum); blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum);
blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash); blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash);
assertThat(blockBasedTableConfig.checksumType().equals( assertThat(blockBasedTableConfig.checksumType().equals(

View file

@ -221,5 +221,8 @@ public class ComparatorTest {
assertThat( assertThat(
BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR.ordinal()) BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR.ordinal())
.isEqualTo(1); .isEqualTo(1);
assertThat(BuiltinComparator.values().length).isEqualTo(2);
assertThat(BuiltinComparator.valueOf("BYTEWISE_COMPARATOR")).
isEqualTo(BuiltinComparator.BYTEWISE_COMPARATOR);
} }
} }

View file

@ -96,6 +96,17 @@ public class InfoLogLevelTest {
} }
} }
@Test(expected = IllegalArgumentException.class)
public void failIfIllegalByteValueProvided() {
InfoLogLevel.getInfoLogLevel((byte)-1);
}
@Test
public void valueOf() {
assertThat(InfoLogLevel.valueOf("DEBUG_LEVEL")).
isEqualTo(InfoLogLevel.DEBUG_LEVEL);
}
/** /**
* Read LOG file contents into String. * Read LOG file contents into String.
* *

View file

@ -63,6 +63,10 @@ public class PlainTableConfigTest {
public void encodingType() { public void encodingType() {
PlainTableConfig plainTableConfig = new PlainTableConfig(); PlainTableConfig plainTableConfig = new PlainTableConfig();
plainTableConfig.setEncodingType(EncodingType.kPrefix); plainTableConfig.setEncodingType(EncodingType.kPrefix);
assertThat(EncodingType.valueOf("kPrefix")).isEqualTo(
EncodingType.kPrefix);
assertThat(EncodingType.values().length).
isEqualTo(2);
assertThat(plainTableConfig.encodingType()).isEqualTo( assertThat(plainTableConfig.encodingType()).isEqualTo(
EncodingType.kPrefix); EncodingType.kPrefix);
} }

View file

@ -5,7 +5,6 @@
package org.rocksdb.test; package org.rocksdb.test;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException; import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch; import org.rocksdb.WriteBatch;
@ -13,9 +12,9 @@ import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import org.junit.ClassRule; import org.junit.ClassRule;
import org.junit.Test; import org.junit.Test;
import org.rocksdb.WriteOptions;
import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThat;
@ -27,143 +26,148 @@ public class WriteBatchHandlerTest {
@Test @Test
public void writeBatchHandler() throws IOException, RocksDBException { public void writeBatchHandler() throws IOException, RocksDBException {
WriteBatch batch = null;
CapturingWriteBatchHandler handler = null;
try {
// setup test data
final List<Tuple<Action, Tuple<byte[], byte[]>>> testEvents = new ArrayList<>();
testEvents.add(new Tuple<>(Action.DELETE,
new Tuple<byte[], byte[]>("k0".getBytes(), null)));
testEvents.add(new Tuple<>(Action.PUT,
new Tuple<>("k1".getBytes(), "v1".getBytes())));
testEvents.add(new Tuple<>(Action.PUT,
new Tuple<>("k2".getBytes(), "v2".getBytes())));
testEvents.add(new Tuple<>(Action.PUT,
new Tuple<>("k3".getBytes(), "v3".getBytes())));
testEvents.add(new Tuple<>(Action.LOG,
new Tuple<byte[], byte[]>(null, "log1".getBytes())));
testEvents.add(new Tuple<>(Action.MERGE,
new Tuple<>("k2".getBytes(), "v22".getBytes())));
testEvents.add(new Tuple<>(Action.DELETE,
new Tuple<byte[], byte[]>("k3".getBytes(), null)));
// setup test data // load test data to the write batch
final List<Tuple<Action, Tuple<byte[], byte[]>>> testEvents = new ArrayList<>(); batch = new WriteBatch();
testEvents.add(new Tuple<>(Action.DELETE, for (final Tuple<Action, Tuple<byte[], byte[]>> testEvent : testEvents) {
new Tuple<byte[], byte[]>("k0".getBytes(), null))); final Tuple<byte[], byte[]> data = testEvent.value;
testEvents.add(new Tuple<>(Action.PUT, switch (testEvent.key) {
new Tuple<>("k1".getBytes(), "v1".getBytes())));
testEvents.add(new Tuple<>(Action.PUT,
new Tuple<>("k2".getBytes(), "v2".getBytes())));
testEvents.add(new Tuple<>(Action.PUT,
new Tuple<>("k3".getBytes(), "v3".getBytes())));
testEvents.add(new Tuple<>(Action.LOG,
new Tuple<byte[], byte[]>(null, "log1".getBytes())));
testEvents.add(new Tuple<>(Action.MERGE,
new Tuple<>("k2".getBytes(), "v22".getBytes())));
testEvents.add(new Tuple<>(Action.DELETE,
new Tuple<byte[], byte[]>("k3".getBytes(), null)));
// load test data to the write batch case PUT:
final WriteBatch batch = new WriteBatch(); batch.put(data.key, data.value);
for(final Tuple<Action, Tuple<byte[], byte[]>> testEvent : testEvents) { break;
final Tuple<byte[], byte[]> data = testEvent.value;
switch(testEvent.key) {
case PUT: case MERGE:
batch.put(data.key, data.value); batch.merge(data.key, data.value);
break; break;
case MERGE: case DELETE:
batch.merge(data.key, data.value); batch.remove(data.key);
break; break;
case DELETE: case LOG:
batch.remove(data.key); batch.putLogData(data.value);
break; break;
case LOG:
batch.putLogData(data.value);
break;
}
} }
}
// attempt to read test data back from the WriteBatch by iterating with a handler // attempt to read test data back from the WriteBatch by iterating with a handler
final CapturingWriteBatchHandler handler = new CapturingWriteBatchHandler(); handler = new CapturingWriteBatchHandler();
batch.iterate(handler); batch.iterate(handler);
// compare the results to the test data // compare the results to the test data
final List<Tuple<Action, Tuple<byte[], byte[]>>> actualEvents = handler.getEvents(); final List<Tuple<Action, Tuple<byte[], byte[]>>> actualEvents = handler.getEvents();
assertThat(testEvents.size()).isSameAs(actualEvents.size()); assertThat(testEvents.size()).isSameAs(actualEvents.size());
for(int i = 0; i < testEvents.size(); i++) { for (int i = 0; i < testEvents.size(); i++) {
assertThat(equals(testEvents.get(i), actualEvents.get(i))).isTrue(); assertThat(equals(testEvents.get(i), actualEvents.get(i))).isTrue();
} }
} finally {
if (handler != null) {
handler.dispose();
}
if (batch != null) {
batch.dispose();
}
}
}
System.out.println("Passed WriteBatchHandler Test"); private static boolean equals(final Tuple<Action, Tuple<byte[], byte[]>> expected,
final Tuple<Action, Tuple<byte[], byte[]>> actual) {
if (!expected.key.equals(actual.key)) {
return false;
} }
private static boolean equals(final Tuple<Action, Tuple<byte[], byte[]>> expected, final Tuple<byte[], byte[]> expectedData = expected.value;
final Tuple<Action, Tuple<byte[], byte[]>> actual) { final Tuple<byte[], byte[]> actualData = actual.value;
if(!expected.key.equals(actual.key)) {
return false;
}
final Tuple<byte[], byte[]> expectedData = expected.value; return equals(expectedData.key, actualData.key)
final Tuple<byte[], byte[]> actualData = actual.value; && equals(expectedData.value, actualData.value);
}
if(equals(expectedData.key, actualData.key)) { private static boolean equals(byte[] expected, byte[] actual) {
return equals(expectedData.value, actualData.value); if (expected != null) {
} else { return Arrays.equals(expected, actual);
return false; } else {
} return actual == null;
} }
}
private static boolean equals(byte[] expected, byte[] actual) { private static class Tuple<K, V> {
if(expected != null) { public final K key;
return Arrays.equals(expected, actual); public final V value;
} else {
return actual == null; public Tuple(final K key, final V value) {
} this.key = key;
this.value = value;
} }
}
private static class Tuple<K, V> { /**
public final K key; * Enumeration of Write Batch
public final V value; * event actions
*/
private enum Action {
PUT,
MERGE,
DELETE,
LOG
}
public Tuple(final K key, final V value) { /**
this.key = key; * A simple WriteBatch Handler which adds a record
this.value = value; * of each event that it receives to a list
} */
} private static class CapturingWriteBatchHandler extends WriteBatch.Handler {
private final List<Tuple<Action, Tuple<byte[], byte[]>>> events = new ArrayList<>();
/** /**
* Enumeration of Write Batch * Returns a copy of the current events list
* event actions *
* @return a list of the events which have happened upto now
*/ */
private enum Action { public List<Tuple<Action, Tuple<byte[], byte[]>>> getEvents() {
PUT, return new ArrayList<>(events);
MERGE,
DELETE,
LOG
} }
/** @Override
* A simple WriteBatch Handler which adds a record public void put(final byte[] key, final byte[] value) {
* of each event that it receives to a list events.add(new Tuple<>(Action.PUT, new Tuple<>(key, value)));
*/
private static class CapturingWriteBatchHandler extends WriteBatch.Handler {
private final List<Tuple<Action, Tuple<byte[], byte[]>>> events = new ArrayList<>();
/**
* Returns a copy of the current events list
*
* @return a list of the events which have happened upto now
*/
public List<Tuple<Action, Tuple<byte[], byte[]>>> getEvents() {
return new ArrayList<>(events);
}
@Override
public void put(final byte[] key, final byte[] value) {
events.add(new Tuple<>(Action.PUT, new Tuple<>(key, value)));
}
@Override
public void merge(final byte[] key, final byte[] value) {
events.add(new Tuple<>(Action.MERGE, new Tuple<>(key, value)));
}
@Override
public void delete(final byte[] key) {
events.add(new Tuple<>(Action.DELETE, new Tuple<byte[], byte[]>(key, null)));
}
@Override
public void logData(final byte[] blob) {
events.add(new Tuple<>(Action.LOG, new Tuple<byte[], byte[]>(null, blob)));
}
} }
@Override
public void merge(final byte[] key, final byte[] value) {
events.add(new Tuple<>(Action.MERGE, new Tuple<>(key, value)));
}
@Override
public void delete(final byte[] key) {
events.add(new Tuple<>(Action.DELETE, new Tuple<byte[], byte[]>(key, null)));
}
@Override
public void logData(final byte[] blob) {
events.add(new Tuple<>(Action.LOG, new Tuple<byte[], byte[]>(null, blob)));
}
}
} }