[RocksJava] CompactRange support

- manual range compaction support in RocksJava
fyrz 2014-11-17 23:29:52 +01:00
parent 153f4f0719
commit 48adce77cc
3 changed files with 649 additions and 0 deletions
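
For orientation, the new API surface is a set of compactRange() overloads on RocksDB: full-range and keyed-range, with and without a ColumnFamilyHandle, plus variants taking reduce_level, target_level and target_path_id. A minimal usage sketch follows (not part of the commit; the database path and keys are illustrative):

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CompactRangeExample {
  static {
    // load the native library before any RocksDB call
    RocksDB.loadLibrary();
  }

  public static void main(String[] args) throws RocksDBException {
    Options options = new Options().setCreateIfMissing(true);
    RocksDB db = RocksDB.open(options, "/tmp/compact-range-example");
    try {
      db.put("key1".getBytes(), "value1".getBytes());
      db.put("key2".getBytes(), "value2".getBytes());

      // full manual compaction over the whole key space
      db.compactRange();

      // manual compaction restricted to the key range [begin, end]
      db.compactRange("key1".getBytes(), "key2".getBytes());
    } finally {
      db.close();
      options.dispose();
    }
  }
}

The overloads without a ColumnFamilyHandle operate on the default column family; the ColumnFamilyHandle overloads take the handle as their first argument.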


@@ -1251,6 +1251,287 @@ public class RocksDB extends RocksObject {
columnFamilyHandle.nativeHandle_);
}
/**
* <p>Full compaction of the underlying storage using key
* range mode.</p>
* <p><strong>Note</strong>: After the entire database is compacted,
* all data are pushed down to the last level containing any data.
* If the total data size after compaction is reduced, that level
* might not be appropriate for hosting all the files.
* </p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange(boolean, int, int)}</li>
* <li>{@link #compactRange(byte[], byte[])}</li>
* <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
* </ul>
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange() throws RocksDBException {
compactRange0(nativeHandle_, false, -1, 0);
}
/**
* <p>Compaction of the underlying storage using key
* range {@code [begin, end]}.</p>
* <p><strong>Note</strong>: After the entire database is compacted,
* all data are pushed down to the last level containing any data.
* If the total data size after compaction is reduced, that level
* might not be appropriate for hosting all the files.
* </p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange()}</li>
* <li>{@link #compactRange(boolean, int, int)}</li>
* <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
* </ul>
*
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(byte[] begin, byte[] end)
throws RocksDBException {
compactRange0(nativeHandle_, begin, begin.length, end,
end.length, false, -1, 0);
}
/**
* <p>Full compaction of the underlying storage using key
* range mode.</p>
* <p><strong>Note</strong>: After the entire database is compacted,
* all data are pushed down to the last level containing any data.
* If the total data size after compaction is reduced, that level
* might not be appropriate for hosting all the files.
* In this case, the client can set reduce_level to true, to move
* the files back to the minimum level capable of holding the data
* set, or to a given level (specified by a non-negative target_level).
* </p>
* <p>Compaction outputs should be placed in options.db_paths
* [target_path_id]. Behavior is undefined if target_path_id is
* out of range.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange()}</li>
* <li>{@link #compactRange(byte[], byte[])}</li>
* <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
* </ul>
*
* @param reduce_level reduce level after compaction
* @param target_level target level to compact to
* @param target_path_id the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(boolean reduce_level, int target_level,
int target_path_id) throws RocksDBException {
compactRange0(nativeHandle_, reduce_level,
target_level, target_path_id);
}
/**
* <p>Compaction of the underlying storage using key
* range {@code [begin, end]}.</p>
* <p><strong>Note</strong>: After the entire database is compacted,
* all data are pushed down to the last level containing any data.
* If the total data size after compaction is reduced, that level
* might not be appropriate for hosting all the files.
* In this case, the client can set reduce_level to true, to move
* the files back to the minimum level capable of holding the data
* set, or to a given level (specified by a non-negative target_level).
* </p>
* <p>Compaction outputs should be placed in options.db_paths
* [target_path_id]. Behavior is undefined if target_path_id is
* out of range.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange()}</li>
* <li>{@link #compactRange(boolean, int, int)}</li>
* <li>{@link #compactRange(byte[], byte[])}</li>
* </ul>
*
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
* @param reduce_level reduce level after compaction
* @param target_level target level to compact to
* @param target_path_id the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(byte[] begin, byte[] end,
boolean reduce_level, int target_level, int target_path_id)
throws RocksDBException {
compactRange0(nativeHandle_, begin, begin.length, end, end.length,
reduce_level, target_level, target_path_id);
}
/**
* <p>Full compaction of the underlying storage of a column family
* using key range mode.</p>
* <p><strong>Note</strong>: After the entire database is compacted,
* all data are pushed down to the last level containing any data.
* If the total data size after compaction is reduced, that level
* might not be appropriate for hosting all the files.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>
* {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
* boolean, int, int)}
* </li>
* </ul>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance.
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(ColumnFamilyHandle columnFamilyHandle)
throws RocksDBException {
compactRange(nativeHandle_, false, -1, 0,
columnFamilyHandle.nativeHandle_);
}
/**
* <p>Compaction of the underlying storage of a column family
* using key range {@code [begin, end]}.</p>
* <p><strong>Note</strong>: After the entire database is compacted,
* all data are pushed down to the last level containing any data.
* If the total data size after compaction is reduced, that level
* might not be appropriate for hosting all the files.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange(ColumnFamilyHandle)}</li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
* boolean, int, int)}
* </li>
* </ul>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance.
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(ColumnFamilyHandle columnFamilyHandle,
byte[] begin, byte[] end) throws RocksDBException {
compactRange(nativeHandle_, begin, begin.length, end, end.length,
false, -1, 0, columnFamilyHandle.nativeHandle_);
}
/**
* <p>Full compaction of the underlying storage of a column family
* using key range mode.</p>
* <p><strong>Note</strong>: After the entire database is compacted,
* all data are pushed down to the last level containing any data.
* If the total data size after compaction is reduced, that level
* might not be appropriate for hosting all the files.
* In this case, the client can set reduce_level to true, to move
* the files back to the minimum level capable of holding the data
* set, or to a given level (specified by a non-negative target_level).
* </p>
* <p>Compaction outputs should be placed in options.db_paths
* [target_path_id]. Behavior is undefined if target_path_id is
* out of range.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange(ColumnFamilyHandle)}</li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
* boolean, int, int)}
* </li>
* </ul>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance.
* @param reduce_level reduce level after compaction
* @param target_level target level to compact to
* @param target_path_id the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(ColumnFamilyHandle columnFamilyHandle,
boolean reduce_level, int target_level, int target_path_id)
throws RocksDBException {
compactRange(nativeHandle_, reduce_level, target_level,
target_path_id, columnFamilyHandle.nativeHandle_);
}
/**
* <p>Compaction of the underlying storage of a column family
* using key range {@code [begin, end]}.</p>
* <p><strong>Note</strong>: After the entire database is compacted,
* all data are pushed down to the last level containing any data.
* If the total data size after compaction is reduced, that level
* might not be appropriate for hosting all the files.
* In this case, the client can set reduce_level to true, to move
* the files back to the minimum level capable of holding the data
* set, or to a given level (specified by a non-negative target_level).
* </p>
* <p>Compaction outputs should be placed in options.db_paths
* [target_path_id]. Behavior is undefined if target_path_id is
* out of range.</p>
*
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange(ColumnFamilyHandle)}</li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
* </li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* </li>
* </ul>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance.
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
* @param reduce_level reduce level after compaction
* @param target_level target level to compact to
* @param target_path_id the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
public void compactRange(ColumnFamilyHandle columnFamilyHandle,
byte[] begin, byte[] end, boolean reduce_level, int target_level,
int target_path_id) throws RocksDBException {
compactRange(nativeHandle_, begin, begin.length, end, end.length,
reduce_level, target_level, target_path_id,
columnFamilyHandle.nativeHandle_);
}
/**
* Private constructor.
*/
@@ -1376,6 +1657,16 @@ public class RocksDB extends RocksObject {
throws RocksDBException;
private native void flush(long handle, long flushOptHandle,
long cfHandle) throws RocksDBException;
private native void compactRange0(long handle, boolean reduce_level, int target_level,
int target_path_id) throws RocksDBException;
private native void compactRange0(long handle, byte[] begin, int beginLen, byte[] end,
int endLen, boolean reduce_level, int target_level, int target_path_id)
throws RocksDBException;
private native void compactRange(long handle, boolean reduce_level, int target_level,
int target_path_id, long cfHandle) throws RocksDBException;
private native void compactRange(long handle, byte[] begin, int beginLen, byte[] end,
int endLen, boolean reduce_level, int target_level, int target_path_id,
long cfHandle) throws RocksDBException;
protected DBOptionsInterface options_;
}
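
The reduce_level / target_level / target_path_id overloads above are exercised by the compactRangeToLevel tests below. A short, hedged illustration of their intended use (db is assumed to be an open RocksDB instance and columnFamilyHandle a valid handle, as in those tests):

// After bulk loading and flushing, pull the compacted files back down:
// with reduce_level = true and a non-negative target_level, the files are
// moved to that level (here level 0); target_path_id = 0 places the output
// in options.db_paths[0].
db.compactRange(true, 0, 0);

// The same operation scoped to a single column family.
db.compactRange(columnFamilyHandle, true, 0, 0);

A negative target_level together with reduce_level = true moves the files to the minimum level capable of holding the data set, as described in the javadoc above.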


@@ -13,6 +13,7 @@ import org.rocksdb.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import static org.assertj.core.api.Assertions.assertThat;
@@ -25,6 +26,9 @@ public class RocksDBTest {
@Rule
public TemporaryFolder dbFolder = new TemporaryFolder();
public static final Random rand = PlatformRandomHelper.
getPlatformSpecificRandomFactory();
@Test
public void open() throws RocksDBException {
RocksDB db = null;
@@ -312,4 +316,244 @@
}
}
}
@Test
public void fullCompactRange() throws RocksDBException {
RocksDB db = null;
Options opt = null;
try {
opt = new Options().
setCreateIfMissing(true).
setDisableAutoCompactions(true).
setCompactionStyle(CompactionStyle.LEVEL).
setNumLevels(4).
setWriteBufferSize(100<<10).
setLevelZeroFileNumCompactionTrigger(3).
setTargetFileSizeBase(200 << 10).
setTargetFileSizeMultiplier(1).
setMaxBytesForLevelBase(500 << 10).
setMaxBytesForLevelMultiplier(1).
setDisableAutoCompactions(false);
// open database
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
// fill database with key/value pairs
byte[] b = new byte[10000];
for (int i = 0; i < 200; i++) {
rand.nextBytes(b);
db.put((String.valueOf(i)).getBytes(), b);
}
db.compactRange();
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void fullCompactRangeColumnFamily() throws RocksDBException {
RocksDB db = null;
DBOptions opt = null;
List<ColumnFamilyHandle> columnFamilyHandles =
new ArrayList<>();
try {
opt = new DBOptions().
setCreateIfMissing(true).
setCreateMissingColumnFamilies(true);
List<ColumnFamilyDescriptor> columnFamilyDescriptors =
new ArrayList<>();
columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
RocksDB.DEFAULT_COLUMN_FAMILY));
columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
"new_cf",
new ColumnFamilyOptions().
setDisableAutoCompactions(true).
setCompactionStyle(CompactionStyle.LEVEL).
setNumLevels(4).
setWriteBufferSize(100<<10).
setLevelZeroFileNumCompactionTrigger(3).
setTargetFileSizeBase(200 << 10).
setTargetFileSizeMultiplier(1).
setMaxBytesForLevelBase(500 << 10).
setMaxBytesForLevelMultiplier(1).
setDisableAutoCompactions(false)));
// open database
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath(),
columnFamilyDescriptors,
columnFamilyHandles);
// fill database with key/value pairs
byte[] b = new byte[10000];
for (int i = 0; i < 200; i++) {
rand.nextBytes(b);
db.put(columnFamilyHandles.get(1),
String.valueOf(i).getBytes(), b);
}
db.compactRange(columnFamilyHandles.get(1));
} finally {
for (ColumnFamilyHandle handle : columnFamilyHandles) {
handle.dispose();
}
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void compactRangeWithKeys() {
}
@Test
public void compactRangeWithKeysColumnFamily() {
}
@Test
public void compactRangeToLevel() throws RocksDBException, InterruptedException {
RocksDB db = null;
Options opt = null;
try {
opt = new Options().
setCreateIfMissing(true).
setCompactionStyle(CompactionStyle.LEVEL).
setNumLevels(4).
setWriteBufferSize(100<<10).
setLevelZeroFileNumCompactionTrigger(3).
setTargetFileSizeBase(200 << 10).
setTargetFileSizeMultiplier(1).
setMaxBytesForLevelBase(500 << 10).
setMaxBytesForLevelMultiplier(1).
setDisableAutoCompactions(false);
// open database
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
// fill database with key/value pairs
byte[] b = new byte[10000];
for (int i = 0; i < 200; i++) {
rand.nextBytes(b);
db.put((String.valueOf(i)).getBytes(), b);
}
db.flush(new FlushOptions().setWaitForFlush(true));
db.close();
opt.setTargetFileSizeBase(Long.MAX_VALUE).
setTargetFileSizeMultiplier(1).
setMaxBytesForLevelBase(Long.MAX_VALUE).
setMaxBytesForLevelMultiplier(1).
setDisableAutoCompactions(true);
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath());
db.compactRange(true, 0, 0);
for (int i = 0; i < 4; i++) {
if (i == 0) {
assertThat(db.getProperty("rocksdb.num-files-at-level" + i)).
isEqualTo("1");
} else {
assertThat(db.getProperty("rocksdb.num-files-at-level" + i)).
isEqualTo("0");
}
}
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
}
}
}
@Test
public void compactRangeToLevelColumnFamily() throws RocksDBException {
RocksDB db = null;
DBOptions opt = null;
List<ColumnFamilyHandle> columnFamilyHandles =
new ArrayList<>();
try {
opt = new DBOptions().
setCreateIfMissing(true).
setCreateMissingColumnFamilies(true);
List<ColumnFamilyDescriptor> columnFamilyDescriptors =
new ArrayList<>();
columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
RocksDB.DEFAULT_COLUMN_FAMILY));
columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
"new_cf",
new ColumnFamilyOptions().
setDisableAutoCompactions(true).
setCompactionStyle(CompactionStyle.LEVEL).
setNumLevels(4).
setWriteBufferSize(100<<10).
setLevelZeroFileNumCompactionTrigger(3).
setTargetFileSizeBase(200 << 10).
setTargetFileSizeMultiplier(1).
setMaxBytesForLevelBase(500 << 10).
setMaxBytesForLevelMultiplier(1).
setDisableAutoCompactions(false)));
// open database
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath(),
columnFamilyDescriptors,
columnFamilyHandles);
// fill database with key/value pairs
byte[] b = new byte[10000];
for (int i = 0; i < 200; i++) {
rand.nextBytes(b);
db.put(columnFamilyHandles.get(1),
String.valueOf(i).getBytes(), b);
}
db.flush(new FlushOptions().setWaitForFlush(true),
columnFamilyHandles.get(1));
// free column families
for (ColumnFamilyHandle handle : columnFamilyHandles) {
handle.dispose();
}
// clear column family handles for reopen
columnFamilyHandles.clear();
db.close();
columnFamilyDescriptors.get(1).
columnFamilyOptions().
setTargetFileSizeBase(Long.MAX_VALUE).
setTargetFileSizeMultiplier(1).
setMaxBytesForLevelBase(Long.MAX_VALUE).
setMaxBytesForLevelMultiplier(1).
setDisableAutoCompactions(true);
// reopen database
db = RocksDB.open(opt,
dbFolder.getRoot().getAbsolutePath(),
columnFamilyDescriptors,
columnFamilyHandles);
// compact new column family
db.compactRange(columnFamilyHandles.get(1), true, 0, 0);
// check if new column family is compacted to level zero
for (int i = 0; i < 4; i++) {
if (i == 0) {
assertThat(db.getProperty(columnFamilyHandles.get(1),
"rocksdb.num-files-at-level" + i)).
isEqualTo("1");
} else {
assertThat(db.getProperty(columnFamilyHandles.get(1),
"rocksdb.num-files-at-level" + i)).
isEqualTo("0");
}
}
} finally {
if (db != null) {
db.close();
}
if (opt != null) {
opt.dispose();
}
}
}
}
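
compactRangeWithKeys() and compactRangeWithKeysColumnFamily() are left as empty stubs in this commit. As a sketch only (not part of the commit), the non-column-family variant could follow the same pattern as fullCompactRange() above and exercise the byte[] overload:

@Test
public void compactRangeWithKeys() throws RocksDBException {
  RocksDB db = null;
  Options opt = null;
  try {
    opt = new Options().
        setCreateIfMissing(true);
    db = RocksDB.open(opt,
        dbFolder.getRoot().getAbsolutePath());
    // fill database with key/value pairs
    byte[] b = new byte[10000];
    for (int i = 0; i < 200; i++) {
      rand.nextBytes(b);
      db.put(String.valueOf(i).getBytes(), b);
    }
    // compact a byte-wise sub-range of the key space
    db.compactRange("0".getBytes(), "9".getBytes());
  } finally {
    if (db != null) {
      db.close();
    }
    if (opt != null) {
      opt.dispose();
    }
  }
}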


@@ -1379,3 +1379,117 @@ void Java_org_rocksdb_RocksDB_flush__JJJ(
auto cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
rocksdb_flush_helper(env, db, *flush_options, cf_handle);
}
//////////////////////////////////////////////////////////////////////////////
// rocksdb::DB::CompactRange - Full
void rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db,
rocksdb::ColumnFamilyHandle* cf_handle, jboolean jreduce_level,
jint jtarget_level, jint jtarget_path_id) {
rocksdb::Status s;
if (cf_handle != nullptr) {
s = db->CompactRange(cf_handle, nullptr, nullptr, jreduce_level,
jtarget_level, static_cast<uint32_t>(jtarget_path_id));
} else {
// backwards compatibility
s = db->CompactRange(nullptr, nullptr, jreduce_level,
jtarget_level, static_cast<uint32_t>(jtarget_path_id));
}
if (s.ok()) {
return;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
/*
* Class: org_rocksdb_RocksDB
* Method: compactRange0
* Signature: (JZII)V
*/
void Java_org_rocksdb_RocksDB_compactRange0__JZII(JNIEnv* env,
jobject jdb, jlong jdb_handle, jboolean jreduce_level,
jint jtarget_level, jint jtarget_path_id) {
auto db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
rocksdb_compactrange_helper(env, db, nullptr, jreduce_level,
jtarget_level, jtarget_path_id);
}
/*
* Class: org_rocksdb_RocksDB
* Method: compactRange
* Signature: (JZIIJ)V
*/
void Java_org_rocksdb_RocksDB_compactRange__JZIIJ(
JNIEnv* env, jobject jdb, jlong jdb_handle,
jboolean jreduce_level, jint jtarget_level,
jint jtarget_path_id, jlong jcf_handle) {
auto db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
rocksdb_compactrange_helper(env, db, cf_handle, jreduce_level,
jtarget_level, jtarget_path_id);
}
//////////////////////////////////////////////////////////////////////////////
// rocksdb::DB::CompactRange - Range
void rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db,
rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jbegin, jint jbegin_len,
jbyteArray jend, jint jend_len, jboolean jreduce_level, jint jtarget_level,
jint jtarget_path_id) {
jbyte* begin = env->GetByteArrayElements(jbegin, 0);
jbyte* end = env->GetByteArrayElements(jend, 0);
const rocksdb::Slice begin_slice(reinterpret_cast<char*>(begin), jbegin_len);
const rocksdb::Slice end_slice(reinterpret_cast<char*>(end), jend_len);
rocksdb::Status s;
if (cf_handle != nullptr) {
s = db->CompactRange(cf_handle, &begin_slice, &end_slice, jreduce_level,
jtarget_level, static_cast<uint32_t>(jtarget_path_id));
} else {
// backwards compatibility
s = db->CompactRange(&begin_slice, &end_slice, jreduce_level,
jtarget_level, static_cast<uint32_t>(jtarget_path_id));
}
env->ReleaseByteArrayElements(jbegin, begin, JNI_ABORT);
env->ReleaseByteArrayElements(jend, end, JNI_ABORT);
if (s.ok()) {
return;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
/*
* Class: org_rocksdb_RocksDB
* Method: compactRange0
* Signature: (J[BI[BIZII)V
*/
void Java_org_rocksdb_RocksDB_compactRange0__J_3BI_3BIZII(JNIEnv* env,
jobject jdb, jlong jdb_handle, jbyteArray jbegin, jint jbegin_len,
jbyteArray jend, jint jend_len, jboolean jreduce_level,
jint jtarget_level, jint jtarget_path_id) {
auto db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
rocksdb_compactrange_helper(env, db, nullptr, jbegin, jbegin_len,
jend, jend_len, jreduce_level, jtarget_level, jtarget_path_id);
}
/*
* Class: org_rocksdb_RocksDB
* Method: compactRange
* Signature: (J[BI[BIZIIJ)V
*/
void Java_org_rocksdb_RocksDB_compactRange__J_3BI_3BIZIIJ(
JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jbegin,
jint jbegin_len, jbyteArray jend, jint jend_len,
jboolean jreduce_level, jint jtarget_level,
jint jtarget_path_id, jlong jcf_handle) {
auto db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
rocksdb_compactrange_helper(env, db, cf_handle, jbegin, jbegin_len,
jend, jend_len, jreduce_level, jtarget_level, jtarget_path_id);
}
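
For reference, the Java overloads added earlier reach these keyed-range entry points as follows (a sketch; db and columnFamilyHandle are assumed to be valid, and the keys are illustrative):

// resolves to Java_org_rocksdb_RocksDB_compactRange0__J_3BI_3BIZII via
// compactRange0(nativeHandle_, begin, begin.length, end, end.length, false, -1, 0)
db.compactRange("key1".getBytes(), "key9".getBytes());

// resolves to Java_org_rocksdb_RocksDB_compactRange__J_3BI_3BIZIIJ, passing
// the column family handle as the trailing jlong
db.compactRange(columnFamilyHandle, "key1".getBytes(), "key9".getBytes());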