Added missing options to RocksJava

Summary:
This adds almost all missing options to RocksJava
Closes https://github.com/facebook/rocksdb/pull/2039

Differential Revision: D4779991

Pulled By: siying

fbshipit-source-id: 4a1bf28
This commit is contained in:
Adam Retter 2017-03-30 12:04:09 -07:00 committed by Facebook Github Bot
parent c6d04f2ecf
commit 0ee7f04039
51 changed files with 7539 additions and 1538 deletions


@@ -6,10 +6,14 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.BlockBasedTableConfig\
org.rocksdb.BloomFilter\
org.rocksdb.Checkpoint\
org.rocksdb.ClockCache\
org.rocksdb.ColumnFamilyHandle\
org.rocksdb.ColumnFamilyOptions\
org.rocksdb.CompactionOptionsFIFO\
org.rocksdb.CompactionOptionsUniversal\
org.rocksdb.Comparator\
org.rocksdb.ComparatorOptions\
org.rocksdb.CompressionOptions\
org.rocksdb.DBOptions\
org.rocksdb.DirectComparator\
org.rocksdb.DirectSlice\
@@ -21,6 +25,7 @@ NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.HashLinkedListMemTableConfig\
org.rocksdb.HashSkipListMemTableConfig\
org.rocksdb.Logger\
org.rocksdb.LRUCache\
org.rocksdb.MergeOperator\
org.rocksdb.Options\
org.rocksdb.PlainTableConfig\
@@ -67,11 +72,17 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.BlockBasedTableConfigTest\
org.rocksdb.util.BytewiseComparatorTest\
org.rocksdb.CheckPointTest\
org.rocksdb.ClockCacheTest\
org.rocksdb.ColumnFamilyOptionsTest\
org.rocksdb.ColumnFamilyTest\
org.rocksdb.CompactionOptionsFIFOTest\
org.rocksdb.CompactionOptionsUniversalTest\
org.rocksdb.CompactionPriorityTest\
org.rocksdb.CompactionStopStyleTest\
org.rocksdb.ComparatorOptionsTest\
org.rocksdb.ComparatorTest\
org.rocksdb.CompressionOptionsTest\
org.rocksdb.CompressionTypesTest\
org.rocksdb.DBOptionsTest\
org.rocksdb.DirectComparatorTest\
org.rocksdb.DirectSliceTest\
@@ -83,6 +94,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.InfoLogLevelTest\
org.rocksdb.KeyMayExistTest\
org.rocksdb.LoggerTest\
org.rocksdb.LRUCacheTest\
org.rocksdb.MemTableTest\
org.rocksdb.MergeTest\
org.rocksdb.MixedOptionsTest\
@@ -105,6 +117,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.TransactionLogIteratorTest\
org.rocksdb.TtlDBTest\
org.rocksdb.StatisticsCollectorTest\
org.rocksdb.WALRecoveryModeTest\
org.rocksdb.WriteBatchHandlerTest\
org.rocksdb.WriteBatchTest\
org.rocksdb.WriteBatchThreadedTest\


@@ -582,12 +582,6 @@ public class DbBenchmark {
(Integer)flags_.get(Flag.level0_slowdown_writes_trigger));
options.setLevelZeroFileNumCompactionTrigger(
(Integer)flags_.get(Flag.level0_file_num_compaction_trigger));
options.setSoftRateLimit(
(Double)flags_.get(Flag.soft_rate_limit));
options.setHardRateLimit(
(Double)flags_.get(Flag.hard_rate_limit));
options.setRateLimitDelayMaxMilliseconds(
(Integer)flags_.get(Flag.rate_limit_delay_max_milliseconds));
options.setMaxCompactionBytes(
(Long) flags_.get(Flag.max_compaction_bytes));
options.setDisableAutoCompactions(


@@ -48,6 +48,18 @@ jstring Java_org_rocksdb_BackupableDBOptions_backupDir(
return env->NewStringUTF(bopt->backup_dir.c_str());
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setBackupEnv
* Signature: (JJ)V
*/
void Java_org_rocksdb_BackupableDBOptions_setBackupEnv(
JNIEnv* env, jobject jopt, jlong jhandle, jlong jrocks_env_handle) {
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jrocks_env_handle);
bopt->backup_env = rocks_env;
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setShareTableFiles
@@ -70,6 +82,19 @@ jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles(
return bopt->share_table_files;
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setInfoLog
* Signature: (JJ)V
*/
void Java_org_rocksdb_BackupableDBOptions_setInfoLog(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jlogger_handle) {
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* sptr_logger =
    reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback> *>(
        jlogger_handle);
bopt->info_log = sptr_logger->get();
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setSync
@@ -158,6 +183,19 @@ jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit(
return bopt->backup_rate_limit;
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setBackupRateLimiter
* Signature: (JJ)V
*/
void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimiter(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* sptr_rate_limiter =
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(jrate_limiter_handle);
bopt->backup_rate_limiter = *sptr_rate_limiter;
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setRestoreRateLimit
@@ -180,6 +218,19 @@ jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit(
return bopt->restore_rate_limit;
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setRestoreRateLimiter
* Signature: (JJ)V
*/
void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimiter(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
auto* sptr_rate_limiter =
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(jrate_limiter_handle);
bopt->restore_rate_limiter = *sptr_rate_limiter;
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setShareFilesWithChecksum
@@ -202,6 +253,53 @@ jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum(
return bopt->share_files_with_checksum;
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setMaxBackgroundOperations
* Signature: (JI)V
*/
void Java_org_rocksdb_BackupableDBOptions_setMaxBackgroundOperations(
JNIEnv* env, jobject jobj, jlong jhandle, jint max_background_operations) {
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->max_background_operations =
static_cast<int>(max_background_operations);
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: maxBackgroundOperations
* Signature: (J)I
*/
jint Java_org_rocksdb_BackupableDBOptions_maxBackgroundOperations(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return static_cast<jint>(bopt->max_background_operations);
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: setCallbackTriggerIntervalSize
* Signature: (JJ)V
*/
void Java_org_rocksdb_BackupableDBOptions_setCallbackTriggerIntervalSize(
JNIEnv* env, jobject jobj, jlong jhandle,
jlong jcallback_trigger_interval_size) {
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
bopt->callback_trigger_interval_size =
static_cast<uint64_t>(jcallback_trigger_interval_size);
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: callbackTriggerIntervalSize
* Signature: (J)J
*/
jlong Java_org_rocksdb_BackupableDBOptions_callbackTriggerIntervalSize(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* bopt = reinterpret_cast<rocksdb::BackupableDBOptions*>(jhandle);
return static_cast<jlong>(bopt->callback_trigger_interval_size);
}
/*
* Class: org_rocksdb_BackupableDBOptions
* Method: disposeInternal


@@ -0,0 +1,40 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::ClockCache.
#include <jni.h>
#include "include/org_rocksdb_ClockCache.h"
#include "util/clock_cache.h"
/*
* Class: org_rocksdb_ClockCache
* Method: newClockCache
* Signature: (JIZ)J
*/
jlong Java_org_rocksdb_ClockCache_newClockCache(
JNIEnv* env, jclass jcls, jlong jcapacity, jint jnum_shard_bits,
jboolean jstrict_capacity_limit) {
auto* sptr_clock_cache =
new std::shared_ptr<rocksdb::Cache>(rocksdb::NewClockCache(
static_cast<size_t>(jcapacity),
static_cast<int>(jnum_shard_bits),
static_cast<bool>(jstrict_capacity_limit)));
return reinterpret_cast<jlong>(sptr_clock_cache);
}
/*
* Class: org_rocksdb_ClockCache
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_ClockCache_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* sptr_clock_cache =
reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jhandle);
delete sptr_clock_cache; // delete std::shared_ptr
}
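
The Java side of this bridge is the new ClockCache class. A minimal usage sketch, assuming a ClockCache(long, int, boolean) constructor mirroring the (JIZ)J signature above; the capacity and shard values are illustrative only:

import org.rocksdb.ClockCache;
import org.rocksdb.RocksDB;

public class ClockCacheExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    // 64 MB capacity, 6 shard bits, no strict capacity limit
    try (final ClockCache cache =
             new ClockCache(64L * 1024L * 1024L, 6, false)) {
      // pass the cache wherever an org.rocksdb.Cache is accepted
    } // close() deletes the std::shared_ptr held via disposeInternal above
  }
}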


@@ -0,0 +1,55 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionOptionsFIFO.
#include <jni.h>
#include "include/org_rocksdb_CompactionOptionsFIFO.h"
#include "rocksdb/advanced_options.h"
/*
* Class: org_rocksdb_CompactionOptionsFIFO
* Method: newCompactionOptionsFIFO
* Signature: ()J
*/
jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
JNIEnv* env, jclass jcls) {
const auto* opt = new rocksdb::CompactionOptionsFIFO();
return reinterpret_cast<jlong>(opt);
}
/*
* Class: org_rocksdb_CompactionOptionsFIFO
* Method: setMaxTableFilesSize
* Signature: (JJ)V
*/
void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_table_files_size) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
opt->max_table_files_size = static_cast<uint64_t>(jmax_table_files_size);
}
/*
* Class: org_rocksdb_CompactionOptionsFIFO
* Method: maxTableFilesSize
* Signature: (J)J
*/
jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
return static_cast<jlong>(opt->max_table_files_size);
}
/*
* Class: org_rocksdb_CompactionOptionsFIFO
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
}
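
On the Java side this is exposed through the new CompactionOptionsFIFO class. A short sketch, assuming the setCompactionOptionsFIFO setter declared later in this diff is available on ColumnFamilyOptions; the 1 GB limit is illustrative:

import org.rocksdb.*;

public class FifoCompactionExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final CompactionOptionsFIFO fifo = new CompactionOptionsFIFO();
         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      // once the CF holds ~1 GB of table files, the oldest files are dropped
      fifo.setMaxTableFilesSize(1024L * 1024L * 1024L);
      cfOpts.setCompactionStyle(CompactionStyle.FIFO);
      cfOpts.setCompactionOptionsFIFO(fifo); // assumed setter from this change
    }
  }
}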


@@ -0,0 +1,194 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompactionOptionsUniversal.
#include <jni.h>
#include "include/org_rocksdb_CompactionOptionsUniversal.h"
#include "rocksdb/advanced_options.h"
#include "rocksjni/portal.h"
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: newCompactionOptionsUniversal
* Signature: ()J
*/
jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
JNIEnv* env, jclass jcls) {
const auto* opt = new rocksdb::CompactionOptionsUniversal();
return reinterpret_cast<jlong>(opt);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: setSizeRatio
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
JNIEnv* env, jobject jobj, jlong jhandle, jint jsize_ratio) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->size_ratio = static_cast<unsigned int>(jsize_ratio);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: sizeRatio
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->size_ratio);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: setMinMergeWidth
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
JNIEnv* env, jobject jobj, jlong jhandle, jint jmin_merge_width) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->min_merge_width = static_cast<unsigned int>(jmin_merge_width);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: minMergeWidth
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->min_merge_width);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: setMaxMergeWidth
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_merge_width) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->max_merge_width = static_cast<unsigned int>(jmax_merge_width);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: maxMergeWidth
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->max_merge_width);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: setMaxSizeAmplificationPercent
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jmax_size_amplification_percent) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->max_size_amplification_percent =
static_cast<unsigned int>(jmax_size_amplification_percent);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: maxSizeAmplificationPercent
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->max_size_amplification_percent);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: setCompressionSizePercent
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
JNIEnv* env, jobject jobj, jlong jhandle, jint jcompression_size_percent) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->compression_size_percent =
static_cast<unsigned int>(jcompression_size_percent);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: compressionSizePercent
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->compression_size_percent);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: setStopStyle
* Signature: (JB)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
JNIEnv* env, jobject jobj, jlong jhandle, jbyte jstop_style_value) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->stop_style =
rocksdb::CompactionStopStyleJni::toCppCompactionStopStyle(
jstop_style_value);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: stopStyle
* Signature: (J)B
*/
jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return rocksdb::CompactionStopStyleJni::toJavaCompactionStopStyle(
opt->stop_style);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: setAllowTrivialMove
* Signature: (JZ)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean jallow_trivial_move) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->allow_trivial_move = static_cast<bool>(jallow_trivial_move);
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: allowTrivialMove
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return opt->allow_trivial_move;
}
/*
* Class: org_rocksdb_CompactionOptionsUniversal
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
}
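
A hedged sketch of the matching Java class, CompactionOptionsUniversal; the CompactionStopStyle constant name is assumed from the new Java enum added by this change, and all values are illustrative:

import org.rocksdb.*;

public class UniversalCompactionExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final CompactionOptionsUniversal univ = new CompactionOptionsUniversal();
         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      univ.setSizeRatio(1);
      univ.setMinMergeWidth(2);
      univ.setMaxSizeAmplificationPercent(200);
      // stop picking files once the total size of candidates is reached
      univ.setStopStyle(CompactionStopStyle.CompactionStopStyleTotalSize);
      cfOpts.setCompactionStyle(CompactionStyle.UNIVERSAL);
      cfOpts.setCompactionOptionsUniversal(univ); // assumed setter from this change
    }
  }
}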


@@ -0,0 +1,121 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::CompressionOptions.
#include <jni.h>
#include "include/org_rocksdb_CompressionOptions.h"
#include "rocksdb/advanced_options.h"
/*
* Class: org_rocksdb_CompressionOptions
* Method: newCompressionOptions
* Signature: ()J
*/
jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
JNIEnv* env, jclass jcls) {
const auto* opt = new rocksdb::CompressionOptions();
return reinterpret_cast<jlong>(opt);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: setWindowBits
* Signature: (JI)V
*/
void Java_org_rocksdb_CompressionOptions_setWindowBits(
JNIEnv* env, jobject jobj, jlong jhandle, jint jwindow_bits) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->window_bits = static_cast<int>(jwindow_bits);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: windowBits
* Signature: (J)I
*/
jint Java_org_rocksdb_CompressionOptions_windowBits(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->window_bits);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: setLevel
* Signature: (JI)V
*/
void Java_org_rocksdb_CompressionOptions_setLevel(
JNIEnv* env, jobject jobj, jlong jhandle, jint jlevel) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->level = static_cast<int>(jlevel);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: level
* Signature: (J)I
*/
jint Java_org_rocksdb_CompressionOptions_level(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->level);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: setStrategy
* Signature: (JI)V
*/
void Java_org_rocksdb_CompressionOptions_setStrategy(
JNIEnv* env, jobject jobj, jlong jhandle, jint jstrategy) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->strategy = static_cast<int>(jstrategy);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: strategy
* Signature: (J)I
*/
jint Java_org_rocksdb_CompressionOptions_strategy(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->strategy);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: setMaxDictBytes
* Signature: (JI)V
*/
void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(
JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_dict_bytes) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->max_dict_bytes = static_cast<int>(jmax_dict_bytes);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: maxDictBytes
* Signature: (J)I
*/
jint Java_org_rocksdb_CompressionOptions_maxDictBytes(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->max_dict_bytes);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_CompressionOptions_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
delete reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
}
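
A sketch of the corresponding Java CompressionOptions class, assuming a setCompressionOptions(CompressionOptions) setter is exposed on ColumnFamilyOptions elsewhere in this change; the zlib-oriented values are illustrative:

import org.rocksdb.*;

public class CompressionOptionsExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final CompressionOptions compressionOpts = new CompressionOptions();
         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      compressionOpts.setWindowBits(-14);         // raw deflate window
      compressionOpts.setLevel(6);                // zlib compression level
      compressionOpts.setMaxDictBytes(16 * 1024); // 16 KB dictionary
      cfOpts.setCompressionType(CompressionType.ZLIB_COMPRESSION);
      cfOpts.setCompressionOptions(compressionOpts); // assumed setter
    }
  }
}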


@@ -0,0 +1,41 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for
// rocksdb::LRUCache.
#include <jni.h>
#include "include/org_rocksdb_LRUCache.h"
#include "util/lru_cache.h"
/*
* Class: org_rocksdb_LRUCache
* Method: newLRUCache
* Signature: (JIZD)J
*/
jlong Java_org_rocksdb_LRUCache_newLRUCache(
JNIEnv* env, jclass jcls, jlong jcapacity, jint jnum_shard_bits,
jboolean jstrict_capacity_limit, jdouble jhigh_pri_pool_ratio) {
auto* sptr_lru_cache =
new std::shared_ptr<rocksdb::Cache>(rocksdb::NewLRUCache(
static_cast<size_t>(jcapacity),
static_cast<int>(jnum_shard_bits),
static_cast<bool>(jstrict_capacity_limit),
static_cast<double>(jhigh_pri_pool_ratio)));
return reinterpret_cast<jlong>(sptr_lru_cache);
}
/*
* Class: org_rocksdb_LRUCache
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_LRUCache_disposeInternal(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto* sptr_lru_cache =
reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jhandle);
delete sptr_lru_cache; // delete std::shared_ptr
}
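
The Java counterpart is the new LRUCache class. A minimal sketch, assuming a four-argument constructor mirroring the (JIZD)J signature above; the sizes and ratio are illustrative:

import org.rocksdb.LRUCache;
import org.rocksdb.RocksDB;

public class LRUCacheExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    // 256 MB capacity, 8 shard bits, no strict limit, 10% high-priority pool
    try (final LRUCache cache =
             new LRUCache(256L * 1024L * 1024L, 8, false, 0.1)) {
      // pass the cache wherever an org.rocksdb.Cache is accepted
    } // close() deletes the std::shared_ptr held via disposeInternal above
  }
}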

File diff suppressed because it is too large.


@@ -2069,6 +2069,220 @@ class BatchResultJni : public JavaClass {
}
};
// The portal class for org.rocksdb.CompactionStopStyle
class CompactionStopStyleJni {
public:
// Returns the equivalent org.rocksdb.CompactionStopStyle for the provided
// C++ rocksdb::CompactionStopStyle enum
static jbyte toJavaCompactionStopStyle(
const rocksdb::CompactionStopStyle& compaction_stop_style) {
switch(compaction_stop_style) {
case rocksdb::CompactionStopStyle::kCompactionStopStyleSimilarSize:
return 0x0;
case rocksdb::CompactionStopStyle::kCompactionStopStyleTotalSize:
return 0x1;
default:
return 0x7F; // undefined
}
}
// Returns the equivalent C++ rocksdb::CompactionStopStyle enum for the
// provided Java org.rocksdb.CompactionStopStyle
static rocksdb::CompactionStopStyle toCppCompactionStopStyle(
jbyte jcompaction_stop_style) {
switch(jcompaction_stop_style) {
case 0x0:
return rocksdb::CompactionStopStyle::kCompactionStopStyleSimilarSize;
case 0x1:
return rocksdb::CompactionStopStyle::kCompactionStopStyleTotalSize;
default:
// undefined/default
return rocksdb::CompactionStopStyle::kCompactionStopStyleSimilarSize;
}
}
};
// The portal class for org.rocksdb.CompressionType
class CompressionTypeJni {
public:
// Returns the equivalent org.rocksdb.CompressionType for the provided
// C++ rocksdb::CompressionType enum
static jbyte toJavaCompressionType(
const rocksdb::CompressionType& compression_type) {
switch(compression_type) {
case rocksdb::CompressionType::kNoCompression:
return 0x0;
case rocksdb::CompressionType::kSnappyCompression:
return 0x1;
case rocksdb::CompressionType::kZlibCompression:
return 0x2;
case rocksdb::CompressionType::kBZip2Compression:
return 0x3;
case rocksdb::CompressionType::kLZ4Compression:
return 0x4;
case rocksdb::CompressionType::kLZ4HCCompression:
return 0x5;
case rocksdb::CompressionType::kXpressCompression:
return 0x6;
case rocksdb::CompressionType::kZSTD:
return 0x7;
case rocksdb::CompressionType::kDisableCompressionOption:
default:
return 0x7F;
}
}
// Returns the equivalent C++ rocksdb::CompressionType enum for the
// provided Java org.rocksdb.CompressionType
static rocksdb::CompressionType toCppCompressionType(
jbyte jcompression_type) {
switch(jcompression_type) {
case 0x0:
return rocksdb::CompressionType::kNoCompression;
case 0x1:
return rocksdb::CompressionType::kSnappyCompression;
case 0x2:
return rocksdb::CompressionType::kZlibCompression;
case 0x3:
return rocksdb::CompressionType::kBZip2Compression;
case 0x4:
return rocksdb::CompressionType::kLZ4Compression;
case 0x5:
return rocksdb::CompressionType::kLZ4HCCompression;
case 0x6:
return rocksdb::CompressionType::kXpressCompression;
case 0x7:
return rocksdb::CompressionType::kZSTD;
case 0x7F:
default:
return rocksdb::CompressionType::kDisableCompressionOption;
}
}
};
// The portal class for org.rocksdb.CompactionPriority
class CompactionPriorityJni {
public:
// Returns the equivalent org.rocksdb.CompactionPriority for the provided
// C++ rocksdb::CompactionPri enum
static jbyte toJavaCompactionPriority(
const rocksdb::CompactionPri& compaction_priority) {
switch(compaction_priority) {
case rocksdb::CompactionPri::kByCompensatedSize:
return 0x0;
case rocksdb::CompactionPri::kOldestLargestSeqFirst:
return 0x1;
case rocksdb::CompactionPri::kOldestSmallestSeqFirst:
return 0x2;
case rocksdb::CompactionPri::kMinOverlappingRatio:
return 0x3;
default:
return 0x0; // undefined
}
}
// Returns the equivalent C++ rocksdb::CompactionPri enum for the
// provided Java org.rocksdb.CompactionPriority
static rocksdb::CompactionPri toCppCompactionPriority(
jbyte jcompaction_priority) {
switch(jcompaction_priority) {
case 0x0:
return rocksdb::CompactionPri::kByCompensatedSize;
case 0x1:
return rocksdb::CompactionPri::kOldestLargestSeqFirst;
case 0x2:
return rocksdb::CompactionPri::kOldestSmallestSeqFirst;
case 0x3:
return rocksdb::CompactionPri::kMinOverlappingRatio;
default:
// undefined/default
return rocksdb::CompactionPri::kByCompensatedSize;
}
}
};
// The portal class for org.rocksdb.AccessHint
class AccessHintJni {
public:
// Returns the equivalent org.rocksdb.AccessHint for the provided
// C++ rocksdb::DBOptions::AccessHint enum
static jbyte toJavaAccessHint(
const rocksdb::DBOptions::AccessHint& access_hint) {
switch(access_hint) {
case rocksdb::DBOptions::AccessHint::NONE:
return 0x0;
case rocksdb::DBOptions::AccessHint::NORMAL:
return 0x1;
case rocksdb::DBOptions::AccessHint::SEQUENTIAL:
return 0x2;
case rocksdb::DBOptions::AccessHint::WILLNEED:
return 0x3;
default:
// undefined/default
return 0x1;
}
}
// Returns the equivalent C++ rocksdb::DBOptions::AccessHint enum for the
// provided Java org.rocksdb.AccessHint
static rocksdb::DBOptions::AccessHint toCppAccessHint(jbyte jaccess_hint) {
switch(jaccess_hint) {
case 0x0:
return rocksdb::DBOptions::AccessHint::NONE;
case 0x1:
return rocksdb::DBOptions::AccessHint::NORMAL;
case 0x2:
return rocksdb::DBOptions::AccessHint::SEQUENTIAL;
case 0x3:
return rocksdb::DBOptions::AccessHint::WILLNEED;
default:
// undefined/default
return rocksdb::DBOptions::AccessHint::NORMAL;
}
}
};
// The portal class for org.rocksdb.WALRecoveryMode
class WALRecoveryModeJni {
public:
// Returns the equivalent org.rocksdb.WALRecoveryMode for the provided
// C++ rocksdb::WALRecoveryMode enum
static jbyte toJavaWALRecoveryMode(
const rocksdb::WALRecoveryMode& wal_recovery_mode) {
switch(wal_recovery_mode) {
case rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords:
return 0x0;
case rocksdb::WALRecoveryMode::kAbsoluteConsistency:
return 0x1;
case rocksdb::WALRecoveryMode::kPointInTimeRecovery:
return 0x2;
case rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords:
return 0x3;
default:
// undefined/default
return 0x2;
}
}
// Returns the equivalent C++ rocksdb::WALRecoveryMode enum for the
// provided Java org.rocksdb.WALRecoveryMode
static rocksdb::WALRecoveryMode toCppWALRecoveryMode(jbyte jwal_recovery_mode) {
switch(jwal_recovery_mode) {
case 0x0:
return rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords;
case 0x1:
return rocksdb::WALRecoveryMode::kAbsoluteConsistency;
case 0x2:
return rocksdb::WALRecoveryMode::kPointInTimeRecovery;
case 0x3:
return rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords;
default:
// undefined/default
return rocksdb::WALRecoveryMode::kPointInTimeRecovery;
}
}
};
// various utility functions for working with RocksDB and JNI
class JniUtil {
public:
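
These portal classes only translate byte values at the JNI boundary; Java callers work with the corresponding enums directly. A hedged sketch, assuming the setWalRecoveryMode and setCompactionPriority setters added elsewhere in this change; the enum constant names follow the byte mappings above:

import org.rocksdb.*;

public class EnumOptionsExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final DBOptions dbOpts = new DBOptions();
         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      dbOpts.setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery);      // byte 0x2 above
      cfOpts.setCompactionPriority(CompactionPriority.MinOverlappingRatio); // byte 0x3 above
      cfOpts.setCompressionType(CompressionType.SNAPPY_COMPRESSION);        // byte 0x1 above
    }
  }
}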


@@ -0,0 +1,53 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* File access pattern once a compaction has started
*/
public enum AccessHint {
NONE((byte)0x0),
NORMAL((byte)0x1),
SEQUENTIAL((byte)0x2),
WILLNEED((byte)0x3);
private final byte value;
AccessHint(final byte value) {
this.value = value;
}
/**
* <p>Returns the byte representation of the enumeration value.</p>
*
* @return byte representation
*/
public byte getValue() {
return value;
}
/**
* <p>Get the AccessHint enumeration value by
* passing the byte identifier to this method.</p>
*
* @param byteIdentifier of AccessHint.
*
* @return AccessHint instance.
*
* @throws IllegalArgumentException if the access hint for the byteIdentifier
* cannot be found
*/
public static AccessHint getAccessHint(final byte byteIdentifier) {
for (final AccessHint accessHint : AccessHint.values()) {
if (accessHint.getValue() == byteIdentifier) {
return accessHint;
}
}
throw new IllegalArgumentException(
"Illegal value provided for AccessHint.");
}
}
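
A tiny round-trip of the lookup helper defined above, using the same byte values that the AccessHintJni portal class exchanges with C++:

import org.rocksdb.AccessHint;

public class AccessHintExample {
  public static void main(final String[] args) {
    final byte b = AccessHint.SEQUENTIAL.getValue();          // 0x2
    final AccessHint restored = AccessHint.getAccessHint(b);  // SEQUENTIAL again
    System.out.println(restored);
    // an unknown byte such as (byte) 0x7F would throw IllegalArgumentException
  }
}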


@@ -0,0 +1,465 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import java.util.List;
/**
* Advanced Column Family Options which are not
* mutable (i.e. not present in {@link AdvancedMutableColumnFamilyOptionsInterface})
*
* Taken from include/rocksdb/advanced_options.h
*/
public interface AdvancedColumnFamilyOptionsInterface
<T extends AdvancedColumnFamilyOptionsInterface> {
/**
* The minimum number of write buffers that will be merged together
* before writing to storage. If set to 1, then
* all write buffers are flushed to L0 as individual files and this increases
* read amplification because a get request has to check in all of these
* files. Also, an in-memory merge may result in writing less
* data to storage if there are duplicate records in each of these
* individual write buffers. Default: 1
*
* @param minWriteBufferNumberToMerge the minimum number of write buffers
* that will be merged together.
* @return the reference to the current options.
*/
T setMinWriteBufferNumberToMerge(
int minWriteBufferNumberToMerge);
/**
* The minimum number of write buffers that will be merged together
* before writing to storage. If set to 1, then
* all write buffers are flushed to L0 as individual files and this increases
* read amplification because a get request has to check in all of these
* files. Also, an in-memory merge may result in writing less
* data to storage if there are duplicate records in each of these
* individual write buffers. Default: 1
*
* @return the minimum number of write buffers that will be merged together.
*/
int minWriteBufferNumberToMerge();
/**
* The total maximum number of write buffers to maintain in memory including
* copies of buffers that have already been flushed. Unlike
* {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()},
* this parameter does not affect flushing.
* This controls the minimum amount of write history that will be available
* in memory for conflict checking when Transactions are used.
*
* When using an OptimisticTransactionDB:
* If this value is too low, some transactions may fail at commit time due
* to not being able to determine whether there were any write conflicts.
*
* When using a TransactionDB:
* If Transaction::SetSnapshot is used, TransactionDB will read either
* in-memory write buffers or SST files to do write-conflict checking.
* Increasing this value can reduce the number of reads to SST files
* done for conflict detection.
*
* Setting this value to 0 will cause write buffers to be freed immediately
* after they are flushed.
* If this value is set to -1,
* {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
* will be used.
*
* Default:
* If using a TransactionDB/OptimisticTransactionDB, the default value will
* be set to the value of
* {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}
* if it is not explicitly set by the user. Otherwise, the default is 0.
*
* @param maxWriteBufferNumberToMaintain The maximum number of write
* buffers to maintain
*
* @return the reference to the current options.
*/
T setMaxWriteBufferNumberToMaintain(
int maxWriteBufferNumberToMaintain);
/**
* The total maximum number of write buffers to maintain in memory including
* copies of buffers that have already been flushed.
*
* @return maxWriteBufferNumberToMaintain The maximum number of write buffers
* to maintain
*/
int maxWriteBufferNumberToMaintain();
/**
* Allows thread-safe inplace updates.
* If inplace_callback function is not set,
* Put(key, new_value) will update the existing_value in place iff
* * key exists in current memtable
* * new sizeof(new_value) &le; sizeof(existing_value)
* * existing_value for that key is a put i.e. kTypeValue
* If inplace_callback function is set, check doc for inplace_callback.
* Default: false.
*
* @param inplaceUpdateSupport true if thread-safe inplace updates
* are allowed.
* @return the reference to the current options.
*/
T setInplaceUpdateSupport(
boolean inplaceUpdateSupport);
/**
* Allows thread-safe inplace updates.
* If inplace_callback function is not set,
* Put(key, new_value) will update the existing_value in place iff
* * key exists in current memtable
* * new sizeof(new_value) &le; sizeof(existing_value)
* * existing_value for that key is a put i.e. kTypeValue
* If inplace_callback function is set, check doc for inplace_callback.
* Default: false.
*
* @return true if thread-safe inplace updates are allowed.
*/
boolean inplaceUpdateSupport();
/**
* Control locality of bloom filter probes to improve cache miss rate.
* This option only applies to memtable prefix bloom and plaintable
* prefix bloom. It essentially limits the max number of cache lines each
* bloom filter check can touch.
* This optimization is turned off when set to 0. The number should never
* be greater than the number of probes. This option can boost performance
* for in-memory workloads but should be used with care since it can cause
* a higher false positive rate.
* Default: 0
*
* @param bloomLocality the level of locality of bloom-filter probes.
* @return the reference to the current options.
*/
T setBloomLocality(int bloomLocality);
/**
* Control locality of bloom filter probes to improve cache miss rate.
* This option only applies to memtable prefix bloom and plaintable
* prefix bloom. It essentially limits the max number of cache lines each
* bloom filter check can touch.
* This optimization is turned off when set to 0. The number should never
* be greater than the number of probes. This option can boost performance
* for in-memory workloads but should be used with care since it can cause
* a higher false positive rate.
* Default: 0
*
* @return the level of locality of bloom-filter probes.
* @see #setBloomLocality(int)
*/
int bloomLocality();
/**
* <p>Different levels can have different compression
* policies. There are cases where most lower levels
* would like to use quick compression algorithms while
* the higher levels (which have more data) use
* compression algorithms that have better compression
* but could be slower. This array, if non-empty, should
* have an entry for each level of the database;
* these override the value specified in the previous
* field 'compression'.</p>
*
* <strong>NOTICE</strong>
* <p>If {@code level_compaction_dynamic_level_bytes=true},
* {@code compression_per_level[0]} still determines {@code L0},
* but other elements of the array are based on base level
* (the level {@code L0} files are merged to), and may not
* match the level users see from info log for metadata.
* </p>
* <p>If {@code L0} files are merged to level {@code n},
* then, for {@code i&gt;0}, {@code compression_per_level[i]}
* determines the compression type for level {@code n+i-1}.</p>
*
* <strong>Example</strong>
* <p>For example, if we have 5 levels, and we decide to
* merge {@code L0} data to {@code L4} (which means {@code L1..L3}
* will be empty), then the new files written to {@code L4} use
* compression type {@code compression_per_level[1]}.</p>
*
* <p>If {@code L0} is instead merged to {@code L2}, data going to
* {@code L2} will be compressed according to
* {@code compression_per_level[1]}, {@code L3} using
* {@code compression_per_level[2]} and {@code L4} using
* {@code compression_per_level[3]}. The compression for each
* level can change as the data grows.</p>
*
* <p><strong>Default:</strong> empty</p>
*
* @param compressionLevels list of
* {@link org.rocksdb.CompressionType} instances.
*
* @return the reference to the current options.
*/
T setCompressionPerLevel(
List<CompressionType> compressionLevels);
/**
* <p>Return the currently set {@link org.rocksdb.CompressionType}
* per level.</p>
*
* <p>See: {@link #setCompressionPerLevel(java.util.List)}</p>
*
* @return list of {@link org.rocksdb.CompressionType}
* instances.
*/
List<CompressionType> compressionPerLevel();
/**
* Set the number of levels for this database.
* If level-styled compaction is used, then this number determines
* the total number of levels.
*
* @param numLevels the number of levels.
* @return the reference to the current options.
*/
T setNumLevels(int numLevels);
/**
* If level-styled compaction is used, then this number determines
* the total number of levels.
*
* @return the number of levels.
*/
int numLevels();
/**
* <p>If {@code true}, RocksDB will pick target size of each level
* dynamically. We will pick a base level b &gt;= 1. L0 will be
* directly merged into level b, instead of always into level 1.
* Level 1 to b-1 need to be empty. We try to pick b and its target
* size so that</p>
*
* <ol>
* <li>target size is in the range of
* (max_bytes_for_level_base / max_bytes_for_level_multiplier,
* max_bytes_for_level_base]</li>
* <li>target size of the last level (level num_levels-1) equals the extra size
* of the level.</li>
* </ol>
*
* <p>At the same time max_bytes_for_level_multiplier and
* max_bytes_for_level_multiplier_additional are still satisfied.</p>
*
* <p>With this option on, from an empty DB, we make last level the base
* level, which means merging L0 data into the last level, until it exceeds
* max_bytes_for_level_base. And then we make the second last level to be
* base level, to start to merge L0 data to second last level, with its
* target size to be {@code 1/max_bytes_for_level_multiplier} of the last
* level's extra size. As the data accumulates further, we move the base
* level to the third last one, and so on.</p>
*
* <h2>Example</h2>
* <p>For example, assume {@code max_bytes_for_level_multiplier=10},
* {@code num_levels=6}, and {@code max_bytes_for_level_base=10MB}.</p>
*
* <p>Target sizes of levels 1 to 5 start with:</p>
* {@code [- - - - 10MB]}
* <p>with the base level being level 5. Target sizes of levels 1 to 4 are
* not applicable because they will not be used.
* Once the size of level 5 grows to more than 10MB, say 11MB, we move the
* base target to level 4 and the targets now look like:</p>
* {@code [- - - 1.1MB 11MB]}
* <p>As data accumulates, size targets are tuned based on the actual data
* in level 5. When level 5 has 50MB of data, the targets become:</p>
* {@code [- - - 5MB 50MB]}
* <p>Once level 5's actual size is more than 100MB, say 101MB, if we
* keep level 4 as the base level, its target size needs to be 10.1MB,
* which doesn't satisfy the target size range. So now we make level 3
* the base level and the target sizes of the levels look like:</p>
* {@code [- - 1.01MB 10.1MB 101MB]}
* <p>In the same way, while level 5 further grows, all levels' targets grow,
* like</p>
* {@code [- - 5MB 50MB 500MB]}
* <p>Once level 5 exceeds 1000MB, say 1001MB, we make level 2 the
* base level and make levels' target sizes like this:</p>
* {@code [- 1.001MB 10.01MB 100.1MB 1001MB]}
* <p>and go on...</p>
*
* <p>By doing it, we give {@code max_bytes_for_level_multiplier} a priority
* against {@code max_bytes_for_level_base}, for a more predictable LSM tree
* shape. It is useful for limiting worst-case space amplification.</p>
*
* <p>{@code max_bytes_for_level_multiplier_additional} is ignored with
* this flag on.</p>
*
* <p>Turning this feature on or off for an existing DB can cause unexpected
* LSM tree structure so it's not recommended.</p>
*
* <p><strong>Caution</strong>: this option is experimental</p>
*
* <p>Default: false</p>
*
* @param enableLevelCompactionDynamicLevelBytes boolean value indicating
* if {@code LevelCompactionDynamicLevelBytes} shall be enabled.
* @return the reference to the current options.
*/
@Experimental("Turning this feature on or off for an existing DB can cause" +
"unexpected LSM tree structure so it's not recommended")
T setLevelCompactionDynamicLevelBytes(
boolean enableLevelCompactionDynamicLevelBytes);
/**
* <p>Return if {@code LevelCompactionDynamicLevelBytes} is enabled.
* </p>
*
* <p>For further information see
* {@link #setLevelCompactionDynamicLevelBytes(boolean)}</p>
*
* @return boolean value indicating if
* {@code levelCompactionDynamicLevelBytes} is enabled.
*/
@Experimental("Caution: this option is experimental")
boolean levelCompactionDynamicLevelBytes();
/**
* Maximum size of each compaction (not guaranteed)
*
* @param maxCompactionBytes the compaction size limit
* @return the reference to the current options.
*/
T setMaxCompactionBytes(
long maxCompactionBytes);
/**
* Control maximum size of each compaction (not guaranteed)
*
* @return compaction size threshold
*/
long maxCompactionBytes();
/**
* Set compaction style for DB.
*
* Default: LEVEL.
*
* @param compactionStyle Compaction style.
* @return the reference to the current options.
*/
ColumnFamilyOptionsInterface setCompactionStyle(
CompactionStyle compactionStyle);
/**
* Compaction style for DB.
*
* @return Compaction style.
*/
CompactionStyle compactionStyle();
/**
* If {@link #compactionStyle()} == {@link CompactionStyle#LEVEL}, this
* determines, for each level, which files are prioritized to be picked for
* compaction.
*
* Default: {@link CompactionPriority#ByCompensatedSize}
*
* @param compactionPriority The compaction priority
*
* @return the reference to the current options.
*/
T setCompactionPriority(
CompactionPriority compactionPriority);
/**
* Get the Compaction priority if level compaction
* is used for all levels
*
* @return The compaction priority
*/
CompactionPriority compactionPriority();
/**
* Set the options needed to support Universal Style compactions
*
* @param compactionOptionsUniversal The Universal Style compaction options
*
* @return the reference to the current options.
*/
T setCompactionOptionsUniversal(
CompactionOptionsUniversal compactionOptionsUniversal);
/**
* The options needed to support Universal Style compactions
*
* @return The Universal Style compaction options
*/
CompactionOptionsUniversal compactionOptionsUniversal();
/**
* The options for FIFO compaction style
*
* @param compactionOptionsFIFO The FIFO compaction options
*
* @return the reference to the current options.
*/
T setCompactionOptionsFIFO(
CompactionOptionsFIFO compactionOptionsFIFO);
/**
* The options for FIFO compaction style
*
* @return The FIFO compaction options
*/
CompactionOptionsFIFO compactionOptionsFIFO();
/**
* <p>This flag specifies that the implementation should optimize the filters
* mainly for cases where keys are found rather than also optimizing for
* keys that are missed. This would be used in cases where the application
* knows that
* there are very few misses or the performance in the case of misses is not
* important.</p>
*
* <p>For now, this flag allows us to not store filters for the last level, i.e.
* the largest level which contains data of the LSM store. For keys which
* are hits, the filters in this level are not useful because we will search
* for the data anyway.</p>
*
* <p><strong>NOTE</strong>: the filters in other levels are still useful
* even for key hits because they tell us whether to look in that level or go
* to the higher level.</p>
*
* <p>Default: false</p>
*
* @param optimizeFiltersForHits boolean value indicating if this flag is set.
* @return the reference to the current options.
*/
T setOptimizeFiltersForHits(
boolean optimizeFiltersForHits);
/**
* <p>Returns the current state of the {@code optimize_filters_for_hits}
* setting.</p>
*
* @return boolean value indicating if the flag
* {@code optimize_filters_for_hits} was set.
*/
boolean optimizeFiltersForHits();
/**
* In debug mode, RocksDB runs consistency checks on the LSM every time the
* LSM changes (Flush, Compaction, AddFile). These checks are disabled in
* release mode; use this option to enable them in release mode as well.
*
* Default: false
*
* @param forceConsistencyChecks true to force consistency checks
*
* @return the reference to the current options.
*/
T setForceConsistencyChecks(
boolean forceConsistencyChecks);
/**
* In debug mode, RocksDB runs consistency checks on the LSM every time the
* LSM changes (Flush, Compaction, AddFile). These checks are disabled in
* release mode.
*
* @return true if consistency checks are enforced
*/
boolean forceConsistencyChecks();
}
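
ColumnFamilyOptions implements this interface, so the new accessors can be exercised directly. A hedged sketch of the per-level compression scheme described in the setCompressionPerLevel javadoc above; the chosen codecs are illustrative:

import java.util.Arrays;
import org.rocksdb.*;

public class AdvancedCfOptionsExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
      cfOpts.setNumLevels(5);
      // cheap codecs low in the tree, stronger compression for the large levels
      cfOpts.setCompressionPerLevel(Arrays.asList(
          CompressionType.NO_COMPRESSION,
          CompressionType.NO_COMPRESSION,
          CompressionType.SNAPPY_COMPRESSION,
          CompressionType.ZLIB_COMPRESSION,
          CompressionType.ZLIB_COMPRESSION));
      cfOpts.setLevelCompactionDynamicLevelBytes(true); // experimental, see above
      cfOpts.setOptimizeFiltersForHits(true);
    }
  }
}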


@@ -0,0 +1,437 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Advanced Column Family Options which are mutable
*
* Taken from include/rocksdb/advanced_options.h
* and MutableCFOptions in util/cf_options.h
*/
public interface AdvancedMutableColumnFamilyOptionsInterface
<T extends AdvancedMutableColumnFamilyOptionsInterface> {
/**
* The maximum number of write buffers that are built up in memory.
* The default is 2, so that when 1 write buffer is being flushed to
* storage, new writes can continue to the other write buffer.
* Default: 2
*
* @param maxWriteBufferNumber maximum number of write buffers.
* @return the instance of the current options.
*/
T setMaxWriteBufferNumber(
int maxWriteBufferNumber);
/**
* Returns maximum number of write buffers.
*
* @return maximum number of write buffers.
* @see #setMaxWriteBufferNumber(int)
*/
int maxWriteBufferNumber();
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @param inplaceUpdateNumLocks the number of locks used for
* inplace updates.
* @return the reference to the current options.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
T setInplaceUpdateNumLocks(
long inplaceUpdateNumLocks);
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @return the number of locks used for inplace update.
*/
long inplaceUpdateNumLocks();
/**
* if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* create prefix bloom for memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio.
* If it is larger than 0.25, it is sanitized to 0.25.
*
* Default: 0 (disable)
*
* @param memtablePrefixBloomSizeRatio The ratio
* @return the reference to the current options.
*/
T setMemtablePrefixBloomSizeRatio(
double memtablePrefixBloomSizeRatio);
/**
* if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* create prefix bloom for memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio.
* If it is larger than 0.25, it is sanitized to 0.25.
*
* Default: 0 (disable)
*
* @return the ratio
*/
double memtablePrefixBloomSizeRatio();
/**
* Page size for huge page TLB for bloom in memtable. If &le; 0, do not
* allocate from the huge page TLB but from malloc.
* Need to reserve huge pages for it to be allocated. For example:
* sysctl -w vm.nr_hugepages=20
* See linux doc Documentation/vm/hugetlbpage.txt
*
* @param memtableHugePageSize The page size of the huge
* page tlb
* @return the reference to the current options.
*/
T setMemtableHugePageSize(
long memtableHugePageSize);
/**
* Page size for huge page TLB for bloom in memtable. If &le; 0, do not
* allocate from the huge page TLB but from malloc.
* Need to reserve huge pages for it to be allocated. For example:
* sysctl -w vm.nr_hugepages=20
* See linux doc Documentation/vm/hugetlbpage.txt
*
* @return The page size of the huge page tlb
*/
long memtableHugePageSize();
/**
* The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* write_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) the size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) it should be a multiple of the CPU word size (which helps with the
* memory alignment).
*
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
*
* @param arenaBlockSize the size of an arena block
* @return the reference to the current options.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
T setArenaBlockSize(long arenaBlockSize);
/**
* The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* write_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) the size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) it should be a multiple of the CPU word size (which helps with the
* memory alignment).
*
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
*
* @return the size of an arena block
*/
long arenaBlockSize();
/**
* Soft limit on number of level-0 files. We start slowing down writes at this
* point. A value &lt; 0 means that no write slowdown will be triggered by
* the number of files in level-0.
*
* @param level0SlowdownWritesTrigger The soft limit on the number of
* level-0 files
* @return the reference to the current options.
*/
T setLevel0SlowdownWritesTrigger(
int level0SlowdownWritesTrigger);
/**
* Soft limit on number of level-0 files. We start slowing down writes at this
* point. A value &lt; 0 means that no write slowdown will be triggered by
* the number of files in level-0.
*
* @return The soft limit on the number of
* level-0 files
*/
int level0SlowdownWritesTrigger();
/**
* Maximum number of level-0 files. We stop writes at this point.
*
* @param level0StopWritesTrigger The maximum number of level-0 files
* @return the reference to the current options.
*/
T setLevel0StopWritesTrigger(
int level0StopWritesTrigger);
/**
* Maximum number of level-0 files. We stop writes at this point.
*
* @return The maximum number of level-0 files
*/
int level0StopWritesTrigger();
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* target_file_size_multiplier is 10, then each file on level-1 will
* be 2MB, and each file on level 2 will be 20MB,
* and each file on level-3 will be 200MB.
* By default targetFileSizeBase is 2MB.
*
* @param targetFileSizeBase the target size of a level-1 file.
* @return the reference to the current options.
*
* @see #setTargetFileSizeMultiplier(int)
*/
T setTargetFileSizeBase(
long targetFileSizeBase);
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* target_file_size_multiplier is 10, then each file on level-1 will
* be 2MB, and each file on level 2 will be 20MB,
* and each file on level-3 will be 200MB.
* By default targetFileSizeBase is 2MB.
*
* @return the target size of a level-1 file.
*
* @see #targetFileSizeMultiplier()
*/
long targetFileSizeBase();
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-(L+1) file and a level-L file.
* By default target_file_size_multiplier is 1, meaning
* files in different levels have the same target.
*
* @param multiplier the size ratio between a level-(L+1) file
* and level-L file.
* @return the reference to the current options.
*/
T setTargetFileSizeMultiplier(
int multiplier);
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-(L+1) file and level-L file.
* By default targetFileSizeMultiplier is 1, meaning
* files in different levels have the same target.
*
* @return the size ratio between a level-(L+1) file and level-L file.
*/
int targetFileSizeMultiplier();
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L.
* @return the reference to the current options.
*
* See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)}
*/
T setMaxBytesForLevelMultiplier(double multiplier);
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L.
*
* See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()}
*/
double maxBytesForLevelMultiplier();
/**
* Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level.
*
* Default: 1
*
* @param maxBytesForLevelMultiplierAdditional The max-size multipliers
* for each level
* @return the reference to the current options.
*/
T setMaxBytesForLevelMultiplierAdditional(
int[] maxBytesForLevelMultiplierAdditional);
/**
* Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level.
*
* Default: 1
*
* @return The max-size multipliers for each level
*/
int[] maxBytesForLevelMultiplierAdditional();
/**
* All writes will be slowed down to at least delayed_write_rate if the
* estimated bytes needed to be compacted exceed this threshold.
*
* Default: 64GB
*
* @param softPendingCompactionBytesLimit The soft limit to impose on
* compaction
* @return the reference to the current options.
*/
T setSoftPendingCompactionBytesLimit(
long softPendingCompactionBytesLimit);
/**
* All writes will be slowed down to at least delayed_write_rate if the
* estimated bytes needed to be compacted exceed this threshold.
*
* Default: 64GB
*
* @return The soft limit to impose on compaction
*/
long softPendingCompactionBytesLimit();
/**
* All writes are stopped if the estimated bytes needed to be compacted exceed
* this threshold.
*
* Default: 256GB
*
* @param hardPendingCompactionBytesLimit The hard limit to impose on
* compaction
* @return the reference to the current options.
*/
T setHardPendingCompactionBytesLimit(
long hardPendingCompactionBytesLimit);
/**
* All writes are stopped if the estimated bytes needed to be compacted exceed
* this threshold.
*
* Default: 256GB
*
* @return The hard limit to impose on compaction
*/
long hardPendingCompactionBytesLimit();
/**
* An iterator's Next() call sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @param maxSequentialSkipInIterations the number of keys that can
* be skipped in an iteration.
* @return the reference to the current options.
*/
T setMaxSequentialSkipInIterations(
long maxSequentialSkipInIterations);
/**
* An iterator's Next() call sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @return the number of keys that can be skipped in an iteration.
*/
long maxSequentialSkipInIterations();
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @param maxSuccessiveMerges the maximum number of successive merges.
* @return the reference to the current options.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
T setMaxSuccessiveMerges(
long maxSuccessiveMerges);
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @return the maximum number of successive merges.
*/
long maxSuccessiveMerges();
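A minimal sketch of capping a key's merge chain; "uint64add" is one of the built-in operator names accepted by setMergeOperatorName:

try (final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions()) {
  columnFamilyOptions
      .setMergeOperatorName("uint64add")
      .setMaxSuccessiveMerges(64);  // fold a key's pending merges once 64 accumulate
}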
/**
* After writing every SST file, reopen it and read all the keys.
*
* Default: false
*
* @param paranoidFileChecks true to enable paranoid file checks
* @return the reference to the current options.
*/
T setParanoidFileChecks(
boolean paranoidFileChecks);
/**
* After writing every SST file, reopen it and read all the keys.
*
* Default: false
*
* @return true if paranoid file checks are enabled
*/
boolean paranoidFileChecks();
/**
* Measure IO stats in compactions and flushes, if true.
*
* Default: false
*
* @param reportBgIoStats true to enable reporting
* @return the reference to the current options.
*/
T setReportBgIoStats(
boolean reportBgIoStats);
/**
* Determine whether IO stats in compactions and flushes are being measured.
*
* @return true if reporting is enabled
*/
boolean reportBgIoStats();
}
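And a sketch of the two diagnostic flags above, again via a ColumnFamilyOptions instance:

try (final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions()) {
  columnFamilyOptions
      .setParanoidFileChecks(true)  // re-read each SST file after it is written
      .setReportBgIoStats(true);    // measure IO stats in compactions and flushes
}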

View File

@ -18,6 +18,11 @@ import java.io.File;
*/
public class BackupableDBOptions extends RocksObject {
private Env backupEnv = null;
private Logger infoLog = null;
private RateLimiter backupRateLimiter = null;
private RateLimiter restoreRateLimiter = null;
/**
* <p>BackupableDBOptions constructor.</p>
*
@ -49,6 +54,40 @@ public class BackupableDBOptions extends RocksObject {
return backupDir(nativeHandle_);
}
/**
* Backup Env object. It will be used for backup file I/O. If it is
* null, backups will be written out using the DB's Env. Otherwise the
* backup's I/O will be performed using this object.
*
* If you want to have backups on HDFS, use HDFS Env here!
*
* Default: null
*
* @param env The environment to use
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setBackupEnv(final Env env) {
assert(isOwningHandle());
setBackupEnv(nativeHandle_, env.nativeHandle_);
this.backupEnv = env;
return this;
}
/**
* Backup Env object. It will be used for backup file I/O. If it is
* null, backups will be written out using the DB's Env. Otherwise the
* backup's I/O will be performed using this object.
*
* If you want to have backups on HDFS, use HDFS Env here!
*
* Default: null
*
* @return The environment in use
*/
public Env backupEnv() {
return this.backupEnv;
}
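A minimal sketch of the setter above; Env.getDefault() is RocksJava's built-in default environment, and the backup path is a placeholder:

try (final BackupableDBOptions backupableDBOptions =
         new BackupableDBOptions("/path/to/backups")) {  // placeholder path
  backupableDBOptions.setBackupEnv(Env.getDefault());
}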
/**
* <p>Share table files between backups.</p>
*
@ -79,6 +118,30 @@ public class BackupableDBOptions extends RocksObject {
return shareTableFiles(nativeHandle_);
}
/**
* Set the logger to use for Backup info and error messages
*
* @param logger The logger to use for the backup
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setInfoLog(final Logger logger) {
assert(isOwningHandle());
setInfoLog(nativeHandle_, logger.nativeHandle_);
this.infoLog = logger;
return this;
}
/**
* Set the logger to use for Backup info and error messages
*
* Default: null
*
* @return The logger in use for the backup
*/
public Logger infoLog() {
return this.infoLog;
}
/**
* <p>Set synchronous backups.</p>
*
@ -189,6 +252,35 @@ public class BackupableDBOptions extends RocksObject {
return backupRateLimit(nativeHandle_);
}
/**
* Backup rate limiter. Used to control transfer speed for backup. If this is
* not null, {@link #backupRateLimit()} is ignored.
*
* Default: null
*
* @param backupRateLimiter The rate limiter to use for the backup
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setBackupRateLimiter(final RateLimiter backupRateLimiter) {
assert(isOwningHandle());
setBackupRateLimiter(nativeHandle_, backupRateLimiter.nativeHandle_);
this.backupRateLimiter = backupRateLimiter;
return this;
}
/**
* Backup rate limiter. Used to control transfer speed for backup. If this is
* not null, {@link #backupRateLimit()} is ignored.
*
* Default: null
*
* @return The rate limiter in use for the backup
*/
public RateLimiter backupRateLimiter() {
assert(isOwningHandle());
return this.backupRateLimiter;
}
/**
* <p>Set restore rate limit.</p>
*
@ -218,6 +310,35 @@ public class BackupableDBOptions extends RocksObject {
return restoreRateLimit(nativeHandle_);
}
/**
* Restore rate limiter. Used to control transfer speed during restore. If
* this is not null, {@link #restoreRateLimit()} is ignored.
*
* Default: null
*
* @param restoreRateLimiter The rate limiter to use during restore
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setRestoreRateLimiter(final RateLimiter restoreRateLimiter) {
assert(isOwningHandle());
setRestoreRateLimiter(nativeHandle_, restoreRateLimiter.nativeHandle_);
this.restoreRateLimiter = restoreRateLimiter;
return this;
}
/**
* Restore rate limiter. Used to control transfer speed during restore. If
* this is not null, {@link #restoreRateLimit()} is ignored.
*
* Default: null
*
* @return The rate limiter in use during restore
*/
public RateLimiter restoreRateLimiter() {
assert(isOwningHandle());
return this.restoreRateLimiter;
}
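A sketch of throttling both backup and restore with a single limiter, assuming RateLimiter's single-argument constructor takes a rate in bytes per second:

try (final RateLimiter rateLimiter = new RateLimiter(16L * 1024 * 1024);  // assumed: 16MB/s
     final BackupableDBOptions backupableDBOptions =
         new BackupableDBOptions("/path/to/backups")) {  // placeholder path
  backupableDBOptions
      .setBackupRateLimiter(rateLimiter)
      .setRestoreRateLimiter(rateLimiter);
}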
/**
* <p>Only used if share_table_files is set to true. If true, will consider
* that backups can come from different databases, hence a sst is not uniquely
@ -252,10 +373,73 @@ public class BackupableDBOptions extends RocksObject {
return shareFilesWithChecksum(nativeHandle_);
}
/**
* Up to this many background threads will copy files for
* {@link BackupableDB#createNewBackup(boolean)} and
* {@link RestoreBackupableDB#restoreDBFromBackup(long, String, String, RestoreOptions)}
*
* Default: 1
*
* @param maxBackgroundOperations The maximum number of background threads
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setMaxBackgroundOperations(
final int maxBackgroundOperations) {
assert(isOwningHandle());
setMaxBackgroundOperations(nativeHandle_, maxBackgroundOperations);
return this;
}
/**
* Up to this many background threads will copy files for
* {@link BackupableDB#createNewBackup(boolean)} and
* {@link RestoreBackupableDB#restoreDBFromBackup(long, String, String, RestoreOptions)}
*
* Default: 1
*
* @return The maximum number of background threads
*/
public int maxBackgroundOperations() {
assert(isOwningHandle());
return maxBackgroundOperations(nativeHandle_);
}
/**
* During a backup, the user can get a callback every time the next
* {@link #callbackTriggerIntervalSize()} bytes have been copied.
*
* Default: 4194304
*
* @param callbackTriggerIntervalSize The interval size for the
* callback trigger
* @return instance of current BackupableDBOptions.
*/
public BackupableDBOptions setCallbackTriggerIntervalSize(
final long callbackTriggerIntervalSize) {
assert(isOwningHandle());
setCallbackTriggerIntervalSize(nativeHandle_, callbackTriggerIntervalSize);
return this;
}
/**
* During a backup, the user can get a callback every time the next
* {@link #callbackTriggerIntervalSize()} bytes have been copied.
*
* Default: 4194304
*
* @return The interval size for the callback trigger
*/
public long callbackTriggerIntervalSize() {
assert(isOwningHandle());
return callbackTriggerIntervalSize(nativeHandle_);
}
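A sketch combining the two settings above; the values are illustrative:

try (final BackupableDBOptions backupableDBOptions =
         new BackupableDBOptions("/path/to/backups")) {  // placeholder path
  backupableDBOptions
      .setMaxBackgroundOperations(4)                      // copy with up to 4 threads
      .setCallbackTriggerIntervalSize(8L * 1024 * 1024);  // callback every 8MB
}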
private native static long newBackupableDBOptions(final String path);
private native String backupDir(long handle);
private native void setBackupEnv(final long handle, final long envHandle);
private native void setShareTableFiles(long handle, boolean flag);
private native boolean shareTableFiles(long handle);
private native void setInfoLog(final long handle, final long infoLogHandle);
private native void setSync(long handle, boolean flag);
private native boolean sync(long handle);
private native void setDestroyOldData(long handle, boolean flag);
@ -264,9 +448,18 @@ public class BackupableDBOptions extends RocksObject {
private native boolean backupLogFiles(long handle);
private native void setBackupRateLimit(long handle, long rateLimit);
private native long backupRateLimit(long handle);
private native void setBackupRateLimiter(long handle, long rateLimiterHandle);
private native void setRestoreRateLimit(long handle, long rateLimit);
private native long restoreRateLimit(long handle);
private native void setRestoreRateLimiter(final long handle,
final long rateLimiterHandle);
private native void setShareFilesWithChecksum(long handle, boolean flag);
private native boolean shareFilesWithChecksum(long handle);
private native void setMaxBackgroundOperations(final long handle,
final int maxBackgroundOperations);
private native int maxBackgroundOperations(final long handle);
private native void setCallbackTriggerIntervalSize(final long handle,
long callbackTriggerIntervalSize);
private native long callbackTriggerIntervalSize(final long handle);
@Override protected final native void disposeInternal(final long handle);
}

View File

@ -0,0 +1,13 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public abstract class Cache extends RocksObject {
protected Cache(final long nativeHandle) {
super(nativeHandle);
}
}

View File

@ -0,0 +1,59 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Similar to {@link LRUCache}, but based on the CLOCK algorithm with
* better concurrent performance in some cases.
*/
public class ClockCache extends Cache {
/**
* Create a new cache with a fixed size capacity.
*
* @param capacity The fixed size capacity of the cache
*/
public ClockCache(final long capacity) {
super(newClockCache(capacity, -1, false));
}
/**
* Create a new cache with a fixed size capacity. The cache is sharded
* to 2^numShardBits shards, by hash of the key. The total capacity
* is divided and evenly assigned to each shard.
* numShardBits = -1 means it is automatically determined: every shard
* will be at least 512KB and number of shard bits will not exceed 6.
*
* @param capacity The fixed size capacity of the cache
* @param numShardBits The cache is sharded to 2^numShardBits shards,
* by hash of the key
*/
public ClockCache(final long capacity, final int numShardBits) {
super(newClockCache(capacity, numShardBits, false));
}
/**
* Create a new cache with a fixed size capacity. The cache is sharded
* to 2^numShardBits shards, by hash of the key. The total capacity
* is divided and evenly assigned to each shard. If strictCapacityLimit
* is set, inserts into the cache will fail when the cache is full.
* numShardBits = -1 means it is automatically determined: every shard
* will be at least 512KB and number of shard bits will not exceed 6.
*
* @param capacity The fixed size capacity of the cache
* @param numShardBits The cache is sharded to 2^numShardBits shards,
* by hash of the key
* @param strictCapacityLimit whether inserts into the cache should fail
* when the cache is full
*/
public ClockCache(final long capacity, final int numShardBits,
final boolean strictCapacityLimit) {
super(newClockCache(capacity, numShardBits, strictCapacityLimit));
}
private native static long newClockCache(final long capacity,
final int numShardBits, final boolean strictCapacityLimit);
@Override protected final native void disposeInternal(final long handle);
}
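A minimal construction sketch; which options objects consume a Cache handle is outside this excerpt, so that step is left as a comment:

try (final Cache cache = new ClockCache(64L * 1024 * 1024, 6, true)) {
  // 64MB split across 2^6 shards (1MB each); with strictCapacityLimit=true,
  // inserts fail once the cache is full. Pass the handle to whichever
  // options/table-config object accepts a Cache.
}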

View File

@ -17,8 +17,8 @@ import java.util.Properties;
* automatically and native resources will be released as part of the process.
*/
public class ColumnFamilyOptions extends RocksObject
implements ColumnFamilyOptionsInterface,
MutableColumnFamilyOptionsInterface {
implements ColumnFamilyOptionsInterface<ColumnFamilyOptions>,
MutableColumnFamilyOptionsInterface<ColumnFamilyOptions> {
static {
RocksDB.loadLibrary();
}
@ -74,6 +74,12 @@ public class ColumnFamilyOptions extends RocksObject
return columnFamilyOptions;
}
@Override
public ColumnFamilyOptions optimizeForSmallDb() {
optimizeForSmallDb(nativeHandle_);
return this;
}
@Override
public ColumnFamilyOptions optimizeForPointLookup(
final long blockCacheSizeMb) {
@ -217,7 +223,7 @@ public class ColumnFamilyOptions extends RocksObject
@Override
public CompressionType compressionType() {
return CompressionType.values()[compressionType(nativeHandle_)];
return CompressionType.getCompressionType(compressionType(nativeHandle_));
}
@Override
@ -244,6 +250,33 @@ public class ColumnFamilyOptions extends RocksObject
return compressionLevels;
}
@Override
public ColumnFamilyOptions setBottommostCompressionType(
final CompressionType bottommostCompressionType) {
setBottommostCompressionType(nativeHandle_,
bottommostCompressionType.getValue());
return this;
}
@Override
public CompressionType bottommostCompressionType() {
return CompressionType.getCompressionType(
bottommostCompressionType(nativeHandle_));
}
@Override
public ColumnFamilyOptions setCompressionOptions(
final CompressionOptions compressionOptions) {
setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_);
this.compressionOptions_ = compressionOptions;
return this;
}
@Override
public CompressionOptions compressionOptions() {
return this.compressionOptions_;
}
@Override
public ColumnFamilyOptions setNumLevels(final int numLevels) {
setNumLevels(nativeHandle_, numLevels);
@ -291,17 +324,6 @@ public class ColumnFamilyOptions extends RocksObject
return levelZeroStopWritesTrigger(nativeHandle_);
}
@Override
public ColumnFamilyOptions setMaxMemCompactionLevel(
final int maxMemCompactionLevel) {
return this;
}
@Override
public int maxMemCompactionLevel() {
return 0;
}
@Override
public ColumnFamilyOptions setTargetFileSizeBase(
final long targetFileSizeBase) {
@ -373,43 +395,6 @@ public class ColumnFamilyOptions extends RocksObject
return maxCompactionBytes(nativeHandle_);
}
@Override
public ColumnFamilyOptions setSoftRateLimit(
final double softRateLimit) {
setSoftRateLimit(nativeHandle_, softRateLimit);
return this;
}
@Override
public double softRateLimit() {
return softRateLimit(nativeHandle_);
}
@Override
public ColumnFamilyOptions setHardRateLimit(
final double hardRateLimit) {
setHardRateLimit(nativeHandle_, hardRateLimit);
return this;
}
@Override
public double hardRateLimit() {
return hardRateLimit(nativeHandle_);
}
@Override
public ColumnFamilyOptions setRateLimitDelayMaxMilliseconds(
final int rateLimitDelayMaxMilliseconds) {
setRateLimitDelayMaxMilliseconds(
nativeHandle_, rateLimitDelayMaxMilliseconds);
return this;
}
@Override
public int rateLimitDelayMaxMilliseconds() {
return rateLimitDelayMaxMilliseconds(nativeHandle_);
}
@Override
public ColumnFamilyOptions setArenaBlockSize(
final long arenaBlockSize) {
@ -434,19 +419,6 @@ public class ColumnFamilyOptions extends RocksObject
return disableAutoCompactions(nativeHandle_);
}
@Override
public ColumnFamilyOptions setPurgeRedundantKvsWhileFlush(
final boolean purgeRedundantKvsWhileFlush) {
setPurgeRedundantKvsWhileFlush(
nativeHandle_, purgeRedundantKvsWhileFlush);
return this;
}
@Override
public boolean purgeRedundantKvsWhileFlush() {
return purgeRedundantKvsWhileFlush(nativeHandle_);
}
@Override
public ColumnFamilyOptions setCompactionStyle(
final CompactionStyle compactionStyle) {
@ -486,11 +458,17 @@ public class ColumnFamilyOptions extends RocksObject
return maxSequentialSkipInIterations(nativeHandle_);
}
@Override
public MemTableConfig memTableConfig() {
return this.memTableConfig_;
}
@Override
public ColumnFamilyOptions setMemTableConfig(
final MemTableConfig config) {
memTableConfig_ = config;
setMemTableFactory(nativeHandle_, config.newMemTableFactoryHandle());
final MemTableConfig memTableConfig) {
setMemTableFactory(
nativeHandle_, memTableConfig.newMemTableFactoryHandle());
this.memTableConfig_ = memTableConfig;
return this;
}
@ -500,11 +478,16 @@ public class ColumnFamilyOptions extends RocksObject
return memTableFactoryName(nativeHandle_);
}
@Override
public TableFormatConfig tableFormatConfig() {
return this.tableFormatConfig_;
}
@Override
public ColumnFamilyOptions setTableFormatConfig(
final TableFormatConfig config) {
tableFormatConfig_ = config;
setTableFactory(nativeHandle_, config.newTableFactoryHandle());
final TableFormatConfig tableFormatConfig) {
setTableFactory(nativeHandle_, tableFormatConfig.newTableFactoryHandle());
this.tableFormatConfig_ = tableFormatConfig;
return this;
}
@ -677,6 +660,81 @@ public class ColumnFamilyOptions extends RocksObject
return paranoidFileChecks(nativeHandle_);
}
@Override
public ColumnFamilyOptions setMaxWriteBufferNumberToMaintain(
final int maxWriteBufferNumberToMaintain) {
setMaxWriteBufferNumberToMaintain(
nativeHandle_, maxWriteBufferNumberToMaintain);
return this;
}
@Override
public int maxWriteBufferNumberToMaintain() {
return maxWriteBufferNumberToMaintain(nativeHandle_);
}
@Override
public ColumnFamilyOptions setCompactionPriority(
final CompactionPriority compactionPriority) {
setCompactionPriority(nativeHandle_, compactionPriority.getValue());
return this;
}
@Override
public CompactionPriority compactionPriority() {
return CompactionPriority.getCompactionPriority(
compactionPriority(nativeHandle_));
}
@Override
public ColumnFamilyOptions setReportBgIoStats(final boolean reportBgIoStats) {
setReportBgIoStats(nativeHandle_, reportBgIoStats);
return this;
}
@Override
public boolean reportBgIoStats() {
return reportBgIoStats(nativeHandle_);
}
@Override
public ColumnFamilyOptions setCompactionOptionsUniversal(
final CompactionOptionsUniversal compactionOptionsUniversal) {
setCompactionOptionsUniversal(nativeHandle_,
compactionOptionsUniversal.nativeHandle_);
this.compactionOptionsUniversal_ = compactionOptionsUniversal;
return this;
}
@Override
public CompactionOptionsUniversal compactionOptionsUniversal() {
return this.compactionOptionsUniversal_;
}
@Override
public ColumnFamilyOptions setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) {
setCompactionOptionsFIFO(nativeHandle_,
compactionOptionsFIFO.nativeHandle_);
this.compactionOptionsFIFO_ = compactionOptionsFIFO;
return this;
}
@Override
public CompactionOptionsFIFO compactionOptionsFIFO() {
return this.compactionOptionsFIFO_;
}
@Override
public ColumnFamilyOptions setForceConsistencyChecks(final boolean forceConsistencyChecks) {
setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks);
return this;
}
@Override
public boolean forceConsistencyChecks() {
return forceConsistencyChecks(nativeHandle_);
}
/**
* <p>Private constructor to be used by
* {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}</p>
@ -693,6 +751,7 @@ public class ColumnFamilyOptions extends RocksObject
private static native long newColumnFamilyOptions();
@Override protected final native void disposeInternal(final long handle);
private native void optimizeForSmallDb(final long handle);
private native void optimizeForPointLookup(long handle,
long blockCacheSizeMb);
private native void optimizeLevelStyleCompaction(long handle,
@ -720,6 +779,11 @@ public class ColumnFamilyOptions extends RocksObject
private native void setCompressionPerLevel(long handle,
byte[] compressionLevels);
private native byte[] compressionPerLevel(long handle);
private native void setBottommostCompressionType(long handle,
byte bottommostCompressionType);
private native byte bottommostCompressionType(long handle);
private native void setCompressionOptions(long handle,
long compressionOptionsHandle);
private native void useFixedLengthPrefixExtractor(
long handle, int prefixLength);
private native void useCappedPrefixExtractor(
@ -753,15 +817,6 @@ public class ColumnFamilyOptions extends RocksObject
private native double maxBytesForLevelMultiplier(long handle);
private native void setMaxCompactionBytes(long handle, long maxCompactionBytes);
private native long maxCompactionBytes(long handle);
private native void setSoftRateLimit(
long handle, double softRateLimit);
private native double softRateLimit(long handle);
private native void setHardRateLimit(
long handle, double hardRateLimit);
private native double hardRateLimit(long handle);
private native void setRateLimitDelayMaxMilliseconds(
long handle, int rateLimitDelayMaxMilliseconds);
private native int rateLimitDelayMaxMilliseconds(long handle);
private native void setArenaBlockSize(
long handle, long arenaBlockSize)
throws IllegalArgumentException;
@ -774,9 +829,6 @@ public class ColumnFamilyOptions extends RocksObject
private native void setMaxTableFilesSizeFIFO(
long handle, long max_table_files_size);
private native long maxTableFilesSizeFIFO(long handle);
private native void setPurgeRedundantKvsWhileFlush(
long handle, boolean purgeRedundantKvsWhileFlush);
private native boolean purgeRedundantKvsWhileFlush(long handle);
private native void setMaxSequentialSkipInIterations(
long handle, long maxSequentialSkipInIterations);
private native long maxSequentialSkipInIterations(long handle);
@ -828,9 +880,30 @@ public class ColumnFamilyOptions extends RocksObject
private native void setParanoidFileChecks(long handle,
boolean paranoidFileChecks);
private native boolean paranoidFileChecks(long handle);
private native void setMaxWriteBufferNumberToMaintain(final long handle,
final int maxWriteBufferNumberToMaintain);
private native int maxWriteBufferNumberToMaintain(final long handle);
private native void setCompactionPriority(final long handle,
final byte compactionPriority);
private native byte compactionPriority(final long handle);
private native void setReportBgIoStats(final long handle,
final boolean reportBgIoStats);
private native boolean reportBgIoStats(final long handle);
private native void setCompactionOptionsUniversal(final long handle,
final long compactionOptionsUniversalHandle);
private native void setCompactionOptionsFIFO(final long handle,
final long compactionOptionsFIFOHandle);
private native void setForceConsistencyChecks(final long handle,
final boolean forceConsistencyChecks);
private native boolean forceConsistencyChecks(final long handle);
// instance variables
private MemTableConfig memTableConfig_;
private TableFormatConfig tableFormatConfig_;
private AbstractComparator<? extends AbstractSlice<?>> comparator_;
private AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter_;
private CompactionOptionsUniversal compactionOptionsUniversal_;
private CompactionOptionsFIFO compactionOptionsFIFO_;
private CompressionOptions compressionOptions_;
MemTableConfig memTableConfig_;
TableFormatConfig tableFormatConfig_;
AbstractComparator<? extends AbstractSlice<?>> comparator_;
AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter_;
}
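A sketch exercising several of the setters introduced above; the ZLIB choice is illustrative, and CompressionOptions is assumed to have a no-arg constructor like the other option holders in this change:

try (final CompressionOptions compressionOptions = new CompressionOptions();  // assumed ctor
     final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions()
         .setBottommostCompressionType(CompressionType.ZLIB_COMPRESSION)
         .setCompressionOptions(compressionOptions)
         .setCompactionPriority(CompactionPriority.MinOverlappingRatio)
         .setForceConsistencyChecks(true)) {
  // hand columnFamilyOptions to a ColumnFamilyDescriptor when opening the DB
}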

View File

@ -5,18 +5,26 @@
package org.rocksdb;
import java.util.List;
public interface ColumnFamilyOptionsInterface
<T extends ColumnFamilyOptionsInterface>
extends AdvancedColumnFamilyOptionsInterface<T> {
public interface ColumnFamilyOptionsInterface {
/**
* Use this if your DB is very small (like under 1GB) and you don't want to
* spend lots of memory for memtables.
*
* @return the instance of the current object.
*/
T optimizeForSmallDb();
/**
* Use this if you don't need to keep the data sorted, i.e. you'll never use
* an iterator, only Put() and Get() API calls
*
* @param blockCacheSizeMb Block cache size in MB
* @return the instance of the current Object.
* @return the instance of the current object.
*/
ColumnFamilyOptionsInterface optimizeForPointLookup(long blockCacheSizeMb);
T optimizeForPointLookup(long blockCacheSizeMb);
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
@ -29,9 +37,9 @@ public interface ColumnFamilyOptionsInterface {
* <p>Note: we might use more memory than memtable_memory_budget during high
* write rate period</p>
*
* @return the instance of the current Object.
* @return the instance of the current object.
*/
ColumnFamilyOptionsInterface optimizeLevelStyleCompaction();
T optimizeLevelStyleCompaction();
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
@ -45,9 +53,10 @@ public interface ColumnFamilyOptionsInterface {
* write rate period</p>
*
* @param memtableMemoryBudget memory budget in bytes
* @return the instance of the current Object.
* @return the instance of the current object.
*/
Object optimizeLevelStyleCompaction(long memtableMemoryBudget);
T optimizeLevelStyleCompaction(
long memtableMemoryBudget);
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
@ -64,9 +73,9 @@ public interface ColumnFamilyOptionsInterface {
* <p>Note: we might use more memory than memtable_memory_budget during high
* write rate period</p>
*
* @return the instance of the current Object.
* @return the instance of the current object.
*/
Object optimizeUniversalStyleCompaction();
T optimizeUniversalStyleCompaction();
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
@ -84,9 +93,10 @@ public interface ColumnFamilyOptionsInterface {
* write rate period</p>
*
* @param memtableMemoryBudget memory budget in bytes
* @return the instance of the current Object.
* @return the instance of the current object.
*/
Object optimizeUniversalStyleCompaction(long memtableMemoryBudget);
T optimizeUniversalStyleCompaction(
long memtableMemoryBudget);
/**
* Set {@link BuiltinComparator} to be used with RocksDB.
@ -95,9 +105,10 @@ public interface ColumnFamilyOptionsInterface {
*
* Default: BytewiseComparator.
* @param builtinComparator a {@link BuiltinComparator} type.
* @return the instance of the current Object.
* @return the instance of the current object.
*/
Object setComparator(BuiltinComparator builtinComparator);
T setComparator(
BuiltinComparator builtinComparator);
/**
* Use the specified comparator for key ordering.
@ -109,9 +120,10 @@ public interface ColumnFamilyOptionsInterface {
* Comparator instance can be re-used in multiple options instances.
*
* @param comparator java instance.
* @return the instance of the current Object.
* @return the instance of the current object.
*/
Object setComparator(AbstractComparator<? extends AbstractSlice<?>> comparator);
T setComparator(
AbstractComparator<? extends AbstractSlice<?>> comparator);
/**
* <p>Set the merge operator to be used for merging two merge operands
@ -124,9 +136,9 @@ public interface ColumnFamilyOptionsInterface {
* The merge function is specified by name and must be one of the
* standard merge operators provided by RocksDB. The available
* operators are "put", "uint64add", "stringappend" and "stringappendtest".
* @return the instance of the current Object.
* @return the instance of the current object.
*/
Object setMergeOperatorName(String name);
T setMergeOperatorName(String name);
/**
* <p>Set the merge operator to be used for merging two different key/value
@ -135,38 +147,9 @@ public interface ColumnFamilyOptionsInterface {
* to the same key are found in the database.</p>
*
* @param mergeOperator {@link MergeOperator} instance.
* @return the instance of the current Object.
* @return the instance of the current object.
*/
Object setMergeOperator(MergeOperator mergeOperator);
/**
* The minimum number of write buffers that will be merged together
* before writing to storage. If set to 1, then
* all write buffers are flushed to L0 as individual files and this increases
* read amplification because a get request has to check in all of these
* files. Also, an in-memory merge may result in writing less
* data to storage if there are duplicate records in each of these
* individual write buffers. Default: 1
*
* @param minWriteBufferNumberToMerge the minimum number of write buffers
* that will be merged together.
* @return the reference to the current option.
*/
Object setMinWriteBufferNumberToMerge(
int minWriteBufferNumberToMerge);
/**
* The minimum number of write buffers that will be merged together
* before writing to storage. If set to 1, then
* all write buffers are flushed to L0 as individual files and this increases
* read amplification because a get request has to check in all of these
* files. Also, an in-memory merge may result in writing less
* data to storage if there are duplicate records in each of these
* individual write buffers. Default: 1
*
* @return the minimum number of write buffers that will be merged together.
*/
int minWriteBufferNumberToMerge();
T setMergeOperator(MergeOperator mergeOperator);
/**
* This prefix-extractor uses the first n bytes of a key as its prefix.
@ -179,8 +162,7 @@ public interface ColumnFamilyOptionsInterface {
* @param n use the first n bytes of a key as its prefix.
* @return the reference to the current option.
*/
Object useFixedLengthPrefixExtractor(int n);
T useFixedLengthPrefixExtractor(int n);
/**
* Same as fixed length prefix extractor, except that when slice is
@ -189,102 +171,7 @@ public interface ColumnFamilyOptionsInterface {
* @param n use the first n bytes of a key as its prefix.
* @return the reference to the current option.
*/
Object useCappedPrefixExtractor(int n);
/**
* Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically.
*
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
*
* @param compressionType Compression Type.
* @return the reference to the current option.
*/
Object setCompressionType(CompressionType compressionType);
/**
* Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically.
*
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
*
* @return Compression type.
*/
CompressionType compressionType();
/**
* <p>Different levels can have different compression
* policies. There are cases where most lower levels
* would like to use quick compression algorithms while
* the higher levels (which have more data) use
* compression algorithms that have better compression
* but could be slower. This array, if non-empty, should
* have an entry for each level of the database;
* these override the value specified in the previous
* field 'compression'.</p>
*
* <strong>NOTICE</strong>
* <p>If {@code level_compaction_dynamic_level_bytes=true},
* {@code compression_per_level[0]} still determines {@code L0},
* but other elements of the array are based on base level
* (the level {@code L0} files are merged to), and may not
* match the level users see from info log for metadata.
* </p>
* <p>If {@code L0} files are merged to {@code level - n},
* then, for {@code i&gt;0}, {@code compression_per_level[i]}
* determines compression type for level {@code n+i-1}.</p>
*
* <strong>Example</strong>
* <p>For example, if we have 5 levels, and we decide to
* merge {@code L0} data to {@code L4} (which means {@code L1..L3}
* will be empty), then the new files that go to {@code L4} use
* compression type {@code compression_per_level[1]}.</p>
*
* <p>If now {@code L0} is merged to {@code L2}, data that goes to
* {@code L2} will be compressed according to
* {@code compression_per_level[1]}, {@code L3} using
* {@code compression_per_level[2]} and {@code L4} using
* {@code compression_per_level[3]}. Compaction for each
* level can change when data grows.</p>
*
* <p><strong>Default:</strong> empty</p>
*
* @param compressionLevels list of
* {@link org.rocksdb.CompressionType} instances.
*
* @return the reference to the current option.
*/
Object setCompressionPerLevel(
List<CompressionType> compressionLevels);
/**
* <p>Return the currently set {@link org.rocksdb.CompressionType}
* per instances.</p>
*
* <p>See: {@link #setCompressionPerLevel(java.util.List)}</p>
*
* @return list of {@link org.rocksdb.CompressionType}
* instances.
*/
List<CompressionType> compressionPerLevel();
/**
* Set the number of levels for this database
* If level-styled compaction is used, then this number determines
* the total number of levels.
*
* @param numLevels the number of levels.
* @return the reference to the current option.
*/
Object setNumLevels(int numLevels);
/**
* If level-styled compaction is used, then this number determines
* the total number of levels.
*
* @return the number of levels.
*/
int numLevels();
T useCappedPrefixExtractor(int n);
/**
* Number of files to trigger level-0 compaction. A value &lt; 0 means that
@ -294,7 +181,7 @@ public interface ColumnFamilyOptionsInterface {
* @param numFiles the number of files in level-0 to trigger compaction.
* @return the reference to the current option.
*/
Object setLevelZeroFileNumCompactionTrigger(
T setLevelZeroFileNumCompactionTrigger(
int numFiles);
/**
@ -315,7 +202,7 @@ public interface ColumnFamilyOptionsInterface {
* @param numFiles soft limit on number of level-0 files.
* @return the reference to the current option.
*/
Object setLevelZeroSlowdownWritesTrigger(
T setLevelZeroSlowdownWritesTrigger(
int numFiles);
/**
@ -333,7 +220,7 @@ public interface ColumnFamilyOptionsInterface {
* @param numFiles the hard limit of the number of level-0 files.
* @return the reference to the current option.
*/
Object setLevelZeroStopWritesTrigger(int numFiles);
T setLevelZeroStopWritesTrigger(int numFiles);
/**
* Maximum number of level-0 files. We stop writes at this point.
@ -342,111 +229,6 @@ public interface ColumnFamilyOptionsInterface {
*/
int levelZeroStopWritesTrigger();
/**
* This does nothing anymore. Deprecated.
*
* @param maxMemCompactionLevel Unused.
*
* @return the reference to the current option.
*/
@Deprecated
Object setMaxMemCompactionLevel(
int maxMemCompactionLevel);
/**
* This does nothing anymore. Deprecated.
*
* @return Always returns 0.
*/
@Deprecated
int maxMemCompactionLevel();
/**
* <p>If {@code true}, RocksDB will pick target size of each level
* dynamically. We will pick a base level b &gt;= 1. L0 will be
* directly merged into level b, instead of always into level 1.
* Level 1 to b-1 need to be empty. We try to pick b and its target
* size so that</p>
*
* <ol>
* <li>target size is in the range of
* (max_bytes_for_level_base / max_bytes_for_level_multiplier,
* max_bytes_for_level_base]</li>
* <li>target size of the last level (level num_levels-1) equals to extra size
* of the level.</li>
* </ol>
*
* <p>At the same time max_bytes_for_level_multiplier and
* max_bytes_for_level_multiplier_additional are still satisfied.</p>
*
* <p>With this option on, from an empty DB, we make last level the base
* level, which means merging L0 data into the last level, until it exceeds
* max_bytes_for_level_base. And then we make the second last level to be
* base level, to start to merge L0 data to second last level, with its
* target size to be {@code 1/max_bytes_for_level_multiplier} of the last
* level's extra size. After the data accumulates more, we move the base
* level to the third last one, and so on.</p>
*
* <h2>Example</h2>
* <p>For example, assume {@code max_bytes_for_level_multiplier=10},
* {@code num_levels=6}, and {@code max_bytes_for_level_base=10MB}.</p>
*
* <p>Target sizes of level 1 to 5 start with:</p>
* {@code [- - - - 10MB]}
* <p>with the base level being level 5. Target sizes of level 1 to 4 are not
* applicable because they will not be used.
* Once the size of level 5 grows to more than 10MB, say 11MB, we make
* level 4 the base level, and the targets now look like:</p>
* {@code [- - - 1.1MB 11MB]}
* <p>While data are accumulated, size targets are tuned based on actual data
* of level 5. When level 5 has 50MB of data, the target is like:</p>
* {@code [- - - 5MB 50MB]}
* <p>Eventually level 5's actual size exceeds 100MB, say 101MB. Now if we
* keep level 4 as the base level, its target size needs to be 10.1MB,
* which doesn't satisfy the target size range. So now we make level 3
* the base level and the target sizes of the levels look like:</p>
* {@code [- - 1.01MB 10.1MB 101MB]}
* <p>In the same way, while level 5 further grows, all levels' targets grow,
* like</p>
* {@code [- - 5MB 50MB 500MB]}
* <p>Until level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the
* base level and make levels' target sizes like this:</p>
* {@code [- 1.001MB 10.01MB 100.1MB 1001MB]}
* <p>and go on...</p>
*
* <p>By doing it, we give {@code max_bytes_for_level_multiplier} a priority
* against {@code max_bytes_for_level_base}, for a more predictable LSM tree
* shape. It is useful to limit worst-case space amplification.</p>
*
* <p>{@code max_bytes_for_level_multiplier_additional} is ignored with
* this flag on.</p>
*
* <p>Turning this feature on or off for an existing DB can cause unexpected
* LSM tree structure so it's not recommended.</p>
*
* <p><strong>Caution</strong>: this option is experimental</p>
*
* <p>Default: false</p>
*
* @param enableLevelCompactionDynamicLevelBytes boolean value indicating
* if {@code LevelCompactionDynamicLevelBytes} shall be enabled.
* @return the reference to the current option.
*/
Object setLevelCompactionDynamicLevelBytes(
boolean enableLevelCompactionDynamicLevelBytes);
/**
* <p>Return if {@code LevelCompactionDynamicLevelBytes} is enabled.
* </p>
*
* <p>For further information see
* {@link #setLevelCompactionDynamicLevelBytes(boolean)}</p>
*
* @return boolean value indicating if
* {@code levelCompactionDynamicLevelBytes} is enabled.
*/
boolean levelCompactionDynamicLevelBytes();
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
@ -456,7 +238,8 @@ public interface ColumnFamilyOptionsInterface {
* files and the total size of level-L files for all L.
* @return the reference to the current option.
*/
Object setMaxBytesForLevelMultiplier(double multiplier);
T setMaxBytesForLevelMultiplier(
double multiplier);
/**
* The ratio between the total size of level-(L+1) files and the total
@ -468,122 +251,6 @@ public interface ColumnFamilyOptionsInterface {
*/
double maxBytesForLevelMultiplier();
/**
* Maximum size of each compaction (not guaranteed)
*
* @param maxCompactionBytes the compaction size limit
* @return the reference to the current option.
*/
Object setMaxCompactionBytes(long maxCompactionBytes);
/**
* Control maximum size of each compaction (not guaranteed)
*
* @return compaction size threshold
*/
long maxCompactionBytes();
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @param softRateLimit the soft-rate-limit of a compaction score
* for put delay.
* @return the reference to the current option.
*/
Object setSoftRateLimit(double softRateLimit);
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @return soft-rate-limit for put delay.
*/
double softRateLimit();
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @param hardRateLimit the hard-rate-limit of a compaction score for put
* delay.
* @return the reference to the current option.
*/
Object setHardRateLimit(double hardRateLimit);
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @return the hard-rate-limit of a compaction score for put delay.
*/
double hardRateLimit();
/**
* The maximum time interval a put will be stalled when hard_rate_limit
* is enforced. If 0, then there is no limit.
* Default: 1000
*
* @param rateLimitDelayMaxMilliseconds the maximum time interval a put
* will be stalled.
* @return the reference to the current option.
*/
Object setRateLimitDelayMaxMilliseconds(
int rateLimitDelayMaxMilliseconds);
/**
* The maximum time interval a put will be stalled when hard_rate_limit
* is enforced. If 0, then there is no limit.
* Default: 1000
*
* @return the maximum time interval a put will be stalled when
* hard_rate_limit is enforced.
*/
int rateLimitDelayMaxMilliseconds();
/**
* Purge duplicate/deleted keys when a memtable is flushed to storage.
* Default: true
*
* @param purgeRedundantKvsWhileFlush true if purging keys is disabled.
* @return the reference to the current option.
*/
Object setPurgeRedundantKvsWhileFlush(
boolean purgeRedundantKvsWhileFlush);
/**
* Purge duplicate/deleted keys when a memtable is flushed to storage.
* Default: true
*
* @return true if purging keys is disabled.
*/
boolean purgeRedundantKvsWhileFlush();
/**
* Set compaction style for DB.
*
* Default: LEVEL.
*
* @param compactionStyle Compaction style.
* @return the reference to the current option.
*/
Object setCompactionStyle(CompactionStyle compactionStyle);
/**
* Compaction style for DB.
*
* @return Compaction style.
*/
CompactionStyle compactionStyle();
/**
* FIFO compaction option.
* The oldest table file will be deleted
@ -591,9 +258,10 @@ public interface ColumnFamilyOptionsInterface {
* The default value is 1GB (1 * 1024 * 1024 * 1024).
*
* @param maxTableFilesSize the size limit of the total sum of table files.
* @return the instance of the current Object.
* @return the instance of the current object.
*/
Object setMaxTableFilesSizeFIFO(long maxTableFilesSize);
T setMaxTableFilesSizeFIFO(
long maxTableFilesSize);
/**
* FIFO compaction option.
@ -605,15 +273,22 @@ public interface ColumnFamilyOptionsInterface {
*/
long maxTableFilesSizeFIFO();
/**
* Get the config for mem-table.
*
* @return the mem-table config.
*/
MemTableConfig memTableConfig();
/**
* Set the config for mem-table.
*
* @param config the mem-table config.
* @return the instance of the current Object.
* @param memTableConfig the mem-table config.
* @return the instance of the current object.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
Object setMemTableConfig(MemTableConfig config);
T setMemTableConfig(MemTableConfig memTableConfig);
/**
* Returns the name of the current mem table representation.
@ -624,13 +299,20 @@ public interface ColumnFamilyOptionsInterface {
*/
String memTableFactoryName();
/**
* Get the config for table format.
*
* @return the table format config.
*/
TableFormatConfig tableFormatConfig();
/**
* Set the config for table format.
*
* @param config the table format config.
* @return the reference of the current Options.
* @return the reference of the current options.
*/
Object setTableFormatConfig(TableFormatConfig config);
T setTableFormatConfig(TableFormatConfig config);
/**
* @return the name of the currently used table factory.
@ -638,98 +320,48 @@ public interface ColumnFamilyOptionsInterface {
String tableFactoryName();
/**
* Allows thread-safe inplace updates.
* If inplace_callback function is not set,
* Put(key, new_value) will update inplace the existing_value iff
* * key exists in current memtable
* * new sizeof(new_value) &le; sizeof(existing_value)
* * existing_value for that key is a put i.e. kTypeValue
* If inplace_callback function is set, check doc for inplace_callback.
* Default: false.
* Compression algorithm that will be used for the bottommost level that
* contains files. If level-compaction is used, this option will only affect
* levels after the base level.
*
* @param inplaceUpdateSupport true if thread-safe inplace updates
* are allowed.
* @return the reference to the current option.
* Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
*
* @param bottommostCompressionType The compression type to use for the
* bottommost level
*
* @return the reference of the current options.
*/
Object setInplaceUpdateSupport(boolean inplaceUpdateSupport);
T setBottommostCompressionType(
final CompressionType bottommostCompressionType);
/**
* Allows thread-safe inplace updates.
* If inplace_callback function is not set,
* Put(key, new_value) will update inplace the existing_value iff
* * key exists in current memtable
* * new sizeof(new_value) &le; sizeof(existing_value)
* * existing_value for that key is a put i.e. kTypeValue
* If inplace_callback function is set, check doc for inplace_callback.
* Default: false.
* Compression algorithm that will be used for the bottommost level that
* contains files. If level-compaction is used, this option will only affect
* levels after the base level.
*
* @return true if thread-safe inplace updates are allowed.
* Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
*
* @return The compression type used for the bottommost level
*/
boolean inplaceUpdateSupport();
CompressionType bottommostCompressionType();
/**
* Control locality of bloom filter probes to improve cache miss rate.
* This option only applies to memtable prefix bloom and plaintable
* prefix bloom. It essentially limits the max number of cache lines each
* bloom filter check can touch.
* This optimization is turned off when set to 0. The number should never
* be greater than number of probes. This option can boost performance
* for in-memory workload but should use with care since it can cause
* higher false positive rate.
* Default: 0
* Set the different options for compression algorithms
*
* @param bloomLocality the level of locality of bloom-filter probes.
* @return the reference to the current option.
* @param compressionOptions The compression options
*
* @return the reference of the current options.
*/
Object setBloomLocality(int bloomLocality);
T setCompressionOptions(
CompressionOptions compressionOptions);
/**
* Control locality of bloom filter probes to improve cache miss rate.
* This option only applies to memtable prefix bloom and plaintable
* prefix bloom. It essentially limits the max number of cache lines each
* bloom filter check can touch.
* This optimization is turned off when set to 0. The number should never
* be greater than number of probes. This option can boost performance
* for in-memory workload but should use with care since it can cause
* higher false positive rate.
* Default: 0
* Get the different options for compression algorithms
*
* @return the level of locality of bloom-filter probes.
* @see #setBloomLocality(int)
* @return The compression options
*/
int bloomLocality();
/**
* <p>This flag specifies that the implementation should optimize the filters
* mainly for cases where keys are found rather than also optimize for keys
* missed. This would be used in cases where the application knows that
* there are very few misses or the performance in the case of misses is not
* important.</p>
*
* <p>For now, this flag allows us to not store filters for the last level i.e
* the largest level which contains data of the LSM store. For keys which
* are hits, the filters in this level are not useful because we will search
* for the data anyway.</p>
*
* <p><strong>NOTE</strong>: the filters in other levels are still useful
* even for key hit because they tell us whether to look in that level or go
* to the higher level.</p>
*
* <p>Default: false</p>
*
* @param optimizeFiltersForHits boolean value indicating if this flag is set.
* @return the reference to the current option.
*/
Object setOptimizeFiltersForHits(boolean optimizeFiltersForHits);
/**
* <p>Returns the current state of the {@code optimize_filters_for_hits}
* setting.</p>
*
* @return boolean value indicating if the flag
* {@code optimize_filters_for_hits} was set.
*/
boolean optimizeFiltersForHits();
CompressionOptions compressionOptions();
/**
* Default memtable memory budget used with the following methods:

View File

@ -0,0 +1,50 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Options for FIFO Compaction
*/
public class CompactionOptionsFIFO extends RocksObject {
public CompactionOptionsFIFO() {
super(newCompactionOptionsFIFO());
}
/**
* Once the total sum of table files reaches this, we will delete the oldest
* table file
*
* Default: 1GB
*
* @param maxTableFilesSize The maximum size of the table files
*
* @return the reference to the current options.
*/
public CompactionOptionsFIFO setMaxTableFilesSize(
final long maxTableFilesSize) {
setMaxTableFilesSize(nativeHandle_, maxTableFilesSize);
return this;
}
/**
* Once the total sum of table files reaches this, we will delete the oldest
* table file
*
* Default: 1GB
*
* @return max table file size in bytes
*/
public long maxTableFilesSize() {
return maxTableFilesSize(nativeHandle_);
}
private native void setMaxTableFilesSize(long handle, long maxTableFilesSize);
private native long maxTableFilesSize(long handle);
private native static long newCompactionOptionsFIFO();
@Override protected final native void disposeInternal(final long handle);
}
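A sketch wiring these options into a column family configured for FIFO compaction; the 2GB cap is illustrative:

try (final CompactionOptionsFIFO compactionOptionsFIFO = new CompactionOptionsFIFO()
         .setMaxTableFilesSize(2L * 1024 * 1024 * 1024);  // delete oldest files past ~2GB
     final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions()
         .setCompactionStyle(CompactionStyle.FIFO)
         .setCompactionOptionsFIFO(compactionOptionsFIFO)) {
  // open the column family with columnFamilyOptions
}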

View File

@ -0,0 +1,273 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Options for Universal Compaction
*/
public class CompactionOptionsUniversal extends RocksObject {
public CompactionOptionsUniversal() {
super(newCompactionOptionsUniversal());
}
/**
* Percentage flexibility while comparing file size. If the candidate file(s)
* size is 1% smaller than the next file's size, then include the next file
* into this candidate set.
*
* Default: 1
*
* @param sizeRatio The size ratio to use
*
* @return the reference to the current options.
*/
public CompactionOptionsUniversal setSizeRatio(final int sizeRatio) {
setSizeRatio(nativeHandle_, sizeRatio);
return this;
}
/**
* Percentage flexibility while comparing file size. If the candidate file(s)
* size is 1% smaller than the next file's size, then include the next file
* into this candidate set.
*
* Default: 1
*
* @return The size ratio in use
*/
public int sizeRatio() {
return sizeRatio(nativeHandle_);
}
/**
* The minimum number of files in a single compaction run.
*
* Default: 2
*
* @param minMergeWidth minimum number of files in a single compaction run
*
* @return the reference to the current options.
*/
public CompactionOptionsUniversal setMinMergeWidth(final int minMergeWidth) {
setMinMergeWidth(nativeHandle_, minMergeWidth);
return this;
}
/**
* The minimum number of files in a single compaction run.
*
* Default: 2
*
* @return minimum number of files in a single compaction run
*/
public int minMergeWidth() {
return minMergeWidth(nativeHandle_);
}
/**
* The maximum number of files in a single compaction run.
*
* Default: {@link Long#MAX_VALUE}
*
* @param maxMergeWidth maximum number of files in a single compaction run
*
* @return the reference to the current options.
*/
public CompactionOptionsUniversal setMaxMergeWidth(final int maxMergeWidth) {
setMaxMergeWidth(nativeHandle_, maxMergeWidth);
return this;
}
/**
* The maximum number of files in a single compaction run.
*
* Default: {@link Long#MAX_VALUE}
*
* @return maximum number of files in a single compaction run
*/
public int maxMergeWidth() {
return maxMergeWidth(nativeHandle_);
}
/**
* The size amplification is defined as the amount (in percentage) of
* additional storage needed to store a single byte of data in the database.
* For example, a size amplification of 2% means that a database that
* contains 100 bytes of user-data may occupy up to 102 bytes of
* physical storage. By this definition, a fully compacted database has
* a size amplification of 0%. RocksDB uses the following heuristic
* to calculate size amplification: it assumes that all files excluding
* the earliest file contribute to the size amplification.
*
* Default: 200, which means that a 100 byte database could require up to
* 300 bytes of storage.
*
* @param maxSizeAmplificationPercent the amount of additional storage needed
* (as a percentage) to store a single byte in the database
*
* @return the reference to the current options.
*/
public CompactionOptionsUniversal setMaxSizeAmplificationPercent(
final int maxSizeAmplificationPercent) {
setMaxSizeAmplificationPercent(nativeHandle_, maxSizeAmplificationPercent);
return this;
}
/**
* The size amplification is defined as the amount (in percentage) of
* additional storage needed to store a single byte of data in the database.
* For example, a size amplification of 2% means that a database that
* contains 100 bytes of user-data may occupy up to 102 bytes of
* physical storage. By this definition, a fully compacted database has
* a size amplification of 0%. RocksDB uses the following heuristic
* to calculate size amplification: it assumes that all files excluding
* the earliest file contribute to the size amplification.
*
* Default: 200, which means that a 100 byte database could require up to
* 300 bytes of storage.
*
* @return the amount of additional storage needed (as a percentage) to store
* a single byte in the database
*/
public int maxSizeAmplificationPercent() {
return maxSizeAmplificationPercent(nativeHandle_);
}
/**
* If this option is set to -1 (the default value), all the output files
* will follow the compression type specified.
*
* If this option is not negative, we will try to make sure compressed
* size is just above this value. In normal cases, at least this percentage
* of data will be compressed.
*
* When we are compacting to a new file, the criterion for whether it
* needs to be compressed is as follows: assuming the list of files,
* sorted by generation time, is:
* A1...An B1...Bm C1...Ct
* where A1 is the newest and Ct is the oldest, and we are going to compact
* B1...Bm, we calculate the total size of all the files as total_size, and
* the total size of C1...Ct as total_C; the compaction output file
* will be compressed iff
* total_C / total_size &lt; this percentage
*
* Default: -1
*
* @param compressionSizePercent percentage of size for compression
*
* @return the reference to the current options.
*/
public CompactionOptionsUniversal setCompressionSizePercent(
final int compressionSizePercent) {
setCompressionSizePercent(nativeHandle_, compressionSizePercent);
return this;
}
/**
* If this option is set to -1 (the default value), all the output files
* will follow the compression type specified.
*
* If this option is not negative, we will try to make sure compressed
* size is just above this value. In normal cases, at least this percentage
* of data will be compressed.
*
* When we are compacting to a new file, the criterion for whether it
* needs to be compressed is as follows: assuming the list of files,
* sorted by generation time, is:
* A1...An B1...Bm C1...Ct
* where A1 is the newest and Ct is the oldest, and we are going to compact
* B1...Bm, we calculate the total size of all the files as total_size, and
* the total size of C1...Ct as total_C; the compaction output file
* will be compressed iff
* total_C / total_size &lt; this percentage
*
* Default: -1
*
* @return percentage of size for compression
*/
public int compressionSizePercent() {
return compressionSizePercent(nativeHandle_);
}
/**
* The algorithm used to stop picking files into a single compaction run
*
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
*
* @param compactionStopStyle The compaction algorithm
*
* @return the reference to the current options.
*/
public CompactionOptionsUniversal setStopStyle(
final CompactionStopStyle compactionStopStyle) {
setStopStyle(nativeHandle_, compactionStopStyle.getValue());
return this;
}
/**
* The algorithm used to stop picking files into a single compaction run
*
* Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize}
*
* @return The compaction algorithm
*/
public CompactionStopStyle stopStyle() {
return CompactionStopStyle.getCompactionStopStyle(stopStyle(nativeHandle_));
}
/**
* Option to optimize the universal multi-level compaction by enabling
* trivial move for non-overlapping files.
*
* Default: false
*
* @param allowTrivialMove true if trivial move is allowed
*
* @return the reference to the current options.
*/
public CompactionOptionsUniversal setAllowTrivialMove(
final boolean allowTrivialMove) {
setAllowTrivialMove(nativeHandle_, allowTrivialMove);
return this;
}
/**
* Option to optimize the universal multi-level compaction by enabling
* trivial move for non-overlapping files.
*
* Default: false
*
* @return true if trivial move is allowed
*/
public boolean allowTrivialMove() {
return allowTrivialMove(nativeHandle_);
}
private native static long newCompactionOptionsUniversal();
@Override protected final native void disposeInternal(final long handle);
private native void setSizeRatio(final long handle, final int sizeRatio);
private native int sizeRatio(final long handle);
private native void setMinMergeWidth(
final long handle, final int minMergeWidth);
private native int minMergeWidth(final long handle);
private native void setMaxMergeWidth(
final long handle, final int maxMergeWidth);
private native int maxMergeWidth(final long handle);
private native void setMaxSizeAmplificationPercent(
final long handle, final int maxSizeAmplificationPercent);
private native int maxSizeAmplificationPercent(final long handle);
private native void setCompressionSizePercent(
final long handle, final int compressionSizePercent);
private native int compressionSizePercent(final long handle);
private native void setStopStyle(
final long handle, final byte stopStyle);
private native byte stopStyle(final long handle);
private native void setAllowTrivialMove(
final long handle, final boolean allowTrivialMove);
private native boolean allowTrivialMove(final long handle);
}
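
A usage sketch for the class above (a minimal sketch: it assumes the public fluent setters that front the native methods listed at the bottom of the class, and the attachment point on the column-family options is the setter added elsewhere in this change):

import org.rocksdb.CompactionOptionsUniversal;
import org.rocksdb.CompactionStopStyle;
import org.rocksdb.RocksDB;

public class UniversalCompactionSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();  // the options object wraps a native handle
    final CompactionOptionsUniversal universalOptions =
        new CompactionOptionsUniversal()
            .setSizeRatio(1)
            .setMinMergeWidth(2)
            .setMaxSizeAmplificationPercent(50)  // cap space amplification at 50%
            .setCompressionSizePercent(-1)       // default: compress all output files
            .setStopStyle(CompactionStopStyle.CompactionStopStyleTotalSize)
            .setAllowTrivialMove(true);
    // Assumed attachment point (setter added elsewhere in this change):
    // columnFamilyOptions.setCompactionOptionsUniversal(universalOptions);
  }
}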

View File

@ -0,0 +1,73 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Compaction Priorities
*/
public enum CompactionPriority {
/**
* Slightly prioritize larger files by size compensated by #deletes
*/
ByCompensatedSize((byte)0x0),
/**
* First compact files whose data's latest update time is oldest.
* Try this if you only update some hot keys in small ranges.
*/
OldestLargestSeqFirst((byte)0x1),
/**
* First compact files whose range hasn't been compacted to the next level
* for the longest time. If your updates are random across the key space,
* write amplification is slightly better with this option.
*/
OldestSmallestSeqFirst((byte)0x2),
/**
* First compact files whose ratio between overlapping size in next level
* and its size is the smallest. In many cases this can optimize write
* amplification.
*/
MinOverlappingRatio((byte)0x3);
private final byte value;
CompactionPriority(final byte value) {
this.value = value;
}
/**
* Returns the byte value of the enumeration value
*
* @return byte representation
*/
public byte getValue() {
return value;
}
/**
* Get CompactionPriority by byte value.
*
* @param value byte representation of CompactionPriority.
*
* @return {@link org.rocksdb.CompactionPriority} instance.
* @throws java.lang.IllegalArgumentException if an invalid
* value is provided.
*/
public static CompactionPriority getCompactionPriority(final byte value) {
for (final CompactionPriority compactionPriority :
CompactionPriority.values()) {
if (compactionPriority.getValue() == value){
return compactionPriority;
}
}
throw new IllegalArgumentException(
"Illegal value provided for CompactionPriority.");
}
}
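
Because the enum carries its own byte encoding, the lookup round-trips without touching native code; a small sketch:

import org.rocksdb.CompactionPriority;

public class CompactionPrioritySketch {
  public static void main(final String[] args) {
    // getCompactionPriority is the inverse of getValue().
    final byte raw = CompactionPriority.MinOverlappingRatio.getValue();
    final CompactionPriority restored =
        CompactionPriority.getCompactionPriority(raw);
    System.out.println(restored);  // MinOverlappingRatio

    // Unknown bytes are rejected rather than mapped to a default.
    try {
      CompactionPriority.getCompactionPriority((byte) 0x42);
    } catch (final IllegalArgumentException expected) {
      System.out.println("invalid value rejected");
    }
  }
}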

View File

@ -0,0 +1,54 @@
package org.rocksdb;
/**
* Algorithm used to make a compaction request stop picking new files
* into a single compaction run
*/
public enum CompactionStopStyle {
/**
* Pick files of similar size
*/
CompactionStopStyleSimilarSize((byte)0x0),
/**
* Total size of picked files &gt; next file
*/
CompactionStopStyleTotalSize((byte)0x1);
private final byte value;
CompactionStopStyle(final byte value) {
this.value = value;
}
/**
* Returns the byte value of the enumeration value
*
* @return byte representation
*/
public byte getValue() {
return value;
}
/**
* Get CompactionStopStyle by byte value.
*
* @param value byte representation of CompactionStopStyle.
*
* @return {@link org.rocksdb.CompactionStopStyle} instance.
* @throws java.lang.IllegalArgumentException if an invalid
* value is provided.
*/
public static CompactionStopStyle getCompactionStopStyle(final byte value) {
for (final CompactionStopStyle compactionStopStyle :
CompactionStopStyle.values()) {
if (compactionStopStyle.getValue() == value){
return compactionStopStyle;
}
}
throw new IllegalArgumentException(
"Illegal value provided for CompactionStopStyle.");
}
}

View File

@ -0,0 +1,85 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Options for Compression
*/
public class CompressionOptions extends RocksObject {
public CompressionOptions() {
super(newCompressionOptions());
}
public CompressionOptions setWindowBits(final int windowBits) {
setWindowBits(nativeHandle_, windowBits);
return this;
}
public int windowBits() {
return windowBits(nativeHandle_);
}
public CompressionOptions setLevel(final int level) {
setLevel(nativeHandle_, level);
return this;
}
public int level() {
return level(nativeHandle_);
}
public CompressionOptions setStrategy(final int strategy) {
setStrategy(nativeHandle_, strategy);
return this;
}
public int strategy() {
return strategy(nativeHandle_);
}
/**
* Maximum size of dictionary used to prime the compression library. Currently
* this dictionary will be constructed by sampling the first output file in a
* subcompaction when the target level is bottommost. This dictionary will be
* loaded into the compression library before compressing/uncompressing each
* data block of subsequent files in the subcompaction. Effectively, this
* improves compression ratios when there are repetitions across data blocks.
*
* A value of 0 indicates the feature is disabled.
*
* Default: 0.
*
* @param maxDictBytes Maximum bytes to use for the dictionary
*
* @return the reference to the current options
*/
public CompressionOptions setMaxDictBytes(final int maxDictBytes) {
setMaxDictBytes(nativeHandle_, maxDictBytes);
return this;
}
/**
* Maximum size of dictionary used to prime the compression library.
*
* @return The maximum bytes to use for the dictionary
*/
public int maxDictBytes() {
return maxDictBytes(nativeHandle_);
}
private native static long newCompressionOptions();
@Override protected final native void disposeInternal(final long handle);
private native void setWindowBits(final long handle, final int windowBits);
private native int windowBits(final long handle);
private native void setLevel(final long handle, final int level);
private native int level(final long handle);
private native void setStrategy(final long handle, final int strategy);
private native int strategy(final long handle);
private native void setMaxDictBytes(final long handle, final int maxDictBytes);
private native int maxDictBytes(final long handle);
}
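
A short wiring sketch; windowBits, level and strategy are passed through to the underlying compression library (zlib-style knobs, stated here as an assumption), and attaching the object to the column-family options is the setter added elsewhere in this change:

import org.rocksdb.CompressionOptions;
import org.rocksdb.RocksDB;

public class CompressionOptionsSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();  // the options object wraps a native handle
    final CompressionOptions compressionOptions = new CompressionOptions()
        .setWindowBits(-14)           // assumption: zlib-style window bits
        .setLevel(4)                  // assumption: zlib-style compression level
        .setMaxDictBytes(16 * 1024);  // 16KB sampled dictionary per subcompaction
    // Assumed attachment point (setter added elsewhere in this change):
    // columnFamilyOptions.setCompressionOptions(compressionOptions);
  }
}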

View File

@ -15,12 +15,15 @@ package org.rocksdb;
*/
public enum CompressionType {
NO_COMPRESSION((byte) 0, null),
SNAPPY_COMPRESSION((byte) 1, "snappy"),
ZLIB_COMPRESSION((byte) 2, "z"),
BZLIB2_COMPRESSION((byte) 3, "bzip2"),
LZ4_COMPRESSION((byte) 4, "lz4"),
LZ4HC_COMPRESSION((byte) 5, "lz4hc");
NO_COMPRESSION((byte) 0x0, null),
SNAPPY_COMPRESSION((byte) 0x1, "snappy"),
ZLIB_COMPRESSION((byte) 0x2, "z"),
BZLIB2_COMPRESSION((byte) 0x3, "bzip2"),
LZ4_COMPRESSION((byte) 0x4, "lz4"),
LZ4HC_COMPRESSION((byte) 0x5, "lz4hc"),
XPRESS_COMPRESSION((byte) 0x6, "xpress"),
ZSTD_COMPRESSION((byte) 0x7, "zstd"),
DISABLE_COMPRESSION_OPTION((byte) 0x7F, null);
/**
* <p>Get the CompressionType enumeration value by
@ -49,20 +52,22 @@ public enum CompressionType {
* <p>Get the CompressionType enumeration value by
* passing the byte identifier to this method.</p>
*
* <p>If library cannot be found the enumeration
* value {@code NO_COMPRESSION} will be returned.</p>
*
* @param byteIdentifier of CompressionType.
*
* @return CompressionType instance.
*
* @throws IllegalArgumentException If CompressionType cannot be found for the
* provided byteIdentifier
*/
public static CompressionType getCompressionType(byte byteIdentifier) {
for (CompressionType compressionType : CompressionType.values()) {
for (final CompressionType compressionType : CompressionType.values()) {
if (compressionType.getValue() == byteIdentifier) {
return compressionType;
}
}
return CompressionType.NO_COMPRESSION;
throw new IllegalArgumentException(
"Illegal value provided for CompressionType.");
}
/**
@ -84,7 +89,7 @@ public enum CompressionType {
return libraryName_;
}
private CompressionType(byte value, final String libraryName) {
CompressionType(final byte value, final String libraryName) {
value_ = value;
libraryName_ = libraryName;
}
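
The change to getCompressionType above is behavioural, not cosmetic; a sketch of what callers see now:

import org.rocksdb.CompressionType;

public class CompressionTypeSketch {
  public static void main(final String[] args) {
    // Known identifiers resolve as before, including the new ones.
    final CompressionType zstd = CompressionType.getCompressionType((byte) 0x7);
    System.out.println(zstd);  // ZSTD_COMPRESSION

    // Unknown identifiers used to fall back silently to NO_COMPRESSION;
    // they now fail fast, so callers relying on the fallback must catch.
    try {
      CompressionType.getCompressionType((byte) 0x42);
    } catch (final IllegalArgumentException expected) {
      System.out.println("invalid identifier rejected");
    }
  }
}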

View File

@ -5,7 +5,8 @@
package org.rocksdb;
import java.util.Properties;
import java.nio.file.Paths;
import java.util.*;
/**
* DBOptions to control the behavior of a database. It will be used
@ -14,7 +15,8 @@ import java.util.Properties;
* If {@link #dispose()} function is not called, then it will be GC'd
* automatically and native resources will be released as part of the process.
*/
public class DBOptions extends RocksObject implements DBOptionsInterface {
public class DBOptions
extends RocksObject implements DBOptionsInterface<DBOptions> {
static {
RocksDB.loadLibrary();
}
@ -71,6 +73,12 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return dbOptions;
}
@Override
public DBOptions optimizeForSmallDb() {
optimizeForSmallDb(nativeHandle_);
return this;
}
@Override
public DBOptions setIncreaseParallelism(
final int totalThreads) {
@ -106,6 +114,18 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return createMissingColumnFamilies(nativeHandle_);
}
@Override
public DBOptions setEnv(final Env env) {
setEnv(nativeHandle_, env.nativeHandle_);
this.env_ = env;
return this;
}
@Override
public Env getEnv() {
return env_;
}
@Override
public DBOptions setErrorIfExists(
final boolean errorIfExists) {
@ -178,6 +198,19 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return maxOpenFiles(nativeHandle_);
}
@Override
public DBOptions setMaxFileOpeningThreads(final int maxFileOpeningThreads) {
assert(isOwningHandle());
setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads);
return this;
}
@Override
public int maxFileOpeningThreads() {
assert(isOwningHandle());
return maxFileOpeningThreads(nativeHandle_);
}
@Override
public DBOptions setMaxTotalWalSize(
final long maxTotalWalSize) {
@ -226,6 +259,43 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return useFsync(nativeHandle_);
}
@Override
public DBOptions setDbPaths(final Collection<DbPath> dbPaths) {
assert(isOwningHandle());
final int len = dbPaths.size();
final String paths[] = new String[len];
final long targetSizes[] = new long[len];
int i = 0;
for(final DbPath dbPath : dbPaths) {
paths[i] = dbPath.path.toString();
targetSizes[i] = dbPath.targetSize;
i++;
}
setDbPaths(nativeHandle_, paths, targetSizes);
return this;
}
@Override
public List<DbPath> dbPaths() {
final int len = (int)dbPathsLen(nativeHandle_);
if(len == 0) {
return Collections.emptyList();
} else {
final String paths[] = new String[len];
final long targetSizes[] = new long[len];
dbPaths(nativeHandle_, paths, targetSizes);
final List<DbPath> dbPaths = new ArrayList<>();
for(int i = 0; i < len; i++) {
dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i]));
}
return dbPaths;
}
}
@Override
public DBOptions setDbLogDir(
final String dbLogDir) {
@ -363,6 +433,19 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return keepLogFileNum(nativeHandle_);
}
@Override
public DBOptions setRecycleLogFileNum(final long recycleLogFileNum) {
assert(isOwningHandle());
setRecycleLogFileNum(nativeHandle_, recycleLogFileNum);
return this;
}
@Override
public long recycleLogFileNum() {
assert(isOwningHandle());
return recycleLogFileNum(nativeHandle_);
}
@Override
public DBOptions setMaxManifestFileSize(
final long maxManifestFileSize) {
@ -461,6 +544,19 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return useDirectWrites(nativeHandle_);
}
@Override
public DBOptions setAllowFAllocate(final boolean allowFAllocate) {
assert(isOwningHandle());
setAllowFAllocate(nativeHandle_, allowFAllocate);
return this;
}
@Override
public boolean allowFAllocate() {
assert(isOwningHandle());
return allowFAllocate(nativeHandle_);
}
@Override
public DBOptions setAllowMmapReads(
final boolean allowMmapReads) {
@ -530,6 +626,86 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return adviseRandomOnOpen(nativeHandle_);
}
@Override
public DBOptions setDbWriteBufferSize(final long dbWriteBufferSize) {
assert(isOwningHandle());
setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize);
return this;
}
@Override
public long dbWriteBufferSize() {
assert(isOwningHandle());
return dbWriteBufferSize(nativeHandle_);
}
@Override
public DBOptions setAccessHintOnCompactionStart(final AccessHint accessHint) {
assert(isOwningHandle());
setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue());
return this;
}
@Override
public AccessHint accessHintOnCompactionStart() {
assert(isOwningHandle());
return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_));
}
@Override
public DBOptions setNewTableReaderForCompactionInputs(
final boolean newTableReaderForCompactionInputs) {
assert(isOwningHandle());
setNewTableReaderForCompactionInputs(nativeHandle_,
newTableReaderForCompactionInputs);
return this;
}
@Override
public boolean newTableReaderForCompactionInputs() {
assert(isOwningHandle());
return newTableReaderForCompactionInputs(nativeHandle_);
}
@Override
public DBOptions setCompactionReadaheadSize(final long compactionReadaheadSize) {
assert(isOwningHandle());
setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
return this;
}
@Override
public long compactionReadaheadSize() {
assert(isOwningHandle());
return compactionReadaheadSize(nativeHandle_);
}
@Override
public DBOptions setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
assert(isOwningHandle());
setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
return this;
}
@Override
public long randomAccessMaxBufferSize() {
assert(isOwningHandle());
return randomAccessMaxBufferSize(nativeHandle_);
}
@Override
public DBOptions setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) {
assert(isOwningHandle());
setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
return this;
}
@Override
public long writableFileMaxBufferSize() {
assert(isOwningHandle());
return writableFileMaxBufferSize(nativeHandle_);
}
@Override
public DBOptions setUseAdaptiveMutex(
final boolean useAdaptiveMutex) {
@ -558,10 +734,49 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
}
@Override
public void setAllowConcurrentMemtableWrite(
public DBOptions setWalBytesPerSync(final long walBytesPerSync) {
assert(isOwningHandle());
setWalBytesPerSync(nativeHandle_, walBytesPerSync);
return this;
}
@Override
public long walBytesPerSync() {
assert(isOwningHandle());
return walBytesPerSync(nativeHandle_);
}
@Override
public DBOptions setEnableThreadTracking(final boolean enableThreadTracking) {
assert(isOwningHandle());
setEnableThreadTracking(nativeHandle_, enableThreadTracking);
return this;
}
@Override
public boolean enableThreadTracking() {
assert(isOwningHandle());
return enableThreadTracking(nativeHandle_);
}
@Override
public DBOptions setDelayedWriteRate(final long delayedWriteRate) {
assert(isOwningHandle());
setDelayedWriteRate(nativeHandle_, delayedWriteRate);
return this;
}
@Override
public long delayedWriteRate() {
return delayedWriteRate(nativeHandle_);
}
@Override
public DBOptions setAllowConcurrentMemtableWrite(
final boolean allowConcurrentMemtableWrite) {
setAllowConcurrentMemtableWrite(nativeHandle_,
allowConcurrentMemtableWrite);
return this;
}
@Override
@ -570,10 +785,11 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
}
@Override
public void setEnableWriteThreadAdaptiveYield(
public DBOptions setEnableWriteThreadAdaptiveYield(
final boolean enableWriteThreadAdaptiveYield) {
setEnableWriteThreadAdaptiveYield(nativeHandle_,
enableWriteThreadAdaptiveYield);
return this;
}
@Override
@ -582,8 +798,9 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
}
@Override
public void setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) {
public DBOptions setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) {
setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec);
return this;
}
@Override
@ -592,8 +809,9 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
}
@Override
public void setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) {
public DBOptions setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) {
setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec);
return this;
}
@Override
@ -601,17 +819,114 @@ public class DBOptions extends RocksObject implements DBOptionsInterface {
return writeThreadSlowYieldUsec(nativeHandle_);
}
@Override
public DBOptions setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) {
assert(isOwningHandle());
setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen);
return this;
}
@Override
public boolean skipStatsUpdateOnDbOpen() {
assert(isOwningHandle());
return skipStatsUpdateOnDbOpen(nativeHandle_);
}
@Override
public DBOptions setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) {
assert(isOwningHandle());
setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue());
return this;
}
@Override
public WALRecoveryMode walRecoveryMode() {
assert(isOwningHandle());
return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_));
}
@Override
public DBOptions setAllow2pc(final boolean allow2pc) {
assert(isOwningHandle());
setAllow2pc(nativeHandle_, allow2pc);
return this;
}
@Override
public boolean allow2pc() {
assert(isOwningHandle());
return allow2pc(nativeHandle_);
}
@Override
public DBOptions setRowCache(final Cache rowCache) {
assert(isOwningHandle());
setRowCache(nativeHandle_, rowCache.nativeHandle_);
this.rowCache_ = rowCache;
return this;
}
@Override
public Cache rowCache() {
assert(isOwningHandle());
return this.rowCache_;
}
@Override
public DBOptions setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
assert(isOwningHandle());
setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError);
return this;
}
@Override
public boolean failIfOptionsFileError() {
assert(isOwningHandle());
return failIfOptionsFileError(nativeHandle_);
}
@Override
public DBOptions setDumpMallocStats(final boolean dumpMallocStats) {
assert(isOwningHandle());
setDumpMallocStats(nativeHandle_, dumpMallocStats);
return this;
}
@Override
public boolean dumpMallocStats() {
assert(isOwningHandle());
return dumpMallocStats(nativeHandle_);
}
@Override
public DBOptions setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) {
assert(isOwningHandle());
setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery);
return this;
}
@Override
public boolean avoidFlushDuringRecovery() {
assert(isOwningHandle());
return avoidFlushDuringRecovery(nativeHandle_);
}
@Override
public DBOptions setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) {
assert(isOwningHandle());
setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown);
return this;
}
@Override
public boolean avoidFlushDuringShutdown() {
assert(isOwningHandle());
return avoidFlushDuringShutdown(nativeHandle_);
}
static final int DEFAULT_NUM_SHARD_BITS = -1;
public DBOptions setDelayedWriteRate(final long delayedWriteRate){
assert(isOwningHandle());
setDelayedWriteRate(nativeHandle_, delayedWriteRate);
return this;
}
public long delayedWriteRate(){
return delayedWriteRate(nativeHandle_);
}
/**
@ -630,12 +945,14 @@ public long delayedWriteRate(){
private native static long newDBOptions();
@Override protected final native void disposeInternal(final long handle);
private native void optimizeForSmallDb(final long handle);
private native void setIncreaseParallelism(long handle, int totalThreads);
private native void setCreateIfMissing(long handle, boolean flag);
private native boolean createIfMissing(long handle);
private native void setCreateMissingColumnFamilies(
long handle, boolean flag);
private native boolean createMissingColumnFamilies(long handle);
private native void setEnv(long handle, long envHandle);
private native void setErrorIfExists(long handle, boolean errorIfExists);
private native boolean errorIfExists(long handle);
private native void setParanoidChecks(
@ -649,6 +966,9 @@ public long delayedWriteRate(){
private native byte infoLogLevel(long handle);
private native void setMaxOpenFiles(long handle, int maxOpenFiles);
private native int maxOpenFiles(long handle);
private native void setMaxFileOpeningThreads(final long handle,
final int maxFileOpeningThreads);
private native int maxFileOpeningThreads(final long handle);
private native void setMaxTotalWalSize(long handle,
long maxTotalWalSize);
private native long maxTotalWalSize(long handle);
@ -656,6 +976,11 @@ public long delayedWriteRate(){
private native long statisticsPtr(long optHandle);
private native boolean useFsync(long handle);
private native void setUseFsync(long handle, boolean useFsync);
private native void setDbPaths(final long handle, final String[] paths,
final long[] targetSizes);
private native long dbPathsLen(final long handle);
private native void dbPaths(final long handle, final String[] paths,
final long[] targetSizes);
private native void setDbLogDir(long handle, String dbLogDir);
private native String dbLogDir(long handle);
private native void setWalDir(long handle, String walDir);
@ -683,6 +1008,8 @@ public long delayedWriteRate(){
private native void setKeepLogFileNum(long handle, long keepLogFileNum)
throws IllegalArgumentException;
private native long keepLogFileNum(long handle);
private native void setRecycleLogFileNum(long handle, long recycleLogFileNum);
private native long recycleLogFileNum(long handle);
private native void setMaxManifestFileSize(
long handle, long maxManifestFileSize);
private native long maxManifestFileSize(long handle);
@ -700,6 +1027,9 @@ public long delayedWriteRate(){
private native boolean useDirectReads(long handle);
private native void setUseDirectWrites(long handle, boolean useDirectWrites);
private native boolean useDirectWrites(long handle);
private native void setAllowFAllocate(final long handle,
final boolean allowFAllocate);
private native boolean allowFAllocate(final long handle);
private native void setAllowMmapReads(
long handle, boolean allowMmapReads);
private native boolean allowMmapReads(long handle);
@ -715,12 +1045,37 @@ public long delayedWriteRate(){
private native void setAdviseRandomOnOpen(
long handle, boolean adviseRandomOnOpen);
private native boolean adviseRandomOnOpen(long handle);
private native void setDbWriteBufferSize(final long handle,
final long dbWriteBufferSize);
private native long dbWriteBufferSize(final long handle);
private native void setAccessHintOnCompactionStart(final long handle,
final byte accessHintOnCompactionStart);
private native byte accessHintOnCompactionStart(final long handle);
private native void setNewTableReaderForCompactionInputs(final long handle,
final boolean newTableReaderForCompactionInputs);
private native boolean newTableReaderForCompactionInputs(final long handle);
private native void setCompactionReadaheadSize(final long handle,
final long compactionReadaheadSize);
private native long compactionReadaheadSize(final long handle);
private native void setRandomAccessMaxBufferSize(final long handle,
final long randomAccessMaxBufferSize);
private native long randomAccessMaxBufferSize(final long handle);
private native void setWritableFileMaxBufferSize(final long handle,
final long writableFileMaxBufferSize);
private native long writableFileMaxBufferSize(final long handle);
private native void setUseAdaptiveMutex(
long handle, boolean useAdaptiveMutex);
private native boolean useAdaptiveMutex(long handle);
private native void setBytesPerSync(
long handle, long bytesPerSync);
private native long bytesPerSync(long handle);
private native void setWalBytesPerSync(long handle, long walBytesPerSync);
private native long walBytesPerSync(long handle);
private native void setEnableThreadTracking(long handle,
boolean enableThreadTracking);
private native boolean enableThreadTracking(long handle);
private native void setDelayedWriteRate(long handle, long delayedWriteRate);
private native long delayedWriteRate(long handle);
private native void setAllowConcurrentMemtableWrite(long handle,
boolean allowConcurrentMemtableWrite);
private native boolean allowConcurrentMemtableWrite(long handle);
@ -733,10 +1088,33 @@ public long delayedWriteRate(){
private native void setWriteThreadSlowYieldUsec(long handle,
long writeThreadSlowYieldUsec);
private native long writeThreadSlowYieldUsec(long handle);
private native void setSkipStatsUpdateOnDbOpen(final long handle,
final boolean skipStatsUpdateOnDbOpen);
private native boolean skipStatsUpdateOnDbOpen(final long handle);
private native void setWalRecoveryMode(final long handle,
final byte walRecoveryMode);
private native byte walRecoveryMode(final long handle);
private native void setAllow2pc(final long handle,
final boolean allow2pc);
private native boolean allow2pc(final long handle);
private native void setRowCache(final long handle,
final long row_cache_handle);
private native void setFailIfOptionsFileError(final long handle,
final boolean failIfOptionsFileError);
private native boolean failIfOptionsFileError(final long handle);
private native void setDumpMallocStats(final long handle,
final boolean dumpMallocStats);
private native boolean dumpMallocStats(final long handle);
private native void setAvoidFlushDuringRecovery(final long handle,
final boolean avoidFlushDuringRecovery);
private native boolean avoidFlushDuringRecovery(final long handle);
private native void setAvoidFlushDuringShutdown(final long handle,
final boolean avoidFlushDuringShutdown);
private native boolean avoidFlushDuringShutdown(final long handle);
private native void setDelayedWriteRate(long handle, long delayedWriteRate);
private native long delayedWriteRate(long handle);
int numShardBits_;
RateLimiter rateLimiter_;
// instance variables
private Env env_;
private int numShardBits_;
private RateLimiter rateLimiter_;
private Cache rowCache_;
}
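
A sketch exercising several of the setters added above (paths are placeholders, and the WALRecoveryMode constant name is assumed from the matching C++ enum):

import java.nio.file.Paths;
import java.util.Arrays;
import org.rocksdb.DBOptions;
import org.rocksdb.DbPath;
import org.rocksdb.RocksDB;
import org.rocksdb.WALRecoveryMode;

public class DBOptionsSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    final DBOptions dbOptions = new DBOptions()
        .setCreateIfMissing(true)
        // Spread SST files over two paths, filling the SSD first.
        .setDbPaths(Arrays.asList(
            new DbPath(Paths.get("/mnt/fast_ssd"), 100L << 30),
            new DbPath(Paths.get("/mnt/big_hdd"), 500L << 30)))
        .setRecycleLogFileNum(4)
        .setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery)  // assumed name
        .setAvoidFlushDuringShutdown(true);
    System.out.println(dbOptions.dbPaths().size());  // 2, via the native side
  }
}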

File diff suppressed because it is too large

View File

@ -0,0 +1,47 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import java.nio.file.Path;
/**
* Tuple of database path and target size
*/
public class DbPath {
final Path path;
final long targetSize;
public DbPath(final Path path, final long targetSize) {
this.path = path;
this.targetSize = targetSize;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final DbPath dbPath = (DbPath) o;
if (targetSize != dbPath.targetSize) {
return false;
}
return path != null ? path.equals(dbPath.path) : dbPath.path == null;
}
@Override
public int hashCode() {
int result = path != null ? path.hashCode() : 0;
result = 31 * result + (int) (targetSize ^ (targetSize >>> 32));
return result;
}
}
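
DbPath is a plain value type: equals and hashCode are content-based, so instances behave sensibly in collections. A tiny sketch:

import java.nio.file.Paths;
import org.rocksdb.DbPath;

public class DbPathSketch {
  public static void main(final String[] args) {
    final DbPath a = new DbPath(Paths.get("/mnt/fast_ssd"), 1L << 30);
    final DbPath b = new DbPath(Paths.get("/mnt/fast_ssd"), 1L << 30);
    System.out.println(a.equals(b) && a.hashCode() == b.hashCode());  // true
  }
}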

View File

@ -14,7 +14,7 @@ public enum InfoLogLevel {
private final byte value_;
private InfoLogLevel(byte value) {
private InfoLogLevel(final byte value) {
value_ = value;
}
@ -36,8 +36,8 @@ public enum InfoLogLevel {
* @throws java.lang.IllegalArgumentException if an invalid
* value is provided.
*/
public static InfoLogLevel getInfoLogLevel(byte value) {
for (InfoLogLevel infoLogLevel : InfoLogLevel.values()) {
public static InfoLogLevel getInfoLogLevel(final byte value) {
for (final InfoLogLevel infoLogLevel : InfoLogLevel.values()) {
if (infoLogLevel.getValue() == value){
return infoLogLevel;
}

View File

@ -0,0 +1,82 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Least Recently Used Cache
*/
public class LRUCache extends Cache {
/**
* Create a new cache with a fixed size capacity
*
* @param capacity The fixed size capacity of the cache
*/
public LRUCache(final long capacity) {
this(capacity, -1, false, 0.0);
}
/**
* Create a new cache with a fixed size capacity. The cache is sharded
* to 2^numShardBits shards, by hash of the key. The total capacity
* is divided and evenly assigned to each shard.
* numShardBits = -1 means it is automatically determined: every shard
* will be at least 512KB and number of shard bits will not exceed 6.
*
* @param capacity The fixed size capacity of the cache
* @param numShardBits The cache is sharded to 2^numShardBits shards,
* by hash of the key
*/
public LRUCache(final long capacity, final int numShardBits) {
super(newLRUCache(capacity, numShardBits, false, 0.0));
}
/**
* Create a new cache with a fixed size capacity. The cache is sharded
* to 2^numShardBits shards, by hash of the key. The total capacity
* is divided and evenly assigned to each shard. If strictCapacityLimit
* is set, inserts to the cache will fail when the cache is full.
* numShardBits = -1 means it is automatically determined: every shard
* will be at least 512KB and number of shard bits will not exceed 6.
*
* @param capacity The fixed size capacity of the cache
* @param numShardBits The cache is sharded to 2^numShardBits shards,
* by hash of the key
* @param strictCapacityLimit inserts to the cache will fail when the cache is full
*/
public LRUCache(final long capacity, final int numShardBits,
final boolean strictCapacityLimit) {
super(newLRUCache(capacity, numShardBits, strictCapacityLimit, 0.0));
}
/**
* Create a new cache with a fixed size capacity. The cache is sharded
* to 2^numShardBits shards, by hash of the key. The total capacity
* is divided and evenly assigned to each shard. If strictCapacityLimit
* is set, inserts to the cache will fail when the cache is full. Users can
* also reserve a percentage of the cache for high-priority entries via
* highPriPoolRatio.
* numShardBits = -1 means it is automatically determined: every shard
* will be at least 512KB and number of shard bits will not exceed 6.
*
* @param capacity The fixed size capacity of the cache
* @param numShardBits The cache is sharded to 2^numShardBits shards,
* by hash of the key
* @param strictCapacityLimit inserts to the cache will fail when the cache is full
* @param highPriPoolRatio percentage of the cache reserved for high-priority
* entries
*/
public LRUCache(final long capacity, final int numShardBits,
final boolean strictCapacityLimit, final double highPriPoolRatio) {
super(newLRUCache(capacity, numShardBits, strictCapacityLimit,
highPriPoolRatio));
}
private native static long newLRUCache(final long capacity,
final int numShardBits, final boolean strictCapacityLimit,
final double highPriPoolRatio);
@Override protected final native void disposeInternal(final long handle);
}
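
A sketch of the most specific constructor, handing the cache to the DB as the row cache added in DBOptions above:

import org.rocksdb.DBOptions;
import org.rocksdb.LRUCache;
import org.rocksdb.RocksDB;

public class LRUCacheSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    // 64MB capacity, 2^6 = 64 shards, strict capacity limit, and 10% of
    // the capacity reserved for high-priority entries.
    final LRUCache cache = new LRUCache(64L << 20, 6, true, 0.1);
    // The options object keeps a Java reference so the cache is not GC'd.
    final DBOptions dbOptions = new DBOptions().setRowCache(cache);
    System.out.println(dbOptions.rowCache() == cache);  // true
  }
}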

View File

@ -112,7 +112,8 @@ public class MutableColumnFamilyOptions {
LONG,
INT,
BOOLEAN,
INT_ARRAY
INT_ARRAY,
ENUM
}
public enum MemtableOption implements MutableColumnFamilyOptionKey {
@ -167,7 +168,9 @@ public class MutableColumnFamilyOptions {
public enum MiscOption implements MutableColumnFamilyOptionKey {
max_sequential_skip_in_iterations(ValueType.LONG),
paranoid_file_checks(ValueType.BOOLEAN);
paranoid_file_checks(ValueType.BOOLEAN),
report_bg_io_stats(ValueType.BOOLEAN),
compression_type(ValueType.ENUM);
private final ValueType valueType;
MiscOption(final ValueType valueType) {
@ -198,6 +201,7 @@ public class MutableColumnFamilyOptions {
abstract boolean asBoolean() throws IllegalStateException;
abstract int[] asIntArray() throws IllegalStateException;
abstract String asString();
abstract T asObject();
}
private static class MutableColumnFamilyOptionStringValue
@ -235,6 +239,11 @@ public class MutableColumnFamilyOptions {
String asString() {
return value;
}
@Override
String asObject() {
return value;
}
}
private static class MutableColumnFamilyOptionDoubleValue
@ -281,6 +290,11 @@ public class MutableColumnFamilyOptions {
String asString() {
return Double.toString(value);
}
@Override
Double asObject() {
return value;
}
}
private static class MutableColumnFamilyOptionLongValue
@ -331,6 +345,11 @@ public class MutableColumnFamilyOptions {
String asString() {
return Long.toString(value);
}
@Override
Long asObject() {
return value;
}
}
private static class MutableColumnFamilyOptionIntValue
@ -371,6 +390,11 @@ public class MutableColumnFamilyOptions {
String asString() {
return Integer.toString(value);
}
@Override
Integer asObject() {
return value;
}
}
private static class MutableColumnFamilyOptionBooleanValue
@ -408,6 +432,11 @@ public class MutableColumnFamilyOptions {
String asString() {
return Boolean.toString(value);
}
@Override
Boolean asObject() {
return value;
}
}
private static class MutableColumnFamilyOptionIntArrayValue
@ -452,6 +481,54 @@ public class MutableColumnFamilyOptions {
}
return builder.toString();
}
@Override
int[] asObject() {
return value;
}
}
private static class MutableColumnFamilyOptionEnumValue<T extends Enum<T>>
extends MutableColumnFamilyOptionValue<T> {
MutableColumnFamilyOptionEnumValue(final T value) {
super(value);
}
@Override
double asDouble() throws NumberFormatException {
throw new NumberFormatException("Enum is not applicable as double");
}
@Override
long asLong() throws NumberFormatException {
throw new NumberFormatException("Enum is not applicable as long");
}
@Override
int asInt() throws NumberFormatException {
throw new NumberFormatException("Enum is not applicable as int");
}
@Override
boolean asBoolean() throws IllegalStateException {
throw new IllegalStateException("Enum is not applicable as boolean");
}
@Override
int[] asIntArray() throws IllegalStateException {
throw new IllegalStateException("Enum is not applicable as int[]");
}
@Override
String asString() {
return value.name();
}
@Override
T asObject() {
return value;
}
}
public static class MutableColumnFamilyOptionsBuilder
@ -583,6 +660,31 @@ public class MutableColumnFamilyOptions {
return value.asIntArray();
}
private <T extends Enum<T>> MutableColumnFamilyOptionsBuilder setEnum(
final MutableColumnFamilyOptionKey key, final T value) {
if(key.getValueType() != ValueType.ENUM) {
throw new IllegalArgumentException(
key + " does not accept a Enum value");
}
options.put(key, new MutableColumnFamilyOptionEnumValue<T>(value));
return this;
}
private <T extends Enum<T>> T getEnum(final MutableColumnFamilyOptionKey key)
throws NoSuchElementException, NumberFormatException {
final MutableColumnFamilyOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
if(!(value instanceof MutableColumnFamilyOptionEnumValue)) {
throw new NoSuchElementException(key.name() + " is not of Enum type");
}
return ((MutableColumnFamilyOptionEnumValue<T>)value).asObject();
}
public MutableColumnFamilyOptionsBuilder fromString(final String keyStr,
final String valueStr) throws IllegalArgumentException {
Objects.requireNonNull(keyStr);
@ -715,17 +817,6 @@ public class MutableColumnFamilyOptions {
return getBoolean(CompactionOption.disable_auto_compactions);
}
@Override
public MutableColumnFamilyOptionsBuilder setSoftRateLimit(
final double softRateLimit) {
return setDouble(CompactionOption.soft_rate_limit, softRateLimit);
}
@Override
public double softRateLimit() {
return getDouble(CompactionOption.soft_rate_limit);
}
@Override
public MutableColumnFamilyOptionsBuilder setSoftPendingCompactionBytesLimit(
final long softPendingCompactionBytesLimit) {
@ -738,17 +829,6 @@ public class MutableColumnFamilyOptions {
return getLong(CompactionOption.soft_pending_compaction_bytes_limit);
}
@Override
public MutableColumnFamilyOptionsBuilder setHardRateLimit(
final double hardRateLimit) {
return setDouble(CompactionOption.hard_rate_limit, hardRateLimit);
}
@Override
public double hardRateLimit() {
return getDouble(CompactionOption.hard_rate_limit);
}
@Override
public MutableColumnFamilyOptionsBuilder setHardPendingCompactionBytesLimit(
final long hardPendingCompactionBytesLimit) {
@ -891,5 +971,27 @@ public class MutableColumnFamilyOptions {
public boolean paranoidFileChecks() {
return getBoolean(MiscOption.paranoid_file_checks);
}
@Override
public MutableColumnFamilyOptionsBuilder setCompressionType(
final CompressionType compressionType) {
return setEnum(MiscOption.compression_type, compressionType);
}
@Override
public CompressionType compressionType() {
return (CompressionType)getEnum(MiscOption.compression_type);
}
@Override
public MutableColumnFamilyOptionsBuilder setReportBgIoStats(
final boolean reportBgIoStats) {
return setBoolean(MiscOption.report_bg_io_stats, reportBgIoStats);
}
@Override
public boolean reportBgIoStats() {
return getBoolean(MiscOption.report_bg_io_stats);
}
}
}
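
A sketch of the new ENUM-typed and BOOLEAN options through the builder (builder() is assumed to be the class's existing static factory; applying the result to a live column family via RocksDB#setOptions is sketched as a comment only):

import org.rocksdb.CompressionType;
import org.rocksdb.MutableColumnFamilyOptions;

public class MutableOptionsSketch {
  public static void main(final String[] args) {
    final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder =
        MutableColumnFamilyOptions.builder()
            .setCompressionType(CompressionType.LZ4_COMPRESSION)  // ENUM option
            .setReportBgIoStats(true);                            // BOOLEAN option
    // The enum round-trips through the typed accessor:
    System.out.println(builder.compressionType());  // LZ4_COMPRESSION
    // Assumed application to an open column-family handle:
    // db.setOptions(columnFamilyHandle, builder.build());
  }
}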

View File

@ -5,7 +5,9 @@
package org.rocksdb;
public interface MutableColumnFamilyOptionsInterface {
public interface MutableColumnFamilyOptionsInterface
<T extends MutableColumnFamilyOptionsInterface>
extends AdvancedMutableColumnFamilyOptionsInterface<T> {
/**
* Amount of data to build up in memory (backed by an unsorted log
@ -21,7 +23,7 @@ public interface MutableColumnFamilyOptionsInterface {
*
* Default: 4MB
* @param writeBufferSize the size of write buffer.
* @return the instance of the current Object.
* @return the instance of the current object.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
@ -35,171 +37,6 @@ public interface MutableColumnFamilyOptionsInterface {
*/
long writeBufferSize();
/**
* The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
*
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
*
* @param arenaBlockSize the size of an arena block
* @return the reference to the current option.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
MutableColumnFamilyOptionsInterface setArenaBlockSize(long arenaBlockSize);
/**
* The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
*
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
*
* @return the size of an arena block
*/
long arenaBlockSize();
/**
* if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* create prefix bloom for memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio.
* If it is larger than 0.25, it is sanitized to 0.25.
*
* Default: 0 (disable)
*
* @param memtablePrefixBloomSizeRatio The ratio
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setMemtablePrefixBloomSizeRatio(
double memtablePrefixBloomSizeRatio);
/**
* if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* create prefix bloom for memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio.
* If it is larger than 0.25, it is sanitized to 0.25.
*
* Default: 0 (disable)
*
* @return the ratio
*/
double memtablePrefixBloomSizeRatio();
/**
* Page size for huge page TLB for bloom in memtable. If &le; 0, do not
* allocate from huge page TLB but from malloc.
* Need to reserve huge pages for it to be allocated. For example:
* sysctl -w vm.nr_hugepages=20
* See linux doc Documentation/vm/hugetlbpage.txt
*
* @param memtableHugePageSize The page size of the huge
* page tlb
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setMemtableHugePageSize(
long memtableHugePageSize);
/**
* Page size for huge page TLB for bloom in memtable. If &le; 0, do not
* allocate from huge page TLB but from malloc.
* Need to reserve huge pages for it to be allocated. For example:
* sysctl -w vm.nr_hugepages=20
* See linux doc Documentation/vm/hugetlbpage.txt
*
* @return The page size of the huge page tlb
*/
long memtableHugePageSize();
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @param maxSuccessiveMerges the maximum number of successive merges.
* @return the reference to the current option.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
MutableColumnFamilyOptionsInterface setMaxSuccessiveMerges(
long maxSuccessiveMerges);
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @return the maximum number of successive merges.
*/
long maxSuccessiveMerges();
/**
* The maximum number of write buffers that are built up in memory.
* The default is 2, so that when 1 write buffer is being flushed to
* storage, new writes can continue to the other write buffer.
* Default: 2
*
* @param maxWriteBufferNumber maximum number of write buffers.
* @return the instance of the current Object.
*/
MutableColumnFamilyOptionsInterface setMaxWriteBufferNumber(
int maxWriteBufferNumber);
/**
* Returns maximum number of write buffers.
*
* @return maximum number of write buffers.
* @see #setMaxWriteBufferNumber(int)
*/
int maxWriteBufferNumber();
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @param inplaceUpdateNumLocks the number of locks used for
* inplace updates.
* @return the reference to the current option.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
MutableColumnFamilyOptionsInterface setInplaceUpdateNumLocks(
long inplaceUpdateNumLocks);
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @return the number of locks used for inplace update.
*/
long inplaceUpdateNumLocks();
/**
* Disable automatic compactions. Manual compactions can still
* be issued on this column family
@ -218,108 +55,6 @@ public interface MutableColumnFamilyOptionsInterface {
*/
boolean disableAutoCompactions();
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does
* not hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @param softRateLimit the soft-rate-limit of a compaction score
* for put delay.
* @return the reference to the current option.
*
* @deprecated Instead use {@link #setSoftPendingCompactionBytesLimit(long)}
*/
@Deprecated
MutableColumnFamilyOptionsInterface setSoftRateLimit(double softRateLimit);
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does
* not hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @return soft-rate-limit for put delay.
*
* @deprecated Instead use {@link #softPendingCompactionBytesLimit()}
*/
@Deprecated
double softRateLimit();
/**
* All writes will be slowed down to at least delayed_write_rate if the
* estimated bytes needed to be compacted exceed this threshold.
*
* Default: 64GB
*
* @param softPendingCompactionBytesLimit The soft limit to impose on
* compaction
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setSoftPendingCompactionBytesLimit(
long softPendingCompactionBytesLimit);
/**
* All writes will be slowed down to at least delayed_write_rate if the
* estimated bytes needed to be compacted exceed this threshold.
*
* Default: 64GB
*
* @return The soft limit to impose on compaction
*/
long softPendingCompactionBytesLimit();
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @param hardRateLimit the hard-rate-limit of a compaction score for put
* delay.
* @return the reference to the current option.
*
* @deprecated Instead use {@link #setHardPendingCompactionBytesLimit(long)}
*/
@Deprecated
MutableColumnFamilyOptionsInterface setHardRateLimit(double hardRateLimit);
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @return the hard-rate-limit of a compaction score for put delay.
*
* @deprecated Instead use {@link #hardPendingCompactionBytesLimit()}
*/
@Deprecated
double hardRateLimit();
/**
* All writes are stopped if the estimated bytes needed to be compacted
* exceed this threshold.
*
* Default: 256GB
*
* @param hardPendingCompactionBytesLimit The hard limit to impose on
* compaction
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setHardPendingCompactionBytesLimit(
long hardPendingCompactionBytesLimit);
/**
* All writes are stopped if the estimated bytes needed to be compacted
* exceed this threshold.
*
* Default: 256GB
*
* @return The hard limit to impose on compaction
*/
long hardPendingCompactionBytesLimit();
/**
* Number of files to trigger level-0 compaction. A value &lt; 0 means that
* level-0 compaction will not be triggered by number of files at all.
@ -343,44 +78,6 @@ public interface MutableColumnFamilyOptionsInterface {
*/
int level0FileNumCompactionTrigger();
/**
* Soft limit on number of level-0 files. We start slowing down writes at this
* point. A value &lt; 0 means that no writing slow down will be triggered by
* number of files in level-0.
*
* @param level0SlowdownWritesTrigger The soft limit on the number of
* level-0 files
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setLevel0SlowdownWritesTrigger(
int level0SlowdownWritesTrigger);
/**
* Soft limit on number of level-0 files. We start slowing down writes at this
* point. A value &lt; 0 means that no writing slow down will be triggered by
* number of files in level-0.
*
* @return The soft limit on the number of
* level-0 files
*/
int level0SlowdownWritesTrigger();
/**
* Maximum number of level-0 files. We stop writes at this point.
*
* @param level0StopWritesTrigger The maximum number of level-0 files
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setLevel0StopWritesTrigger(
int level0StopWritesTrigger);
/**
* Maximum number of level-0 files. We stop writes at this point.
*
* @return The maximum number of level-0 files
*/
int level0StopWritesTrigger();
/**
* We try to limit number of bytes in one compaction to be lower than this
* threshold. But it's not guaranteed.
@ -402,65 +99,6 @@ public interface MutableColumnFamilyOptionsInterface {
*/
long maxCompactionBytes();
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* target_file_size_multiplier is 10, then each file on level-1 will
* be 2MB, and each file on level 2 will be 20MB,
* and each file on level-3 will be 200MB.
* by default targetFileSizeBase is 2MB.
*
* @param targetFileSizeBase the target size of a level-0 file.
* @return the reference to the current option.
*
* @see #setTargetFileSizeMultiplier(int)
*/
MutableColumnFamilyOptionsInterface setTargetFileSizeBase(
long targetFileSizeBase);
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* target_file_size_multiplier is 10, then each file on level-1 will
* be 2MB, and each file on level 2 will be 20MB,
* and each file on level-3 will be 200MB.
* by default targetFileSizeBase is 2MB.
*
* @return the target size of a level-0 file.
*
* @see #targetFileSizeMultiplier()
*/
long targetFileSizeBase();
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-L file and level-(L+1) file.
* By default target_file_size_multiplier is 1, meaning
* files in different levels have the same target.
*
* @param multiplier the size ratio between a level-(L+1) file
* and level-L file.
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setTargetFileSizeMultiplier(
int multiplier);
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-(L+1) file and level-L file.
* By default targetFileSizeMultiplier is 1, meaning
* files in different levels have the same target.
*
* @return the size ratio between a level-(L+1) file and level-L file.
*/
int targetFileSizeMultiplier();
/**
* The upper-bound of the total size of level-1 files in bytes.
* Maximum number of bytes for level L can be calculated as
@ -474,9 +112,10 @@ public interface MutableColumnFamilyOptionsInterface {
* @param maxBytesForLevelBase maximum bytes for level base.
*
* @return the reference to the current option.
* @see #setMaxBytesForLevelMultiplier(double)
*
* See {@link AdvancedMutableColumnFamilyOptionsInterface#setMaxBytesForLevelMultiplier(double)}
*/
MutableColumnFamilyOptionsInterface setMaxBytesForLevelBase(
T setMaxBytesForLevelBase(
long maxBytesForLevelBase);
/**
@ -491,101 +130,30 @@ public interface MutableColumnFamilyOptionsInterface {
*
* @return the upper-bound of the total size of level-1 files
* in bytes.
* @see #maxBytesForLevelMultiplier()
*
* See {@link AdvancedMutableColumnFamilyOptionsInterface#maxBytesForLevelMultiplier()}
*/
long maxBytesForLevelBase();
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
* Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically.
*
* @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L.
* @return the reference to the current option.
* @see #setMaxBytesForLevelBase(long)
*/
MutableColumnFamilyOptionsInterface setMaxBytesForLevelMultiplier(double multiplier);
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
*
* @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L.
* @see #maxBytesForLevelBase()
*/
double maxBytesForLevelMultiplier();
/**
* Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level.
*
* Default: 1
*
* @param maxBytesForLevelMultiplierAdditional The max-size multipliers
* for each level
* @param compressionType Compression Type.
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setMaxBytesForLevelMultiplierAdditional(
int[] maxBytesForLevelMultiplierAdditional);
T setCompressionType(
CompressionType compressionType);
/**
* Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level.
* Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically.
*
* Default: 1
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
*
* @return The max-size multipliers for each level
* @return Compression type.
*/
int[] maxBytesForLevelMultiplierAdditional();
/**
* An iterator's Next() call sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @param maxSequentialSkipInIterations the number of keys that could
* be skipped in an iteration.
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setMaxSequentialSkipInIterations(
long maxSequentialSkipInIterations);
/**
* An iterator's Next() call sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @return the number of keys that could be skipped in an iteration.
*/
long maxSequentialSkipInIterations();
/**
* After writing every SST file, reopen it and read all the keys.
*
* Default: false
*
* @param paranoidFileChecks true to enable paranoid file checks
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setParanoidFileChecks(
boolean paranoidFileChecks);
/**
* After writing every SST file, reopen it and read all the keys.
*
* Default: false
*
* @return true if paranoid file checks are enabled
*/
boolean paranoidFileChecks();
CompressionType compressionType();
}
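
The point of the new self-type parameter is that fluent chains keep their concrete type. With Options implementing MutableColumnFamilyOptionsInterface&lt;Options&gt;, DB-level and column-family-level setters chain on a single object; a sketch:

import org.rocksdb.CompressionType;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class SelfTypeSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    // Each setter returns Options rather than the declaring interface,
    // so methods from all three implemented interfaces mix in one chain.
    final Options options = new Options()
        .setCreateIfMissing(true)                  // DBOptionsInterface
        .setWriteBufferSize(32L << 20)             // this interface
        .setCompressionType(CompressionType.SNAPPY_COMPRESSION);
    System.out.println(options.writeBufferSize());  // 33554432
  }
}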

View File

@ -5,7 +5,10 @@
package org.rocksdb;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
/**
@ -16,11 +19,12 @@ import java.util.List;
* automatically and native resources will be released as part of the process.
*/
public class Options extends RocksObject
implements DBOptionsInterface, ColumnFamilyOptionsInterface,
MutableColumnFamilyOptionsInterface {
implements DBOptionsInterface<Options>, ColumnFamilyOptionsInterface<Options>,
MutableColumnFamilyOptionsInterface<Options> {
static {
RocksDB.loadLibrary();
}
/**
* Construct options for opening a RocksDB.
*
@ -68,14 +72,7 @@ public class Options extends RocksObject
return this;
}
/**
* Use the specified object to interact with the environment,
* e.g. to read/write files, schedule background work, etc.
* Default: {@link Env#getDefault()}
*
* @param env {@link Env} instance.
* @return the instance of the current Options.
*/
@Override
public Options setEnv(final Env env) {
assert(isOwningHandle());
setEnv(nativeHandle_, env.nativeHandle_);
@ -83,11 +80,7 @@ public class Options extends RocksObject
return this;
}
/**
* Returns the set RocksEnv instance.
*
* @return {@link RocksEnv} instance set in the Options.
*/
@Override
public Env getEnv() {
return env_;
}
@ -121,6 +114,12 @@ public class Options extends RocksObject
return createMissingColumnFamilies(nativeHandle_);
}
@Override
public Options optimizeForSmallDb() {
optimizeForSmallDb(nativeHandle_);
return this;
}
@Override
public Options optimizeForPointLookup(
long blockCacheSizeMb) {
@ -250,6 +249,19 @@ public class Options extends RocksObject
return maxOpenFiles(nativeHandle_);
}
@Override
public Options setMaxFileOpeningThreads(final int maxFileOpeningThreads) {
assert(isOwningHandle());
setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads);
return this;
}
@Override
public int maxFileOpeningThreads() {
assert(isOwningHandle());
return maxFileOpeningThreads(nativeHandle_);
}
@Override
public Options setMaxTotalWalSize(final long maxTotalWalSize) {
assert(isOwningHandle());
@ -283,6 +295,43 @@ public class Options extends RocksObject
return this;
}
@Override
public Options setDbPaths(final Collection<DbPath> dbPaths) {
assert(isOwningHandle());
final int len = dbPaths.size();
final String paths[] = new String[len];
final long targetSizes[] = new long[len];
int i = 0;
for(final DbPath dbPath : dbPaths) {
paths[i] = dbPath.path.toString();
targetSizes[i] = dbPath.targetSize;
i++;
}
setDbPaths(nativeHandle_, paths, targetSizes);
return this;
}
@Override
public List<DbPath> dbPaths() {
final int len = (int)dbPathsLen(nativeHandle_);
if(len == 0) {
return Collections.emptyList();
} else {
final String paths[] = new String[len];
final long targetSizes[] = new long[len];
dbPaths(nativeHandle_, paths, targetSizes);
final List<DbPath> dbPaths = new ArrayList<>();
for(int i = 0; i < len; i++) {
dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i]));
}
return dbPaths;
}
}
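// Usage sketch (illustrative only, not part of this class): spreading the DB
// over a fast and a large volume; the paths and target sizes are hypothetical.
//
//   options.setDbPaths(Arrays.asList(
//       new DbPath(Paths.get("/ssd/db"), 64L * 1024 * 1024 * 1024),
//       new DbPath(Paths.get("/hdd/db"), 512L * 1024 * 1024 * 1024)));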
@Override
public String dbLogDir() {
assert(isOwningHandle());
@ -435,6 +484,20 @@ public class Options extends RocksObject
return this;
}
@Override
public Options setRecycleLogFileNum(final long recycleLogFileNum) {
assert(isOwningHandle());
setRecycleLogFileNum(nativeHandle_, recycleLogFileNum);
return this;
}
@Override
public long recycleLogFileNum() {
assert(isOwningHandle());
return recycleLogFileNum(nativeHandle_);
}
@Override
public long maxManifestFileSize() {
assert(isOwningHandle());
@ -542,6 +605,18 @@ public class Options extends RocksObject
return useDirectWrites(nativeHandle_);
}
@Override
public Options setAllowFAllocate(final boolean allowFAllocate) {
assert(isOwningHandle());
setAllowFAllocate(nativeHandle_, allowFAllocate);
return this;
}
@Override
public boolean allowFAllocate() {
assert(isOwningHandle());
return allowFAllocate(nativeHandle_);
}
@Override
public boolean allowMmapReads() {
@ -607,6 +682,86 @@ public class Options extends RocksObject
return this;
}
@Override
public Options setDbWriteBufferSize(final long dbWriteBufferSize) {
assert(isOwningHandle());
setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize);
return this;
}
@Override
public long dbWriteBufferSize() {
assert(isOwningHandle());
return dbWriteBufferSize(nativeHandle_);
}
@Override
public Options setAccessHintOnCompactionStart(final AccessHint accessHint) {
assert(isOwningHandle());
setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue());
return this;
}
@Override
public AccessHint accessHintOnCompactionStart() {
assert(isOwningHandle());
return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_));
}
@Override
public Options setNewTableReaderForCompactionInputs(
final boolean newTableReaderForCompactionInputs) {
assert(isOwningHandle());
setNewTableReaderForCompactionInputs(nativeHandle_,
newTableReaderForCompactionInputs);
return this;
}
@Override
public boolean newTableReaderForCompactionInputs() {
assert(isOwningHandle());
return newTableReaderForCompactionInputs(nativeHandle_);
}
@Override
public Options setCompactionReadaheadSize(final long compactionReadaheadSize) {
assert(isOwningHandle());
setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
return this;
}
@Override
public long compactionReadaheadSize() {
assert(isOwningHandle());
return compactionReadaheadSize(nativeHandle_);
}
@Override
public Options setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
assert(isOwningHandle());
setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
return this;
}
@Override
public long randomAccessMaxBufferSize() {
assert(isOwningHandle());
return randomAccessMaxBufferSize(nativeHandle_);
}
@Override
public Options setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) {
assert(isOwningHandle());
setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
return this;
}
@Override
public long writableFileMaxBufferSize() {
assert(isOwningHandle());
return writableFileMaxBufferSize(nativeHandle_);
}
@Override
public boolean useAdaptiveMutex() {
assert(isOwningHandle());
@ -633,10 +788,49 @@ public class Options extends RocksObject
}
@Override
public void setAllowConcurrentMemtableWrite(
public Options setWalBytesPerSync(final long walBytesPerSync) {
assert(isOwningHandle());
setWalBytesPerSync(nativeHandle_, walBytesPerSync);
return this;
}
@Override
public long walBytesPerSync() {
assert(isOwningHandle());
return walBytesPerSync(nativeHandle_);
}
@Override
public Options setEnableThreadTracking(final boolean enableThreadTracking) {
assert(isOwningHandle());
setEnableThreadTracking(nativeHandle_, enableThreadTracking);
return this;
}
@Override
public boolean enableThreadTracking() {
assert(isOwningHandle());
return enableThreadTracking(nativeHandle_);
}
@Override
public Options setDelayedWriteRate(final long delayedWriteRate) {
assert(isOwningHandle());
setDelayedWriteRate(nativeHandle_, delayedWriteRate);
return this;
}
@Override
public long delayedWriteRate() {
return delayedWriteRate(nativeHandle_);
}
@Override
public Options setAllowConcurrentMemtableWrite(
final boolean allowConcurrentMemtableWrite) {
setAllowConcurrentMemtableWrite(nativeHandle_,
allowConcurrentMemtableWrite);
return this;
}
@Override
@ -645,10 +839,11 @@ public class Options extends RocksObject
}
@Override
public void setEnableWriteThreadAdaptiveYield(
public Options setEnableWriteThreadAdaptiveYield(
final boolean enableWriteThreadAdaptiveYield) {
setEnableWriteThreadAdaptiveYield(nativeHandle_,
enableWriteThreadAdaptiveYield);
return this;
}
@Override
@ -657,8 +852,9 @@ public class Options extends RocksObject
}
@Override
public void setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) {
public Options setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) {
setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec);
return this;
}
@Override
@ -667,8 +863,9 @@ public class Options extends RocksObject
}
@Override
public void setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) {
public Options setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) {
setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec);
return this;
}
@Override
@ -676,6 +873,116 @@ public class Options extends RocksObject
return writeThreadSlowYieldUsec(nativeHandle_);
}
@Override
public Options setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) {
assert(isOwningHandle());
setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen);
return this;
}
@Override
public boolean skipStatsUpdateOnDbOpen() {
assert(isOwningHandle());
return skipStatsUpdateOnDbOpen(nativeHandle_);
}
@Override
public Options setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) {
assert(isOwningHandle());
setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue());
return this;
}
@Override
public WALRecoveryMode walRecoveryMode() {
assert(isOwningHandle());
return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_));
}
@Override
public Options setAllow2pc(final boolean allow2pc) {
assert(isOwningHandle());
setAllow2pc(nativeHandle_, allow2pc);
return this;
}
@Override
public boolean allow2pc() {
assert(isOwningHandle());
return allow2pc(nativeHandle_);
}
@Override
public Options setRowCache(final Cache rowCache) {
assert(isOwningHandle());
setRowCache(nativeHandle_, rowCache.nativeHandle_);
this.rowCache_ = rowCache;
return this;
}
@Override
public Cache rowCache() {
assert(isOwningHandle());
return this.rowCache_;
}
@Override
public Options setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
assert(isOwningHandle());
setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError);
return this;
}
@Override
public boolean failIfOptionsFileError() {
assert(isOwningHandle());
return failIfOptionsFileError(nativeHandle_);
}
@Override
public Options setDumpMallocStats(final boolean dumpMallocStats) {
assert(isOwningHandle());
setDumpMallocStats(nativeHandle_, dumpMallocStats);
return this;
}
@Override
public boolean dumpMallocStats() {
assert(isOwningHandle());
return dumpMallocStats(nativeHandle_);
}
@Override
public Options setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) {
assert(isOwningHandle());
setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery);
return this;
}
@Override
public boolean avoidFlushDuringRecovery() {
assert(isOwningHandle());
return avoidFlushDuringRecovery(nativeHandle_);
}
@Override
public Options setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) {
assert(isOwningHandle());
setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown);
return this;
}
@Override
public boolean avoidFlushDuringShutdown() {
assert(isOwningHandle());
return avoidFlushDuringShutdown(nativeHandle_);
}
@Override
public MemTableConfig memTableConfig() {
return this.memTableConfig_;
}
@Override
public Options setMemTableConfig(final MemTableConfig config) {
memTableConfig_ = config;
@ -718,6 +1025,11 @@ public class Options extends RocksObject
return memTableFactoryName(nativeHandle_);
}
@Override
public TableFormatConfig tableFormatConfig() {
return this.tableFormatConfig_;
}
@Override
public Options setTableFormatConfig(final TableFormatConfig config) {
tableFormatConfig_ = config;
@ -747,7 +1059,7 @@ public class Options extends RocksObject
@Override
public CompressionType compressionType() {
return CompressionType.values()[compressionType(nativeHandle_)];
return CompressionType.getCompressionType(compressionType(nativeHandle_));
}
@Override
@ -780,6 +1092,34 @@ public class Options extends RocksObject
return this;
}
@Override
public Options setBottommostCompressionType(
final CompressionType bottommostCompressionType) {
setBottommostCompressionType(nativeHandle_,
bottommostCompressionType.getValue());
return this;
}
@Override
public CompressionType bottommostCompressionType() {
return CompressionType.getCompressionType(
bottommostCompressionType(nativeHandle_));
}
@Override
public Options setCompressionOptions(
final CompressionOptions compressionOptions) {
setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_);
this.compressionOptions_ = compressionOptions;
return this;
}
@Override
public CompressionOptions compressionOptions() {
return this.compressionOptions_;
}
@Override
public CompactionStyle compactionStyle() {
return CompactionStyle.values()[compactionStyle(nativeHandle_)];
@ -840,17 +1180,6 @@ public class Options extends RocksObject
return this;
}
@Override
public int maxMemCompactionLevel() {
return 0;
}
@Override
public Options setMaxMemCompactionLevel(
final int maxMemCompactionLevel) {
return this;
}
@Override
public long targetFileSizeBase() {
return targetFileSizeBase(nativeHandle_);
@ -919,41 +1248,6 @@ public class Options extends RocksObject
return this;
}
@Override
public double softRateLimit() {
return softRateLimit(nativeHandle_);
}
@Override
public Options setSoftRateLimit(final double softRateLimit) {
setSoftRateLimit(nativeHandle_, softRateLimit);
return this;
}
@Override
public double hardRateLimit() {
return hardRateLimit(nativeHandle_);
}
@Override
public Options setHardRateLimit(double hardRateLimit) {
setHardRateLimit(nativeHandle_, hardRateLimit);
return this;
}
@Override
public int rateLimitDelayMaxMilliseconds() {
return rateLimitDelayMaxMilliseconds(nativeHandle_);
}
@Override
public Options setRateLimitDelayMaxMilliseconds(
final int rateLimitDelayMaxMilliseconds) {
setRateLimitDelayMaxMilliseconds(
nativeHandle_, rateLimitDelayMaxMilliseconds);
return this;
}
@Override
public long arenaBlockSize() {
return arenaBlockSize(nativeHandle_);
@ -977,19 +1271,6 @@ public class Options extends RocksObject
return this;
}
@Override
public boolean purgeRedundantKvsWhileFlush() {
return purgeRedundantKvsWhileFlush(nativeHandle_);
}
@Override
public Options setPurgeRedundantKvsWhileFlush(
final boolean purgeRedundantKvsWhileFlush) {
setPurgeRedundantKvsWhileFlush(
nativeHandle_, purgeRedundantKvsWhileFlush);
return this;
}
@Override
public long maxSequentialSkipInIterations() {
return maxSequentialSkipInIterations(nativeHandle_);
@ -1144,7 +1425,7 @@ public class Options extends RocksObject
}
@Override
public MutableColumnFamilyOptionsInterface setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
public Options setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
return this;
}
@ -1176,6 +1457,81 @@ public class Options extends RocksObject
return paranoidFileChecks(nativeHandle_);
}
@Override
public Options setMaxWriteBufferNumberToMaintain(
final int maxWriteBufferNumberToMaintain) {
setMaxWriteBufferNumberToMaintain(
nativeHandle_, maxWriteBufferNumberToMaintain);
return this;
}
@Override
public int maxWriteBufferNumberToMaintain() {
return maxWriteBufferNumberToMaintain(nativeHandle_);
}
@Override
public Options setCompactionPriority(
final CompactionPriority compactionPriority) {
setCompactionPriority(nativeHandle_, compactionPriority.getValue());
return this;
}
@Override
public CompactionPriority compactionPriority() {
return CompactionPriority.getCompactionPriority(
compactionPriority(nativeHandle_));
}
@Override
public Options setReportBgIoStats(final boolean reportBgIoStats) {
setReportBgIoStats(nativeHandle_, reportBgIoStats);
return this;
}
@Override
public boolean reportBgIoStats() {
return reportBgIoStats(nativeHandle_);
}
@Override
public Options setCompactionOptionsUniversal(
final CompactionOptionsUniversal compactionOptionsUniversal) {
setCompactionOptionsUniversal(nativeHandle_,
compactionOptionsUniversal.nativeHandle_);
this.compactionOptionsUniversal_ = compactionOptionsUniversal;
return this;
}
@Override
public CompactionOptionsUniversal compactionOptionsUniversal() {
return this.compactionOptionsUniversal_;
}
@Override
public Options setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) {
setCompactionOptionsFIFO(nativeHandle_,
compactionOptionsFIFO.nativeHandle_);
this.compactionOptionsFIFO_ = compactionOptionsFIFO;
return this;
}
@Override
public CompactionOptionsFIFO compactionOptionsFIFO() {
return this.compactionOptionsFIFO_;
}
@Override
public Options setForceConsistencyChecks(final boolean forceConsistencyChecks) {
setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks);
return this;
}
@Override
public boolean forceConsistencyChecks() {
return forceConsistencyChecks(nativeHandle_);
}
private native static long newOptions();
private native static long newOptions(long dbOptHandle,
long cfOptHandle);
@ -1205,11 +1561,19 @@ public class Options extends RocksObject
private native int maxOpenFiles(long handle);
private native void setMaxTotalWalSize(long handle,
long maxTotalWalSize);
private native void setMaxFileOpeningThreads(final long handle,
final int maxFileOpeningThreads);
private native int maxFileOpeningThreads(final long handle);
private native long maxTotalWalSize(long handle);
private native void createStatistics(long optHandle);
private native long statisticsPtr(long optHandle);
private native boolean useFsync(long handle);
private native void setUseFsync(long handle, boolean useFsync);
private native void setDbPaths(final long handle, final String[] paths,
final long[] targetSizes);
private native long dbPathsLen(final long handle);
private native void dbPaths(final long handle, final String[] paths,
final long[] targetSizes);
private native void setDbLogDir(long handle, String dbLogDir);
private native String dbLogDir(long handle);
private native void setWalDir(long handle, String walDir);
@ -1237,6 +1601,8 @@ public class Options extends RocksObject
private native void setKeepLogFileNum(long handle, long keepLogFileNum)
throws IllegalArgumentException;
private native long keepLogFileNum(long handle);
private native void setRecycleLogFileNum(long handle, long recycleLogFileNum);
private native long recycleLogFileNum(long handle);
private native void setMaxManifestFileSize(
long handle, long maxManifestFileSize);
private native long maxManifestFileSize(long handle);
@ -1257,6 +1623,9 @@ public class Options extends RocksObject
private native boolean useDirectReads(long handle);
private native void setUseDirectWrites(long handle, boolean useDirectWrites);
private native boolean useDirectWrites(long handle);
private native void setAllowFAllocate(final long handle,
final boolean allowFAllocate);
private native boolean allowFAllocate(final long handle);
private native void setAllowMmapReads(
long handle, boolean allowMmapReads);
private native boolean allowMmapReads(long handle);
@ -1272,12 +1641,37 @@ public class Options extends RocksObject
private native void setAdviseRandomOnOpen(
long handle, boolean adviseRandomOnOpen);
private native boolean adviseRandomOnOpen(long handle);
private native void setDbWriteBufferSize(final long handle,
final long dbWriteBufferSize);
private native long dbWriteBufferSize(final long handle);
private native void setAccessHintOnCompactionStart(final long handle,
final byte accessHintOnCompactionStart);
private native byte accessHintOnCompactionStart(final long handle);
private native void setNewTableReaderForCompactionInputs(final long handle,
final boolean newTableReaderForCompactionInputs);
private native boolean newTableReaderForCompactionInputs(final long handle);
private native void setCompactionReadaheadSize(final long handle,
final long compactionReadaheadSize);
private native long compactionReadaheadSize(final long handle);
private native void setRandomAccessMaxBufferSize(final long handle,
final long randomAccessMaxBufferSize);
private native long randomAccessMaxBufferSize(final long handle);
private native void setWritableFileMaxBufferSize(final long handle,
final long writableFileMaxBufferSize);
private native long writableFileMaxBufferSize(final long handle);
private native void setUseAdaptiveMutex(
long handle, boolean useAdaptiveMutex);
private native boolean useAdaptiveMutex(long handle);
private native void setBytesPerSync(
long handle, long bytesPerSync);
private native long bytesPerSync(long handle);
private native void setWalBytesPerSync(long handle, long walBytesPerSync);
private native long walBytesPerSync(long handle);
private native void setEnableThreadTracking(long handle,
boolean enableThreadTracking);
private native boolean enableThreadTracking(long handle);
private native void setDelayedWriteRate(long handle, long delayedWriteRate);
private native long delayedWriteRate(long handle);
private native void setAllowConcurrentMemtableWrite(long handle,
boolean allowConcurrentMemtableWrite);
private native boolean allowConcurrentMemtableWrite(long handle);
@ -1290,7 +1684,32 @@ public class Options extends RocksObject
private native void setWriteThreadSlowYieldUsec(long handle,
long writeThreadSlowYieldUsec);
private native long writeThreadSlowYieldUsec(long handle);
private native void setSkipStatsUpdateOnDbOpen(final long handle,
final boolean skipStatsUpdateOnDbOpen);
private native boolean skipStatsUpdateOnDbOpen(final long handle);
private native void setWalRecoveryMode(final long handle,
final byte walRecoveryMode);
private native byte walRecoveryMode(final long handle);
private native void setAllow2pc(final long handle,
final boolean allow2pc);
private native boolean allow2pc(final long handle);
private native void setRowCache(final long handle,
final long row_cache_handle);
private native void setFailIfOptionsFileError(final long handle,
final boolean failIfOptionsFileError);
private native boolean failIfOptionsFileError(final long handle);
private native void setDumpMallocStats(final long handle,
final boolean dumpMallocStats);
private native boolean dumpMallocStats(final long handle);
private native void setAvoidFlushDuringRecovery(final long handle,
final boolean avoidFlushDuringRecovery);
private native boolean avoidFlushDuringRecovery(final long handle);
private native void setAvoidFlushDuringShutdown(final long handle,
final boolean avoidFlushDuringShutdown);
private native boolean avoidFlushDuringShutdown(final long handle);
// CF native handles
private native void optimizeForSmallDb(final long handle);
private native void optimizeForPointLookup(long handle,
long blockCacheSizeMb);
private native void optimizeLevelStyleCompaction(long handle,
@ -1318,6 +1737,11 @@ public class Options extends RocksObject
private native void setCompressionPerLevel(long handle,
byte[] compressionLevels);
private native byte[] compressionPerLevel(long handle);
private native void setBottommostCompressionType(long handle,
byte bottommostCompressionType);
private native byte bottommostCompressionType(long handle);
private native void setCompressionOptions(long handle,
long compressionOptionsHandle);
private native void useFixedLengthPrefixExtractor(
long handle, int prefixLength);
private native void useCappedPrefixExtractor(
@ -1351,15 +1775,6 @@ public class Options extends RocksObject
private native double maxBytesForLevelMultiplier(long handle);
private native void setMaxCompactionBytes(long handle, long maxCompactionBytes);
private native long maxCompactionBytes(long handle);
private native void setSoftRateLimit(
long handle, double softRateLimit);
private native double softRateLimit(long handle);
private native void setHardRateLimit(
long handle, double hardRateLimit);
private native double hardRateLimit(long handle);
private native void setRateLimitDelayMaxMilliseconds(
long handle, int rateLimitDelayMaxMilliseconds);
private native int rateLimitDelayMaxMilliseconds(long handle);
private native void setArenaBlockSize(
long handle, long arenaBlockSize) throws IllegalArgumentException;
private native long arenaBlockSize(long handle);
@ -1368,9 +1783,6 @@ public class Options extends RocksObject
private native boolean disableAutoCompactions(long handle);
private native void setCompactionStyle(long handle, byte compactionStyle);
private native byte compactionStyle(long handle);
private native void setPurgeRedundantKvsWhileFlush(
long handle, boolean purgeRedundantKvsWhileFlush);
private native boolean purgeRedundantKvsWhileFlush(long handle);
private native void setMaxSequentialSkipInIterations(
long handle, long maxSequentialSkipInIterations);
private native long maxSequentialSkipInIterations(long handle);
@ -1422,10 +1834,31 @@ public class Options extends RocksObject
private native void setParanoidFileChecks(long handle,
boolean paranoidFileChecks);
private native boolean paranoidFileChecks(long handle);
private native void setMaxWriteBufferNumberToMaintain(final long handle,
final int maxWriteBufferNumberToMaintain);
private native int maxWriteBufferNumberToMaintain(final long handle);
private native void setCompactionPriority(final long handle,
final byte compactionPriority);
private native byte compactionPriority(final long handle);
private native void setReportBgIoStats(final long handle,
final boolean reportBgIoStats);
private native boolean reportBgIoStats(final long handle);
private native void setCompactionOptionsUniversal(final long handle,
final long compactionOptionsUniversalHandle);
private native void setCompactionOptionsFIFO(final long handle,
final long compactionOptionsFIFOHandle);
private native void setForceConsistencyChecks(final long handle,
final boolean forceConsistencyChecks);
private native boolean forceConsistencyChecks(final long handle);
// instance variables
Env env_;
MemTableConfig memTableConfig_;
TableFormatConfig tableFormatConfig_;
RateLimiter rateLimiter_;
AbstractComparator<? extends AbstractSlice<?>> comparator_;
private Env env_;
private MemTableConfig memTableConfig_;
private TableFormatConfig tableFormatConfig_;
private RateLimiter rateLimiter_;
private AbstractComparator<? extends AbstractSlice<?>> comparator_;
private CompactionOptionsUniversal compactionOptionsUniversal_;
private CompactionOptionsFIFO compactionOptionsFIFO_;
private CompressionOptions compressionOptions_;
private Cache rowCache_;
}
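Taken together, the new setters above chain in the usual fluent style. A minimal sketch, assuming the database itself is opened elsewhere; all values are illustrative:

try (final Cache rowCache = new LRUCache(64 * 1024 * 1024);
     final Options options = new Options()
         .setCreateIfMissing(true)
         .setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery)
         .setRowCache(rowCache)                 // serve point lookups from an LRU row cache
         .setDelayedWriteRate(16 * 1024 * 1024) // 16 MB/s while writes are being delayed
         .setAvoidFlushDuringShutdown(true)) {
  // pass options to RocksDB.open(options, path)
}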


@ -269,6 +269,100 @@ public class ReadOptions extends RocksObject {
return this;
}
/**
* If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
* schedule a background job in the flush job queue and delete obsolete files
* in the background.
*
* Default: false
*
* @return true if obsolete files are deleted in a background job during
*     iterator cleanup
*/
public boolean backgroundPurgeOnIteratorCleanup() {
assert(isOwningHandle());
return backgroundPurgeOnIteratorCleanup(nativeHandle_);
}
/**
* If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
* schedule a background job in the flush job queue and delete obsolete files
* in the background.
*
* Default: false
*
* @param backgroundPurgeOnIteratorCleanup true when PurgeObsoleteFile is
* called in CleanupIteratorState
* @return the reference to the current ReadOptions.
*/
public ReadOptions setBackgroundPurgeOnIteratorCleanup(
final boolean backgroundPurgeOnIteratorCleanup) {
assert(isOwningHandle());
setBackgroundPurgeOnIteratorCleanup(nativeHandle_,
backgroundPurgeOnIteratorCleanup);
return this;
}
/**
* If non-zero, NewIterator will create a new table reader which
* performs reads of the given size. Using a large size (&gt; 2MB) can
* improve the performance of forward iteration on spinning disks.
*
* Default: 0
*
* @return The readahead size in bytes
*/
public long readaheadSize() {
assert(isOwningHandle());
return readaheadSize(nativeHandle_);
}
/**
* If non-zero, NewIterator will create a new table reader which
* performs reads of the given size. Using a large size (&gt; 2MB) can
* improve the performance of forward iteration on spinning disks.
*
* Default: 0
*
* @param readaheadSize The readahead size in bytes
* @return the reference to the current ReadOptions.
*/
public ReadOptions setReadaheadSize(final long readaheadSize) {
assert(isOwningHandle());
setReadaheadSize(nativeHandle_, readaheadSize);
return this;
}
/**
* If true, keys deleted using the DeleteRange() API will be visible to
* readers until they are naturally deleted during compaction. This improves
* read performance in DBs with many range deletions.
*
* Default: false
*
* @return true if keys deleted using the DeleteRange() API will be visible
*/
public boolean ignoreRangeDeletions() {
assert(isOwningHandle());
return ignoreRangeDeletions(nativeHandle_);
}
/**
* If true, keys deleted using the DeleteRange() API will be visible to
* readers until they are naturally deleted during compaction. This improves
* read performance in DBs with many range deletions.
*
* Default: false
*
* @param ignoreRangeDeletions true if keys deleted using the DeleteRange()
* API should be visible
* @return the reference to the current ReadOptions.
*/
public ReadOptions setIgnoreRangeDeletions(final boolean ignoreRangeDeletions) {
assert(isOwningHandle());
setIgnoreRangeDeletions(nativeHandle_, ignoreRangeDeletions);
return this;
}
private native static long newReadOptions();
private native boolean verifyChecksums(long handle);
private native void setVerifyChecksums(long handle, boolean verifyChecksums);
@ -288,6 +382,15 @@ public class ReadOptions extends RocksObject {
private native void setPrefixSameAsStart(long handle, boolean prefixSameAsStart);
private native boolean pinData(long handle);
private native void setPinData(long handle, boolean pinData);
private native boolean backgroundPurgeOnIteratorCleanup(final long handle);
private native void setBackgroundPurgeOnIteratorCleanup(final long handle,
final boolean backgroundPurgeOnIteratorCleanup);
private native long readaheadSize(final long handle);
private native void setReadaheadSize(final long handle,
final long readaheadSize);
private native boolean ignoreRangeDeletions(final long handle);
private native void setIgnoreRangeDeletions(final long handle,
final boolean ignoreRangeDeletions);
@Override protected final native void disposeInternal(final long handle);
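A short sketch of the three new ReadOptions knobs used together (values illustrative):

try (final ReadOptions readOptions = new ReadOptions()
         .setBackgroundPurgeOnIteratorCleanup(true) // delete obsolete files off the read path
         .setReadaheadSize(4 * 1024 * 1024)         // large readahead helps forward scans on spinning disks
         .setIgnoreRangeDeletions(true)) {          // trade range-delete visibility for scan speed
  // use with db.newIterator(readOptions)
}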


@ -0,0 +1,83 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* The WAL Recover Mode
*/
public enum WALRecoveryMode {
/**
* Original LevelDB recovery
*
* We tolerate incomplete records in trailing data on all logs
* Use case : This is legacy behavior (default)
*/
TolerateCorruptedTailRecords((byte)0x00),
/**
* Recover from clean shutdown
*
* We don't expect to find any corruption in the WAL
* Use case : This is ideal for unit tests and rare applications that
* require a high consistency guarantee
*/
AbsoluteConsistency((byte)0x01),
/**
* Recover to point-in-time consistency
* We stop the WAL playback on discovering WAL inconsistency
* Use case : Ideal for systems that have a disk-controller cache, such as
* a hard disk or an SSD without a supercapacitor, which may buffer related data
*/
PointInTimeRecovery((byte)0x02),
/**
* Recovery after a disaster
* We ignore any corruption in the WAL and try to salvage as much data as
* possible
* Use case : Ideal as a last-ditch effort to recover data, or for systems
* that operate on low-grade, unrelated data
*/
SkipAnyCorruptedRecords((byte)0x03);
private byte value;
WALRecoveryMode(final byte value) {
this.value = value;
}
/**
* <p>Returns the byte value of the enumerations value.</p>
*
* @return byte representation
*/
public byte getValue() {
return value;
}
/**
* <p>Get the WALRecoveryMode enumeration value by
* passing the byte identifier to this method.</p>
*
* @param byteIdentifier of WALRecoveryMode.
*
* @return WALRecoveryMode instance.
*
* @throws IllegalArgumentException If WALRecoveryMode cannot be found for the
* provided byteIdentifier
*/
public static WALRecoveryMode getWALRecoveryMode(final byte byteIdentifier) {
for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
if (walRecoveryMode.getValue() == byteIdentifier) {
return walRecoveryMode;
}
}
throw new IllegalArgumentException(
"Illegal value provided for WALRecoveryMode.");
}
}
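As a quick sketch, the enumeration is consumed through the new walRecoveryMode setter shown earlier:

try (final DBOptions dbOptions = new DBOptions()
         .setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery)) {
  assert dbOptions.walRecoveryMode() == WALRecoveryMode.PointInTimeRecovery;
}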


@ -92,10 +92,68 @@ public class WriteOptions extends RocksObject {
return disableWAL(nativeHandle_);
}
/**
* If true, and the user is trying to write to column families that don't
* exist (they were dropped), ignore the write (don't return an error). If
* there are multiple writes in a WriteBatch, the other writes will succeed.
*
* Default: false
*
* @param ignoreMissingColumnFamilies true to ignore writes to column families
* which don't exist
* @return the instance of the current WriteOptions.
*/
public WriteOptions setIgnoreMissingColumnFamilies(
final boolean ignoreMissingColumnFamilies) {
setIgnoreMissingColumnFamilies(nativeHandle_, ignoreMissingColumnFamilies);
return this;
}
/**
* If true, and the user is trying to write to column families that don't
* exist (they were dropped), ignore the write (don't return an error). If
* there are multiple writes in a WriteBatch, the other writes will succeed.
*
* Default: false
*
* @return true if writes to column families which don't exist are ignored
*/
public boolean ignoreMissingColumnFamilies() {
return ignoreMissingColumnFamilies(nativeHandle_);
}
/**
* If true, and we would need to wait or sleep for the write request, the
* write fails immediately with {@link Status.Code#Incomplete}.
*
* @param noSlowdown true to fail write requests if we need to wait or sleep
* @return the instance of the current WriteOptions.
*/
public WriteOptions setNoSlowdown(final boolean noSlowdown) {
setNoSlowdown(nativeHandle_, noSlowdown);
return this;
}
/**
* If true, and we would need to wait or sleep for the write request, the
* write fails immediately with {@link Status.Code#Incomplete}.
*
* @return true if write requests fail immediately when they would need to
*     wait or sleep
*/
public boolean noSlowdown() {
return noSlowdown(nativeHandle_);
}
private native static long newWriteOptions();
private native void setSync(long handle, boolean flag);
private native boolean sync(long handle);
private native void setDisableWAL(long handle, boolean flag);
private native boolean disableWAL(long handle);
private native void setIgnoreMissingColumnFamilies(final long handle,
final boolean ignoreMissingColumnFamilies);
private native boolean ignoreMissingColumnFamilies(final long handle);
private native void setNoSlowdown(final long handle,
final boolean noSlowdown);
private native boolean noSlowdown(final long handle);
@Override protected final native void disposeInternal(final long handle);
}
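And a brief sketch of the two new WriteOptions flags together; the put call in the comment is illustrative:

try (final WriteOptions writeOptions = new WriteOptions()
         .setIgnoreMissingColumnFamilies(true) // silently drop writes to dropped column families
         .setNoSlowdown(true)) {               // fail fast instead of stalling on a delayed write
  // db.put(columnFamilyHandle, writeOptions, key, value) may now throw
  // with Status.Code.Incomplete rather than block on a write stall
}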


@ -38,6 +38,21 @@ public class BackupableDBOptionsTest {
}
}
@Test
public void env() {
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
assertThat(backupableDBOptions.backupEnv()).
isNull();
try(final Env env = new RocksMemEnv()) {
backupableDBOptions.setBackupEnv(env);
assertThat(backupableDBOptions.backupEnv())
.isEqualTo(env);
}
}
}
@Test
public void shareTableFiles() {
try (final BackupableDBOptions backupableDBOptions =
@ -49,6 +64,27 @@ public class BackupableDBOptionsTest {
}
}
@Test
public void infoLog() {
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
assertThat(backupableDBOptions.infoLog()).
isNull();
try(final Options options = new Options();
final Logger logger = new Logger(options){
@Override
protected void log(InfoLogLevel infoLogLevel, String logMsg) {
}
}) {
backupableDBOptions.setInfoLog(logger);
assertThat(backupableDBOptions.infoLog())
.isEqualTo(logger);
}
}
}
@Test
public void sync() {
try (final BackupableDBOptions backupableDBOptions =
@ -96,6 +132,22 @@ public class BackupableDBOptionsTest {
}
}
@Test
public void backupRateLimiter() {
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
assertThat(backupableDBOptions.backupRateLimiter()).
isNull();
try(final RateLimiter backupRateLimiter =
new RateLimiter(999)) {
backupableDBOptions.setBackupRateLimiter(backupRateLimiter);
assertThat(backupableDBOptions.backupRateLimiter())
.isEqualTo(backupRateLimiter);
}
}
}
@Test
public void restoreRateLimit() {
try (final BackupableDBOptions backupableDBOptions =
@ -111,6 +163,22 @@ public class BackupableDBOptionsTest {
}
}
@Test
public void restoreRateLimiter() {
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
assertThat(backupableDBOptions.restoreRateLimiter()).
isNull();
try(final RateLimiter restoreRateLimiter =
new RateLimiter(911)) {
backupableDBOptions.setRestoreRateLimiter(restoreRateLimiter);
assertThat(backupableDBOptions.restoreRateLimiter())
.isEqualTo(restoreRateLimiter);
}
}
}
@Test
public void shareFilesWithChecksum() {
try (final BackupableDBOptions backupableDBOptions =
@ -122,6 +190,28 @@ public class BackupableDBOptionsTest {
}
}
@Test
public void maxBackgroundOperations() {
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
final int value = rand.nextInt();
backupableDBOptions.setMaxBackgroundOperations(value);
assertThat(backupableDBOptions.maxBackgroundOperations()).
isEqualTo(value);
}
}
@Test
public void callbackTriggerIntervalSize() {
try (final BackupableDBOptions backupableDBOptions =
new BackupableDBOptions(ARBITRARY_PATH)) {
final long value = rand.nextLong();
backupableDBOptions.setCallbackTriggerIntervalSize(value);
assertThat(backupableDBOptions.callbackTriggerIntervalSize()).
isEqualTo(value);
}
}
@Test
public void failBackupDirIsNull() {
exception.expect(IllegalArgumentException.class);


@ -0,0 +1,26 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.Test;
public class ClockCacheTest {
static {
RocksDB.loadLibrary();
}
@Test
public void newClockCache() {
final long capacity = 1000;
final int numShardBits = 16;
final boolean strictCapacityLimit = true;
try(final Cache clockCache = new ClockCache(capacity,
numShardBits, strictCapacityLimit)) {
//no op
}
}
}


@ -198,15 +198,6 @@ public class ColumnFamilyOptionsTest {
}
}
@Test
public void softRateLimit() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final double doubleValue = rand.nextDouble();
opt.setSoftRateLimit(doubleValue);
assertThat(opt.softRateLimit()).isEqualTo(doubleValue);
}
}
@Test
public void softPendingCompactionBytesLimit() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
@ -216,15 +207,6 @@ public class ColumnFamilyOptionsTest {
}
}
@Test
public void hardRateLimit() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final double doubleValue = rand.nextDouble();
opt.setHardRateLimit(doubleValue);
assertThat(opt.hardRateLimit()).isEqualTo(doubleValue);
}
}
@Test
public void hardPendingCompactionBytesLimit() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
@ -261,15 +243,6 @@ public class ColumnFamilyOptionsTest {
}
}
@Test
public void rateLimitDelayMaxMilliseconds() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setRateLimitDelayMaxMilliseconds(intValue);
assertThat(opt.rateLimitDelayMaxMilliseconds()).isEqualTo(intValue);
}
}
@Test
public void arenaBlockSize() throws RocksDBException {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
@ -288,15 +261,6 @@ public class ColumnFamilyOptionsTest {
}
}
@Test
public void purgeRedundantKvsWhileFlush() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setPurgeRedundantKvsWhileFlush(boolValue);
assertThat(opt.purgeRedundantKvsWhileFlush()).isEqualTo(boolValue);
}
}
@Test
public void maxSequentialSkipInIterations() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
@ -393,6 +357,7 @@ public class ColumnFamilyOptionsTest {
options.optimizeLevelStyleCompaction();
options.optimizeLevelStyleCompaction(3000);
options.optimizeForPointLookup(10);
options.optimizeForSmallDb();
}
}
@ -471,6 +436,36 @@ public class ColumnFamilyOptionsTest {
}
}
@Test
public void bottommostCompressionType() {
try (final ColumnFamilyOptions columnFamilyOptions
= new ColumnFamilyOptions()) {
assertThat(columnFamilyOptions.bottommostCompressionType())
.isEqualTo(CompressionType.DISABLE_COMPRESSION_OPTION);
for (final CompressionType compressionType : CompressionType.values()) {
columnFamilyOptions.setBottommostCompressionType(compressionType);
assertThat(columnFamilyOptions.bottommostCompressionType())
.isEqualTo(compressionType);
}
}
}
@Test
public void compressionOptions() {
try (final ColumnFamilyOptions columnFamilyOptions
= new ColumnFamilyOptions();
final CompressionOptions compressionOptions = new CompressionOptions()
.setMaxDictBytes(123)) {
columnFamilyOptions.setCompressionOptions(compressionOptions);
assertThat(columnFamilyOptions.compressionOptions())
.isEqualTo(compressionOptions);
assertThat(columnFamilyOptions.compressionOptions().maxDictBytes())
.isEqualTo(123);
}
}
@Test
public void compactionStyles() {
try (final ColumnFamilyOptions columnFamilyOptions
@ -498,4 +493,75 @@ public class ColumnFamilyOptionsTest {
isEqualTo(longValue);
}
}
@Test
public void maxWriteBufferNumberToMaintain() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
// Size has to be positive; draw from [1, Integer.MAX_VALUE) so the
// Integer.MIN_VALUE negation overflow cannot occur
final int intValue = rand.nextInt(Integer.MAX_VALUE - 1) + 1;
opt.setMaxWriteBufferNumberToMaintain(intValue);
assertThat(opt.maxWriteBufferNumberToMaintain()).
isEqualTo(intValue);
}
}
@Test
public void compactionPriorities() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
for (final CompactionPriority compactionPriority :
CompactionPriority.values()) {
opt.setCompactionPriority(compactionPriority);
assertThat(opt.compactionPriority()).
isEqualTo(compactionPriority);
}
}
}
@Test
public void reportBgIoStats() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean booleanValue = true;
opt.setReportBgIoStats(booleanValue);
assertThat(opt.reportBgIoStats()).
isEqualTo(booleanValue);
}
}
@Test
public void compactionOptionsUniversal() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions();
final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal()
.setCompressionSizePercent(7)) {
opt.setCompactionOptionsUniversal(optUni);
assertThat(opt.compactionOptionsUniversal()).
isEqualTo(optUni);
assertThat(opt.compactionOptionsUniversal().compressionSizePercent())
.isEqualTo(7);
}
}
@Test
public void compactionOptionsFIFO() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions();
final CompactionOptionsFIFO optFifo = new CompactionOptionsFIFO()
.setMaxTableFilesSize(2000)) {
opt.setCompactionOptionsFIFO(optFifo);
assertThat(opt.compactionOptionsFIFO()).
isEqualTo(optFifo);
assertThat(opt.compactionOptionsFIFO().maxTableFilesSize())
.isEqualTo(2000);
}
}
@Test
public void forceConsistencyChecks() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean booleanValue = true;
opt.setForceConsistencyChecks(booleanValue);
assertThat(opt.forceConsistencyChecks()).
isEqualTo(booleanValue);
}
}
}


@ -0,0 +1,26 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class CompactionOptionsFIFOTest {
static {
RocksDB.loadLibrary();
}
@Test
public void maxTableFilesSize() {
final long size = 500 * 1024 * 1024;
try(final CompactionOptionsFIFO opt = new CompactionOptionsFIFO()) {
opt.setMaxTableFilesSize(size);
assertThat(opt.maxTableFilesSize()).isEqualTo(size);
}
}
}


@ -0,0 +1,80 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class CompactionOptionsUniversalTest {
static {
RocksDB.loadLibrary();
}
@Test
public void sizeRatio() {
final int sizeRatio = 4;
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
opt.setSizeRatio(sizeRatio);
assertThat(opt.sizeRatio()).isEqualTo(sizeRatio);
}
}
@Test
public void minMergeWidth() {
final int minMergeWidth = 3;
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
opt.setMinMergeWidth(minMergeWidth);
assertThat(opt.minMergeWidth()).isEqualTo(minMergeWidth);
}
}
@Test
public void maxMergeWidth() {
final int maxMergeWidth = Integer.MAX_VALUE - 1234;
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
opt.setMaxMergeWidth(maxMergeWidth);
assertThat(opt.maxMergeWidth()).isEqualTo(maxMergeWidth);
}
}
@Test
public void maxSizeAmplificationPercent() {
final int maxSizeAmplificationPercent = 150;
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
opt.setMaxSizeAmplificationPercent(maxSizeAmplificationPercent);
assertThat(opt.maxSizeAmplificationPercent()).isEqualTo(maxSizeAmplificationPercent);
}
}
@Test
public void compressionSizePercent() {
final int compressionSizePercent = 500;
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
opt.setCompressionSizePercent(compressionSizePercent);
assertThat(opt.compressionSizePercent()).isEqualTo(compressionSizePercent);
}
}
@Test
public void stopStyle() {
final CompactionStopStyle stopStyle = CompactionStopStyle.CompactionStopStyleSimilarSize;
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
opt.setStopStyle(stopStyle);
assertThat(opt.stopStyle()).isEqualTo(stopStyle);
}
}
@Test
public void allowTrivialMove() {
final boolean allowTrivialMove = true;
try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) {
opt.setAllowTrivialMove(allowTrivialMove);
assertThat(opt.allowTrivialMove()).isEqualTo(allowTrivialMove);
}
}
}


@ -0,0 +1,31 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class CompactionPriorityTest {
@Test(expected = IllegalArgumentException.class)
public void failIfIllegalByteValueProvided() {
CompactionPriority.getCompactionPriority((byte) -1);
}
@Test
public void getCompactionPriority() {
assertThat(CompactionPriority.getCompactionPriority(
CompactionPriority.OldestLargestSeqFirst.getValue()))
.isEqualTo(CompactionPriority.OldestLargestSeqFirst);
}
@Test
public void valueOf() {
assertThat(CompactionPriority.valueOf("OldestSmallestSeqFirst")).
isEqualTo(CompactionPriority.OldestSmallestSeqFirst);
}
}


@ -0,0 +1,31 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class CompactionStopStyleTest {
@Test(expected = IllegalArgumentException.class)
public void failIfIllegalByteValueProvided() {
CompactionStopStyle.getCompactionStopStyle((byte) -1);
}
@Test
public void getCompactionStopStyle() {
assertThat(CompactionStopStyle.getCompactionStopStyle(
CompactionStopStyle.CompactionStopStyleTotalSize.getValue()))
.isEqualTo(CompactionStopStyle.CompactionStopStyleTotalSize);
}
@Test
public void valueOf() {
assertThat(CompactionStopStyle.valueOf("CompactionStopStyleSimilarSize")).
isEqualTo(CompactionStopStyle.CompactionStopStyleSimilarSize);
}
}


@ -7,14 +7,47 @@ package org.rocksdb;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class CompressionOptionsTest {
static {
RocksDB.loadLibrary();
}
@Test
public void getCompressionType() {
for (final CompressionType compressionType : CompressionType.values()) {
String libraryName = compressionType.getLibraryName();
compressionType.equals(CompressionType.getCompressionType(
libraryName));
public void windowBits() {
final int windowBits = 7;
try(final CompressionOptions opt = new CompressionOptions()) {
opt.setWindowBits(windowBits);
assertThat(opt.windowBits()).isEqualTo(windowBits);
}
}
@Test
public void level() {
final int level = 6;
try(final CompressionOptions opt = new CompressionOptions()) {
opt.setLevel(level);
assertThat(opt.level()).isEqualTo(level);
}
}
@Test
public void strategy() {
final int strategy = 2;
try(final CompressionOptions opt = new CompressionOptions()) {
opt.setStrategy(strategy);
assertThat(opt.strategy()).isEqualTo(strategy);
}
}
@Test
public void maxDictBytes() {
final int maxDictBytes = 999;
try(final CompressionOptions opt = new CompressionOptions()) {
opt.setMaxDictBytes(maxDictBytes);
assertThat(opt.maxDictBytes()).isEqualTo(maxDictBytes);
}
}
}


@ -0,0 +1,20 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.Test;
public class CompressionTypesTest {
@Test
public void getCompressionType() {
for (final CompressionType compressionType : CompressionType.values()) {
final String libraryName = compressionType.getLibraryName();
// Library names may be null or shared between types, so just exercise
// the lookup and ensure it does not throw
CompressionType.getCompressionType(libraryName);
}
}
}


@ -8,8 +8,8 @@ package org.rocksdb;
import org.junit.ClassRule;
import org.junit.Test;
import java.util.Properties;
import java.util.Random;
import java.nio.file.Paths;
import java.util.*;
import static org.assertj.core.api.Assertions.assertThat;
@ -63,6 +63,22 @@ public class DBOptionsTest {
}
}
@Test
public void linkageOfPrepMethods() {
try (final DBOptions opt = new DBOptions()) {
opt.optimizeForSmallDb();
}
}
@Test
public void env() {
try (final DBOptions opt = new DBOptions();
final Env env = Env.getDefault()) {
opt.setEnv(env);
assertThat(opt.getEnv()).isSameAs(env);
}
}
@Test
public void setIncreaseParallelism() {
try(final DBOptions opt = new DBOptions()) {
@ -125,6 +141,15 @@ public class DBOptionsTest {
}
}
@Test
public void maxFileOpeningThreads() {
try(final DBOptions opt = new DBOptions()) {
final int intValue = rand.nextInt();
opt.setMaxFileOpeningThreads(intValue);
assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue);
}
}
@Test
public void useFsync() {
try(final DBOptions opt = new DBOptions()) {
@ -134,6 +159,22 @@ public class DBOptionsTest {
}
}
@Test
public void dbPaths() {
final List<DbPath> dbPaths = new ArrayList<>();
dbPaths.add(new DbPath(Paths.get("/a"), 10));
dbPaths.add(new DbPath(Paths.get("/b"), 100));
dbPaths.add(new DbPath(Paths.get("/c"), 1000));
try(final DBOptions opt = new DBOptions()) {
assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList());
opt.setDbPaths(dbPaths);
assertThat(opt.dbPaths()).isEqualTo(dbPaths);
}
}
@Test
public void dbLogDir() {
try(final DBOptions opt = new DBOptions()) {
@ -226,6 +267,15 @@ public class DBOptionsTest {
}
}
@Test
public void recycleLogFileNum() throws RocksDBException {
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setRecycleLogFileNum(longValue);
assertThat(opt.recycleLogFileNum()).isEqualTo(longValue);
}
}
@Test
public void maxManifestFileSize() {
try(final DBOptions opt = new DBOptions()) {
@ -289,6 +339,15 @@ public class DBOptionsTest {
}
}
@Test
public void allowFAllocate() {
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setAllowFAllocate(boolValue);
assertThat(opt.allowFAllocate()).isEqualTo(boolValue);
}
}
@Test
public void allowMmapReads() {
try(final DBOptions opt = new DBOptions()) {
@ -334,6 +393,60 @@ public class DBOptionsTest {
}
}
@Test
public void dbWriteBufferSize() {
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setDbWriteBufferSize(longValue);
assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue);
}
}
@Test
public void accessHintOnCompactionStart() {
try(final DBOptions opt = new DBOptions()) {
final AccessHint accessHint = AccessHint.SEQUENTIAL;
opt.setAccessHintOnCompactionStart(accessHint);
assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint);
}
}
@Test
public void newTableReaderForCompactionInputs() {
try(final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setNewTableReaderForCompactionInputs(boolValue);
assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue);
}
}
@Test
public void compactionReadaheadSize() {
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setCompactionReadaheadSize(longValue);
assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue);
}
}
@Test
public void randomAccessMaxBufferSize() {
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setRandomAccessMaxBufferSize(longValue);
assertThat(opt.randomAccessMaxBufferSize()).isEqualTo(longValue);
}
}
@Test
public void writableFileMaxBufferSize() {
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setWritableFileMaxBufferSize(longValue);
assertThat(opt.writableFileMaxBufferSize()).isEqualTo(longValue);
}
}
@Test
public void useAdaptiveMutex() {
try(final DBOptions opt = new DBOptions()) {
@ -352,6 +465,33 @@ public class DBOptionsTest {
}
}
@Test
public void walBytesPerSync() {
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setWalBytesPerSync(longValue);
assertThat(opt.walBytesPerSync()).isEqualTo(longValue);
}
}
@Test
public void enableThreadTracking() {
try (final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setEnableThreadTracking(boolValue);
assertThat(opt.enableThreadTracking()).isEqualTo(boolValue);
}
}
@Test
public void delayedWriteRate() {
try(final DBOptions opt = new DBOptions()) {
final long longValue = rand.nextLong();
opt.setDelayedWriteRate(longValue);
assertThat(opt.delayedWriteRate()).isEqualTo(longValue);
}
}
@Test
public void allowConcurrentMemtableWrite() {
try (final DBOptions opt = new DBOptions()) {
@ -388,6 +528,87 @@ public class DBOptionsTest {
}
}
@Test
public void skipStatsUpdateOnDbOpen() {
try (final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setSkipStatsUpdateOnDbOpen(boolValue);
assertThat(opt.skipStatsUpdateOnDbOpen()).isEqualTo(boolValue);
}
}
@Test
public void walRecoveryMode() {
try (final DBOptions opt = new DBOptions()) {
for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
opt.setWalRecoveryMode(walRecoveryMode);
assertThat(opt.walRecoveryMode()).isEqualTo(walRecoveryMode);
}
}
}
@Test
public void allow2pc() {
try (final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setAllow2pc(boolValue);
assertThat(opt.allow2pc()).isEqualTo(boolValue);
}
}
@Test
public void rowCache() {
try (final DBOptions opt = new DBOptions()) {
assertThat(opt.rowCache()).isNull();
try(final Cache lruCache = new LRUCache(1000)) {
opt.setRowCache(lruCache);
assertThat(opt.rowCache()).isEqualTo(lruCache);
}
try(final Cache clockCache = new ClockCache(1000)) {
opt.setRowCache(clockCache);
assertThat(opt.rowCache()).isEqualTo(clockCache);
}
}
}
@Test
public void failIfOptionsFileError() {
try (final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setFailIfOptionsFileError(boolValue);
assertThat(opt.failIfOptionsFileError()).isEqualTo(boolValue);
}
}
@Test
public void dumpMallocStats() {
try (final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setDumpMallocStats(boolValue);
assertThat(opt.dumpMallocStats()).isEqualTo(boolValue);
}
}
@Test
public void avoidFlushDuringRecovery() {
try (final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setAvoidFlushDuringRecovery(boolValue);
assertThat(opt.avoidFlushDuringRecovery()).isEqualTo(boolValue);
}
}
@Test
public void avoidFlushDuringShutdown() {
try (final DBOptions opt = new DBOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setAvoidFlushDuringShutdown(boolValue);
assertThat(opt.avoidFlushDuringShutdown()).isEqualTo(boolValue);
}
}
@Test
public void rateLimiter() {
try(final DBOptions options = new DBOptions();


@ -0,0 +1,27 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.Test;
public class LRUCacheTest {
static {
RocksDB.loadLibrary();
}
@Test
public void newLRUCache() {
final long capacity = 1000;
final int numShardBits = 16;
final boolean strictCapacityLimit = true;
final double highPriPoolRatio = 0.5; // ratio of the cache reserved for high-priority entries, within [0, 1]
try(final Cache lruCache = new LRUCache(capacity,
numShardBits, strictCapacityLimit, highPriPoolRatio)) {
//no op
}
}
}


@ -5,7 +5,9 @@
package org.rocksdb;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
@ -161,15 +163,6 @@ public class OptionsTest {
}
}
@Test
public void softRateLimit() {
try (final Options opt = new Options()) {
final double doubleValue = rand.nextDouble();
opt.setSoftRateLimit(doubleValue);
assertThat(opt.softRateLimit()).isEqualTo(doubleValue);
}
}
@Test
public void softPendingCompactionBytesLimit() {
try (final Options opt = new Options()) {
@ -179,15 +172,6 @@ public class OptionsTest {
}
}
@Test
public void hardRateLimit() {
try (final Options opt = new Options()) {
final double doubleValue = rand.nextDouble();
opt.setHardRateLimit(doubleValue);
assertThat(opt.hardRateLimit()).isEqualTo(doubleValue);
}
}
@Test
public void hardPendingCompactionBytesLimit() {
try (final Options opt = new Options()) {
@ -224,15 +208,6 @@ public class OptionsTest {
}
}
@Test
public void rateLimitDelayMaxMilliseconds() {
try (final Options opt = new Options()) {
final int intValue = rand.nextInt();
opt.setRateLimitDelayMaxMilliseconds(intValue);
assertThat(opt.rateLimitDelayMaxMilliseconds()).isEqualTo(intValue);
}
}
@Test
public void arenaBlockSize() throws RocksDBException {
try (final Options opt = new Options()) {
@ -251,15 +226,6 @@ public class OptionsTest {
}
}
@Test
public void purgeRedundantKvsWhileFlush() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setPurgeRedundantKvsWhileFlush(boolValue);
assertThat(opt.purgeRedundantKvsWhileFlush()).isEqualTo(boolValue);
}
}
@Test
public void maxSequentialSkipInIterations() {
try (final Options opt = new Options()) {
@ -390,6 +356,15 @@ public class OptionsTest {
}
}
@Test
public void maxFileOpeningThreads() {
try (final Options opt = new Options()) {
final int intValue = rand.nextInt();
opt.setMaxFileOpeningThreads(intValue);
assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue);
}
}
@Test
public void useFsync() {
try (final Options opt = new Options()) {
@ -399,6 +374,22 @@ public class OptionsTest {
}
}
@Test
public void dbPaths() {
final List<DbPath> dbPaths = new ArrayList<>();
dbPaths.add(new DbPath(Paths.get("/a"), 10));
dbPaths.add(new DbPath(Paths.get("/b"), 100));
dbPaths.add(new DbPath(Paths.get("/c"), 1000));
try (final Options opt = new Options()) {
assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList());
opt.setDbPaths(dbPaths);
assertThat(opt.dbPaths()).isEqualTo(dbPaths);
}
}
@Test
public void dbLogDir() {
try (final Options opt = new Options()) {
@ -495,6 +486,15 @@ public class OptionsTest {
}
}
@Test
public void recycleLogFileNum() throws RocksDBException {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setRecycleLogFileNum(longValue);
assertThat(opt.recycleLogFileNum()).isEqualTo(longValue);
}
}
@Test
public void maxManifestFileSize() {
try (final Options opt = new Options()) {
@ -561,6 +561,15 @@ public class OptionsTest {
}
}
@Test
public void allowFAllocate() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setAllowFAllocate(boolValue);
assertThat(opt.allowFAllocate()).isEqualTo(boolValue);
}
}
@Test
public void allowMmapReads() {
try (final Options opt = new Options()) {
@ -606,6 +615,60 @@ public class OptionsTest {
}
}
@Test
public void dbWriteBufferSize() {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setDbWriteBufferSize(longValue);
assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue);
}
}
@Test
public void accessHintOnCompactionStart() {
try (final Options opt = new Options()) {
final AccessHint accessHint = AccessHint.SEQUENTIAL;
opt.setAccessHintOnCompactionStart(accessHint);
assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint);
}
}
@Test
public void newTableReaderForCompactionInputs() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setNewTableReaderForCompactionInputs(boolValue);
assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue);
}
}
@Test
public void compactionReadaheadSize() {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setCompactionReadaheadSize(longValue);
assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue);
}
}
@Test
public void randomAccessMaxBufferSize() {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setRandomAccessMaxBufferSize(longValue);
assertThat(opt.randomAccessMaxBufferSize()).isEqualTo(longValue);
}
}
@Test
public void writableFileMaxBufferSize() {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setWritableFileMaxBufferSize(longValue);
assertThat(opt.writableFileMaxBufferSize()).isEqualTo(longValue);
}
}
@Test
public void useAdaptiveMutex() {
try (final Options opt = new Options()) {
@ -624,6 +687,33 @@ public class OptionsTest {
}
}
@Test
public void walBytesPerSync() {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setWalBytesPerSync(longValue);
assertThat(opt.walBytesPerSync()).isEqualTo(longValue);
}
}
@Test
public void enableThreadTracking() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setEnableThreadTracking(boolValue);
assertThat(opt.enableThreadTracking()).isEqualTo(boolValue);
}
}
@Test
public void delayedWriteRate() {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setDelayedWriteRate(longValue);
assertThat(opt.delayedWriteRate()).isEqualTo(longValue);
}
}
@Test
public void allowConcurrentMemtableWrite() {
try (final Options opt = new Options()) {
@ -660,6 +750,87 @@ public class OptionsTest {
}
}
@Test
public void skipStatsUpdateOnDbOpen() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setSkipStatsUpdateOnDbOpen(boolValue);
assertThat(opt.skipStatsUpdateOnDbOpen()).isEqualTo(boolValue);
}
}
@Test
public void walRecoveryMode() {
try (final Options opt = new Options()) {
for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
opt.setWalRecoveryMode(walRecoveryMode);
assertThat(opt.walRecoveryMode()).isEqualTo(walRecoveryMode);
}
}
}
@Test
public void allow2pc() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setAllow2pc(boolValue);
assertThat(opt.allow2pc()).isEqualTo(boolValue);
}
}
@Test
public void rowCache() {
try (final Options opt = new Options()) {
assertThat(opt.rowCache()).isNull();
try(final Cache lruCache = new LRUCache(1000)) {
opt.setRowCache(lruCache);
assertThat(opt.rowCache()).isEqualTo(lruCache);
}
try(final Cache clockCache = new ClockCache(1000)) {
opt.setRowCache(clockCache);
assertThat(opt.rowCache()).isEqualTo(clockCache);
}
}
}
@Test
public void failIfOptionsFileError() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setFailIfOptionsFileError(boolValue);
assertThat(opt.failIfOptionsFileError()).isEqualTo(boolValue);
}
}
@Test
public void dumpMallocStats() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setDumpMallocStats(boolValue);
assertThat(opt.dumpMallocStats()).isEqualTo(boolValue);
}
}
@Test
public void avoidFlushDuringRecovery() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setAvoidFlushDuringRecovery(boolValue);
assertThat(opt.avoidFlushDuringRecovery()).isEqualTo(boolValue);
}
}
@Test
public void avoidFlushDuringShutdown() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setAvoidFlushDuringShutdown(boolValue);
assertThat(opt.avoidFlushDuringShutdown()).isEqualTo(boolValue);
}
}
@Test
public void env() {
try (final Options options = new Options();
@ -677,6 +848,7 @@ public class OptionsTest {
options.optimizeLevelStyleCompaction();
options.optimizeLevelStyleCompaction(3000);
options.optimizeForPointLookup(10);
options.optimizeForSmallDb();
options.prepareForBulkLoad();
}
}
@ -738,6 +910,34 @@ public class OptionsTest {
}
}
@Test
public void bottommostCompressionType() {
try (final Options options = new Options()) {
assertThat(options.bottommostCompressionType())
.isEqualTo(CompressionType.DISABLE_COMPRESSION_OPTION);
for (final CompressionType compressionType : CompressionType.values()) {
options.setBottommostCompressionType(compressionType);
assertThat(options.bottommostCompressionType())
.isEqualTo(compressionType);
}
}
}
@Test
public void compressionOptions() {
try (final Options options = new Options();
final CompressionOptions compressionOptions = new CompressionOptions()
.setMaxDictBytes(123)) {
options.setCompressionOptions(compressionOptions);
assertThat(options.compressionOptions())
.isEqualTo(compressionOptions);
assertThat(options.compressionOptions().maxDictBytes())
.isEqualTo(123);
}
}
@Test
public void compactionStyles() {
try (final Options options = new Options()) {
@ -820,4 +1020,75 @@ public class OptionsTest {
}
}
}
@Test
public void maxWriteBufferNumberToMaintain() {
try (final Options options = new Options()) {
// Size has to be positive; a bounded nextInt avoids the
// Integer.MIN_VALUE case that plain negation cannot make positive.
final int intValue = rand.nextInt(Integer.MAX_VALUE) + 1;
options.setMaxWriteBufferNumberToMaintain(intValue);
assertThat(options.maxWriteBufferNumberToMaintain()).
isEqualTo(intValue);
}
}
@Test
public void compactionPriorities() {
try (final Options options = new Options()) {
for (final CompactionPriority compactionPriority :
CompactionPriority.values()) {
options.setCompactionPriority(compactionPriority);
assertThat(options.compactionPriority()).
isEqualTo(compactionPriority);
}
}
}
@Test
public void reportBgIoStats() {
try (final Options options = new Options()) {
final boolean booleanValue = true;
options.setReportBgIoStats(booleanValue);
assertThat(options.reportBgIoStats()).
isEqualTo(booleanValue);
}
}
@Test
public void compactionOptionsUniversal() {
try (final Options options = new Options();
final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal()
.setCompressionSizePercent(7)) {
options.setCompactionOptionsUniversal(optUni);
assertThat(options.compactionOptionsUniversal()).
isEqualTo(optUni);
assertThat(options.compactionOptionsUniversal().compressionSizePercent())
.isEqualTo(7);
}
}
@Test
public void compactionOptionsFIFO() {
try (final Options options = new Options();
final CompactionOptionsFIFO optFifo = new CompactionOptionsFIFO()
.setMaxTableFilesSize(2000)) {
options.setCompactionOptionsFIFO(optFifo);
assertThat(options.compactionOptionsFIFO()).
isEqualTo(optFifo);
assertThat(options.compactionOptionsFIFO().maxTableFilesSize())
.isEqualTo(2000);
}
}
@Test
public void forceConsistencyChecks() {
try (final Options options = new Options()) {
final boolean booleanValue = true;
options.setForceConsistencyChecks(booleanValue);
assertThat(options.forceConsistencyChecks()).
isEqualTo(booleanValue);
}
}
}
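A short usage sketch (an assumption on my part, not shown in this commit's tests): the CompactionOptionsUniversal object only takes effect when the matching compaction style is selected, so it is typically paired with setCompactionStyle(CompactionStyle.UNIVERSAL).

// Sketch: pair universal compaction options with the universal style.
try (final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal()
         .setCompressionSizePercent(7);
     final Options options = new Options()
         .setCompactionStyle(CompactionStyle.UNIVERSAL)
         .setCompactionOptionsUniversal(optUni)) {
  // use options when opening the database
}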

View File

@ -101,6 +101,32 @@ public class ReadOptionsTest {
}
}
@Test
public void backgroundPurgeOnIteratorCleanup() {
try (final ReadOptions opt = new ReadOptions()) {
opt.setBackgroundPurgeOnIteratorCleanup(true);
assertThat(opt.backgroundPurgeOnIteratorCleanup()).isTrue();
}
}
@Test
public void readaheadSize() {
try (final ReadOptions opt = new ReadOptions()) {
final Random rand = new Random();
final long longValue = rand.nextLong();
opt.setReadaheadSize(longValue);
assertThat(opt.readaheadSize()).isEqualTo(longValue);
}
}
@Test
public void ignoreRangeDeletions() {
try (final ReadOptions opt = new ReadOptions()) {
opt.setIgnoreRangeDeletions(true);
assertThat(opt.ignoreRangeDeletions()).isTrue();
}
}
@Test
public void failSetVerifyChecksumUninitialized() {
try (final ReadOptions readOptions =

View File

@ -0,0 +1,22 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class WALRecoveryModeTest {
@Test
public void getWALRecoveryMode() {
for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
assertThat(WALRecoveryMode.getWALRecoveryMode(walRecoveryMode.getValue()))
.isEqualTo(walRecoveryMode);
}
}
}
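For context, a sketch of how the enum round-trip above is used in practice; choosing PointInTimeRecovery here is illustrative, not a recommendation made by this commit.

// Sketch: select a WAL recovery mode on the DB-wide options.
try (final DBOptions dbOptions = new DBOptions()
         .setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery)) {
  assert dbOptions.walRecoveryMode() == WALRecoveryMode.PointInTimeRecovery;
}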

View File

@ -19,14 +19,27 @@ public class WriteOptionsTest {
@Test
public void writeOptions() {
try (final WriteOptions writeOptions = new WriteOptions()) {
writeOptions.setSync(true);
assertThat(writeOptions.sync()).isTrue();
writeOptions.setSync(false);
assertThat(writeOptions.sync()).isFalse();
writeOptions.setDisableWAL(true);
assertThat(writeOptions.disableWAL()).isTrue();
writeOptions.setDisableWAL(false);
assertThat(writeOptions.disableWAL()).isFalse();
writeOptions.setIgnoreMissingColumnFamilies(true);
assertThat(writeOptions.ignoreMissingColumnFamilies()).isTrue();
writeOptions.setIgnoreMissingColumnFamilies(false);
assertThat(writeOptions.ignoreMissingColumnFamilies()).isFalse();
writeOptions.setNoSlowdown(true);
assertThat(writeOptions.noSlowdown()).isTrue();
writeOptions.setNoSlowdown(false);
assertThat(writeOptions.noSlowdown()).isFalse();
}
}
}
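And a hedged end-to-end sketch of the write options in use; the database path and key/value bytes are placeholders, and RocksDB.open/put are the long-standing entry points rather than anything introduced here.

// Sketch: a synchronous, WAL-enabled write using the options tested above.
try (final Options options = new Options().setCreateIfMissing(true);
     final RocksDB db = RocksDB.open(options, "/tmp/rocksdb-example");
     final WriteOptions writeOptions = new WriteOptions().setSync(true)) {
  db.put(writeOptions, "key".getBytes(), "value".getBytes());
} catch (final RocksDBException e) {
  // handle the failure in real code
}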

src.mk
View File

@ -332,16 +332,21 @@ JNI_NATIVE_SOURCES = \
java/rocksjni/backupenginejni.cc \
java/rocksjni/backupablejni.cc \
java/rocksjni/checkpoint.cc \
java/rocksjni/clock_cache.cc \
java/rocksjni/columnfamilyhandle.cc \
java/rocksjni/compaction_filter.cc \
java/rocksjni/compaction_options_fifo.cc \
java/rocksjni/compaction_options_universal.cc \
java/rocksjni/comparator.cc \
java/rocksjni/comparatorjnicallback.cc \
java/rocksjni/compression_options.cc \
java/rocksjni/env.cc \
java/rocksjni/env_options.cc \
java/rocksjni/external_sst_file_info.cc \
java/rocksjni/filter.cc \
java/rocksjni/iterator.cc \
java/rocksjni/loggerjnicallback.cc \
java/rocksjni/lru_cache.cc \
java/rocksjni/memtablejni.cc \
java/rocksjni/merge_operator.cc \
java/rocksjni/options.cc \