Added SetOptions support to RocksJava (#1243)

* [refactor] Split Java ColumnFamilyOptions into mutable and immutable options, and implement any missing immutable options

* [feature] Implement RocksDB#setOptions
Adam Retter 2016-08-06 20:03:47 +01:00 committed by Aaron G
parent 7882cb9773
commit f4d986364e
13 changed files with 2565 additions and 458 deletions
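
For orientation, a minimal usage sketch of the new API. The RocksDB.java side of the change is not shown below, so the exact Java signature of RocksDB#setOptions (assumed here to be setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)) is inferred from the JNI shim and the new builder; the sketch also assumes a RocksJava version in which Options and RocksDB are AutoCloseable:

import org.rocksdb.MutableColumnFamilyOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class SetOptionsExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/setoptions-example")) {
      // Adjust mutable column family options at runtime,
      // without re-opening the database.
      db.setOptions(db.getDefaultColumnFamily(),
          MutableColumnFamilyOptions.builder()
              .setWriteBufferSize(64 * 1024 * 1024)
              .setLevel0FileNumCompactionTrigger(8)
              .build());
    }
  }
}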

View File

@@ -80,6 +80,7 @@ JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\
org.rocksdb.MemTableTest\
org.rocksdb.MergeTest\
org.rocksdb.MixedOptionsTest\
org.rocksdb.MutableColumnFamilyOptionsTest\
org.rocksdb.NativeLibraryLoaderTest\
org.rocksdb.OptionsTest\
org.rocksdb.PlainTableConfigTest\

View File

@@ -1877,6 +1877,219 @@ void Java_org_rocksdb_Options_prepareForBulkLoad(
PrepareForBulkLoad();
}
/*
* Class: org_rocksdb_Options
* Method: memtableHugePageSize
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_memtableHugePageSize(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->memtable_huge_page_size;
}
/*
* Class: org_rocksdb_Options
* Method: setMemtableHugePageSize
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setMemtableHugePageSize(
JNIEnv* env, jobject jobj, jlong jhandle,
jlong jmemtable_huge_page_size) {
rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
jmemtable_huge_page_size);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->memtable_huge_page_size =
jmemtable_huge_page_size;
} else {
rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
}
}
/*
* Class: org_rocksdb_Options
* Method: softPendingCompactionBytesLimit
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_softPendingCompactionBytesLimit(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->soft_pending_compaction_bytes_limit;
}
/*
* Class: org_rocksdb_Options
* Method: setSoftPendingCompactionBytesLimit
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setSoftPendingCompactionBytesLimit(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jsoft_pending_compaction_bytes_limit) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->soft_pending_compaction_bytes_limit =
static_cast<int64_t>(jsoft_pending_compaction_bytes_limit);
}
/*
* Class: org_rocksdb_Options
* Method: hardPendingCompactionBytesLimit
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_hardPendingCompactionBytesLimit(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->hard_pending_compaction_bytes_limit;
}
/*
* Class: org_rocksdb_Options
* Method: setHardPendingCompactionBytesLimit
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setHardPendingCompactionBytesLimit(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jhard_pending_compaction_bytes_limit) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->hard_pending_compaction_bytes_limit =
static_cast<int64_t>(jhard_pending_compaction_bytes_limit);
}
/*
* Class: org_rocksdb_Options
* Method: level0FileNumCompactionTrigger
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_level0FileNumCompactionTrigger(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->level0_file_num_compaction_trigger;
}
/*
* Class: org_rocksdb_Options
* Method: setLevel0FileNumCompactionTrigger
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setLevel0FileNumCompactionTrigger(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jlevel0_file_num_compaction_trigger) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->level0_file_num_compaction_trigger =
static_cast<int32_t>(jlevel0_file_num_compaction_trigger);
}
/*
* Class: org_rocksdb_Options
* Method: level0SlowdownWritesTrigger
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_level0SlowdownWritesTrigger(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->level0_slowdown_writes_trigger;
}
/*
* Class: org_rocksdb_Options
* Method: setLevel0SlowdownWritesTrigger
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setLevel0SlowdownWritesTrigger(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jlevel0_slowdown_writes_trigger) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->level0_slowdown_writes_trigger =
static_cast<int32_t>(jlevel0_slowdown_writes_trigger);
}
/*
* Class: org_rocksdb_Options
* Method: level0StopWritesTrigger
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_level0StopWritesTrigger(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->level0_stop_writes_trigger;
}
/*
* Class: org_rocksdb_Options
* Method: setLevel0StopWritesTrigger
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setLevel0StopWritesTrigger(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jlevel0_stop_writes_trigger) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->level0_stop_writes_trigger =
static_cast<int32_t>(jlevel0_stop_writes_trigger);
}
/*
* Class: org_rocksdb_Options
* Method: maxBytesForLevelMultiplierAdditional
* Signature: (J)[I
*/
jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto mbflma = reinterpret_cast<rocksdb::Options*>(
jhandle)->max_bytes_for_level_multiplier_additional;
const size_t size = mbflma.size();
// copy into a heap buffer (a variable-length array is not standard C++)
jint* additionals = new jint[size];
for (size_t i = 0; i < size; i++) {
additionals[i] = static_cast<jint>(mbflma[i]);
}
jsize jlen = static_cast<jsize>(size);
jintArray result = env->NewIntArray(jlen);
env->SetIntArrayRegion(result, 0, jlen, additionals);
delete[] additionals;
return result;
}
/*
* Class: org_rocksdb_Options
* Method: setMaxBytesForLevelMultiplierAdditional
* Signature: (J[I)V
*/
void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplierAdditional(
JNIEnv* env, jobject jobj, jlong jhandle,
jintArray jmax_bytes_for_level_multiplier_additional) {
jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
jint* additionals =
env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, nullptr);
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->max_bytes_for_level_multiplier_additional.clear();
for (jsize i = 0; i < len; i++) {
opt->max_bytes_for_level_multiplier_additional.push_back(
static_cast<int32_t>(additionals[i]));
}
// release the JNI copy; JNI_ABORT as the elements were not modified
env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
additionals, JNI_ABORT);
}
/*
* Class: org_rocksdb_Options
* Method: paranoidFileChecks
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_paranoidFileChecks(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->paranoid_file_checks;
}
/*
* Class: org_rocksdb_Options
* Method: setParanoidFileChecks
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setParanoidFileChecks(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean jparanoid_file_checks) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->paranoid_file_checks =
static_cast<bool>(jparanoid_file_checks);
}
//////////////////////////////////////////////////////////////////////////////
// rocksdb::ColumnFamilyOptions
@@ -2971,6 +3184,221 @@ void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits(
static_cast<bool>(joptimize_filters_for_hits);
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: memtableHugePageSize
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_memtableHugePageSize(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->memtable_huge_page_size;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setMemtableHugePageSize
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMemtableHugePageSize(
JNIEnv* env, jobject jobj, jlong jhandle,
jlong jmemtable_huge_page_size) {
rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
jmemtable_huge_page_size);
if (s.ok()) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->memtable_huge_page_size =
jmemtable_huge_page_size;
} else {
rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
}
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: softPendingCompactionBytesLimit
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_softPendingCompactionBytesLimit(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->soft_pending_compaction_bytes_limit;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setSoftPendingCompactionBytesLimit
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setSoftPendingCompactionBytesLimit(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jsoft_pending_compaction_bytes_limit) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->soft_pending_compaction_bytes_limit =
static_cast<int64_t>(jsoft_pending_compaction_bytes_limit);
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: hardPendingCompactionBytesLimit
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_hardPendingCompactionBytesLimit(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->hard_pending_compaction_bytes_limit;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setHardPendingCompactionBytesLimit
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setHardPendingCompactionBytesLimit(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jhard_pending_compaction_bytes_limit) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->hard_pending_compaction_bytes_limit =
static_cast<int64_t>(jhard_pending_compaction_bytes_limit);
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: level0FileNumCompactionTrigger
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_level0FileNumCompactionTrigger(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->level0_file_num_compaction_trigger;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setLevel0FileNumCompactionTrigger
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevel0FileNumCompactionTrigger(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jlevel0_file_num_compaction_trigger) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->level0_file_num_compaction_trigger =
static_cast<int32_t>(jlevel0_file_num_compaction_trigger);
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: level0SlowdownWritesTrigger
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_level0SlowdownWritesTrigger(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->level0_slowdown_writes_trigger;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setLevel0SlowdownWritesTrigger
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevel0SlowdownWritesTrigger(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jlevel0_slowdown_writes_trigger) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->level0_slowdown_writes_trigger =
static_cast<int32_t>(jlevel0_slowdown_writes_trigger);
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: level0StopWritesTrigger
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_level0StopWritesTrigger(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->level0_stop_writes_trigger;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setLevel0StopWritesTrigger
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevel0StopWritesTrigger(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jlevel0_stop_writes_trigger) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->level0_stop_writes_trigger =
static_cast<int32_t>(jlevel0_stop_writes_trigger);
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: maxBytesForLevelMultiplierAdditional
* Signature: (J)[I
*/
jintArray Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditional(
JNIEnv* env, jobject jobj, jlong jhandle) {
auto mbflma = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->max_bytes_for_level_multiplier_additional;
const size_t size = mbflma.size();
// copy into a heap buffer (a variable-length array is not standard C++)
jint* additionals = new jint[size];
for (size_t i = 0; i < size; i++) {
additionals[i] = static_cast<jint>(mbflma[i]);
}
jsize jlen = static_cast<jsize>(size);
jintArray result = env->NewIntArray(jlen);
env->SetIntArrayRegion(result, 0, jlen, additionals);
delete[] additionals;
return result;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setMaxBytesForLevelMultiplierAdditional
* Signature: (J[I)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplierAdditional(
JNIEnv* env, jobject jobj, jlong jhandle,
jintArray jmax_bytes_for_level_multiplier_additional) {
jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
jint* additionals =
env->GetIntArrayElements(jmax_bytes_for_level_multiplier_additional, nullptr);
auto* cf_opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
cf_opt->max_bytes_for_level_multiplier_additional.clear();
for (jsize i = 0; i < len; i++) {
cf_opt->max_bytes_for_level_multiplier_additional.push_back(
static_cast<int32_t>(additionals[i]));
}
// release the JNI copy; JNI_ABORT as the elements were not modified
env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
additionals, JNI_ABORT);
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: paranoidFileChecks
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ColumnFamilyOptions_paranoidFileChecks(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->paranoid_file_checks;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setParanoidFileChecks
* Signature: (JZ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean jparanoid_file_checks) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->paranoid_file_checks =
static_cast<bool>(jparanoid_file_checks);
}
/////////////////////////////////////////////////////////////////////
// rocksdb::DBOptions

View File

@@ -1587,3 +1587,35 @@ jlong Java_org_rocksdb_RocksDB_getUpdatesSince(JNIEnv* env,
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
return 0;
}
/*
* Class: org_rocksdb_RocksDB
* Method: setOptions
* Signature: (JJ[Ljava/lang/String;[Ljava/lang/String;)V
*/
void Java_org_rocksdb_RocksDB_setOptions(JNIEnv* env, jobject jdb,
jlong jdb_handle, jlong jcf_handle, jobjectArray jkeys,
jobjectArray jvalues) {
std::unordered_map<std::string, std::string> options_map;
const jsize len = env->GetArrayLength(jkeys);
assert(len == env->GetArrayLength(jvalues));
for(int i = 0; i < len; i++) {
jobject jobj_key = env->GetObjectArrayElement(jkeys, i);
jobject jobj_value = env->GetObjectArrayElement(jvalues, i);
jstring jkey = reinterpret_cast<jstring>(jobj_key);
jstring jvalue = reinterpret_cast<jstring>(jobj_value);
const char* key = env->GetStringUTFChars(jkey, NULL);
const char* value = env->GetStringUTFChars(jvalue, NULL);
std::string s_key(key);
std::string s_value(value);
env->ReleaseStringUTFChars(jkey, key);
env->ReleaseStringUTFChars(jvalue, value);
env->DeleteLocalRef(jobj_key);
env->DeleteLocalRef(jobj_value);
options_map[s_key] = s_value;
}
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
const rocksdb::Status s = db->SetOptions(cf_handle, options_map);
if (!s.ok()) {
// surface the failure to Java rather than silently ignoring the Status
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
}
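
The matching RocksDB.java wrapper is not part of this excerpt. A plausible Java-side counterpart to the native method above (a hypothetical sketch; the method and field names are inferred from the JNI signature and from MutableColumnFamilyOptions#getKeys()/#getValues()) might be:

public void setOptions(final ColumnFamilyHandle columnFamilyHandle,
    final MutableColumnFamilyOptions mutableColumnFamilyOptions)
    throws RocksDBException {
  // the option map crosses the JNI boundary as two parallel String arrays
  setOptions(nativeHandle_, columnFamilyHandle.nativeHandle_,
      mutableColumnFamilyOptions.getKeys(),
      mutableColumnFamilyOptions.getValues());
}

private native void setOptions(final long handle, final long cfHandle,
    final String[] keys, final String[] values) throws RocksDBException;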

View File

@@ -17,7 +17,8 @@ import java.util.Properties;
* automatically and native resources will be released as part of the process.
*/
public class ColumnFamilyOptions extends RocksObject
implements ColumnFamilyOptionsInterface {
implements ColumnFamilyOptionsInterface,
MutableColumnFamilyOptionsInterface {
static {
RocksDB.loadLibrary();
}
@@ -635,6 +636,98 @@ public class ColumnFamilyOptions extends RocksObject
return optimizeFiltersForHits(nativeHandle_);
}
@Override
public ColumnFamilyOptions
setMemtableHugePageSize(
long memtableHugePageSize) {
setMemtableHugePageSize(nativeHandle_,
memtableHugePageSize);
return this;
}
@Override
public long memtableHugePageSize() {
return memtableHugePageSize(nativeHandle_);
}
@Override
public ColumnFamilyOptions setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) {
setSoftPendingCompactionBytesLimit(nativeHandle_,
softPendingCompactionBytesLimit);
return this;
}
@Override
public long softPendingCompactionBytesLimit() {
return softPendingCompactionBytesLimit(nativeHandle_);
}
@Override
public ColumnFamilyOptions setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) {
setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit);
return this;
}
@Override
public long hardPendingCompactionBytesLimit() {
return hardPendingCompactionBytesLimit(nativeHandle_);
}
@Override
public ColumnFamilyOptions setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) {
setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger);
return this;
}
@Override
public int level0FileNumCompactionTrigger() {
return level0FileNumCompactionTrigger(nativeHandle_);
}
@Override
public ColumnFamilyOptions setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) {
setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger);
return this;
}
@Override
public int level0SlowdownWritesTrigger() {
return level0SlowdownWritesTrigger(nativeHandle_);
}
@Override
public ColumnFamilyOptions setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
return this;
}
@Override
public int level0StopWritesTrigger() {
return level0StopWritesTrigger(nativeHandle_);
}
@Override
public ColumnFamilyOptions setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) {
setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional);
return this;
}
@Override
public int[] maxBytesForLevelMultiplierAdditional() {
return maxBytesForLevelMultiplierAdditional(nativeHandle_);
}
@Override
public ColumnFamilyOptions setParanoidFileChecks(boolean paranoidFileChecks) {
setParanoidFileChecks(nativeHandle_, paranoidFileChecks);
return this;
}
@Override
public boolean paranoidFileChecks() {
return paranoidFileChecks(nativeHandle_);
}
/**
* <p>Private constructor to be used by
* {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}</p>
@@ -776,6 +869,30 @@ public class ColumnFamilyOptions extends RocksObject
private native void setOptimizeFiltersForHits(long handle,
boolean optimizeFiltersForHits);
private native boolean optimizeFiltersForHits(long handle);
private native void setMemtableHugePageSize(long handle,
long memtableHugePageSize);
private native long memtableHugePageSize(long handle);
private native void setSoftPendingCompactionBytesLimit(long handle,
long softPendingCompactionBytesLimit);
private native long softPendingCompactionBytesLimit(long handle);
private native void setHardPendingCompactionBytesLimit(long handle,
long hardPendingCompactionBytesLimit);
private native long hardPendingCompactionBytesLimit(long handle);
private native void setLevel0FileNumCompactionTrigger(long handle,
int level0FileNumCompactionTrigger);
private native int level0FileNumCompactionTrigger(long handle);
private native void setLevel0SlowdownWritesTrigger(long handle,
int level0SlowdownWritesTrigger);
private native int level0SlowdownWritesTrigger(long handle);
private native void setLevel0StopWritesTrigger(long handle,
int level0StopWritesTrigger);
private native int level0StopWritesTrigger(long handle);
private native void setMaxBytesForLevelMultiplierAdditional(long handle,
int[] maxBytesForLevelMultiplierAdditional);
private native int[] maxBytesForLevelMultiplierAdditional(long handle);
private native void setParanoidFileChecks(long handle,
boolean paranoidFileChecks);
private native boolean paranoidFileChecks(long handle);
MemTableConfig memTableConfig_;
TableFormatConfig tableFormatConfig_;

View File

@@ -16,7 +16,7 @@ public interface ColumnFamilyOptionsInterface {
* @param blockCacheSizeMb Block cache size in MB
* @return the instance of the current Object.
*/
Object optimizeForPointLookup(long blockCacheSizeMb);
ColumnFamilyOptionsInterface optimizeForPointLookup(long blockCacheSizeMb);
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
@@ -31,7 +31,7 @@ public interface ColumnFamilyOptionsInterface {
*
* @return the instance of the current Object.
*/
Object optimizeLevelStyleCompaction();
ColumnFamilyOptionsInterface optimizeLevelStyleCompaction();
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
@@ -139,54 +139,6 @@ public interface ColumnFamilyOptionsInterface {
*/
Object setMergeOperator(MergeOperator mergeOperator);
/**
* Amount of data to build up in memory (backed by an unsorted log
* on disk) before converting to a sorted on-disk file.
*
* Larger values increase performance, especially during bulk loads.
* Up to {@code max_write_buffer_number} write buffers may be held in memory
* at the same time, so you may wish to adjust this parameter
* to control memory usage.
*
* Also, a larger write buffer will result in a longer recovery time
* the next time the database is opened.
*
* Default: 4MB
* @param writeBufferSize the size of write buffer.
* @return the instance of the current Object.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
Object setWriteBufferSize(long writeBufferSize);
/**
* Returns the size of the write buffer.
*
* @return size of write buffer.
* @see #setWriteBufferSize(long)
*/
long writeBufferSize();
/**
* The maximum number of write buffers that are built up in memory.
* The default is 2, so that when 1 write buffer is being flushed to
* storage, new writes can continue to the other write buffer.
* Default: 2
*
* @param maxWriteBufferNumber maximum number of write buffers.
* @return the instance of the current Object.
*/
Object setMaxWriteBufferNumber(
int maxWriteBufferNumber);
/**
* Returns maximum number of write buffers.
*
* @return maximum number of write buffers.
* @see #setMaxWriteBufferNumber(int)
*/
int maxWriteBufferNumber();
/**
* The minimum number of write buffers that will be merged together
* before writing to storage. If set to 1, then
@@ -409,97 +361,6 @@ public interface ColumnFamilyOptionsInterface {
@Deprecated
int maxMemCompactionLevel();
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* target_file_size_multiplier is 10, then each file on level-1 will
* be 2MB, and each file on level 2 will be 20MB,
* and each file on level-3 will be 200MB.
* by default targetFileSizeBase is 2MB.
*
* @param targetFileSizeBase the target size of a level-0 file.
* @return the reference to the current option.
*
* @see #setTargetFileSizeMultiplier(int)
*/
Object setTargetFileSizeBase(long targetFileSizeBase);
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* target_file_size_multiplier is 10, then each file on level-1 will
* be 2MB, and each file on level 2 will be 20MB,
* and each file on level-3 will be 200MB.
* by default targetFileSizeBase is 2MB.
*
* @return the target size of a level-0 file.
*
* @see #targetFileSizeMultiplier()
*/
long targetFileSizeBase();
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-L file and level-(L+1) file.
* By default target_file_size_multiplier is 1, meaning
* files in different levels have the same target.
*
* @param multiplier the size ratio between a level-(L+1) file
* and level-L file.
* @return the reference to the current option.
*/
Object setTargetFileSizeMultiplier(int multiplier);
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-(L+1) file and level-L file.
* By default targetFileSizeMultiplier is 1, meaning
* files in different levels have the same target.
*
* @return the size ratio between a level-(L+1) file and level-L file.
*/
int targetFileSizeMultiplier();
/**
* The upper-bound of the total size of level-1 files in bytes.
* Maximum number of bytes for level L can be calculated as
* (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
* For example, if maxBytesForLevelBase is 20MB, and if
* max_bytes_for_level_multiplier is 10, total data size for level-1
* will be 20MB, total file size for level-2 will be 200MB,
* and total file size for level-3 will be 2GB.
* by default 'maxBytesForLevelBase' is 10MB.
*
* @param maxBytesForLevelBase maximum bytes for level base.
*
* @return the reference to the current option.
* @see #setMaxBytesForLevelMultiplier(int)
*/
Object setMaxBytesForLevelBase(
long maxBytesForLevelBase);
/**
* The upper-bound of the total size of level-1 files in bytes.
* Maximum number of bytes for level L can be calculated as
* (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
* For example, if maxBytesForLevelBase is 20MB, and if
* max_bytes_for_level_multiplier is 10, total data size for level-1
* will be 20MB, total file size for level-2 will be 200MB,
* and total file size for level-3 will be 2GB.
* by default 'maxBytesForLevelBase' is 10MB.
*
* @return the upper-bound of the total size of level-1 files
* in bytes.
* @see #maxBytesForLevelMultiplier()
*/
long maxBytesForLevelBase();
/**
* <p>If {@code true}, RocksDB will pick target size of each level
* dynamically. We will pick a base level b &gt;= 1. L0 will be
@@ -586,146 +447,6 @@ public interface ColumnFamilyOptionsInterface {
*/
boolean levelCompactionDynamicLevelBytes();
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L.
* @return the reference to the current option.
* @see #setMaxBytesForLevelBase(long)
*/
Object setMaxBytesForLevelMultiplier(int multiplier);
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L.
* @see #maxBytesForLevelBase()
*/
int maxBytesForLevelMultiplier();
/**
* Maximum number of bytes in all compacted files. We avoid expanding
* the lower level file set of a compaction if it would make the
* total compaction cover more than
* (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
*
* @param expandedCompactionFactor the maximum number of bytes in all
* compacted files.
* @return the reference to the current option.
* @see #setSourceCompactionFactor(int)
*/
Object setExpandedCompactionFactor(int expandedCompactionFactor);
/**
* Maximum number of bytes in all compacted files. We avoid expanding
* the lower level file set of a compaction if it would make the
* total compaction cover more than
* (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
*
* @return the maximum number of bytes in all compacted files.
* @see #sourceCompactionFactor()
*/
int expandedCompactionFactor();
/**
* Maximum number of bytes in all source files to be compacted in a
* single compaction run. We avoid picking too many files in the
* source level so that we do not exceed the total source bytes
* for compaction to exceed
* (source_compaction_factor * targetFileSizeLevel()) many bytes.
* Default: 1, i.e. pick maxfilesize amount of data as the source of
* a compaction.
*
* @param sourceCompactionFactor the maximum number of bytes in all
* source files to be compacted in a single compaction run.
* @return the reference to the current option.
* @see #setExpandedCompactionFactor(int)
*/
Object setSourceCompactionFactor(int sourceCompactionFactor);
/**
* Maximum number of bytes in all source files to be compacted in a
* single compaction run. We avoid picking too many files in the
* source level so that we do not exceed the total source bytes
* for compaction to exceed
* (source_compaction_factor * targetFileSizeLevel()) many bytes.
* Default: 1, i.e. pick maxfilesize amount of data as the source of
* a compaction.
*
* @return the maximum number of bytes in all source files to be compacted.
* @see #expandedCompactionFactor()
*/
int sourceCompactionFactor();
/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level-&gt;level+1 compaction.
*
* @param maxGrandparentOverlapFactor maximum bytes of overlaps in
* "grandparent" level.
* @return the reference to the current option.
*/
Object setMaxGrandparentOverlapFactor(
int maxGrandparentOverlapFactor);
/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level-&gt;level+1 compaction.
*
* @return maximum bytes of overlaps in "grandparent" level.
*/
int maxGrandparentOverlapFactor();
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @param softRateLimit the soft-rate-limit of a compaction score
* for put delay.
* @return the reference to the current option.
*/
Object setSoftRateLimit(double softRateLimit);
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @return soft-rate-limit for put delay.
*/
double softRateLimit();
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @param hardRateLimit the hard-rate-limit of a compaction score for put
* delay.
* @return the reference to the current option.
*/
Object setHardRateLimit(double hardRateLimit);
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @return the hard-rate-limit of a compaction score for put delay.
*/
double hardRateLimit();
/**
* The maximum time interval a put will be stalled when hard_rate_limit
* is enforced. If 0, then there is no limit.
@@ -748,62 +469,6 @@ public interface ColumnFamilyOptionsInterface {
*/
int rateLimitDelayMaxMilliseconds();
/**
* The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
*
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
*
* @param arenaBlockSize the size of an arena block
* @return the reference to the current option.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
Object setArenaBlockSize(long arenaBlockSize);
/**
* The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
*
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
*
* @return the size of an arena block
*/
long arenaBlockSize();
/**
* Disable automatic compactions. Manual compactions can still
* be issued on this column family
*
* @param disableAutoCompactions true if auto-compactions are disabled.
* @return the reference to the current option.
*/
Object setDisableAutoCompactions(boolean disableAutoCompactions);
/**
* Disable automatic compactions. Manual compactions can still
* be issued on this column family
*
* @return true if auto-compactions are disabled.
*/
boolean disableAutoCompactions();
/**
* Purge duplicate/deleted keys when a memtable is flushed to storage.
* Default: true
@@ -860,51 +525,6 @@ public interface ColumnFamilyOptionsInterface {
*/
long maxTableFilesSizeFIFO();
/**
* If true, compaction will verify checksum on every read that happens
* as part of compaction
* Default: true
*
* @param verifyChecksumsInCompaction true if compaction verifies
* checksum on every read.
* @return the reference to the current option.
*/
Object setVerifyChecksumsInCompaction(
boolean verifyChecksumsInCompaction);
/**
* If true, compaction will verify checksum on every read that happens
* as part of compaction
* Default: true
*
* @return true if compaction verifies checksum on every read.
*/
boolean verifyChecksumsInCompaction();
/**
* An iteration-&gt;Next() sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @param maxSequentialSkipInIterations the number of keys that could
* be skipped in an iteration.
* @return the reference to the current option.
*/
Object setMaxSequentialSkipInIterations(long maxSequentialSkipInIterations);
/**
* An iteration-&gt;Next() sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @return the number of keys that could be skipped in an iteration.
*/
long maxSequentialSkipInIterations();
/**
* Set the config for mem-table.
*
@@ -967,47 +587,6 @@ public interface ColumnFamilyOptionsInterface {
*/
boolean inplaceUpdateSupport();
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @param inplaceUpdateNumLocks the number of locks used for
* inplace updates.
* @return the reference to the current option.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
Object setInplaceUpdateNumLocks(long inplaceUpdateNumLocks);
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @return the number of locks used for inplace update.
*/
long inplaceUpdateNumLocks();
/**
* Sets the size ratio of the memtable used in the prefix bloom filter.
*
* This value will be used only when a prefix-extractor is specified.
*
* @param memtablePrefixBloomSizeRatio the size ratio of the memtable
* used by the prefix bloom filter.
* @return the reference to the current option.
*/
Object setMemtablePrefixBloomSizeRatio(double memtablePrefixBloomSizeRatio);
/**
* Returns the size ratio of the memtable used by the prefix bloom filter.
*
* This value will be used only when a prefix-extractor is specified.
*
* @return the size ratio of the memtable used by the prefix bloom filter.
* @see #useFixedLengthPrefixExtractor(int)
*/
double memtablePrefixBloomSizeRatio();
/**
* Control locality of bloom filter probes to improve cache miss rate.
* This option only applies to memtable prefix bloom and plaintable
@@ -1040,39 +619,6 @@ public interface ColumnFamilyOptionsInterface {
*/
int bloomLocality();
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @param maxSuccessiveMerges the maximum number of successive merges.
* @return the reference to the current option.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
Object setMaxSuccessiveMerges(long maxSuccessiveMerges);
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @return the maximum number of successive merges.
*/
long maxSuccessiveMerges();
/**
* The number of partial merge operands to accumulate before partial
* merge will be performed. Partial merge will not be called

View File

@@ -0,0 +1,932 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import java.util.*;
public class MutableColumnFamilyOptions {
private final static String KEY_VALUE_PAIR_SEPARATOR = ";";
private final static char KEY_VALUE_SEPARATOR = '=';
private final static String INT_ARRAY_INT_SEPARATOR = ",";
private final String[] keys;
private final String[] values;
// user must use builder pattern, or parser
private MutableColumnFamilyOptions(final String keys[],
final String values[]) {
this.keys = keys;
this.values = values;
}
String[] getKeys() {
return keys;
}
String[] getValues() {
return values;
}
/**
* Creates a builder which allows you
* to set MutableColumnFamilyOptions in a fluent
* manner
*
* @return A builder for MutableColumnFamilyOptions
*/
public static MutableColumnFamilyOptionsBuilder builder() {
return new MutableColumnFamilyOptionsBuilder();
}
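// Usage sketch for the builder (arbitrary values):
//
//   final MutableColumnFamilyOptions mutableOpts = MutableColumnFamilyOptions
//       .builder()
//       .setWriteBufferSize(4 * 1024 * 1024)
//       .setMaxWriteBufferNumber(3)
//       .build();
//   // mutableOpts.toString() => "write_buffer_size=4194304;max_write_buffer_number=3"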
/**
* Parses a String representation of MutableColumnFamilyOptions
*
* The format is: key1=value1;key2=value2;key3=value3 etc
*
* For int[] values, each int should be separated by a comma, e.g.
*
* key1=value1;intArrayKey1=1,2,3
*
* @param str The string representation of the MutableColumnFamilyOptions
* @return A builder for the MutableColumnFamilyOptions
*/
public static MutableColumnFamilyOptionsBuilder parse(final String str) {
Objects.requireNonNull(str);
final MutableColumnFamilyOptionsBuilder builder =
new MutableColumnFamilyOptionsBuilder();
final String options[] = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
for(final String option : options) {
final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR);
if(equalsOffset <= 0) {
throw new IllegalArgumentException(
"options string has an invalid key=value pair");
}
final String key = option.substring(0, equalsOffset);
if(key == null || key.isEmpty()) {
throw new IllegalArgumentException("options string is invalid");
}
final String value = option.substring(equalsOffset + 1);
if(value == null || value.isEmpty()) {
throw new IllegalArgumentException("options string is invalid");
}
builder.fromString(key, value);
}
return builder;
}
/**
* Returns a string representation
* of MutableColumnFamilyOptions which is
* suitable for consumption by {@link #parse(String)}
*
* @return String representation of MutableColumnFamilyOptions
*/
@Override
public String toString() {
final StringBuilder buffer = new StringBuilder();
for(int i = 0; i < keys.length; i++) {
buffer
.append(keys[i])
.append(KEY_VALUE_SEPARATOR)
.append(values[i]);
if(i + 1 < keys.length) {
buffer.append(KEY_VALUE_PAIR_SEPARATOR);
}
}
return buffer.toString();
}
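// Round-trip sketch: parse(String) accepts exactly the representation that
// toString() produces (arbitrary values):
//
//   final String repr =
//       "write_buffer_size=67108864;max_bytes_for_level_multiplier_additional=2,4,6";
//   final MutableColumnFamilyOptions parsed =
//       MutableColumnFamilyOptions.parse(repr).build();
//   // parsed.toString() => repr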
public enum ValueType {
DOUBLE,
LONG,
INT,
BOOLEAN,
INT_ARRAY
}
public enum MemtableOption implements MutableColumnFamilyOptionKey {
write_buffer_size(ValueType.LONG),
arena_block_size(ValueType.LONG),
memtable_prefix_bloom_size_ratio(ValueType.DOUBLE),
@Deprecated memtable_prefix_bloom_bits(ValueType.INT),
@Deprecated memtable_prefix_bloom_probes(ValueType.INT),
memtable_huge_page_size(ValueType.LONG),
max_successive_merges(ValueType.LONG),
@Deprecated filter_deletes(ValueType.BOOLEAN),
max_write_buffer_number(ValueType.INT),
inplace_update_num_locks(ValueType.LONG);
private final ValueType valueType;
MemtableOption(final ValueType valueType) {
this.valueType = valueType;
}
@Override
public ValueType getValueType() {
return valueType;
}
}
public enum CompactionOption implements MutableColumnFamilyOptionKey {
disable_auto_compactions(ValueType.BOOLEAN),
@Deprecated soft_rate_limit(ValueType.DOUBLE),
soft_pending_compaction_bytes_limit(ValueType.LONG),
@Deprecated hard_rate_limit(ValueType.DOUBLE),
hard_pending_compaction_bytes_limit(ValueType.LONG),
level0_file_num_compaction_trigger(ValueType.INT),
level0_slowdown_writes_trigger(ValueType.INT),
level0_stop_writes_trigger(ValueType.INT),
max_grandparent_overlap_factor(ValueType.INT),
expanded_compaction_factor(ValueType.INT),
source_compaction_factor(ValueType.INT),
target_file_size_base(ValueType.LONG),
target_file_size_multiplier(ValueType.INT),
max_bytes_for_level_base(ValueType.LONG),
max_bytes_for_level_multiplier(ValueType.INT),
max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY),
verify_checksums_in_compaction(ValueType.BOOLEAN);
private final ValueType valueType;
CompactionOption(final ValueType valueType) {
this.valueType = valueType;
}
@Override
public ValueType getValueType() {
return valueType;
}
}
public enum MiscOption implements MutableColumnFamilyOptionKey {
max_sequential_skip_in_iterations(ValueType.LONG),
paranoid_file_checks(ValueType.BOOLEAN);
private final ValueType valueType;
MiscOption(final ValueType valueType) {
this.valueType = valueType;
}
@Override
public ValueType getValueType() {
return valueType;
}
}
private interface MutableColumnFamilyOptionKey {
String name();
ValueType getValueType();
}
private static abstract class MutableColumnFamilyOptionValue<T> {
protected final T value;
MutableColumnFamilyOptionValue(final T value) {
this.value = value;
}
abstract double asDouble() throws NumberFormatException;
abstract long asLong() throws NumberFormatException;
abstract int asInt() throws NumberFormatException;
abstract boolean asBoolean() throws IllegalStateException;
abstract int[] asIntArray() throws IllegalStateException;
abstract String asString();
}
private static class MutableColumnFamilyOptionStringValue
extends MutableColumnFamilyOptionValue<String> {
MutableColumnFamilyOptionStringValue(final String value) {
super(value);
}
@Override
double asDouble() throws NumberFormatException {
return Double.parseDouble(value);
}
@Override
long asLong() throws NumberFormatException {
return Long.parseLong(value);
}
@Override
int asInt() throws NumberFormatException {
return Integer.parseInt(value);
}
@Override
boolean asBoolean() throws IllegalStateException {
return Boolean.parseBoolean(value);
}
@Override
int[] asIntArray() throws IllegalStateException {
throw new IllegalStateException("String is not applicable as int[]");
}
@Override
String asString() {
return value;
}
}
private static class MutableColumnFamilyOptionDoubleValue
extends MutableColumnFamilyOptionValue<Double> {
MutableColumnFamilyOptionDoubleValue(final double value) {
super(value);
}
@Override
double asDouble() {
return value;
}
@Override
long asLong() throws NumberFormatException {
return value.longValue();
}
@Override
int asInt() throws NumberFormatException {
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
throw new NumberFormatException(
"double value lies outside the bounds of int");
}
return value.intValue();
}
@Override
boolean asBoolean() throws IllegalStateException {
throw new IllegalStateException(
"double is not applicable as boolean");
}
@Override
int[] asIntArray() throws IllegalStateException {
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
throw new NumberFormatException(
"double value lies outside the bounds of int");
}
return new int[] { value.intValue() };
}
@Override
String asString() {
return Double.toString(value);
}
}
private static class MutableColumnFamilyOptionLongValue
extends MutableColumnFamilyOptionValue<Long> {
MutableColumnFamilyOptionLongValue(final long value) {
super(value);
}
@Override
double asDouble() {
// a long is always representable as a double (possibly losing precision)
return value.doubleValue();
}
@Override
long asLong() throws NumberFormatException {
return value;
}
@Override
int asInt() throws NumberFormatException {
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
throw new NumberFormatException(
"long value lies outside the bounds of int");
}
return value.intValue();
}
@Override
boolean asBoolean() throws IllegalStateException {
throw new IllegalStateException(
"long is not applicable as boolean");
}
@Override
int[] asIntArray() throws IllegalStateException {
if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
throw new NumberFormatException(
"long value lies outside the bounds of int");
}
return new int[] { value.intValue() };
}
@Override
String asString() {
return Long.toString(value);
}
}
private static class MutableColumnFamilyOptionIntValue
extends MutableColumnFamilyOptionValue<Integer> {
MutableColumnFamilyOptionIntValue(final int value) {
super(value);
}
@Override
double asDouble() {
// an int is always exactly representable as a double
return value.doubleValue();
}
@Override
long asLong() throws NumberFormatException {
return value;
}
@Override
int asInt() throws NumberFormatException {
return value;
}
@Override
boolean asBoolean() throws IllegalStateException {
throw new IllegalStateException("int is not applicable as boolean");
}
@Override
int[] asIntArray() throws IllegalStateException {
return new int[] { value };
}
@Override
String asString() {
return Integer.toString(value);
}
}
private static class MutableColumnFamilyOptionBooleanValue
extends MutableColumnFamilyOptionValue<Boolean> {
MutableColumnFamilyOptionBooleanValue(final boolean value) {
super(value);
}
@Override
double asDouble() {
throw new NumberFormatException("boolean is not applicable as double");
}
@Override
long asLong() throws NumberFormatException {
throw new NumberFormatException("boolean is not applicable as Long");
}
@Override
int asInt() throws NumberFormatException {
throw new NumberFormatException("boolean is not applicable as int");
}
@Override
boolean asBoolean() {
return value;
}
@Override
int[] asIntArray() throws IllegalStateException {
throw new IllegalStateException("boolean is not applicable as int[]");
}
@Override
String asString() {
return Boolean.toString(value);
}
}
private static class MutableColumnFamilyOptionIntArrayValue
extends MutableColumnFamilyOptionValue<int[]> {
MutableColumnFamilyOptionIntArrayValue(final int[] value) {
super(value);
}
@Override
double asDouble() {
throw new NumberFormatException("int[] is not applicable as double");
}
@Override
long asLong() throws NumberFormatException {
throw new NumberFormatException("int[] is not applicable as Long");
}
@Override
int asInt() throws NumberFormatException {
throw new NumberFormatException("int[] is not applicable as int");
}
@Override
boolean asBoolean() {
throw new NumberFormatException("int[] is not applicable as boolean");
}
@Override
int[] asIntArray() throws IllegalStateException {
return value;
}
@Override
String asString() {
final StringBuilder builder = new StringBuilder();
for(int i = 0; i < value.length; i++) {
builder.append(value[i]);
if(i + 1 < value.length) {
builder.append(INT_ARRAY_INT_SEPARATOR);
}
}
return builder.toString();
}
}
public static class MutableColumnFamilyOptionsBuilder
implements MutableColumnFamilyOptionsInterface {
private final static Map<String, MutableColumnFamilyOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
static {
for(final MutableColumnFamilyOptionKey key : MemtableOption.values()) {
ALL_KEYS_LOOKUP.put(key.name(), key);
}
for(final MutableColumnFamilyOptionKey key : CompactionOption.values()) {
ALL_KEYS_LOOKUP.put(key.name(), key);
}
for(final MutableColumnFamilyOptionKey key : MiscOption.values()) {
ALL_KEYS_LOOKUP.put(key.name(), key);
}
}
private final Map<MutableColumnFamilyOptionKey, MutableColumnFamilyOptionValue<?>> options = new LinkedHashMap<>();
public MutableColumnFamilyOptions build() {
final String keys[] = new String[options.size()];
final String values[] = new String[options.size()];
int i = 0;
for(final Map.Entry<MutableColumnFamilyOptionKey, MutableColumnFamilyOptionValue<?>> option : options.entrySet()) {
keys[i] = option.getKey().name();
values[i] = option.getValue().asString();
i++;
}
return new MutableColumnFamilyOptions(keys, values);
}
private MutableColumnFamilyOptionsBuilder setDouble(
final MutableColumnFamilyOptionKey key, final double value) {
if(key.getValueType() != ValueType.DOUBLE) {
throw new IllegalArgumentException(
key + " does not accept a double value");
}
options.put(key, new MutableColumnFamilyOptionDoubleValue(value));
return this;
}
private double getDouble(final MutableColumnFamilyOptionKey key)
throws NoSuchElementException, NumberFormatException {
final MutableColumnFamilyOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asDouble();
}
private MutableColumnFamilyOptionsBuilder setLong(
final MutableColumnFamilyOptionKey key, final long value) {
if(key.getValueType() != ValueType.LONG) {
throw new IllegalArgumentException(
key + " does not accept a long value");
}
options.put(key, new MutableColumnFamilyOptionLongValue(value));
return this;
}
private long getLong(final MutableColumnFamilyOptionKey key)
throws NoSuchElementException, NumberFormatException {
final MutableColumnFamilyOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asLong();
}
private MutableColumnFamilyOptionsBuilder setInt(
final MutableColumnFamilyOptionKey key, final int value) {
if(key.getValueType() != ValueType.INT) {
throw new IllegalArgumentException(
key + " does not accept an integer value");
}
options.put(key, new MutableColumnFamilyOptionIntValue(value));
return this;
}
private int getInt(final MutableColumnFamilyOptionKey key)
throws NoSuchElementException, NumberFormatException {
final MutableColumnFamilyOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asInt();
}
private MutableColumnFamilyOptionsBuilder setBoolean(
final MutableColumnFamilyOptionKey key, final boolean value) {
if(key.getValueType() != ValueType.BOOLEAN) {
throw new IllegalArgumentException(
key + " does not accept a boolean value");
}
options.put(key, new MutableColumnFamilyOptionBooleanValue(value));
return this;
}
private boolean getBoolean(final MutableColumnFamilyOptionKey key)
throws NoSuchElementException, NumberFormatException {
final MutableColumnFamilyOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asBoolean();
}
private MutableColumnFamilyOptionsBuilder setIntArray(
final MutableColumnFamilyOptionKey key, final int[] value) {
if(key.getValueType() != ValueType.INT_ARRAY) {
throw new IllegalArgumentException(
key + " does not accept an int array value");
}
options.put(key, new MutableColumnFamilyOptionIntArrayValue(value));
return this;
}
private int[] getIntArray(final MutableColumnFamilyOptionKey key)
throws NoSuchElementException, NumberFormatException {
final MutableColumnFamilyOptionValue<?> value = options.get(key);
if(value == null) {
throw new NoSuchElementException(key.name() + " has not been set");
}
return value.asIntArray();
}
public MutableColumnFamilyOptionsBuilder fromString(final String keyStr,
final String valueStr) throws IllegalArgumentException {
Objects.requireNonNull(keyStr);
Objects.requireNonNull(valueStr);
final MutableColumnFamilyOptionKey key = ALL_KEYS_LOOKUP.get(keyStr);
if(key == null) {
throw new IllegalArgumentException("Unknown option key: " + keyStr);
}
switch(key.getValueType()) {
case DOUBLE:
return setDouble(key, Double.parseDouble(valueStr));
case LONG:
return setLong(key, Long.parseLong(valueStr));
case INT:
return setInt(key, Integer.parseInt(valueStr));
case BOOLEAN:
return setBoolean(key, Boolean.parseBoolean(valueStr));
case INT_ARRAY:
final String[] strInts = valueStr
.trim().split(INT_ARRAY_INT_SEPARATOR);
if(strInts == null || strInts.length == 0) {
throw new IllegalArgumentException(
"int array value is not correctly formatted");
}
final int value[] = new int[strInts.length];
int i = 0;
for(final String strInt : strInts) {
value[i++] = Integer.parseInt(strInt);
}
return setIntArray(key, value);
}
throw new IllegalStateException(
key + " has unknown value type: " + key.getValueType());
}
@Override
public MutableColumnFamilyOptionsBuilder setWriteBufferSize(
final long writeBufferSize) {
return setLong(MemtableOption.write_buffer_size, writeBufferSize);
}
@Override
public long writeBufferSize() {
return getLong(MemtableOption.write_buffer_size);
}
@Override
public MutableColumnFamilyOptionsBuilder setArenaBlockSize(
final long arenaBlockSize) {
return setLong(MemtableOption.arena_block_size, arenaBlockSize);
}
@Override
public long arenaBlockSize() {
return getLong(MemtableOption.arena_block_size);
}
@Override
public MutableColumnFamilyOptionsBuilder setMemtablePrefixBloomSizeRatio(
final double memtablePrefixBloomSizeRatio) {
return setDouble(MemtableOption.memtable_prefix_bloom_size_ratio,
memtablePrefixBloomSizeRatio);
}
@Override
public double memtablePrefixBloomSizeRatio() {
return getDouble(MemtableOption.memtable_prefix_bloom_size_ratio);
}
@Override
public MutableColumnFamilyOptionsBuilder setMemtableHugePageSize(
final long memtableHugePageSize) {
return setLong(MemtableOption.memtable_huge_page_size,
memtableHugePageSize);
}
@Override
public long memtableHugePageSize() {
return getLong(MemtableOption.memtable_huge_page_size);
}
@Override
public MutableColumnFamilyOptionsBuilder setMaxSuccessiveMerges(
final long maxSuccessiveMerges) {
return setLong(MemtableOption.max_successive_merges, maxSuccessiveMerges);
}
@Override
public long maxSuccessiveMerges() {
return getLong(MemtableOption.max_successive_merges);
}
@Override
public MutableColumnFamilyOptionsBuilder setMaxWriteBufferNumber(
final int maxWriteBufferNumber) {
return setInt(MemtableOption.max_write_buffer_number,
maxWriteBufferNumber);
}
@Override
public int maxWriteBufferNumber() {
return getInt(MemtableOption.max_write_buffer_number);
}
@Override
public MutableColumnFamilyOptionsBuilder setInplaceUpdateNumLocks(
final long inplaceUpdateNumLocks) {
return setLong(MemtableOption.inplace_update_num_locks,
inplaceUpdateNumLocks);
}
@Override
public long inplaceUpdateNumLocks() {
return getLong(MemtableOption.inplace_update_num_locks);
}
@Override
public MutableColumnFamilyOptionsBuilder setDisableAutoCompactions(
final boolean disableAutoCompactions) {
return setBoolean(CompactionOption.disable_auto_compactions,
disableAutoCompactions);
}
@Override
public boolean disableAutoCompactions() {
return getBoolean(CompactionOption.disable_auto_compactions);
}
@Override
public MutableColumnFamilyOptionsBuilder setSoftRateLimit(
final double softRateLimit) {
return setDouble(CompactionOption.soft_rate_limit, softRateLimit);
}
@Override
public double softRateLimit() {
return getDouble(CompactionOption.soft_rate_limit);
}
@Override
public MutableColumnFamilyOptionsBuilder setSoftPendingCompactionBytesLimit(
final long softPendingCompactionBytesLimit) {
return setLong(CompactionOption.soft_pending_compaction_bytes_limit,
softPendingCompactionBytesLimit);
}
@Override
public long softPendingCompactionBytesLimit() {
return getLong(CompactionOption.soft_pending_compaction_bytes_limit);
}
@Override
public MutableColumnFamilyOptionsBuilder setHardRateLimit(
final double hardRateLimit) {
return setDouble(CompactionOption.hard_rate_limit, hardRateLimit);
}
@Override
public double hardRateLimit() {
return getDouble(CompactionOption.hard_rate_limit);
}
@Override
public MutableColumnFamilyOptionsBuilder setHardPendingCompactionBytesLimit(
final long hardPendingCompactionBytesLimit) {
return setLong(CompactionOption.hard_pending_compaction_bytes_limit,
hardPendingCompactionBytesLimit);
}
@Override
public long hardPendingCompactionBytesLimit() {
return getLong(CompactionOption.hard_pending_compaction_bytes_limit);
}
@Override
public MutableColumnFamilyOptionsBuilder setLevel0FileNumCompactionTrigger(
final int level0FileNumCompactionTrigger) {
return setInt(CompactionOption.level0_file_num_compaction_trigger,
level0FileNumCompactionTrigger);
}
@Override
public int level0FileNumCompactionTrigger() {
return getInt(CompactionOption.level0_file_num_compaction_trigger);
}
@Override
public MutableColumnFamilyOptionsBuilder setLevel0SlowdownWritesTrigger(
final int level0SlowdownWritesTrigger) {
return setInt(CompactionOption.level0_slowdown_writes_trigger,
level0SlowdownWritesTrigger);
}
@Override
public int level0SlowdownWritesTrigger() {
return getInt(CompactionOption.level0_slowdown_writes_trigger);
}
@Override
public MutableColumnFamilyOptionsBuilder setLevel0StopWritesTrigger(
final int level0StopWritesTrigger) {
return setInt(CompactionOption.level0_stop_writes_trigger,
level0StopWritesTrigger);
}
@Override
public int level0StopWritesTrigger() {
return getInt(CompactionOption.level0_stop_writes_trigger);
}
@Override
public MutableColumnFamilyOptionsBuilder setMaxGrandparentOverlapFactor(
final int maxGrandparentOverlapFactor) {
return setInt(CompactionOption.max_grandparent_overlap_factor,
maxGrandparentOverlapFactor);
}
@Override
public int maxGrandparentOverlapFactor() {
return getInt(CompactionOption.max_grandparent_overlap_factor);
}
@Override
public MutableColumnFamilyOptionsBuilder setExpandedCompactionFactor(
final int expandedCompactionFactor) {
return setInt(CompactionOption.expanded_compaction_factor,
expandedCompactionFactor);
}
@Override
public int expandedCompactionFactor() {
return getInt(CompactionOption.expanded_compaction_factor);
}
@Override
public MutableColumnFamilyOptionsBuilder setSourceCompactionFactor(
final int sourceCompactionFactor) {
return setInt(CompactionOption.source_compaction_factor,
sourceCompactionFactor);
}
@Override
public int sourceCompactionFactor() {
return getInt(CompactionOption.source_compaction_factor);
}
@Override
public MutableColumnFamilyOptionsBuilder setTargetFileSizeBase(
final long targetFileSizeBase) {
return setLong(CompactionOption.target_file_size_base,
targetFileSizeBase);
}
@Override
public long targetFileSizeBase() {
return getLong(CompactionOption.target_file_size_base);
}
@Override
public MutableColumnFamilyOptionsBuilder setTargetFileSizeMultiplier(
final int targetFileSizeMultiplier) {
return setInt(CompactionOption.target_file_size_multiplier,
targetFileSizeMultiplier);
}
@Override
public int targetFileSizeMultiplier() {
return getInt(CompactionOption.target_file_size_multiplier);
}
@Override
public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelBase(
final long maxBytesForLevelBase) {
return setLong(CompactionOption.max_bytes_for_level_base,
maxBytesForLevelBase);
}
@Override
public long maxBytesForLevelBase() {
return getLong(CompactionOption.max_bytes_for_level_base);
}
@Override
public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelMultiplier(
final int maxBytesForLevelMultiplier) {
return setInt(CompactionOption.max_bytes_for_level_multiplier,
maxBytesForLevelMultiplier);
}
@Override
public int maxBytesForLevelMultiplier() {
return getInt(CompactionOption.max_bytes_for_level_multiplier);
}
@Override
public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelMultiplierAdditional(
final int[] maxBytesForLevelMultiplierAdditional) {
return setIntArray(
CompactionOption.max_bytes_for_level_multiplier_additional,
maxBytesForLevelMultiplierAdditional);
}
@Override
public int[] maxBytesForLevelMultiplierAdditional() {
return getIntArray(
CompactionOption.max_bytes_for_level_multiplier_additional);
}
@Override
public MutableColumnFamilyOptionsBuilder setVerifyChecksumsInCompaction(
final boolean verifyChecksumsInCompaction) {
return setBoolean(CompactionOption.verify_checksums_in_compaction,
verifyChecksumsInCompaction);
}
@Override
public boolean verifyChecksumsInCompaction() {
return getBoolean(CompactionOption.verify_checksums_in_compaction);
}
@Override
public MutableColumnFamilyOptionsBuilder setMaxSequentialSkipInIterations(
final long maxSequentialSkipInIterations) {
return setLong(MiscOption.max_sequential_skip_in_iterations,
maxSequentialSkipInIterations);
}
@Override
public long maxSequentialSkipInIterations() {
return getLong(MiscOption.max_sequential_skip_in_iterations);
}
@Override
public MutableColumnFamilyOptionsBuilder setParanoidFileChecks(
final boolean paranoidFileChecks) {
return setBoolean(MiscOption.paranoid_file_checks, paranoidFileChecks);
}
@Override
public boolean paranoidFileChecks() {
return getBoolean(MiscOption.paranoid_file_checks);
}
}
}
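For orientation, a minimal sketch of how the builder above is meant to be used; the option values are arbitrary examples, not recommendations:

// Build a MutableColumnFamilyOptions instance from the fluent builder and
// inspect the key/value pairs it produces for the native layer.
final MutableColumnFamilyOptions mutableOptions =
    MutableColumnFamilyOptions.builder()
        .setWriteBufferSize(64 * 1024 * 1024)   // memtable option
        .setMaxWriteBufferNumber(3)             // memtable option
        .setLevel0FileNumCompactionTrigger(8)   // compaction option
        .build();

// Each option is serialised as a string key/value pair; these arrays are
// what RocksDB#setOptions later hands down over JNI.
final String[] keys = mutableOptions.getKeys();     // e.g. "write_buffer_size"
final String[] values = mutableOptions.getValues(); // e.g. "67108864"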

View File

@ -0,0 +1,666 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public interface MutableColumnFamilyOptionsInterface {
/**
* Amount of data to build up in memory (backed by an unsorted log
* on disk) before converting to a sorted on-disk file.
*
* Larger values increase performance, especially during bulk loads.
* Up to {@code max_write_buffer_number} write buffers may be held in memory
* at the same time, so you may wish to adjust this parameter
* to control memory usage.
*
* Also, a larger write buffer will result in a longer recovery time
* the next time the database is opened.
*
* Default: 4MB
* @param writeBufferSize the size of write buffer.
* @return the instance of the current Object.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
MutableColumnFamilyOptionsInterface setWriteBufferSize(long writeBufferSize);
/**
* Return the size of the write buffer.
*
* @return size of write buffer.
* @see #setWriteBufferSize(long)
*/
long writeBufferSize();
/**
* The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* write_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) the size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) it should be a multiple of the CPU word size (which helps with memory
* alignment).
*
* We'll automatically check and adjust the size to make sure it
* conforms to these restrictions.
* Default: 0
*
* @param arenaBlockSize the size of an arena block
* @return the reference to the current option.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
MutableColumnFamilyOptionsInterface setArenaBlockSize(long arenaBlockSize);
/**
* The size of one block in arena memory allocation.
* If &le; 0, a proper value is automatically calculated (usually 1/10 of
* write_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) the size should be in the range of [4096, 2 &lt;&lt; 30] and
* (2) it should be a multiple of the CPU word size (which helps with memory
* alignment).
*
* We'll automatically check and adjust the size to make sure it
* conforms to these restrictions.
* Default: 0
*
* @return the size of an arena block
*/
long arenaBlockSize();
/**
* If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* a prefix bloom filter is created for the memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio.
* If it is larger than 0.25, it is sanitized to 0.25.
*
* Default: 0 (disable)
*
* @param memtablePrefixBloomSizeRatio The ratio
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setMemtablePrefixBloomSizeRatio(
double memtablePrefixBloomSizeRatio);
/**
* If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
* a prefix bloom filter is created for the memtable with the size of
* write_buffer_size * memtable_prefix_bloom_size_ratio.
* If it is larger than 0.25, it is sanitized to 0.25.
*
* Default: 0 (disable)
*
* @return the ratio
*/
double memtablePrefixBloomSizeRatio();
/**
* Page size for huge page TLB for bloom in memtable. If &le; 0, the bloom
* filter is allocated from malloc rather than from the huge page TLB.
* Huge pages need to be reserved for the allocation to succeed. For example:
* sysctl -w vm.nr_hugepages=20
* See linux doc Documentation/vm/hugetlbpage.txt
*
* @param memtableHugePageSize The page size of the huge
* page TLB
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setMemtableHugePageSize(
long memtableHugePageSize);
/**
* Page size for huge page TLB for bloom in memtable. If &le; 0, the bloom
* filter is allocated from malloc rather than from the huge page TLB.
* Huge pages need to be reserved for the allocation to succeed. For example:
* sysctl -w vm.nr_hugepages=20
* See linux doc Documentation/vm/hugetlbpage.txt
*
* @return The page size of the huge page TLB
*/
long memtableHugePageSize();
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @param maxSuccessiveMerges the maximum number of successive merges.
* @return the reference to the current option.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
MutableColumnFamilyOptionsInterface setMaxSuccessiveMerges(
long maxSuccessiveMerges);
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @return the maximum number of successive merges.
*/
long maxSuccessiveMerges();
/**
* The maximum number of write buffers that are built up in memory.
* The default is 2, so that when 1 write buffer is being flushed to
* storage, new writes can continue to the other write buffer.
* Default: 2
*
* @param maxWriteBufferNumber maximum number of write buffers.
* @return the instance of the current Object.
*/
MutableColumnFamilyOptionsInterface setMaxWriteBufferNumber(
int maxWriteBufferNumber);
/**
* Returns maximum number of write buffers.
*
* @return maximum number of write buffers.
* @see #setMaxWriteBufferNumber(int)
*/
int maxWriteBufferNumber();
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @param inplaceUpdateNumLocks the number of locks used for
* inplace updates.
* @return the reference to the current option.
* @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
* while overflowing the underlying platform specific value.
*/
MutableColumnFamilyOptionsInterface setInplaceUpdateNumLocks(
long inplaceUpdateNumLocks);
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @return the number of locks used for inplace update.
*/
long inplaceUpdateNumLocks();
/**
* Disable automatic compactions. Manual compactions can still
* be issued on this column family
*
* @param disableAutoCompactions true if auto-compactions are disabled.
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setDisableAutoCompactions(
boolean disableAutoCompactions);
/**
* Disable automatic compactions. Manual compactions can still
* be issued on this column family
*
* @return true if auto-compactions are disabled.
*/
boolean disableAutoCompactions();
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does
* not hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @param softRateLimit the soft-rate-limit of a compaction score
* for put delay.
* @return the reference to the current option.
*
* @deprecated Instead use {@link #setSoftPendingCompactionBytesLimit(long)}
*/
@Deprecated
MutableColumnFamilyOptionsInterface setSoftRateLimit(double softRateLimit);
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit &le; hard_rate_limit. If this constraint does
* not hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @return soft-rate-limit for put delay.
*
* @deprecated Instead use {@link #softPendingCompactionBytesLimit()}
*/
@Deprecated
double softRateLimit();
/**
* All writes will be slowed down to at least delayed_write_rate if the
* estimated bytes of pending compaction exceed this threshold.
*
* Default: 64GB
*
* @param softPendingCompactionBytesLimit The soft limit to impose on
* compaction
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setSoftPendingCompactionBytesLimit(
long softPendingCompactionBytesLimit);
/**
* All writes will be slowed down to at least delayed_write_rate if the
* estimated bytes of pending compaction exceed this threshold.
*
* Default: 64GB
*
* @return The soft limit to impose on compaction
*/
long softPendingCompactionBytesLimit();
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @param hardRateLimit the hard-rate-limit of a compaction score for put
* delay.
* @return the reference to the current option.
*
* @deprecated Instead use {@link #setHardPendingCompactionBytesLimit(long)}
*/
@Deprecated
MutableColumnFamilyOptionsInterface setHardRateLimit(double hardRateLimit);
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when &le; 1.0.
* Default: 0 (disabled)
*
* @return the hard-rate-limit of a compaction score for put delay.
*
* @deprecated Instead use {@link #hardPendingCompactionBytesLimit()}
*/
@Deprecated
double hardRateLimit();
/**
* All writes are stopped if the estimated bytes of pending compaction
* exceed this threshold.
*
* Default: 256GB
*
* @param hardPendingCompactionBytesLimit The hard limit to impose on
* compaction
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setHardPendingCompactionBytesLimit(
long hardPendingCompactionBytesLimit);
/**
* All writes are stopped if the estimated bytes of pending compaction
* exceed this threshold.
*
* Default: 256GB
*
* @return The hard limit to impose on compaction
*/
long hardPendingCompactionBytesLimit();
/**
* Number of files to trigger level-0 compaction. A value &lt; 0 means that
* level-0 compaction will not be triggered by the number of files at all.
*
* Default: 4
*
* @param level0FileNumCompactionTrigger The number of files to trigger
* level-0 compaction
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setLevel0FileNumCompactionTrigger(
int level0FileNumCompactionTrigger);
/**
* Number of files to trigger level-0 compaction. A value &lt; 0 means that
* level-0 compaction will not be triggered by the number of files at all.
*
* Default: 4
*
* @return The number of files to trigger level-0 compaction
*/
int level0FileNumCompactionTrigger();
/**
* Soft limit on the number of level-0 files. We start slowing down writes at
* this point. A value &lt; 0 means that no write slowdown will be triggered by
* the number of files in level-0.
*
* @param level0SlowdownWritesTrigger The soft limit on the number of
* level-0 files
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setLevel0SlowdownWritesTrigger(
int level0SlowdownWritesTrigger);
/**
* Soft limit on the number of level-0 files. We start slowing down writes at
* this point. A value &lt; 0 means that no write slowdown will be triggered by
* the number of files in level-0.
*
* @return The soft limit on the number of
* level-0 files
*/
int level0SlowdownWritesTrigger();
/**
* Maximum number of level-0 files. We stop writes at this point.
*
* @param level0StopWritesTrigger The maximum number of level-0 files
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setLevel0StopWritesTrigger(
int level0StopWritesTrigger);
/**
* Maximum number of level-0 files. We stop writes at this point.
*
* @return The maximum number of level-0 files
*/
int level0StopWritesTrigger();
/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level-&gt;level+1 compaction.
*
* @param maxGrandparentOverlapFactor maximum bytes of overlaps in
* "grandparent" level.
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setMaxGrandparentOverlapFactor(
int maxGrandparentOverlapFactor);
/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level-&gt;level+1 compaction.
*
* @return maximum bytes of overlaps in "grandparent" level.
*/
int maxGrandparentOverlapFactor();
/**
* Maximum number of bytes in all compacted files. We avoid expanding
* the lower level file set of a compaction if it would make the
* total compaction cover more than
* (expanded_compaction_factor * targetFileSizeLevel()) bytes.
*
* @param expandedCompactionFactor the maximum number of bytes in all
* compacted files.
* @return the reference to the current option.
* @see #setSourceCompactionFactor(int)
*/
MutableColumnFamilyOptionsInterface setExpandedCompactionFactor(
int expandedCompactionFactor);
/**
* Maximum number of bytes in all compacted files. We avoid expanding
* the lower level file set of a compaction if it would make the
* total compaction cover more than
* (expanded_compaction_factor * targetFileSizeLevel()) bytes.
*
* @return the maximum number of bytes in all compacted files.
* @see #sourceCompactionFactor()
*/
int expandedCompactionFactor();
/**
* Maximum number of bytes in all source files to be compacted in a
* single compaction run. We avoid picking too many files in the
* source level so that the total source bytes for the compaction
* do not exceed
* (source_compaction_factor * targetFileSizeLevel()) bytes.
* Default: 1, i.e. pick one file's worth (maxfilesize) of data as the
* source of a compaction.
*
* @param sourceCompactionFactor the maximum number of bytes in all
* source files to be compacted in a single compaction run.
* @return the reference to the current option.
* @see #setExpandedCompactionFactor(int)
*/
MutableColumnFamilyOptionsInterface setSourceCompactionFactor(
int sourceCompactionFactor);
/**
* Maximum number of bytes in all source files to be compacted in a
* single compaction run. We avoid picking too many files in the
* source level so that the total source bytes for the compaction
* do not exceed
* (source_compaction_factor * targetFileSizeLevel()) bytes.
* Default: 1, i.e. pick one file's worth (maxfilesize) of data as the
* source of a compaction.
*
* @return the maximum number of bytes in all source files to be compacted.
* @see #expandedCompactionFactor()
*/
int sourceCompactionFactor();
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* targetFileSizeMultiplier is 10, then each file on level-1 will
* be 2MB, and each file on level-2 will be 20MB,
* and each file on level-3 will be 200MB.
* By default targetFileSizeBase is 2MB.
*
* @param targetFileSizeBase the target size of a level-1 file.
* @return the reference to the current option.
*
* @see #setTargetFileSizeMultiplier(int)
*/
MutableColumnFamilyOptionsInterface setTargetFileSizeBase(
long targetFileSizeBase);
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* targetFileSizeMultiplier is 10, then each file on level-1 will
* be 2MB, and each file on level-2 will be 20MB,
* and each file on level-3 will be 200MB.
* By default targetFileSizeBase is 2MB.
*
* @return the target size of a level-1 file.
*
* @see #targetFileSizeMultiplier()
*/
long targetFileSizeBase();
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-(L+1) file and a level-L file.
* By default targetFileSizeMultiplier is 1, meaning
* files in different levels have the same target.
*
* @param multiplier the size ratio between a level-(L+1) file
* and level-L file.
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setTargetFileSizeMultiplier(
int multiplier);
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-(L+1) file and level-L file.
* By default targetFileSizeMultiplier is 1, meaning
* files in different levels have the same target.
*
* @return the size ratio between a level-(L+1) file and level-L file.
*/
int targetFileSizeMultiplier();
/**
* The upper-bound of the total size of level-1 files in bytes.
* Maximum number of bytes for level L can be calculated as
* (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
* For example, if maxBytesForLevelBase is 20MB, and if
* maxBytesForLevelMultiplier is 10, total data size for level-1
* will be 20MB, total file size for level-2 will be 200MB,
* and total file size for level-3 will be 2GB.
* By default 'maxBytesForLevelBase' is 10MB.
*
* @param maxBytesForLevelBase maximum bytes for level base.
*
* @return the reference to the current option.
* @see #setMaxBytesForLevelMultiplier(int)
*/
MutableColumnFamilyOptionsInterface setMaxBytesForLevelBase(
long maxBytesForLevelBase);
/**
* The upper-bound of the total size of level-1 files in bytes.
* Maximum number of bytes for level L can be calculated as
* (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
* For example, if maxBytesForLevelBase is 20MB, and if
* maxBytesForLevelMultiplier is 10, total data size for level-1
* will be 20MB, total file size for level-2 will be 200MB,
* and total file size for level-3 will be 2GB.
* By default 'maxBytesForLevelBase' is 10MB.
*
* @return the upper-bound of the total size of level-1 files
* in bytes.
* @see #maxBytesForLevelMultiplier()
*/
long maxBytesForLevelBase();
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L.
* @return the reference to the current option.
* @see #setMaxBytesForLevelBase(long)
*/
MutableColumnFamilyOptionsInterface setMaxBytesForLevelMultiplier(
int multiplier);
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L.
* @see #maxBytesForLevelBase()
*/
int maxBytesForLevelMultiplier();
/**
* Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level.
*
* Default: 1
*
* @param maxBytesForLevelMultiplierAdditional The max-size multipliers
* for each level
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setMaxBytesForLevelMultiplierAdditional(
int[] maxBytesForLevelMultiplierAdditional);
/**
* Different max-size multipliers for different levels.
* These are multiplied by max_bytes_for_level_multiplier to arrive
* at the max-size of each level.
*
* Default: 1
*
* @return The max-size multipliers for each level
*/
int[] maxBytesForLevelMultiplierAdditional();
/**
* If true, compaction will verify checksum on every read that happens
* as part of compaction
* Default: true
*
* @param verifyChecksumsInCompaction true if compaction verifies
* checksum on every read.
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setVerifyChecksumsInCompaction(
boolean verifyChecksumsInCompaction);
/**
* If true, compaction will verify checksum on every read that happens
* as part of compaction
* Default: true
*
* @return true if compaction verifies checksum on every read.
*/
boolean verifyChecksumsInCompaction();
/**
* An iterator's Next() call sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same user key) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @param maxSequentialSkipInIterations the number of keys that can
* be skipped in an iteration.
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setMaxSequentialSkipInIterations(
long maxSequentialSkipInIterations);
/**
* An iterator's Next() call sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same user key) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @return the number of keys that can be skipped in an iteration.
*/
long maxSequentialSkipInIterations();
/**
* After writing every SST file, reopen it and read all the keys.
*
* Default: false
*
* @param paranoidFileChecks true to enable paranoid file checks
* @return the reference to the current option.
*/
MutableColumnFamilyOptionsInterface setParanoidFileChecks(
boolean paranoidFileChecks);
/**
* After writing every SST file, reopen it and read all the keys.
*
* Default: false
*
* @return true if paranoid file checks are enabled
*/
boolean paranoidFileChecks();
}
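To make the sizing formulas in the javadoc above concrete, here is a small illustrative computation using the documented example values (2MB base file size, 10MB base level size, multiplier 10):

// Sketch of the two formulas documented above:
//   target file size for level L = targetFileSizeBase * targetFileSizeMultiplier^(L-1)
//   max bytes for level L        = maxBytesForLevelBase * maxBytesForLevelMultiplier^(L-1)
final long targetFileSizeBase = 2L * 1024 * 1024;    // 2MB, the documented default
final long maxBytesForLevelBase = 10L * 1024 * 1024; // 10MB, the documented default
final int multiplier = 10;                           // example value from the javadoc
for (int level = 1; level <= 3; level++) {
  final long factor = (long) Math.pow(multiplier, level - 1);
  System.out.printf("level-%d: file target %d bytes, level cap %d bytes%n",
      level, targetFileSizeBase * factor, maxBytesForLevelBase * factor);
}
// Prints per-file targets of 2MB/20MB/200MB and level caps of
// 10MB/100MB/1000MB for levels 1-3, matching the javadoc examples.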

View File

@ -16,7 +16,8 @@ import java.util.List;
* automatically and native resources will be released as part of the process.
*/
public class Options extends RocksObject
implements DBOptionsInterface, ColumnFamilyOptionsInterface,
MutableColumnFamilyOptionsInterface {
static {
RocksDB.loadLibrary();
}
@ -1061,6 +1062,98 @@ public class Options extends RocksObject
return optimizeFiltersForHits(nativeHandle_);
}
@Override
public Options setMemtableHugePageSize(long memtableHugePageSize) {
setMemtableHugePageSize(nativeHandle_, memtableHugePageSize);
return this;
}
@Override
public long memtableHugePageSize() {
return memtableHugePageSize(nativeHandle_);
}
@Override
public Options setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) {
setSoftPendingCompactionBytesLimit(nativeHandle_,
softPendingCompactionBytesLimit);
return this;
}
@Override
public long softPendingCompactionBytesLimit() {
return softPendingCompactionBytesLimit(nativeHandle_);
}
@Override
public Options setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) {
setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit);
return this;
}
@Override
public long hardPendingCompactionBytesLimit() {
return hardPendingCompactionBytesLimit(nativeHandle_);
}
@Override
public Options setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) {
setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger);
return this;
}
@Override
public int level0FileNumCompactionTrigger() {
return level0FileNumCompactionTrigger(nativeHandle_);
}
@Override
public Options setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) {
setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger);
return this;
}
@Override
public int level0SlowdownWritesTrigger() {
return level0SlowdownWritesTrigger(nativeHandle_);
}
@Override
public Options setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
return this;
}
@Override
public int level0StopWritesTrigger() {
return level0StopWritesTrigger(nativeHandle_);
}
@Override
public Options setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) {
setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional);
return this;
}
@Override
public int[] maxBytesForLevelMultiplierAdditional() {
return maxBytesForLevelMultiplierAdditional(nativeHandle_);
}
@Override
public Options setParanoidFileChecks(boolean paranoidFileChecks) {
setParanoidFileChecks(nativeHandle_, paranoidFileChecks);
return this;
}
@Override
public boolean paranoidFileChecks() {
return paranoidFileChecks(nativeHandle_);
}
private native static long newOptions();
private native static long newOptions(long dbOptHandle,
long cfOptHandle);
@ -1281,6 +1374,30 @@ public class Options extends RocksObject
private native void setOptimizeFiltersForHits(long handle,
boolean optimizeFiltersForHits);
private native boolean optimizeFiltersForHits(long handle);
private native void setMemtableHugePageSize(long handle,
long memtableHugePageSize);
private native long memtableHugePageSize(long handle);
private native void setSoftPendingCompactionBytesLimit(long handle,
long softPendingCompactionBytesLimit);
private native long softPendingCompactionBytesLimit(long handle);
private native void setHardPendingCompactionBytesLimit(long handle,
long hardPendingCompactionBytesLimit);
private native long hardPendingCompactionBytesLimit(long handle);
private native void setLevel0FileNumCompactionTrigger(long handle,
int level0FileNumCompactionTrigger);
private native int level0FileNumCompactionTrigger(long handle);
private native void setLevel0SlowdownWritesTrigger(long handle,
int level0SlowdownWritesTrigger);
private native int level0SlowdownWritesTrigger(long handle);
private native void setLevel0StopWritesTrigger(long handle,
int level0StopWritesTrigger);
private native int level0StopWritesTrigger(long handle);
private native void setMaxBytesForLevelMultiplierAdditional(long handle,
int[] maxBytesForLevelMultiplierAdditional);
private native int[] maxBytesForLevelMultiplierAdditional(long handle);
private native void setParanoidFileChecks(long handle,
boolean paranoidFileChecks);
private native boolean paranoidFileChecks(long handle);
// instance variables
Env env_;
MemTableConfig memTableConfig_;
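Because Options now also implements MutableColumnFamilyOptionsInterface, the same knobs can be set statically before the database is opened. A brief sketch; the method name, path, and values are placeholders:

// Sketch: the newly exposed mutable options are also settable up front.
static void openWithMutableDefaults() throws RocksDBException {
  try (final Options options = new Options()
           .setCreateIfMissing(true)
           .setMemtableHugePageSize(2 * 1024 * 1024)         // 2MB huge pages
           .setSoftPendingCompactionBytesLimit(64L << 30)    // 64GB, the default
           .setHardPendingCompactionBytesLimit(256L << 30);  // 256GB, the default
       final RocksDB db = RocksDB.open(options, "/tmp/example-db")) {
    // ... reads and writes against db ...
  }
}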

View File

@ -1718,6 +1718,14 @@ public class RocksDB extends RocksObject {
getUpdatesSince(nativeHandle_, sequenceNumber));
}
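/**
 * Change per-column-family options on a running database.
 *
 * Only the options exposed through
 * {@link MutableColumnFamilyOptionsInterface} can be changed this way;
 * the builder's key/value pairs are forwarded to the native
 * DB::SetOptions call.
 *
 * @param columnFamilyHandle the handle of the column family to modify
 * @param mutableColumnFamilyOptions the options to apply
 * @throws RocksDBException if the options could not be applied to the
 *     running database
 */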
public void setOptions(final ColumnFamilyHandle columnFamilyHandle,
final MutableColumnFamilyOptions mutableColumnFamilyOptions)
throws RocksDBException {
setOptions(nativeHandle_, columnFamilyHandle.nativeHandle_,
mutableColumnFamilyOptions.getKeys(),
mutableColumnFamilyOptions.getValues());
}
/**
* Private constructor.
*
@ -1901,6 +1909,8 @@ public class RocksDB extends RocksObject {
boolean force) throws RocksDBException;
private native long getUpdatesSince(long handle, long sequenceNumber)
throws RocksDBException;
private native void setOptions(long handle, long cfHandle, String[] keys,
String[] values) throws RocksDBException;
protected DBOptionsInterface options_;
}
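A compact sketch of the new API in action; db and cfHandle stand in for an open database and a column family handle obtained at open time (the full flow appears in RocksDBTest#setOptions below):

// Sketch: pause auto-compactions on one column family for a bulk load,
// then restore them, without reopening the database.
db.setOptions(cfHandle, MutableColumnFamilyOptions.builder()
    .setDisableAutoCompactions(true)
    .build());

// ... bulk-load data into the column family ...

db.setOptions(cfHandle, MutableColumnFamilyOptions.builder()
    .setDisableAutoCompactions(false)
    .build());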

View File

@ -178,6 +178,17 @@ public class ColumnFamilyOptionsTest {
}
}
@Test
public void maxBytesForLevelMultiplierAdditional() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue1 = rand.nextInt();
final int intValue2 = rand.nextInt();
final int[] ints = new int[]{intValue1, intValue2};
opt.setMaxBytesForLevelMultiplierAdditional(ints);
assertThat(opt.maxBytesForLevelMultiplierAdditional()).isEqualTo(ints);
}
}
@Test
public void expandedCompactionFactor() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
@ -214,6 +225,15 @@ public class ColumnFamilyOptionsTest {
}
}
@Test
public void softPendingCompactionBytesLimit() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setSoftPendingCompactionBytesLimit(longValue);
assertThat(opt.softPendingCompactionBytesLimit()).isEqualTo(longValue);
}
}
@Test
public void hardRateLimit() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
@ -223,6 +243,42 @@ public class ColumnFamilyOptionsTest {
}
}
@Test
public void hardPendingCompactionBytesLimit() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setHardPendingCompactionBytesLimit(longValue);
assertThat(opt.hardPendingCompactionBytesLimit()).isEqualTo(longValue);
}
}
@Test
public void level0FileNumCompactionTrigger() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setLevel0FileNumCompactionTrigger(intValue);
assertThat(opt.level0FileNumCompactionTrigger()).isEqualTo(intValue);
}
}
@Test
public void level0SlowdownWritesTrigger() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setLevel0SlowdownWritesTrigger(intValue);
assertThat(opt.level0SlowdownWritesTrigger()).isEqualTo(intValue);
}
}
@Test
public void level0StopWritesTrigger() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setLevel0StopWritesTrigger(intValue);
assertThat(opt.level0StopWritesTrigger()).isEqualTo(intValue);
}
}
@Test
public void rateLimitDelayMaxMilliseconds() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
@ -304,6 +360,15 @@ public class ColumnFamilyOptionsTest {
}
}
@Test
public void memtableHugePageSize() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final long longValue = rand.nextLong();
opt.setMemtableHugePageSize(longValue);
assertThat(opt.memtableHugePageSize()).isEqualTo(longValue);
}
}
@Test
public void bloomLocality() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {

View File

@ -0,0 +1,94 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import org.junit.Test;
import org.rocksdb.MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder;
import java.util.NoSuchElementException;
import static org.assertj.core.api.Assertions.assertThat;
public class MutableColumnFamilyOptionsTest {
@Test
public void builder() {
final MutableColumnFamilyOptionsBuilder builder =
MutableColumnFamilyOptions.builder();
builder
.setWriteBufferSize(10)
.setInplaceUpdateNumLocks(5)
.setDisableAutoCompactions(true)
.setVerifyChecksumsInCompaction(false)
.setParanoidFileChecks(true);
assertThat(builder.writeBufferSize()).isEqualTo(10);
assertThat(builder.inplaceUpdateNumLocks()).isEqualTo(5);
assertThat(builder.disableAutoCompactions()).isEqualTo(true);
assertThat(builder.verifyChecksumsInCompaction()).isEqualTo(false);
assertThat(builder.paranoidFileChecks()).isEqualTo(true);
}
@Test(expected = NoSuchElementException.class)
public void builder_getWhenNotSet() {
final MutableColumnFamilyOptionsBuilder builder =
MutableColumnFamilyOptions.builder();
builder.writeBufferSize();
}
@Test
public void builder_build() {
final MutableColumnFamilyOptions options = MutableColumnFamilyOptions
.builder()
.setWriteBufferSize(10)
.setParanoidFileChecks(true)
.build();
assertThat(options.getKeys().length).isEqualTo(2);
assertThat(options.getValues().length).isEqualTo(2);
assertThat(options.getKeys()[0])
.isEqualTo(
MutableColumnFamilyOptions.MemtableOption.write_buffer_size.name());
assertThat(options.getValues()[0]).isEqualTo("10");
assertThat(options.getKeys()[1])
.isEqualTo(
MutableColumnFamilyOptions.MiscOption.paranoid_file_checks.name());
assertThat(options.getValues()[1]).isEqualTo("true");
}
@Test
public void mutableColumnFamilyOptions_toString() {
final String str = MutableColumnFamilyOptions
.builder()
.setWriteBufferSize(10)
.setInplaceUpdateNumLocks(5)
.setDisableAutoCompactions(true)
.setVerifyChecksumsInCompaction(false)
.setParanoidFileChecks(true)
.build()
.toString();
assertThat(str).isEqualTo("write_buffer_size=10;inplace_update_num_locks=5;"
+ "disable_auto_compactions=true;verify_checksums_in_compaction=false;"
+ "paranoid_file_checks=true");
}
@Test
public void mutableColumnFamilyOptions_parse() {
final String str = "write_buffer_size=10;inplace_update_num_locks=5;"
+ "disable_auto_compactions=true;verify_checksums_in_compaction=false;"
+ "paranoid_file_checks=true";
final MutableColumnFamilyOptionsBuilder builder =
MutableColumnFamilyOptions.parse(str);
assertThat(builder.writeBufferSize()).isEqualTo(10);
assertThat(builder.inplaceUpdateNumLocks()).isEqualTo(5);
assertThat(builder.disableAutoCompactions()).isEqualTo(true);
assertThat(builder.verifyChecksumsInCompaction()).isEqualTo(false);
assertThat(builder.paranoidFileChecks()).isEqualTo(true);
}
}

View File

@ -141,6 +141,17 @@ public class OptionsTest {
}
}
@Test
public void maxBytesForLevelMultiplierAdditional() {
try (final Options opt = new Options()) {
final int intValue1 = rand.nextInt();
final int intValue2 = rand.nextInt();
final int[] ints = new int[]{intValue1, intValue2};
opt.setMaxBytesForLevelMultiplierAdditional(ints);
assertThat(opt.maxBytesForLevelMultiplierAdditional()).isEqualTo(ints);
}
}
@Test
public void expandedCompactionFactor() {
try (final Options opt = new Options()) {
@ -177,6 +188,15 @@ public class OptionsTest {
}
}
@Test
public void softPendingCompactionBytesLimit() {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setSoftPendingCompactionBytesLimit(longValue);
assertThat(opt.softPendingCompactionBytesLimit()).isEqualTo(longValue);
}
}
@Test
public void hardRateLimit() {
try (final Options opt = new Options()) {
@ -186,6 +206,42 @@ public class OptionsTest {
}
}
@Test
public void hardPendingCompactionBytesLimit() {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setHardPendingCompactionBytesLimit(longValue);
assertThat(opt.hardPendingCompactionBytesLimit()).isEqualTo(longValue);
}
}
@Test
public void level0FileNumCompactionTrigger() {
try (final Options opt = new Options()) {
final int intValue = rand.nextInt();
opt.setLevel0FileNumCompactionTrigger(intValue);
assertThat(opt.level0FileNumCompactionTrigger()).isEqualTo(intValue);
}
}
@Test
public void level0SlowdownWritesTrigger() {
try (final Options opt = new Options()) {
final int intValue = rand.nextInt();
opt.setLevel0SlowdownWritesTrigger(intValue);
assertThat(opt.level0SlowdownWritesTrigger()).isEqualTo(intValue);
}
}
@Test
public void level0StopWritesTrigger() {
try (final Options opt = new Options()) {
final int intValue = rand.nextInt();
opt.setLevel0StopWritesTrigger(intValue);
assertThat(opt.level0StopWritesTrigger()).isEqualTo(intValue);
}
}
@Test
public void rateLimitDelayMaxMilliseconds() {
try (final Options opt = new Options()) {
@ -267,6 +323,15 @@ public class OptionsTest {
}
}
@Test
public void memtableHugePageSize() {
try (final Options opt = new Options()) {
final long longValue = rand.nextLong();
opt.setMemtableHugePageSize(longValue);
assertThat(opt.memtableHugePageSize()).isEqualTo(longValue);
}
}
@Test
public void bloomLocality() {
try (final Options opt = new Options()) {

View File

@ -662,4 +662,38 @@ public class RocksDBTest {
db.enableFileDeletions(true);
}
}
@Test
public void setOptions() throws RocksDBException {
try (final DBOptions options = new DBOptions()
.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true);
final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions()
.setWriteBufferSize(4096)) {
final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
Arrays.asList(
new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts));
// open database
final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
try (final RocksDB db = RocksDB.open(options,
dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) {
try {
final MutableColumnFamilyOptions mutableOptions =
MutableColumnFamilyOptions.builder()
.setWriteBufferSize(2048)
.build();
db.setOptions(columnFamilyHandles.get(1), mutableOptions);
} finally {
for (final ColumnFamilyHandle handle : columnFamilyHandles) {
handle.close();
}
}
}
}
}
}