Merge pull request #355 from fyrz/RocksJava-Options-Refactoring-3.6

[RocksJava] Options Refactoring 3.6
This commit is contained in:
Yueh-Hsuan Chiang 2014-10-30 16:46:41 -07:00
commit a29118ffc7
8 changed files with 4057 additions and 1624 deletions


@@ -1,4 +1,4 @@
-NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions org.rocksdb.Statistics org.rocksdb.RocksIterator org.rocksdb.VectorMemTableConfig org.rocksdb.SkipListMemTableConfig org.rocksdb.HashLinkedListMemTableConfig org.rocksdb.HashSkipListMemTableConfig org.rocksdb.PlainTableConfig org.rocksdb.BlockBasedTableConfig org.rocksdb.ReadOptions org.rocksdb.Filter org.rocksdb.BloomFilter org.rocksdb.ComparatorOptions org.rocksdb.AbstractComparator org.rocksdb.Comparator org.rocksdb.DirectComparator org.rocksdb.AbstractSlice org.rocksdb.Slice org.rocksdb.DirectSlice org.rocksdb.RestoreOptions org.rocksdb.RestoreBackupableDB org.rocksdb.RocksEnv org.rocksdb.GenericRateLimiterConfig org.rocksdb.ColumnFamilyHandle org.rocksdb.MergeOperator org.rocksdb.StringAppendOperator
+NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.DBOptions org.rocksdb.ColumnFamilyOptions org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions org.rocksdb.Statistics org.rocksdb.RocksIterator org.rocksdb.VectorMemTableConfig org.rocksdb.SkipListMemTableConfig org.rocksdb.HashLinkedListMemTableConfig org.rocksdb.HashSkipListMemTableConfig org.rocksdb.PlainTableConfig org.rocksdb.BlockBasedTableConfig org.rocksdb.ReadOptions org.rocksdb.Filter org.rocksdb.BloomFilter org.rocksdb.ComparatorOptions org.rocksdb.AbstractComparator org.rocksdb.Comparator org.rocksdb.DirectComparator org.rocksdb.AbstractSlice org.rocksdb.Slice org.rocksdb.DirectSlice org.rocksdb.RestoreOptions org.rocksdb.RestoreBackupableDB org.rocksdb.RocksEnv org.rocksdb.GenericRateLimiterConfig org.rocksdb.ColumnFamilyHandle org.rocksdb.MergeOperator org.rocksdb.StringAppendOperator org.rocksdb.ComparatorOptions org.rocksdb.AbstractComparator org.rocksdb.Comparator org.rocksdb.DirectComparator org.rocksdb.AbstractSlice org.rocksdb.Slice org.rocksdb.DirectSlice
ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3)
ROCKSDB_MINOR = $(shell egrep "ROCKSDB_MINOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3)


@@ -0,0 +1,20 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Builtin RocksDB comparators
*
* <ol>
* <li>BYTEWISE_COMPARATOR - Sorts all keys in ascending bytewise
* order.</li>
* <li>REVERSE_BYTEWISE_COMPARATOR - Sorts all keys in descending bytewise
* order</li>
* </ol>
*/
public enum BuiltinComparator {
BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR
}
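To illustrate how a builtin comparator is wired up, here is a minimal usage sketch (not part of this commit); it relies only on the existing Options#setComparator(BuiltinComparator) setter, and the class name and database path are placeholders.

// Minimal usage sketch (not part of this commit); class name and path are placeholders.
import org.rocksdb.BuiltinComparator;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class BuiltinComparatorExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options options = new Options();
    options.setCreateIfMissing(true);
    // Keys in this database will be ordered in descending bytewise order.
    options.setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR);
    RocksDB db = RocksDB.open(options, "/tmp/rocksdb-builtin-comparator");
    db.close();
    options.dispose();
  }
}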


@@ -0,0 +1,979 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public interface ColumnFamilyOptionsInterface {
/**
* Use this if you don't need to keep the data sorted, i.e. you'll never use
* an iterator, only Put() and Get() API calls
*
* @param blockCacheSizeMb Block cache size in MB
* @return the instance of the current Object.
*/
Object optimizeForPointLookup(long blockCacheSizeMb);
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
* optimized for heavy workloads and big datasets, which means you might
* observe write stalls under some conditions. As a starting point for tuning
* RocksDB options, use the following for level style compaction.</p>
*
* <p>Make sure to also call IncreaseParallelism(), which will provide the
* biggest performance gains.</p>
* <p>Note: we might use more memory than memtable_memory_budget during high
* write rate period</p>
*
* @return the instance of the current Object.
*/
Object optimizeLevelStyleCompaction();
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
* optimized for heavy workloads and big datasets, which means you might
* observe write stalls under some conditions. As a starting point for tuning
* RocksDB options, use the following for level style compaction.</p>
*
* <p>Make sure to also call IncreaseParallelism(), which will provide the
* biggest performance gains.</p>
* <p>Note: we might use more memory than memtable_memory_budget during high
* write rate period</p>
*
* @param memtableMemoryBudget memory budget in bytes
* @return the instance of the current Object.
*/
Object optimizeLevelStyleCompaction(long memtableMemoryBudget);
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
* optimized for heavy workloads and big datasets, which means you might
* observe write stalls under some conditions. As a starting point for tuning
* RocksDB options, use the following for universal style compaction.</p>
*
* <p>Universal style compaction is focused on reducing Write Amplification
* Factor for big data sets, but increases Space Amplification.</p>
*
* <p>Make sure to also call IncreaseParallelism(), which will provide the
* biggest performance gains.</p>
*
* <p>Note: we might use more memory than memtable_memory_budget during high
* write rate period</p>
*
* @return the instance of the current Object.
*/
Object optimizeUniversalStyleCompaction();
/**
* <p>Default values for some parameters in ColumnFamilyOptions are not
* optimized for heavy workloads and big datasets, which means you might
* observe write stalls under some conditions. As a starting point for tuning
* RocksDB options, use the following for universal style compaction.</p>
*
* <p>Universal style compaction is focused on reducing Write Amplification
* Factor for big data sets, but increases Space Amplification.</p>
*
* <p>Make sure to also call IncreaseParallelism(), which will provide the
* biggest performance gains.</p>
*
* <p>Note: we might use more memory than memtable_memory_budget during high
* write rate period</p>
*
* @param memtableMemoryBudget memory budget in bytes
* @return the instance of the current Object.
*/
Object optimizeUniversalStyleCompaction(long memtableMemoryBudget);
/**
* Set {@link BuiltinComparator} to be used with RocksDB.
*
* Note: Comparator can be set once upon database creation.
*
* Default: BytewiseComparator.
* @param builtinComparator a {@link BuiltinComparator} type.
* @return the instance of the current Object.
*/
Object setComparator(BuiltinComparator builtinComparator);
/**
* Use the specified comparator for key ordering.
*
* The comparator should not be disposed before the options instances that use it are
* disposed. If dispose() is not called, the comparator object will be
* GC'd automatically.
*
* Comparator instance can be re-used in multiple options instances.
*
* @param comparator java instance.
* @return the instance of the current Object.
*/
Object setComparator(AbstractComparator comparator);
/**
* <p>Set the merge operator to be used for merging two merge operands
* of the same key. The merge function is invoked during
* compaction and at lookup time, if multiple key/value pairs belonging
* to the same key are found in the database.</p>
*
* @param name the name of the merge function, as defined by
* the MergeOperators factory (see utilities/MergeOperators.h)
* The merge function is specified by name and must be one of the
* standard merge operators provided by RocksDB. The available
* operators are "put", "uint64add", "stringappend" and "stringappendtest".
* @return the instance of the current Object.
*/
public Object setMergeOperatorName(String name);
/**
* <p>Set the merge operator to be used for merging two different key/value
* pairs that share the same key. The merge function is invoked during
* compaction and at lookup time, if multiple key/value pairs belonging
* to the same key are found in the database.</p>
*
* @param mergeOperator {@link MergeOperator} instance.
* @return the instance of the current Object.
*/
public Object setMergeOperator(MergeOperator mergeOperator);
/**
* Amount of data to build up in memory (backed by an unsorted log
* on disk) before converting to a sorted on-disk file.
*
* Larger values increase performance, especially during bulk loads.
* Up to {@code max_write_buffer_number} write buffers may be held in memory
* at the same time, so you may wish to adjust this parameter
* to control memory usage.
*
* Also, a larger write buffer will result in a longer recovery time
* the next time the database is opened.
*
* Default: 4MB
* @param writeBufferSize the size of write buffer.
* @return the instance of the current Object.
* @throws org.rocksdb.RocksDBException
*/
Object setWriteBufferSize(long writeBufferSize)
throws RocksDBException;
/**
* Returns the size of the write buffer.
*
* @return size of write buffer.
* @see #setWriteBufferSize(long)
*/
long writeBufferSize();
/**
* The maximum number of write buffers that are built up in memory.
* The default is 2, so that when 1 write buffer is being flushed to
* storage, new writes can continue to the other write buffer.
* Default: 2
*
* @param maxWriteBufferNumber maximum number of write buffers.
* @return the instance of the current Object.
*/
Object setMaxWriteBufferNumber(
int maxWriteBufferNumber);
/**
* Returns maximum number of write buffers.
*
* @return maximum number of write buffers.
* @see #setMaxWriteBufferNumber(int)
*/
int maxWriteBufferNumber();
/**
* The minimum number of write buffers that will be merged together
* before writing to storage. If set to 1, then
* all write buffers are flushed to L0 as individual files and this increases
* read amplification because a get request has to check in all of these
* files. Also, an in-memory merge may result in writing less
* data to storage if there are duplicate records in each of these
* individual write buffers. Default: 1
*
* @param minWriteBufferNumberToMerge the minimum number of write buffers
* that will be merged together.
* @return the reference to the current option.
*/
Object setMinWriteBufferNumberToMerge(
int minWriteBufferNumberToMerge);
/**
* The minimum number of write buffers that will be merged together
* before writing to storage. If set to 1, then
* all write buffers are flushed to L0 as individual files and this increases
* read amplification because a get request has to check in all of these
* files. Also, an in-memory merge may result in writing less
* data to storage if there are duplicate records in each of these
* individual write buffers. Default: 1
*
* @return the minimum number of write buffers that will be merged together.
*/
int minWriteBufferNumberToMerge();
/**
* This prefix-extractor uses the first n bytes of a key as its prefix.
*
* In some hash-based memtable representations, such as HashLinkedList
* and HashSkipList, prefixes are used to partition the keys into
* several buckets. Prefix extractor is used to specify how to
* extract the prefix given a key.
*
* @param n use the first n bytes of a key as its prefix.
* @return the reference to the current option.
*/
Object useFixedLengthPrefixExtractor(int n);
/**
* Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically.
*
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
*
* @param compressionType Compression Type.
* @return the reference to the current option.
*/
Object setCompressionType(CompressionType compressionType);
/**
* Compress blocks using the specified compression algorithm. This
* parameter can be changed dynamically.
*
* Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression.
*
* @return Compression type.
*/
CompressionType compressionType();
/**
* Set the number of levels for this database
* If level-styled compaction is used, then this number determines
* the total number of levels.
*
* @param numLevels the number of levels.
* @return the reference to the current option.
*/
Object setNumLevels(int numLevels);
/**
* If level-styled compaction is used, then this number determines
* the total number of levels.
*
* @return the number of levels.
*/
int numLevels();
/**
* Number of files to trigger level-0 compaction. A value < 0 means that
* level-0 compaction will not be triggered by number of files at all.
* Default: 4
*
* @param numFiles the number of files in level-0 to trigger compaction.
* @return the reference to the current option.
*/
Object setLevelZeroFileNumCompactionTrigger(
int numFiles);
/**
* The number of files in level 0 to trigger compaction from level-0 to
* level-1. A value < 0 means that level-0 compaction will not be
* triggered by number of files at all.
* Default: 4
*
* @return the number of files in level 0 to trigger compaction.
*/
int levelZeroFileNumCompactionTrigger();
/**
* Soft limit on number of level-0 files. We start slowing down writes at this
* point. A value < 0 means that no writing slow down will be triggered by
* number of files in level-0.
*
* @param numFiles soft limit on number of level-0 files.
* @return the reference to the current option.
*/
Object setLevelZeroSlowdownWritesTrigger(
int numFiles);
/**
* Soft limit on the number of level-0 files. We start slowing down writes
* at this point. A value < 0 means that no writing slow down will be
* triggered by number of files in level-0.
*
* @return the soft limit on the number of level-0 files.
*/
int levelZeroSlowdownWritesTrigger();
/**
* Maximum number of level-0 files. We stop writes at this point.
*
* @param numFiles the hard limit of the number of level-0 files.
* @return the reference to the current option.
*/
Object setLevelZeroStopWritesTrigger(int numFiles);
/**
* Maximum number of level-0 files. We stop writes at this point.
*
* @return the hard limit of the number of level-0 file.
*/
int levelZeroStopWritesTrigger();
/**
* The highest level to which a new compacted memtable is pushed if it
* does not create overlap. We try to push to level 2 to avoid the
* relatively expensive level 0=>1 compactions and to avoid some
* expensive manifest file operations. We do not push all the way to
* the largest level since that can generate a lot of wasted disk
* space if the same key space is being repeatedly overwritten.
*
* @param maxMemCompactionLevel the highest level to which a new compacted
* mem-table will be pushed.
* @return the reference to the current option.
*/
Object setMaxMemCompactionLevel(
int maxMemCompactionLevel);
/**
* The highest level to which a new compacted memtable is pushed if it
* does not create overlap. We try to push to level 2 to avoid the
* relatively expensive level 0=>1 compactions and to avoid some
* expensive manifest file operations. We do not push all the way to
* the largest level since that can generate a lot of wasted disk
* space if the same key space is being repeatedly overwritten.
*
* @return the highest level where a new compacted memtable will be pushed.
*/
int maxMemCompactionLevel();
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* target_file_size_multiplier is 10, then each file on level-1 will
* be 2MB, and each file on level 2 will be 20MB,
* and each file on level-3 will be 200MB.
* By default targetFileSizeBase is 2MB.
*
* @param targetFileSizeBase the target size of a level-1 file.
* @return the reference to the current option.
*
* @see #setTargetFileSizeMultiplier(int)
*/
Object setTargetFileSizeBase(long targetFileSizeBase);
/**
* The target file size for compaction.
* This targetFileSizeBase determines a level-1 file size.
* Target file size for level L can be calculated by
* targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1))
* For example, if targetFileSizeBase is 2MB and
* target_file_size_multiplier is 10, then each file on level-1 will
* be 2MB, and each file on level 2 will be 20MB,
* and each file on level-3 will be 200MB.
* By default targetFileSizeBase is 2MB.
*
* @return the target size of a level-1 file.
*
* @see #targetFileSizeMultiplier()
*/
long targetFileSizeBase();
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-L file and level-(L+1) file.
* By default targetFileSizeMultiplier is 1, meaning
* files in different levels have the same target.
*
* @param multiplier the size ratio between a level-(L+1) file
* and level-L file.
* @return the reference to the current option.
*/
Object setTargetFileSizeMultiplier(int multiplier);
/**
* targetFileSizeMultiplier defines the size ratio between a
* level-(L+1) file and level-L file.
* By default targetFileSizeMultiplier is 1, meaning
* files in different levels have the same target.
*
* @return the size ratio between a level-(L+1) file and level-L file.
*/
int targetFileSizeMultiplier();
/**
* The upper-bound of the total size of level-1 files in bytes.
* Maximum number of bytes for level L can be calculated as
* (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
* For example, if maxBytesForLevelBase is 20MB, and if
* max_bytes_for_level_multiplier is 10, total data size for level-1
* will be 20MB, total file size for level-2 will be 200MB,
* and total file size for level-3 will be 2GB.
* By default 'maxBytesForLevelBase' is 10MB.
*
* @param maxBytesForLevelBase the upper-bound of the total size of
*     level-1 files in bytes.
* @return the reference to the current option.
* @see #setMaxBytesForLevelMultiplier(int)
*/
Object setMaxBytesForLevelBase(
long maxBytesForLevelBase);
/**
* The upper-bound of the total size of level-1 files in bytes.
* Maximum number of bytes for level L can be calculated as
* (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1))
* For example, if maxBytesForLevelBase is 20MB, and if
* max_bytes_for_level_multiplier is 10, total data size for level-1
* will be 20MB, total file size for level-2 will be 200MB,
* and total file size for level-3 will be 2GB.
* By default 'maxBytesForLevelBase' is 10MB.
*
* @return the upper-bound of the total size of level-1 files in bytes.
* @see #maxBytesForLevelMultiplier()
*/
long maxBytesForLevelBase();
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L.
* @return the reference to the current option.
* @see #setMaxBytesForLevelBase(long)
*/
Object setMaxBytesForLevelMultiplier(int multiplier);
/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L.
* @see #maxBytesForLevelBase()
*/
int maxBytesForLevelMultiplier();
/**
* Maximum number of bytes in all compacted files. We avoid expanding
* the lower level file set of a compaction if it would make the
* total compaction cover more than
* (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
*
* @param expandedCompactionFactor the maximum number of bytes in all
* compacted files.
* @return the reference to the current option.
* @see #setSourceCompactionFactor(int)
*/
Object setExpandedCompactionFactor(int expandedCompactionFactor);
/**
* Maximum number of bytes in all compacted files. We avoid expanding
* the lower level file set of a compaction if it would make the
* total compaction cover more than
* (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
*
* @return the maximum number of bytes in all compacted files.
* @see #sourceCompactionFactor()
*/
int expandedCompactionFactor();
/**
* Maximum number of bytes in all source files to be compacted in a
* single compaction run. We avoid picking too many files in the
* source level so that the total source bytes for the compaction
* do not exceed
* (source_compaction_factor * targetFileSizeLevel()) many bytes.
* Default: 1, i.e. pick maxfilesize amount of data as the source of
* a compaction.
*
* @param sourceCompactionFactor the maximum number of bytes in all
* source files to be compacted in a single compaction run.
* @return the reference to the current option.
* @see #setExpandedCompactionFactor(int)
*/
Object setSourceCompactionFactor(int sourceCompactionFactor);
/**
* Maximum number of bytes in all source files to be compacted in a
* single compaction run. We avoid picking too many files in the
* source level so that the total source bytes for the compaction
* do not exceed
* (source_compaction_factor * targetFileSizeLevel()) many bytes.
* Default: 1, i.e. pick maxfilesize amount of data as the source of
* a compaction.
*
* @return the maximum number of bytes in all source files to be compacted.
* @see #expandedCompactionFactor()
*/
int sourceCompactionFactor();
/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level->level+1 compaction.
*
* @param maxGrandparentOverlapFactor maximum bytes of overlaps in
* "grandparent" level.
* @return the reference to the current option.
*/
Object setMaxGrandparentOverlapFactor(
int maxGrandparentOverlapFactor);
/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level->level+1 compaction.
*
* @return maximum bytes of overlaps in "grandparent" level.
*/
int maxGrandparentOverlapFactor();
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @param softRateLimit the soft-rate-limit of a compaction score
* for put delay.
* @return the reference to the current option.
*/
Object setSoftRateLimit(double softRateLimit);
/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @return soft-rate-limit for put delay.
*/
double softRateLimit();
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when <= 1.0.
* Default: 0 (disabled)
*
* @param hardRateLimit the hard-rate-limit of a compaction score for put
* delay.
* @return the reference to the current option.
*/
Object setHardRateLimit(double hardRateLimit);
/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when <= 1.0.
* Default: 0 (disabled)
*
* @return the hard-rate-limit of a compaction score for put delay.
*/
double hardRateLimit();
/**
* The maximum time interval a put will be stalled when hard_rate_limit
* is enforced. If 0, then there is no limit.
* Default: 1000
*
* @param rateLimitDelayMaxMilliseconds the maximum time interval a put
* will be stalled.
* @return the reference to the current option.
*/
Object setRateLimitDelayMaxMilliseconds(
int rateLimitDelayMaxMilliseconds);
/**
* The maximum time interval a put will be stalled when hard_rate_limit
* is enforced. If 0, then there is no limit.
* Default: 1000
*
* @return the maximum time interval a put will be stalled when
* hard_rate_limit is enforced.
*/
int rateLimitDelayMaxMilliseconds();
/**
* The size of one block in arena memory allocation.
* If <= 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) size should be in the range of [4096, 2 << 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
*
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
*
* @param arenaBlockSize the size of an arena block
* @return the reference to the current option.
* @throws org.rocksdb.RocksDBException
*/
Object setArenaBlockSize(long arenaBlockSize)
throws RocksDBException;
/**
* The size of one block in arena memory allocation.
* If <= 0, a proper value is automatically calculated (usually 1/10 of
* writer_buffer_size).
*
* There are two additional restrictions on the specified size:
* (1) size should be in the range of [4096, 2 << 30] and
* (2) be the multiple of the CPU word (which helps with the memory
* alignment).
*
* We'll automatically check and adjust the size number to make sure it
* conforms to the restrictions.
* Default: 0
*
* @return the size of an arena block
*/
long arenaBlockSize();
/**
* Disable automatic compactions. Manual compactions can still
* be issued on this column family
*
* @param disableAutoCompactions true if auto-compactions are disabled.
* @return the reference to the current option.
*/
Object setDisableAutoCompactions(boolean disableAutoCompactions);
/**
* Disable automatic compactions. Manual compactions can still
* be issued on this column family
*
* @return true if auto-compactions are disabled.
*/
boolean disableAutoCompactions();
/**
* Purge duplicate/deleted keys when a memtable is flushed to storage.
* Default: true
*
* @param purgeRedundantKvsWhileFlush true if purging keys is enabled.
* @return the reference to the current option.
*/
Object setPurgeRedundantKvsWhileFlush(
boolean purgeRedundantKvsWhileFlush);
/**
* Purge duplicate/deleted keys when a memtable is flushed to storage.
* Default: true
*
* @return true if purging keys is enabled.
*/
boolean purgeRedundantKvsWhileFlush();
/**
* Set compaction style for DB.
*
* Default: LEVEL.
*
* @param compactionStyle Compaction style.
* @return the reference to the current option.
*/
Object setCompactionStyle(CompactionStyle compactionStyle);
/**
* Compaction style for DB.
*
* @return Compaction style.
*/
CompactionStyle compactionStyle();
/**
* If true, compaction will verify checksum on every read that happens
* as part of compaction
* Default: true
*
* @param verifyChecksumsInCompaction true if compaction verifies
* checksum on every read.
* @return the reference to the current option.
*/
Object setVerifyChecksumsInCompaction(
boolean verifyChecksumsInCompaction);
/**
* If true, compaction will verify checksum on every read that happens
* as part of compaction
* Default: true
*
* @return true if compaction verifies checksum on every read.
*/
boolean verifyChecksumsInCompaction();
/**
* Use KeyMayExist API to filter deletes when this is true.
* If KeyMayExist returns false, i.e. the key definitely does not exist, then
* the delete is a noop. KeyMayExist only incurs in-memory look up.
* This optimization avoids writing the delete to storage when appropriate.
* Default: false
*
* @param filterDeletes true if filter-deletes behavior is on.
* @return the reference to the current option.
*/
Object setFilterDeletes(boolean filterDeletes);
/**
* Use KeyMayExist API to filter deletes when this is true.
* If KeyMayExist returns false, i.e. the key definitely does not exist, then
* the delete is a noop. KeyMayExist only incurs in-memory look up.
* This optimization avoids writing the delete to storage when appropriate.
* Default: false
*
* @return true if filter-deletes behavior is on.
*/
boolean filterDeletes();
/**
* An iteration->Next() sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @param maxSequentialSkipInIterations the number of keys that could
*     be skipped in an iteration.
* @return the reference to the current option.
*/
Object setMaxSequentialSkipInIterations(long maxSequentialSkipInIterations);
/**
* An iteration->Next() sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number
* of keys (with the same userkey) that will be sequentially
* skipped before a reseek is issued.
* Default: 8
*
* @return the number of keys that could be skipped in an iteration.
*/
long maxSequentialSkipInIterations();
/**
* Set the config for mem-table.
*
* @param config the mem-table config.
* @return the instance of the current Object.
* @throws org.rocksdb.RocksDBException
*/
Object setMemTableConfig(MemTableConfig config)
throws RocksDBException;
/**
* Returns the name of the current mem table representation.
* The memtable format can be set using setMemTableConfig.
*
* @return the name of the currently-used memtable factory.
* @see #setMemTableConfig(org.rocksdb.MemTableConfig)
*/
String memTableFactoryName();
/**
* Set the config for table format.
*
* @param config the table format config.
* @return the reference of the current Options.
*/
Object setTableFormatConfig(TableFormatConfig config);
/**
* @return the name of the currently used table factory.
*/
String tableFactoryName();
/**
* Allows thread-safe inplace updates.
* If inplace_callback function is not set,
* Put(key, new_value) will update inplace the existing_value iff
* * key exists in current memtable
* * new sizeof(new_value) <= sizeof(existing_value)
* * existing_value for that key is a put i.e. kTypeValue
* If inplace_callback function is set, check doc for inplace_callback.
* Default: false.
*
* @param inplaceUpdateSupport true if thread-safe inplace updates
* are allowed.
* @return the reference to the current option.
*/
Object setInplaceUpdateSupport(boolean inplaceUpdateSupport);
/**
* Allows thread-safe inplace updates.
* If inplace_callback function is not set,
* Put(key, new_value) will update inplace the existing_value iff
* * key exists in current memtable
* * new sizeof(new_value) <= sizeof(existing_value)
* * existing_value for that key is a put i.e. kTypeValue
* If inplace_callback function is set, check doc for inplace_callback.
* Default: false.
*
* @return true if thread-safe inplace updates are allowed.
*/
boolean inplaceUpdateSupport();
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @param inplaceUpdateNumLocks the number of locks used for
* inplace updates.
* @return the reference to the current option.
* @throws org.rocksdb.RocksDBException
*/
Object setInplaceUpdateNumLocks(long inplaceUpdateNumLocks)
throws RocksDBException;
/**
* Number of locks used for inplace update
* Default: 10000, if inplace_update_support = true, else 0.
*
* @return the number of locks used for inplace update.
*/
long inplaceUpdateNumLocks();
/**
* Sets the number of bits used in the prefix bloom filter.
*
* This value will be used only when a prefix-extractor is specified.
*
* @param memtablePrefixBloomBits the number of bits used in the
* prefix bloom filter.
* @return the reference to the current option.
*/
Object setMemtablePrefixBloomBits(int memtablePrefixBloomBits);
/**
* Returns the number of bits used in the prefix bloom filter.
*
* This value will be used only when a prefix-extractor is specified.
*
* @return the number of bloom-bits.
* @see #useFixedLengthPrefixExtractor(int)
*/
int memtablePrefixBloomBits();
/**
* The number of hash probes per key used in the mem-table.
*
* @param memtablePrefixBloomProbes the number of hash probes per key.
* @return the reference to the current option.
*/
Object setMemtablePrefixBloomProbes(int memtablePrefixBloomProbes);
/**
* The number of hash probes per key used in the mem-table.
*
* @return the number of hash probes per key.
*/
int memtablePrefixBloomProbes();
/**
* Control locality of bloom filter probes to improve cache miss rate.
* This option only applies to memtable prefix bloom and plaintable
* prefix bloom. It essentially limits the max number of cache lines each
* bloom filter check can touch.
* This optimization is turned off when set to 0. The number should never
* be greater than number of probes. This option can boost performance
* for in-memory workload but should use with care since it can cause
* higher false positive rate.
* Default: 0
*
* @param bloomLocality the level of locality of bloom-filter probes.
* @return the reference to the current option.
*/
Object setBloomLocality(int bloomLocality);
/**
* Control locality of bloom filter probes to improve cache miss rate.
* This option only applies to memtable prefix bloom and plaintable
* prefix bloom. It essentially limits the max number of cache lines each
* bloom filter check can touch.
* This optimization is turned off when set to 0. The number should never
* be greater than number of probes. This option can boost performance
* for in-memory workload but should use with care since it can cause
* higher false positive rate.
* Default: 0
*
* @return the level of locality of bloom-filter probes.
* @see #setMemtablePrefixBloomProbes(int)
*/
int bloomLocality();
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @param maxSuccessiveMerges the maximum number of successive merges.
* @return the reference to the current option.
* @throws org.rocksdb.RocksDBException
*/
Object setMaxSuccessiveMerges(long maxSuccessiveMerges)
throws RocksDBException;
/**
* Maximum number of successive merge operations on a key in the memtable.
*
* When a merge operation is added to the memtable and the maximum number of
* successive merges is reached, the value of the key will be calculated and
* inserted into the memtable instead of the merge operation. This will
* ensure that there are never more than max_successive_merges merge
* operations in the memtable.
*
* Default: 0 (disabled)
*
* @return the maximum number of successive merges.
*/
long maxSuccessiveMerges();
/**
* The number of partial merge operands to accumulate before partial
* merge will be performed. Partial merge will not be called
* if the list of values to merge is less than min_partial_merge_operands.
*
* If min_partial_merge_operands < 2, then it will be treated as 2.
*
* Default: 2
*
* @param minPartialMergeOperands min partial merge operands
* @return the reference to the current option.
*/
Object setMinPartialMergeOperands(int minPartialMergeOperands);
/**
* The number of partial merge operands to accumulate before partial
* merge will be performed. Partial merge will not be called
* if the list of values to merge is less than min_partial_merge_operands.
*
* If min_partial_merge_operands < 2, then it will be treated as 2.
*
* Default: 2
*
* @return min partial merge operands
*/
int minPartialMergeOperands();
/**
* Default memtable memory budget used with the following methods:
*
* <ol>
* <li>{@link #optimizeLevelStyleCompaction()}</li>
* <li>{@link #optimizeUniversalStyleCompaction()}</li>
* </ol>
*/
long DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET = 512 * 1024 * 1024;
}
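The file-size and level-size settings above compose multiplicatively across levels. The following is a rough tuning sketch, assuming the new ColumnFamilyOptions class (shown only in the suppressed diff) implements this interface; the values are illustrative, not recommendations.

import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.CompressionType;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class ColumnFamilyTuningSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
    cfOptions.setWriteBufferSize(64 * 1024 * 1024);   // 64 MB memtables
    cfOptions.setMaxWriteBufferNumber(3);
    cfOptions.setCompressionType(CompressionType.SNAPPY_COMPRESSION);
    // targetFileSizeBase * targetFileSizeMultiplier^(L-1):
    // 2 MB files on level-1, 20 MB on level-2, 200 MB on level-3.
    cfOptions.setTargetFileSizeBase(2 * 1024 * 1024);
    cfOptions.setTargetFileSizeMultiplier(10);
    // maxBytesForLevelBase * maxBytesForLevelMultiplier^(L-1):
    // 20 MB total on level-1, 200 MB on level-2, 2 GB on level-3.
    cfOptions.setMaxBytesForLevelBase(20 * 1024 * 1024);
    cfOptions.setMaxBytesForLevelMultiplier(10);
    cfOptions.setLevelZeroFileNumCompactionTrigger(4);
    cfOptions.dispose();
  }
}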


@@ -0,0 +1,763 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public interface DBOptionsInterface {
/**
* If this value is set to true, then the database will be created
* if it is missing during {@code RocksDB.open()}.
* Default: false
*
* @param flag a flag indicating whether to create the database if the
*     specified database is missing when opened via
*     {@link RocksDB#open(org.rocksdb.Options, String)}.
* @return the instance of the current Options
* @see RocksDB#open(org.rocksdb.Options, String)
*/
Object setCreateIfMissing(boolean flag);
/**
* Return true if the create_if_missing flag is set to true.
* If true, the database will be created if it is missing.
*
* @return true if the createIfMissing option is set to true.
* @see #setCreateIfMissing(boolean)
*/
boolean createIfMissing();
/**
* <p>If true, missing column families will be automatically created</p>
*
* <p>Default: false</p>
*
* @param flag a flag indicating if missing column families shall be
* created automatically.
* @return the instance of the current Object.
*/
Object setCreateMissingColumnFamilies(boolean flag);
/**
* Return true if the create_missing_column_families flag is set
* to true. If true, missing column families will be created.
*
* @return true if the createMissingColumnFamilies is set to
* true.
* @see #setCreateMissingColumnFamilies(boolean)
*/
boolean createMissingColumnFamilies();
/**
* If true, an error will be thrown during RocksDB.open() if the
* database already exists.
* Default: false
*
* @param errorIfExists if true, an exception will be thrown
* during {@code RocksDB.open()} if the database already exists.
* @return the reference to the current option.
* @see RocksDB#open(org.rocksdb.Options, String)
*/
Object setErrorIfExists(boolean errorIfExists);
/**
* If true, an error will be thrown during RocksDB.open() if the
* database already exists.
*
* @return if true, an error is raised when the specified database
* already exists before open.
*/
boolean errorIfExists();
/**
* If true, the implementation will do aggressive checking of the
* data it is processing and will stop early if it detects any
* errors. This may have unforeseen ramifications: for example, a
* corruption of one DB entry may cause a large number of entries to
* become unreadable or for the entire DB to become unopenable.
* If any of the writes to the database fails (Put, Delete, Merge, Write),
* the database will switch to read-only mode and fail all other
* Write operations.
* Default: true
*
* @param paranoidChecks a flag to indicate whether paranoid-check
* is on.
* @return the reference to the current option.
*/
Object setParanoidChecks(boolean paranoidChecks);
/**
* If true, the implementation will do aggressive checking of the
* data it is processing and will stop early if it detects any
* errors. This may have unforeseen ramifications: for example, a
* corruption of one DB entry may cause a large number of entries to
* become unreadable or for the entire DB to become unopenable.
* If any of the writes to the database fails (Put, Delete, Merge, Write),
* the database will switch to read-only mode and fail all other
* Write operations.
*
* @return a boolean indicating whether paranoid-check is on.
*/
boolean paranoidChecks();
/**
* Use to control write rate of flush and compaction. Flush has higher
* priority than compaction. Rate limiting is disabled if nullptr.
* Default: nullptr
*
* @param config rate limiter config.
* @return the instance of the current Object.
*/
Object setRateLimiterConfig(RateLimiterConfig config);
/**
* Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based
* on {@code target_file_size_base} and {@code target_file_size_multiplier}
* for level-based compaction. For universal-style compaction, you can usually
* set it to -1.
* Default: 5000
*
* @param maxOpenFiles the maximum number of open files.
* @return the reference to the current DBOptions.
*/
Object setMaxOpenFiles(int maxOpenFiles);
/**
* Number of open files that can be used by the DB. You may need to
* increase this if your database has a large working set. Value -1 means
* files opened are always kept open. You can estimate number of files based
* on {@code target_file_size_base} and {@code target_file_size_multiplier}
* for level-based compaction. For universal-style compaction, you can usually
* set it to -1.
*
* @return the maximum number of open files.
*/
int maxOpenFiles();
/**
* <p>Once write-ahead logs exceed this size, we will start forcing the
* flush of column families whose memtables are backed by the oldest live
* WAL file (i.e. the ones that are causing all the space amplification).
* </p>
* <p>If set to 0 (default), we will dynamically choose the WAL size limit to
* be [sum of all write_buffer_size * max_write_buffer_number] * 2</p>
* <p>Default: 0</p>
*
* @param maxTotalWalSize max total wal size
* @return the instance of the current Object.
*/
Object setMaxTotalWalSize(long maxTotalWalSize);
/**
* <p>Returns the max total wal size. Once write-ahead logs exceed this size,
* we will start forcing the flush of column families whose memtables are
* backed by the oldest live WAL file (i.e. the ones that are causing all
* the space amplification).</p>
*
* <p>If set to 0 (default), we will dynamically choose the WAL size limit
* to be [sum of all write_buffer_size * max_write_buffer_number] * 2
* </p>
*
* @return max total wal size
*/
long maxTotalWalSize();
/**
* <p>Creates statistics object which collects metrics about database operations.
* Statistics objects should not be shared between DB instances as
* it does not use any locks to prevent concurrent updates.</p>
*
* @return the instance of the current Object.
* @see RocksDB#open(org.rocksdb.Options, String)
*/
Object createStatistics();
/**
* <p>Returns statistics object. Calls {@link #createStatistics()} if
* C++ returns {@code nullptr} for statistics.</p>
*
* @return the instance of the statistics object.
* @see #createStatistics()
*/
Statistics statisticsPtr();
/**
* <p>If true, then the contents of manifest and data files are
* not synced to stable storage. Their contents remain in the
* OS buffers till the OS decides to flush them.</p>
*
* <p>This option is good for bulk-loading of data.</p>
*
* <p>Once the bulk-loading is complete, please issue a sync to
* the OS to flush all dirty buffers to stable storage.</p>
*
* <p>Default: false</p>
*
* @param disableDataSync a boolean flag to specify whether to
* disable data sync.
* @return the reference to the current DBOptions.
*/
Object setDisableDataSync(boolean disableDataSync);
/**
* If true, then the contents of data files are not synced
* to stable storage. Their contents remain in the OS buffers till the
* OS decides to flush them. This option is good for bulk-loading
* of data. Once the bulk-loading is complete, please issue a
* sync to the OS to flush all dirty buffers to stable storage.
*
* @return if true, then data-sync is disabled.
*/
boolean disableDataSync();
/**
* <p>If true, then every store to stable storage will issue a fsync.</p>
* <p>If false, then every store to stable storage will issue a fdatasync.
* This parameter should be set to true while storing data to
* filesystem like ext3 that can lose files after a reboot.</p>
* <p>Default: false</p>
*
* @param useFsync a boolean flag to specify whether to use fsync
* @return the instance of the current Object.
*/
Object setUseFsync(boolean useFsync);
/**
* <p>If true, then every store to stable storage will issue a fsync.</p>
* <p>If false, then every store to stable storage will issue a fdatasync.
* This parameter should be set to true while storing data to
* filesystem like ext3 that can lose files after a reboot.</p>
*
* @return boolean value indicating if fsync is used.
*/
boolean useFsync();
/**
* This specifies the info LOG dir.
* If it is empty, the log files will be in the same dir as data.
* If it is non empty, the log files will be in the specified dir,
* and the db data dir's absolute path will be used as the log file
* name's prefix.
*
* @param dbLogDir the path to the info log directory
* @return the instance of the current Object.
*/
Object setDbLogDir(String dbLogDir);
/**
* Returns the directory of info log.
*
* If it is empty, the log files will be in the same dir as data.
* If it is non empty, the log files will be in the specified dir,
* and the db data dir's absolute path will be used as the log file
* name's prefix.
*
* @return the path to the info log directory
*/
String dbLogDir();
/**
* This specifies the absolute dir path for write-ahead logs (WAL).
* If it is empty, the log files will be in the same dir as data,
* dbname is used as the data dir by default
* If it is non empty, the log files will be kept in the specified dir.
* When destroying the db,
* all log files in wal_dir and the dir itself are deleted.
*
* @param walDir the path to the write-ahead-log directory.
* @return the instance of the current Object.
*/
Object setWalDir(String walDir);
/**
* Returns the path to the write-ahead-logs (WAL) directory.
*
* If it is empty, the log files will be in the same dir as data,
* dbname is used as the data dir by default
* If it is non empty, the log files will be kept in the specified dir.
* When destroying the db,
* all log files in wal_dir and the dir itself are deleted.
*
* @return the path to the write-ahead-logs (WAL) directory.
*/
String walDir();
/**
* The periodicity when obsolete files get deleted. The default
* value is 6 hours. The files that get out of scope by compaction
* process will still get automatically deleted on every compaction,
* regardless of this setting
*
* @param micros the time interval in micros
* @return the instance of the current Object.
*/
Object setDeleteObsoleteFilesPeriodMicros(long micros);
/**
* The periodicity when obsolete files get deleted. The default
* value is 6 hours. The files that get out of scope by compaction
* process will still get automatically deleted on every compaction,
* regardless of this setting
*
* @return the time interval in micros when obsolete files will be deleted.
*/
long deleteObsoleteFilesPeriodMicros();
/**
* Specifies the maximum number of concurrent background compaction jobs,
* submitted to the default LOW priority thread pool.
* If you're increasing this, also consider increasing the number of threads
* in the LOW priority thread pool.
* Default: 1
*
* @param maxBackgroundCompactions the maximum number of background
* compaction jobs.
* @return the instance of the current Object.
*
* @see RocksEnv#setBackgroundThreads(int)
* @see RocksEnv#setBackgroundThreads(int, int)
* @see #maxBackgroundFlushes()
*/
Object setMaxBackgroundCompactions(int maxBackgroundCompactions);
/**
* Returns the maximum number of concurrent background compaction jobs,
* submitted to the default LOW priority thread pool.
* When increasing this number, we may also want to consider increasing
* number of threads in LOW priority thread pool.
* Default: 1
*
* @return the maximum number of concurrent background compaction jobs.
* @see RocksEnv#setBackgroundThreads(int)
* @see RocksEnv#setBackgroundThreads(int, int)
*/
int maxBackgroundCompactions();
/**
* Specifies the maximum number of concurrent background flush jobs.
* If you're increasing this, also consider increasing the number of threads
* in the HIGH priority thread pool.
* Default: 1
*
* @param maxBackgroundFlushes number of max concurrent flush jobs
* @return the instance of the current Object.
*
* @see RocksEnv#setBackgroundThreads(int)
* @see RocksEnv#setBackgroundThreads(int, int)
* @see #maxBackgroundCompactions()
*/
Object setMaxBackgroundFlushes(int maxBackgroundFlushes);
/**
* Returns the maximum number of concurrent background flush jobs.
* If you're increasing this, also consider increasing the number of threads
* in the HIGH priority thread pool.
* Default: 1
*
* @return the maximum number of concurrent background flush jobs.
* @see RocksEnv#setBackgroundThreads(int)
* @see RocksEnv#setBackgroundThreads(int, int)
*/
int maxBackgroundFlushes();
/**
* Specifies the maximum size of an info log file. If the current log file
* is larger than `max_log_file_size`, a new info log file will
* be created.
* If 0, all logs will be written to one log file.
*
* @param maxLogFileSize the maximum size of an info log file.
* @return the instance of the current Object.
* @throws org.rocksdb.RocksDBException
*/
Object setMaxLogFileSize(long maxLogFileSize)
throws RocksDBException;
/**
* Returns the maximum size of an info log file. If the current log file
* is larger than this size, a new info log file will be created.
* If 0, all logs will be written to one log file.
*
* @return the maximum size of the info log file.
*/
long maxLogFileSize();
/**
* Specifies the time interval for the info log file to roll (in seconds).
* If specified with non-zero value, log file will be rolled
* if it has been active longer than `log_file_time_to_roll`.
* Default: 0 (disabled)
*
* @param logFileTimeToRoll the time interval in seconds.
* @return the instance of the current Object.
* @throws org.rocksdb.RocksDBException
*/
Object setLogFileTimeToRoll(long logFileTimeToRoll)
throws RocksDBException;
/**
* Returns the time interval for the info log file to roll (in seconds).
* If specified with non-zero value, log file will be rolled
* if it has been active longer than `log_file_time_to_roll`.
* Default: 0 (disabled)
*
* @return the time interval in seconds.
*/
long logFileTimeToRoll();
/**
* Specifies the maximum number of info log files to be kept.
* Default: 1000
*
* @param keepLogFileNum the maximum number of info log files to be kept.
* @return the instance of the current Object.
* @throws org.rocksdb.RocksDBException
*/
Object setKeepLogFileNum(long keepLogFileNum)
throws RocksDBException;
/**
* Returns the maximum number of info log files to be kept.
* Default: 1000
*
* @return the maximum number of info log files to be kept.
*/
long keepLogFileNum();
/**
* Manifest file is rolled over on reaching this limit.
* The older manifest file will be deleted.
* The default value is MAX_INT so that roll-over does not take place.
*
* @param maxManifestFileSize the size limit of a manifest file.
* @return the instance of the current Object.
*/
Object setMaxManifestFileSize(long maxManifestFileSize);
/**
* Manifest file is rolled over on reaching this limit.
* The older manifest file will be deleted.
* The default value is MAX_INT so that roll-over does not take place.
*
* @return the size limit of a manifest file.
*/
long maxManifestFileSize();
/**
* Number of shards used for table cache.
*
* @param tableCacheNumshardbits the number of shards
* @return the instance of the current Object.
*/
Object setTableCacheNumshardbits(int tableCacheNumshardbits);
/**
* Number of shards used for table cache.
*
* @return the number of shards used for table cache.
*/
int tableCacheNumshardbits();
/**
* During data eviction of table's LRU cache, it would be inefficient
* to strictly follow LRU because this piece of memory will not really
* be released unless its refcount falls to zero. Instead, make two
* passes: the first pass will release items with refcount = 1,
* and if enough space is not released after scanning the number of
* elements specified by this parameter, we will remove items in LRU
* order.
*
* @param limit scan count limit
* @return the instance of the current Object.
*/
Object setTableCacheRemoveScanCountLimit(int limit);
/**
* During data eviction of table's LRU cache, it would be inefficient
* to strictly follow LRU because this piece of memory will not really
* be released unless its refcount falls to zero. Instead, make two
* passes: the first pass will release items with refcount = 1,
* and if enough space is not released after scanning the number of
* elements specified by this parameter, we will remove items in LRU
* order.
*
* @return scan count limit
*/
int tableCacheRemoveScanCountLimit();
/**
* {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
* will be deleted.
* <ol>
* <li>If both set to 0, logs will be deleted asap and will not get into
* the archive.</li>
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater
* than WAL_size_limit_MB, they will be deleted starting with the
* earliest until size_limit is met. All empty files will be deleted.</li>
* <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_seconds / 2 and those that
* are older than WAL_ttl_seconds will be deleted.</li>
* <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.</li>
* </ol>
*
* @param walTtlSeconds the ttl seconds
* @return the instance of the current Object.
* @see #setWalSizeLimitMB(long)
*/
Object setWalTtlSeconds(long walTtlSeconds);
/**
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
* will be deleted.
* <ol>
* <li>If both set to 0, logs will be deleted asap and will not get into
* the archive.</li>
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater
* than WAL_size_limit_MB, they will be deleted starting with the
* earliest until size_limit is met. All empty files will be deleted.</li>
* <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_seconds / 2 and those that
* are older than WAL_ttl_seconds will be deleted.</li>
* <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.</li>
* </ol>
*
* @return the wal-ttl seconds
* @see #walSizeLimitMB()
*/
long walTtlSeconds();
/**
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
* will be deleted.
* <ol>
* <li>If both set to 0, logs will be deleted asap and will not get into
* the archive.</li>
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater
* than WAL_size_limit_MB, they will be deleted starting with the
* earliest until size_limit is met. All empty files will be deleted.</li>
* <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_seconds / 2 and those that
* are older than WAL_ttl_seconds will be deleted.</li>
* <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.</li>
* </ol>
*
* @param sizeLimitMB size limit in mega-bytes.
* @return the instance of the current Object.
* @see #setWalTtlSeconds(long)
*/
Object setWalSizeLimitMB(long sizeLimitMB);
/**
* {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
* will be deleted.
* <ol>
* <li>If both set to 0, logs will be deleted asap and will not get into
* the archive.</li>
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
* WAL files will be checked every 10 min and if total size is greater
* than WAL_size_limit_MB, they will be deleted starting with the
* earliest until size_limit is met. All empty files will be deleted.</li>
* <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
* WAL files will be checked every WAL_ttl_seconds / 2 and those that
* are older than WAL_ttl_seconds will be deleted.</li>
* <li>If both are not 0, WAL files will be checked every 10 min and both
* checks will be performed with ttl being first.</li>
* </ol>
* @return size limit in mega-bytes.
* @see #walTtlSeconds()
*/
long walSizeLimitMB();
/**
* Number of bytes to preallocate (via fallocate) the manifest
* files. Default is 4MB, which is reasonable to reduce random IO
* as well as prevent overallocation for mounts that preallocate
* large amounts of data (such as xfs's allocsize option).
*
* @param size the size in byte
* @return the instance of the current Object.
* @throws org.rocksdb.RocksDBException
*/
Object setManifestPreallocationSize(long size)
throws RocksDBException;
/**
* Number of bytes to preallocate (via fallocate) the manifest
* files. Default is 4MB, which is reasonable to reduce random IO
* as well as prevent overallocation for mounts that preallocate
* large amounts of data (such as xfs's allocsize option).
*
* @return size in bytes.
*/
long manifestPreallocationSize();
/**
* Data being read from file storage may be buffered in the OS
* Default: true
*
* @param allowOsBuffer if true, then OS buffering is allowed.
* @return the instance of the current Object.
*/
Object setAllowOsBuffer(boolean allowOsBuffer);
/**
* Data being read from file storage may be buffered in the OS
* Default: true
*
* @return if true, then OS buffering is allowed.
*/
boolean allowOsBuffer();
/**
* Allow the OS to mmap file for reading sst tables.
* Default: false
*
* @param allowMmapReads true if mmap reads are allowed.
* @return the instance of the current Object.
*/
Object setAllowMmapReads(boolean allowMmapReads);
/**
* Allow the OS to mmap file for reading sst tables.
* Default: false
*
* @return true if mmap reads are allowed.
*/
boolean allowMmapReads();
/**
* Allow the OS to mmap file for writing. Default: false
*
* @param allowMmapWrites true if mmap writes are allowed.
* @return the instance of the current Object.
*/
Object setAllowMmapWrites(boolean allowMmapWrites);
/**
* Allow the OS to mmap file for writing. Default: false
*
* @return true if mmap writes are allowed.
*/
boolean allowMmapWrites();
/**
* Disable child processes from inheriting open files. Default: true
*
* @param isFdCloseOnExec true if child process inheriting open
* files is disabled.
* @return the instance of the current Object.
*/
Object setIsFdCloseOnExec(boolean isFdCloseOnExec);
/**
* Disable child processes from inheriting open files. Default: true
*
* @return true if child process inheriting open files is disabled.
*/
boolean isFdCloseOnExec();
/**
* Skip log corruption error on recovery (If client is ok with
* losing most recent changes)
* Default: false
*
* @param skip true if log corruption errors are skipped during recovery.
* @return the instance of the current Object.
*/
Object setSkipLogErrorOnRecovery(boolean skip);
/**
* Skip log corruption error on recovery (If client is ok with
* losing most recent changes)
* Default: false
*
* @return true if log corruption errors are skipped during recovery.
*/
boolean skipLogErrorOnRecovery();
/**
* If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
* Default: 3600 (1 hour)
*
* @param statsDumpPeriodSec time interval in seconds.
* @return the instance of the current Object.
*/
Object setStatsDumpPeriodSec(int statsDumpPeriodSec);
/**
* If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
* Default: 3600 (1 hour)
*
* @return time interval in seconds.
*/
int statsDumpPeriodSec();
/**
* If set true, will hint the underlying file system that the file
* access pattern is random, when a sst file is opened.
* Default: true
*
* @param adviseRandomOnOpen true if hinting random access is on.
* @return the instance of the current Object.
*/
Object setAdviseRandomOnOpen(boolean adviseRandomOnOpen);
/**
* If set true, will hint the underlying file system that the file
* access pattern is random, when a sst file is opened.
* Default: true
*
* @return true if hinting random access is on.
*/
boolean adviseRandomOnOpen();
/**
* Use adaptive mutex, which spins in the user space before resorting
* to kernel. This could reduce context switch when the mutex is not
* heavily contended. However, if the mutex is hot, we could end up
* wasting spin time.
* Default: false
*
* @param useAdaptiveMutex true if adaptive mutex is used.
* @return the instance of the current Object.
*/
Object setUseAdaptiveMutex(boolean useAdaptiveMutex);
/**
* Use adaptive mutex, which spins in the user space before resorting
* to kernel. This could reduce context switch when the mutex is not
* heavily contended. However, if the mutex is hot, we could end up
* wasting spin time.
* Default: false
*
* @return true if adaptive mutex is used.
*/
boolean useAdaptiveMutex();
/**
* Allows OS to incrementally sync files to disk while they are being
* written, asynchronously, in the background.
* Issue one request for every bytes_per_sync written. 0 turns it off.
* Default: 0
*
* @param bytesPerSync size in bytes
* @return the instance of the current Object.
*/
Object setBytesPerSync(long bytesPerSync);
/**
* Allows OS to incrementally sync files to disk while they are being
* written, asynchronously, in the background.
* Issue one request for every bytes_per_sync written. 0 turns it off.
* Default: 0
*
* @return size in bytes
*/
long bytesPerSync();
}
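For contrast with the per-column-family settings above, here is a short sketch of configuring DB-wide settings, assuming the new DBOptions class (shown only in the suppressed diff) implements this interface; the values are illustrative.

import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class DBOptionsSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    DBOptions dbOptions = new DBOptions();
    dbOptions.setCreateIfMissing(true);
    dbOptions.setCreateMissingColumnFamilies(true);
    dbOptions.setMaxBackgroundCompactions(2);   // LOW priority thread pool
    dbOptions.setMaxBackgroundFlushes(1);       // HIGH priority thread pool
    dbOptions.setMaxOpenFiles(5000);
    dbOptions.setMaxLogFileSize(10 * 1024 * 1024);  // declared to throw RocksDBException
    dbOptions.setStatsDumpPeriodSec(600);
    dbOptions.dispose();
  }
}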

File diff suppressed because it is too large.


@@ -11,11 +11,11 @@ package org.rocksdb;
public abstract class RateLimiterConfig {
/**
* This function should only be called by
-* {@link org.rocksdb.Options#setRateLimiter(long, long)}, which will
+* {@link org.rocksdb.DBOptions#setRateLimiter(long, long)}, which will
* create a c++ shared-pointer to the c++ {@code RateLimiter} that is associated
* with a Java RateLimiterConfig.
*
-* @see org.rocksdb.Options#setRateLimiter(long, long)
+* @see org.rocksdb.DBOptions#setRateLimiter(long, long)
*/
abstract protected long newRateLimiterHandle();
}
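A hedged usage sketch of the rate limiter configuration this Javadoc refers to; it assumes the pre-existing GenericRateLimiterConfig constructor that takes a bytes-per-second rate, and the rate value is illustrative.

import org.rocksdb.GenericRateLimiterConfig;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class RateLimiterSketch {
  public static void main(String[] args) {
    RocksDB.loadLibrary();
    // Cap the write rate of flushes and compactions at roughly 10 MB/s.
    GenericRateLimiterConfig rateLimiterConfig =
        new GenericRateLimiterConfig(10 * 1024 * 1024);
    Options options = new Options();
    options.setRateLimiterConfig(rateLimiterConfig);
    options.dispose();
  }
}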


@@ -44,6 +44,13 @@ public class OptionsTest {
assert(opt.paranoidChecks() == boolValue);
}
+{
+// MaxTotalWalSize test
+long longValue = rand.nextLong();
+opt.setMaxTotalWalSize(longValue);
+assert(opt.maxTotalWalSize() == longValue);
+}
{ // MaxOpenFiles test
int intValue = rand.nextInt();
opt.setMaxOpenFiles(intValue);
@@ -264,9 +271,9 @@ public class OptionsTest {
}
{ // TargetFileSizeBase test
-int intValue = rand.nextInt();
-opt.setTargetFileSizeBase(intValue);
-assert(opt.targetFileSizeBase() == intValue);
+long longValue = rand.nextLong();
+opt.setTargetFileSizeBase(longValue);
+assert(opt.targetFileSizeBase() == longValue);
}
{ // TargetFileSizeMultiplier test

File diff suppressed because it is too large.