From 18eb563058f19856a2723f4cbe867aee7959cc6c Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Wed, 20 Jan 2016 17:05:41 +0000 Subject: [PATCH 1/9] Improve the speed and synchronization around the construction of Java/JNI objects --- java/rocksjni/backupablejni.cc | 23 +- java/rocksjni/backupenginejni.cc | 14 +- java/rocksjni/comparator.cc | 12 +- java/rocksjni/filter.cc | 12 +- java/rocksjni/loggerjnicallback.cc | 12 +- java/rocksjni/options.cc | 69 ++-- java/rocksjni/portal.h | 8 +- .../remove_emptyvalue_compactionfilterjni.cc | 13 +- java/rocksjni/restorejni.cc | 16 +- java/rocksjni/rocksjni.cc | 295 +++++++----------- java/rocksjni/slice.cc | 48 +-- java/rocksjni/ttl.cc | 146 ++++----- java/rocksjni/write_batch.cc | 15 +- java/rocksjni/write_batch_with_index.cc | 24 +- .../org/rocksdb/AbstractCompactionFilter.java | 12 +- .../java/org/rocksdb/AbstractComparator.java | 17 +- .../org/rocksdb/AbstractRocksIterator.java | 23 +- .../main/java/org/rocksdb/AbstractSlice.java | 42 +-- .../java/org/rocksdb/AbstractWriteBatch.java | 33 +- .../main/java/org/rocksdb/BackupEngine.java | 36 +-- .../main/java/org/rocksdb/BackupableDB.java | 29 +- .../java/org/rocksdb/BackupableDBOptions.java | 53 ++-- .../main/java/org/rocksdb/BloomFilter.java | 16 +- .../src/main/java/org/rocksdb/Checkpoint.java | 18 +- .../java/org/rocksdb/ColumnFamilyHandle.java | 17 +- .../java/org/rocksdb/ColumnFamilyOptions.java | 45 +-- .../src/main/java/org/rocksdb/Comparator.java | 12 +- .../java/org/rocksdb/ComparatorOptions.java | 16 +- java/src/main/java/org/rocksdb/DBOptions.java | 149 ++++----- .../java/org/rocksdb/DirectComparator.java | 12 +- .../main/java/org/rocksdb/DirectSlice.java | 32 +- java/src/main/java/org/rocksdb/Env.java | 4 +- java/src/main/java/org/rocksdb/Filter.java | 12 +- .../main/java/org/rocksdb/FlushOptions.java | 15 +- java/src/main/java/org/rocksdb/Logger.java | 17 +- .../java/org/rocksdb/NativeReference.java | 77 +++++ java/src/main/java/org/rocksdb/Options.java | 173 
+++++----- .../main/java/org/rocksdb/ReadOptions.java | 27 +- .../RemoveEmptyValueCompactionFilter.java | 5 +- .../java/org/rocksdb/RestoreBackupableDB.java | 29 +- .../main/java/org/rocksdb/RestoreOptions.java | 16 +- java/src/main/java/org/rocksdb/RocksDB.java | 113 ++++--- java/src/main/java/org/rocksdb/RocksEnv.java | 6 +- .../main/java/org/rocksdb/RocksIterator.java | 6 +- .../main/java/org/rocksdb/RocksMemEnv.java | 10 +- .../java/org/rocksdb/RocksMutableObject.java | 33 ++ .../main/java/org/rocksdb/RocksObject.java | 107 +------ java/src/main/java/org/rocksdb/Slice.java | 17 +- java/src/main/java/org/rocksdb/Snapshot.java | 8 +- .../org/rocksdb/TransactionLogIterator.java | 9 +- java/src/main/java/org/rocksdb/TtlDB.java | 56 ++-- .../java/org/rocksdb/WBWIRocksIterator.java | 10 +- .../src/main/java/org/rocksdb/WriteBatch.java | 25 +- .../java/org/rocksdb/WriteBatchWithIndex.java | 20 +- .../main/java/org/rocksdb/WriteOptions.java | 11 +- .../org/rocksdb/WriteBatchWithIndexTest.java | 4 +- 56 files changed, 968 insertions(+), 1111 deletions(-) create mode 100644 java/src/main/java/org/rocksdb/NativeReference.java create mode 100644 java/src/main/java/org/rocksdb/RocksMutableObject.java diff --git a/java/rocksjni/backupablejni.cc b/java/rocksjni/backupablejni.cc index f2304dadb..a7aa6e3ef 100644 --- a/java/rocksjni/backupablejni.cc +++ b/java/rocksjni/backupablejni.cc @@ -21,17 +21,17 @@ /* * Class: org_rocksdb_BackupableDB * Method: open - * Signature: (JJ)V + * Signature: (JJ)J */ -void Java_org_rocksdb_BackupableDB_open( - JNIEnv* env, jobject jbdb, jlong jdb_handle, jlong jopt_handle) { - auto db = reinterpret_cast(jdb_handle); - auto opt = reinterpret_cast(jopt_handle); +jlong Java_org_rocksdb_BackupableDB_open( + JNIEnv* env, jclass jcls, jlong jdb_handle, jlong jopt_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* opt = reinterpret_cast(jopt_handle); auto bdb = new rocksdb::BackupableDB(db, *opt); // as BackupableDB extends RocksDB on the 
java side, we can reuse // the RocksDB portal here. - rocksdb::RocksDBJni::setHandle(env, jbdb, bdb); + return reinterpret_cast(bdb); } /* @@ -135,14 +135,14 @@ void Java_org_rocksdb_BackupableDB_garbageCollect(JNIEnv* env, /* * Class: org_rocksdb_BackupableDBOptions * Method: newBackupableDBOptions - * Signature: (Ljava/lang/String;)V + * Signature: (Ljava/lang/String;)J */ -void Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions( - JNIEnv* env, jobject jobj, jstring jpath) { - const char* cpath = env->GetStringUTFChars(jpath, 0); +jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions( + JNIEnv* env, jclass jcls, jstring jpath) { + const char* cpath = env->GetStringUTFChars(jpath, NULL); auto bopt = new rocksdb::BackupableDBOptions(cpath); env->ReleaseStringUTFChars(jpath, cpath); - rocksdb::BackupableDBOptionsJni::setHandle(env, jobj, bopt); + return reinterpret_cast(bopt); } /* @@ -320,5 +320,4 @@ void Java_org_rocksdb_BackupableDBOptions_disposeInternal( auto bopt = reinterpret_cast(jhandle); assert(bopt); delete bopt; - rocksdb::BackupableDBOptionsJni::setHandle(env, jopt, nullptr); } diff --git a/java/rocksjni/backupenginejni.cc b/java/rocksjni/backupenginejni.cc index a42399873..a796d6e5b 100644 --- a/java/rocksjni/backupenginejni.cc +++ b/java/rocksjni/backupenginejni.cc @@ -16,10 +16,10 @@ /* * Class: org_rocksdb_BackupEngine * Method: open - * Signature: (JJ)V + * Signature: (JJ)J */ -void Java_org_rocksdb_BackupEngine_open( - JNIEnv* env, jobject jbe, jlong env_handle, +jlong Java_org_rocksdb_BackupEngine_open( + JNIEnv* env, jclass jcls, jlong env_handle, jlong backupable_db_options_handle) { auto* rocks_env = reinterpret_cast(env_handle); auto* backupable_db_options = @@ -30,11 +30,11 @@ void Java_org_rocksdb_BackupEngine_open( *backupable_db_options, &backup_engine); if (status.ok()) { - rocksdb::BackupEngineJni::setHandle(env, jbe, backup_engine); - return; + return reinterpret_cast(backup_engine); + } else { + 
rocksdb::RocksDBExceptionJni::ThrowNew(env, status); + return 0; } - - rocksdb::RocksDBExceptionJni::ThrowNew(env, status); } /* diff --git a/java/rocksjni/comparator.cc b/java/rocksjni/comparator.cc index 8765daa34..dd11f10e4 100644 --- a/java/rocksjni/comparator.cc +++ b/java/rocksjni/comparator.cc @@ -36,15 +36,15 @@ void Java_org_rocksdb_AbstractComparator_disposeInternal( /* * Class: org_rocksdb_Comparator * Method: createNewComparator0 - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_Comparator_createNewComparator0( +jlong Java_org_rocksdb_Comparator_createNewComparator0( JNIEnv* env, jobject jobj, jlong copt_handle) { const rocksdb::ComparatorJniCallbackOptions* copt = reinterpret_cast(copt_handle); const rocksdb::ComparatorJniCallback* c = new rocksdb::ComparatorJniCallback(env, jobj, copt); - rocksdb::AbstractComparatorJni::setHandle(env, jobj, c); + return reinterpret_cast(c); } // @@ -53,14 +53,14 @@ void Java_org_rocksdb_Comparator_createNewComparator0( /* * Class: org_rocksdb_DirectComparator * Method: createNewDirectComparator0 - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_DirectComparator_createNewDirectComparator0( +jlong Java_org_rocksdb_DirectComparator_createNewDirectComparator0( JNIEnv* env, jobject jobj, jlong copt_handle) { const rocksdb::ComparatorJniCallbackOptions* copt = reinterpret_cast(copt_handle); const rocksdb::DirectComparatorJniCallback* c = new rocksdb::DirectComparatorJniCallback(env, jobj, copt); - rocksdb::AbstractComparatorJni::setHandle(env, jobj, c); + return reinterpret_cast(c); } // diff --git a/java/rocksjni/filter.cc b/java/rocksjni/filter.cc index 2b662d03f..96ef9856b 100644 --- a/java/rocksjni/filter.cc +++ b/java/rocksjni/filter.cc @@ -19,17 +19,17 @@ /* * Class: org_rocksdb_BloomFilter * Method: createBloomFilter - * Signature: (IZ)V + * Signature: (IZ)J */ -void Java_org_rocksdb_BloomFilter_createNewBloomFilter( - JNIEnv* env, jobject jobj, jint bits_per_key, +jlong 
Java_org_rocksdb_BloomFilter_createNewBloomFilter( + JNIEnv* env, jclass jcls, jint bits_per_key, jboolean use_block_base_builder) { - rocksdb::FilterPolicy* fp = const_cast( + auto* fp = const_cast( rocksdb::NewBloomFilterPolicy(bits_per_key, use_block_base_builder)); - std::shared_ptr *pFilterPolicy = + auto* pFilterPolicy = new std::shared_ptr; *pFilterPolicy = std::shared_ptr(fp); - rocksdb::FilterJni::setHandle(env, jobj, pFilterPolicy); + return reinterpret_cast(pFilterPolicy); } /* diff --git a/java/rocksjni/loggerjnicallback.cc b/java/rocksjni/loggerjnicallback.cc index 56857b750..d92b17c22 100644 --- a/java/rocksjni/loggerjnicallback.cc +++ b/java/rocksjni/loggerjnicallback.cc @@ -125,9 +125,9 @@ LoggerJniCallback::~LoggerJniCallback() { /* * Class: org_rocksdb_Logger * Method: createNewLoggerOptions - * Signature: (J)V + * Signature: (J)J */ -void Java_org_rocksdb_Logger_createNewLoggerOptions( +jlong Java_org_rocksdb_Logger_createNewLoggerOptions( JNIEnv* env, jobject jobj, jlong joptions) { rocksdb::LoggerJniCallback* c = new rocksdb::LoggerJniCallback(env, jobj); @@ -137,15 +137,15 @@ void Java_org_rocksdb_Logger_createNewLoggerOptions( std::shared_ptr *pLoggerJniCallback = new std::shared_ptr; *pLoggerJniCallback = std::shared_ptr(c); - rocksdb::LoggerJni::setHandle(env, jobj, pLoggerJniCallback); + return reinterpret_cast(pLoggerJniCallback); } /* * Class: org_rocksdb_Logger * Method: createNewLoggerDbOptions - * Signature: (J)V + * Signature: (J)J */ -void Java_org_rocksdb_Logger_createNewLoggerDbOptions( +jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions( JNIEnv* env, jobject jobj, jlong jdb_options) { rocksdb::LoggerJniCallback* c = new rocksdb::LoggerJniCallback(env, jobj); @@ -155,7 +155,7 @@ void Java_org_rocksdb_Logger_createNewLoggerDbOptions( std::shared_ptr *pLoggerJniCallback = new std::shared_ptr; *pLoggerJniCallback = std::shared_ptr(c); - rocksdb::LoggerJni::setHandle(env, jobj, pLoggerJniCallback); + return 
reinterpret_cast(pLoggerJniCallback); } /* diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc index 9cb466538..b8d7684ec 100644 --- a/java/rocksjni/options.cc +++ b/java/rocksjni/options.cc @@ -36,25 +36,25 @@ /* * Class: org_rocksdb_Options * Method: newOptions - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_Options_newOptions__(JNIEnv* env, jobject jobj) { +jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* env, jclass jcls) { rocksdb::Options* op = new rocksdb::Options(); - rocksdb::OptionsJni::setHandle(env, jobj, op); + return reinterpret_cast(op); } /* * Class: org_rocksdb_Options * Method: newOptions - * Signature: (JJ)V + * Signature: (JJ)J */ -void Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jobject jobj, +jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jclass jcls, jlong jdboptions, jlong jcfoptions) { - auto dbOpt = reinterpret_cast(jdboptions); - auto cfOpt = reinterpret_cast( + auto* dbOpt = reinterpret_cast(jdboptions); + auto* cfOpt = reinterpret_cast( jcfoptions); rocksdb::Options* op = new rocksdb::Options(*dbOpt, *cfOpt); - rocksdb::OptionsJni::setHandle(env, jobj, op); + return reinterpret_cast(op); } /* @@ -1932,12 +1932,12 @@ void Java_org_rocksdb_Options_prepareForBulkLoad( /* * Class: org_rocksdb_ColumnFamilyOptions * Method: newColumnFamilyOptions - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions( - JNIEnv* env, jobject jobj) { +jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions( + JNIEnv* env, jclass jcls) { rocksdb::ColumnFamilyOptions* op = new rocksdb::ColumnFamilyOptions(); - rocksdb::ColumnFamilyOptionsJni::setHandle(env, jobj, op); + return reinterpret_cast(op); } /* @@ -3072,12 +3072,12 @@ void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits( /* * Class: org_rocksdb_DBOptions * Method: newDBOptions - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env, - 
jobject jobj) { +jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env, + jclass jcls) { rocksdb::DBOptions* dbop = new rocksdb::DBOptions(); - rocksdb::DBOptionsJni::setHandle(env, jobj, dbop); + return reinterpret_cast(dbop); } /* @@ -3872,12 +3872,12 @@ jlong Java_org_rocksdb_DBOptions_bytesPerSync( /* * Class: org_rocksdb_WriteOptions * Method: newWriteOptions - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_WriteOptions_newWriteOptions( - JNIEnv* env, jobject jwrite_options) { +jlong Java_org_rocksdb_WriteOptions_newWriteOptions( + JNIEnv* env, jclass jcls) { rocksdb::WriteOptions* op = new rocksdb::WriteOptions(); - rocksdb::WriteOptionsJni::setHandle(env, jwrite_options, op); + return reinterpret_cast(op); } /* @@ -3889,8 +3889,6 @@ void Java_org_rocksdb_WriteOptions_disposeInternal( JNIEnv* env, jobject jwrite_options, jlong jhandle) { auto write_options = reinterpret_cast(jhandle); delete write_options; - - rocksdb::WriteOptionsJni::setHandle(env, jwrite_options, nullptr); } /* @@ -3939,12 +3937,12 @@ jboolean Java_org_rocksdb_WriteOptions_disableWAL( /* * Class: org_rocksdb_ReadOptions * Method: newReadOptions - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_ReadOptions_newReadOptions( - JNIEnv* env, jobject jobj) { +jlong Java_org_rocksdb_ReadOptions_newReadOptions( + JNIEnv* env, jclass jcls) { auto read_opt = new rocksdb::ReadOptions(); - rocksdb::ReadOptionsJni::setHandle(env, jobj, read_opt); + return reinterpret_cast(read_opt); } /* @@ -3955,7 +3953,6 @@ void Java_org_rocksdb_ReadOptions_newReadOptions( void Java_org_rocksdb_ReadOptions_disposeInternal( JNIEnv* env, jobject jobj, jlong jhandle) { delete reinterpret_cast(jhandle); - rocksdb::ReadOptionsJni::setHandle(env, jobj, nullptr); } /* @@ -4052,12 +4049,12 @@ jlong Java_org_rocksdb_ReadOptions_snapshot( /* * Class: org_rocksdb_ComparatorOptions * Method: newComparatorOptions - * Signature: ()V + * Signature: ()J */ -void 
Java_org_rocksdb_ComparatorOptions_newComparatorOptions( - JNIEnv* env, jobject jobj) { +jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions( + JNIEnv* env, jclass jcls) { auto comparator_opt = new rocksdb::ComparatorJniCallbackOptions(); - rocksdb::ComparatorOptionsJni::setHandle(env, jobj, comparator_opt); + return reinterpret_cast(comparator_opt); } /* @@ -4090,7 +4087,6 @@ void Java_org_rocksdb_ComparatorOptions_setUseAdaptiveMutex( void Java_org_rocksdb_ComparatorOptions_disposeInternal( JNIEnv * env, jobject jobj, jlong jhandle) { delete reinterpret_cast(jhandle); - rocksdb::ComparatorOptionsJni::setHandle(env, jobj, nullptr); } ///////////////////////////////////////////////////////////////////// @@ -4099,12 +4095,12 @@ void Java_org_rocksdb_ComparatorOptions_disposeInternal( /* * Class: org_rocksdb_FlushOptions * Method: newFlushOptions - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_FlushOptions_newFlushOptions( - JNIEnv* env, jobject jobj) { +jlong Java_org_rocksdb_FlushOptions_newFlushOptions( + JNIEnv* env, jclass jcls) { auto flush_opt = new rocksdb::FlushOptions(); - rocksdb::FlushOptionsJni::setHandle(env, jobj, flush_opt); + return reinterpret_cast(flush_opt); } /* @@ -4137,5 +4133,4 @@ jboolean Java_org_rocksdb_FlushOptions_waitForFlush( void Java_org_rocksdb_FlushOptions_disposeInternal( JNIEnv * env, jobject jobj, jlong jhandle) { delete reinterpret_cast(jhandle); - rocksdb::FlushOptionsJni::setHandle(env, jobj, nullptr); } diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h index 0c5a9245f..9f93be4d1 100644 --- a/java/rocksjni/portal.h +++ b/java/rocksjni/portal.h @@ -64,11 +64,15 @@ template class RocksDBNativeClass { return reinterpret_cast( env->GetLongField(jobj, getHandleFieldID(env))); } +}; +// Native class template for sub-classes of RocksMutableObject +template class NativeRocksMutableObject : public RocksDBNativeClass { + public: // Pass the pointer to the java side. 
static void setHandle(JNIEnv* env, jobject jdb, PTR ptr) { env->SetLongField( - jdb, getHandleFieldID(env), + jdb, RocksDBNativeClass::getHandleFieldID(env), reinterpret_cast(ptr)); } }; @@ -407,7 +411,7 @@ class AbstractComparatorJni : public RocksDBNativeClass< }; // The portal class for org.rocksdb.AbstractSlice -class AbstractSliceJni : public RocksDBNativeClass< +class AbstractSliceJni : public NativeRocksMutableObject< const rocksdb::Slice*, AbstractSliceJni> { public: static jclass getJClass(JNIEnv* env) { diff --git a/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc b/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc index 3cf7b3a03..ef17efeec 100644 --- a/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc +++ b/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc @@ -12,16 +12,13 @@ /* * Class: org_rocksdb_RemoveEmptyValueCompactionFilter * Method: createNewRemoveEmptyValueCompactionFilter0 - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_RemoveEmptyValueCompactionFilter_createNewRemoveEmptyValueCompactionFilter0( - JNIEnv* env, jobject jobj) { - const rocksdb::RemoveEmptyValueCompactionFilter* compaction_filter = +jlong Java_org_rocksdb_RemoveEmptyValueCompactionFilter_createNewRemoveEmptyValueCompactionFilter0( + JNIEnv* env, jclass jcls) { + auto* compaction_filter = new rocksdb::RemoveEmptyValueCompactionFilter(); // set the native handle to our native compaction filter - static jclass jclazz = - env->FindClass("org/rocksdb/RemoveEmptyValueCompactionFilter"); - static jfieldID fid = env->GetFieldID(jclazz, "nativeHandle_", "J"); - env->SetLongField(jobj, fid, reinterpret_cast(compaction_filter)); + return reinterpret_cast(compaction_filter); } diff --git a/java/rocksjni/restorejni.cc b/java/rocksjni/restorejni.cc index 40b13dac5..e86571dd5 100644 --- a/java/rocksjni/restorejni.cc +++ b/java/rocksjni/restorejni.cc @@ -22,17 +22,17 @@ * Signature: (Z)J */ jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(JNIEnv* env, 
- jobject jobj, jboolean keep_log_files) { + jclass jcls, jboolean keep_log_files) { auto ropt = new rocksdb::RestoreOptions(keep_log_files); return reinterpret_cast(ropt); } /* * Class: org_rocksdb_RestoreOptions - * Method: dispose + * Method: disposeInternal * Signature: (J)V */ -void Java_org_rocksdb_RestoreOptions_dispose(JNIEnv* env, jobject jobj, +void Java_org_rocksdb_RestoreOptions_disposeInternal(JNIEnv* env, jobject jobj, jlong jhandle) { auto ropt = reinterpret_cast(jhandle); assert(ropt); @@ -45,8 +45,8 @@ void Java_org_rocksdb_RestoreOptions_dispose(JNIEnv* env, jobject jobj, * Signature: (J)J */ jlong Java_org_rocksdb_RestoreBackupableDB_newRestoreBackupableDB(JNIEnv* env, - jobject jobj, jlong jopt_handle) { - auto opt = reinterpret_cast(jopt_handle); + jclass jcls, jlong jopt_handle) { + auto* opt = reinterpret_cast(jopt_handle); auto rdb = new rocksdb::RestoreBackupableDB(rocksdb::Env::Default(), *opt); return reinterpret_cast(rdb); } @@ -185,11 +185,11 @@ void Java_org_rocksdb_RestoreBackupableDB_garbageCollect( /* * Class: org_rocksdb_RestoreBackupableDB - * Method: dispose + * Method: disposeInternal * Signature: (J)V */ -void Java_org_rocksdb_RestoreBackupableDB_dispose(JNIEnv* env, jobject jobj, - jlong jhandle) { +void Java_org_rocksdb_RestoreBackupableDB_disposeInternal(JNIEnv* env, + jobject jobj, jlong jhandle) { auto ropt = reinterpret_cast(jhandle); assert(ropt); delete ropt; diff --git a/java/rocksjni/rocksjni.cc b/java/rocksjni/rocksjni.cc index d9c0c6147..eb704ad26 100644 --- a/java/rocksjni/rocksjni.cc +++ b/java/rocksjni/rocksjni.cc @@ -26,217 +26,142 @@ ////////////////////////////////////////////////////////////////////////////// // rocksdb::DB::Open - -/* - * Class: org_rocksdb_RocksDB - * Method: open - * Signature: (JLjava/lang/String;)V - */ -void Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2( - JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path) { - auto opt = reinterpret_cast(jopt_handle); +jlong 
rocksdb_open_helper(JNIEnv* env, jlong jopt_handle, jstring jdb_path, + std::function open_fn + ) { + auto* opt = reinterpret_cast(jopt_handle); rocksdb::DB* db = nullptr; - const char* db_path = env->GetStringUTFChars(jdb_path, 0); - rocksdb::Status s = rocksdb::DB::Open(*opt, db_path, &db); + const char* db_path = env->GetStringUTFChars(jdb_path, NULL); + rocksdb::Status s = open_fn(*opt, db_path, &db); env->ReleaseStringUTFChars(jdb_path, db_path); if (s.ok()) { - rocksdb::RocksDBJni::setHandle(env, jdb, db); - return; + return reinterpret_cast(db); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return 0; } - rocksdb::RocksDBExceptionJni::ThrowNew(env, s); -} - -/* - * Class: org_rocksdb_RocksDB - * Method: openROnly - * Signature: (JLjava/lang/String;)V - */ -void Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2( - JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path) { - auto opt = reinterpret_cast(jopt_handle); - rocksdb::DB* db = nullptr; - const char* db_path = env->GetStringUTFChars(jdb_path, 0); - rocksdb::Status s = rocksdb::DB::OpenForReadOnly(*opt, - db_path, &db); - env->ReleaseStringUTFChars(jdb_path, db_path); - - if (s.ok()) { - rocksdb::RocksDBJni::setHandle(env, jdb, db); - return; - } - rocksdb::RocksDBExceptionJni::ThrowNew(env, s); -} - -/* - * Class: org_rocksdb_RocksDB - * Method: openROnly - * Signature: (JLjava/lang/String;Ljava/util/List;I)Ljava/util/List; - */ -jobject - Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2Ljava_util_List_2I( - JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path, - jobject jcfdesc_list, jint jcfdesc_count) { - auto opt = reinterpret_cast(jopt_handle); - rocksdb::DB* db = nullptr; - const char* db_path = env->GetStringUTFChars(jdb_path, 0); - - std::vector cfnames_to_free; - std::vector jcfnames_for_free; - - std::vector column_families; - std::vector handles; - // get iterator for ColumnFamilyDescriptors - jobject iteratorObj = env->CallObjectMethod( - 
jcfdesc_list, rocksdb::ListJni::getIteratorMethod(env)); - - // iterate over ColumnFamilyDescriptors - while (env->CallBooleanMethod( - iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) { - // get ColumnFamilyDescriptor - jobject jcf_descriptor = env->CallObjectMethod(iteratorObj, - rocksdb::ListJni::getNextMethod(env)); - // get ColumnFamilyName - jbyteArray cf_name_in_byte_array = static_cast( - env->CallObjectMethod(jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyNameMethod( - env))); - // get CF Options - jobject jcf_opt_obj = env->CallObjectMethod(jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyOptionsMethod( - env)); - rocksdb::ColumnFamilyOptions* cfOptions = - rocksdb::ColumnFamilyOptionsJni::getHandle(env, jcf_opt_obj); - - jbyte* cfname = env->GetByteArrayElements(cf_name_in_byte_array, 0); - const int len = env->GetArrayLength(cf_name_in_byte_array); - - // free allocated cfnames after call to open - cfnames_to_free.push_back(cfname); - jcfnames_for_free.push_back(cf_name_in_byte_array); - column_families.push_back(rocksdb::ColumnFamilyDescriptor( - std::string(reinterpret_cast(cfname), len), *cfOptions)); - } - - rocksdb::Status s = rocksdb::DB::OpenForReadOnly(*opt, - db_path, column_families, &handles, &db); - env->ReleaseStringUTFChars(jdb_path, db_path); - // free jbyte allocations - for (std::vector::size_type i = 0; - i != cfnames_to_free.size(); i++) { - // free cfnames - env->ReleaseByteArrayElements(jcfnames_for_free[i], cfnames_to_free[i], 0); - } - - // check if open operation was successful - if (s.ok()) { - rocksdb::RocksDBJni::setHandle(env, jdb, db); - jclass jListClazz = env->FindClass("java/util/ArrayList"); - jmethodID midList = rocksdb::ListJni::getArrayListConstructorMethodId( - env, jListClazz); - jobject jcfhandle_list = env->NewObject(jListClazz, - midList, handles.size()); - // insert in java list - for (std::vector::size_type i = 0; - i != handles.size(); i++) { - // 
jlong must be converted to Long due to collections restrictions - jclass jLongClazz = env->FindClass("java/lang/Long"); - jmethodID midLong = env->GetMethodID(jLongClazz, "", "(J)V"); - jobject obj = env->NewObject(jLongClazz, midLong, - reinterpret_cast(handles[i])); - env->CallBooleanMethod(jcfhandle_list, - rocksdb::ListJni::getListAddMethodId(env), obj); - } - - return jcfhandle_list; - } - rocksdb::RocksDBExceptionJni::ThrowNew(env, s); - return nullptr; } /* * Class: org_rocksdb_RocksDB * Method: open - * Signature: (JLjava/lang/String;Ljava/util/List;I)Ljava/util/List; + * Signature: (JLjava/lang/String;)J */ -jobject Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2Ljava_util_List_2I( - JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path, - jobject jcfdesc_list, jint jcfdesc_count) { - auto opt = reinterpret_cast(jopt_handle); - rocksdb::DB* db = nullptr; - const char* db_path = env->GetStringUTFChars(jdb_path, 0); +jlong Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2( + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path) { + return rocksdb_open_helper(env, jopt_handle, jdb_path, + (rocksdb::Status(*) + (const rocksdb::Options&, const std::string&, rocksdb::DB**) + )&rocksdb::DB::Open + ); +} - std::vector cfnames_to_free; - std::vector jcfnames_for_free; +/* + * Class: org_rocksdb_RocksDB + * Method: openROnly + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2( + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path) { + return rocksdb_open_helper(env, jopt_handle, jdb_path, []( + const rocksdb::Options& options, + const std::string& db_path, rocksdb::DB** db) { + return rocksdb::DB::OpenForReadOnly(options, db_path, db); + }); +} + +jlongArray rocksdb_open_helper(JNIEnv* env, jlong jopt_handle, + jstring jdb_path, jobjectArray jcolumn_names, jlongArray jcolumn_options, + std::function&, + std::vector*, + rocksdb::DB**)> open_fn + ) { + auto* opt = 
reinterpret_cast(jopt_handle); + const char* db_path = env->GetStringUTFChars(jdb_path, NULL); std::vector column_families; - std::vector handles; - // get iterator for ColumnFamilyDescriptors - jobject iteratorObj = env->CallObjectMethod( - jcfdesc_list, rocksdb::ListJni::getIteratorMethod(env)); - // iterate over ColumnFamilyDescriptors - while (env->CallBooleanMethod( - iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) { - // get ColumnFamilyDescriptor - jobject jcf_descriptor = env->CallObjectMethod(iteratorObj, - rocksdb::ListJni::getNextMethod(env)); - // get ColumnFamilyName - jbyteArray cf_name_in_byte_array = static_cast( - env->CallObjectMethod(jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyNameMethod( - env))); - // get CF Options - jobject jcf_opt_obj = env->CallObjectMethod(jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyOptionsMethod( - env)); - rocksdb::ColumnFamilyOptions* cfOptions = - rocksdb::ColumnFamilyOptionsJni::getHandle(env, jcf_opt_obj); + jsize len_cols = env->GetArrayLength(jcolumn_names); + jlong* jco = env->GetLongArrayElements(jcolumn_options, NULL); + for(int i = 0; i < len_cols; i++) { + jobject jcn = env->GetObjectArrayElement(jcolumn_names, i); + jbyteArray jcn_ba = reinterpret_cast(jcn); + jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, NULL); + const int jcf_name_len = env->GetArrayLength(jcn_ba); - jbyte* cfname = env->GetByteArrayElements(cf_name_in_byte_array, 0); - const int len = env->GetArrayLength(cf_name_in_byte_array); + //TODO(AR) do I need to make a copy of jco[i] ? 
- // free allocated cfnames after call to open - cfnames_to_free.push_back(cfname); - jcfnames_for_free.push_back(cf_name_in_byte_array); - column_families.push_back(rocksdb::ColumnFamilyDescriptor( - std::string(reinterpret_cast(cfname), len), *cfOptions)); + std::string cf_name (reinterpret_cast(jcf_name), jcf_name_len); + rocksdb::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[i]); + column_families.push_back( + rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options)); + + env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT); + env->DeleteLocalRef(jcn); } + env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT); - rocksdb::Status s = rocksdb::DB::Open(*opt, db_path, column_families, + std::vector handles; + rocksdb::DB* db = nullptr; + rocksdb::Status s = open_fn(*opt, db_path, column_families, &handles, &db); - env->ReleaseStringUTFChars(jdb_path, db_path); - // free jbyte allocations - for (std::vector::size_type i = 0; - i != cfnames_to_free.size(); i++) { - // free cfnames - env->ReleaseByteArrayElements(jcfnames_for_free[i], cfnames_to_free[i], 0); - } // check if open operation was successful if (s.ok()) { - rocksdb::RocksDBJni::setHandle(env, jdb, db); - jclass jListClazz = env->FindClass("java/util/ArrayList"); - jmethodID midList = rocksdb::ListJni::getArrayListConstructorMethodId( - env, jListClazz); - jobject jcfhandle_list = env->NewObject(jListClazz, - midList, handles.size()); - // insert in java list - for (std::vector::size_type i = 0; - i != handles.size(); i++) { - // jlong must be converted to Long due to collections restrictions - jclass jLongClazz = env->FindClass("java/lang/Long"); - jmethodID midLong = env->GetMethodID(jLongClazz, "", "(J)V"); - jobject obj = env->NewObject(jLongClazz, midLong, - reinterpret_cast(handles[i])); - env->CallBooleanMethod(jcfhandle_list, - rocksdb::ListJni::getListAddMethodId(env), obj); + jsize resultsLen = 1 + len_cols; //db handle + column family handles + jlong results[resultsLen]; + 
results[0] = reinterpret_cast(db); + for(int i = 1; i <= len_cols; i++) { + results[i] = reinterpret_cast(handles[i - 1]); } - return jcfhandle_list; + jlongArray jresults = env->NewLongArray(resultsLen); + env->SetLongArrayRegion(jresults, 0, resultsLen, results); + return jresults; + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return NULL; } - rocksdb::RocksDBExceptionJni::ThrowNew(env, s); - return nullptr; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: openROnly + * Signature: (JLjava/lang/String;[[B[J)[J + */ +jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options) { + return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names, + jcolumn_options, []( + const rocksdb::DBOptions& options, const std::string& db_path, + const std::vector& column_families, + std::vector* handles, rocksdb::DB** db) { + return rocksdb::DB::OpenForReadOnly(options, db_path, column_families, + handles, db); + }); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: open + * Signature: (JLjava/lang/String;[[B[J)[J + */ +jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options) { + return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options, + (rocksdb::Status(*) + (const rocksdb::DBOptions&, const std::string&, + const std::vector&, + std::vector*, rocksdb::DB**) + )&rocksdb::DB::Open + ); } ////////////////////////////////////////////////////////////////////////////// diff --git a/java/rocksjni/slice.cc b/java/rocksjni/slice.cc index 5e05e46f7..e5eb383bd 100644 --- a/java/rocksjni/slice.cc +++ b/java/rocksjni/slice.cc @@ -22,12 +22,11 @@ /* * Class: org_rocksdb_AbstractSlice * Method: createNewSliceFromString - * Signature: (Ljava/lang/String;)V + * 
Signature: (Ljava/lang/String;)J */ -void Java_org_rocksdb_AbstractSlice_createNewSliceFromString( - JNIEnv* env, jobject jobj, jstring jstr) { - - const auto* str = env->GetStringUTFChars(jstr, 0); +jlong Java_org_rocksdb_AbstractSlice_createNewSliceFromString( + JNIEnv * env, jclass jcls, jstring jstr) { + const auto* str = env->GetStringUTFChars(jstr, NULL); const size_t len = strlen(str); char* buf = new char[len + 1]; memcpy(buf, str, len); @@ -35,7 +34,7 @@ void Java_org_rocksdb_AbstractSlice_createNewSliceFromString( env->ReleaseStringUTFChars(jstr, str); const auto* slice = new rocksdb::Slice(buf); - rocksdb::AbstractSliceJni::setHandle(env, jobj, slice); + return reinterpret_cast(slice); } /* @@ -115,10 +114,10 @@ void Java_org_rocksdb_AbstractSlice_disposeInternal( /* * Class: org_rocksdb_Slice * Method: createNewSlice0 - * Signature: ([BI)V + * Signature: ([BI)J */ -void Java_org_rocksdb_Slice_createNewSlice0( - JNIEnv * env, jobject jobj, jbyteArray data, jint offset) { +jlong Java_org_rocksdb_Slice_createNewSlice0( + JNIEnv * env, jclass jcls, jbyteArray data, jint offset) { const jsize dataSize = env->GetArrayLength(data); const int len = dataSize - offset; @@ -126,32 +125,33 @@ void Java_org_rocksdb_Slice_createNewSlice0( env->GetByteArrayRegion(data, offset, len, ptrData); const auto* slice = new rocksdb::Slice((const char*)ptrData, len); - rocksdb::AbstractSliceJni::setHandle(env, jobj, slice); + return reinterpret_cast(slice); } /* * Class: org_rocksdb_Slice * Method: createNewSlice1 - * Signature: ([B)V + * Signature: ([B)J */ -void Java_org_rocksdb_Slice_createNewSlice1( - JNIEnv * env, jobject jobj, jbyteArray data) { +jlong Java_org_rocksdb_Slice_createNewSlice1( + JNIEnv * env, jclass jcls, jbyteArray data) { const int len = env->GetArrayLength(data) + 1; jboolean isCopy; jbyte* ptrData = env->GetByteArrayElements(data, &isCopy); - char* buf = new char[len]; + // NOTE: buf will be deleted in the org.rocksdb.Slice#dispose method + char* buf = 
new char[len]; memcpy(buf, ptrData, len - 1); buf[len-1]='\0'; const auto* slice = new rocksdb::Slice(buf, len - 1); - rocksdb::AbstractSliceJni::setHandle(env, jobj, slice); env->ReleaseByteArrayElements(data, ptrData, JNI_ABORT); - // NOTE: buf will be deleted in the org.rocksdb.Slice#dispose method + + return reinterpret_cast(slice); } /* @@ -187,27 +187,27 @@ void Java_org_rocksdb_Slice_disposeInternalBuf( /* * Class: org_rocksdb_DirectSlice * Method: createNewDirectSlice0 - * Signature: (Ljava/nio/ByteBuffer;I)V + * Signature: (Ljava/nio/ByteBuffer;I)J */ -void Java_org_rocksdb_DirectSlice_createNewDirectSlice0( - JNIEnv* env, jobject jobj, jobject data, jint length) { +jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice0( + JNIEnv* env, jclass jcls, jobject data, jint length) { const auto* ptrData = reinterpret_cast(env->GetDirectBufferAddress(data)); const auto* slice = new rocksdb::Slice(ptrData, length); - rocksdb::AbstractSliceJni::setHandle(env, jobj, slice); + return reinterpret_cast(slice); } /* * Class: org_rocksdb_DirectSlice * Method: createNewDirectSlice1 - * Signature: (Ljava/nio/ByteBuffer;)V + * Signature: (Ljava/nio/ByteBuffer;)J */ -void Java_org_rocksdb_DirectSlice_createNewDirectSlice1( - JNIEnv* env, jobject jobj, jobject data) { +jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice1( + JNIEnv* env, jclass jcls, jobject data) { const auto* ptrData = reinterpret_cast(env->GetDirectBufferAddress(data)); const auto* slice = new rocksdb::Slice(ptrData); - rocksdb::AbstractSliceJni::setHandle(env, jobj, slice); + return reinterpret_cast(slice); } /* diff --git a/java/rocksjni/ttl.cc b/java/rocksjni/ttl.cc index 219e6c4db..91d3dca9a 100644 --- a/java/rocksjni/ttl.cc +++ b/java/rocksjni/ttl.cc @@ -20,10 +20,10 @@ /* * Class: org_rocksdb_TtlDB * Method: open - * Signature: (JLjava/lang/String;IZ)V + * Signature: (JLjava/lang/String;IZ)J */ -void Java_org_rocksdb_TtlDB_open(JNIEnv* env, - jobject jttldb, jlong joptions_handle, jstring 
jdb_path, +jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env, + jclass jcls, jlong joptions_handle, jstring jdb_path, jint jttl, jboolean jread_only) { auto* opt = reinterpret_cast(joptions_handle); rocksdb::DBWithTTL* db = nullptr; @@ -35,113 +35,79 @@ void Java_org_rocksdb_TtlDB_open(JNIEnv* env, // as TTLDB extends RocksDB on the java side, we can reuse // the RocksDB portal here. if (s.ok()) { - rocksdb::RocksDBJni::setHandle(env, jttldb, db); - return; + return reinterpret_cast(db); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return 0; } - rocksdb::RocksDBExceptionJni::ThrowNew(env, s); } /* * Class: org_rocksdb_TtlDB * Method: openCF - * Signature: (JLjava/lang/String;Ljava/util/List; - * ILjava/util/List;Z)Ljava/util/List; + * Signature: (JLjava/lang/String;[[B[J[IZ)[J */ -jobject +jlongArray Java_org_rocksdb_TtlDB_openCF( - JNIEnv* env, jobject jdb, jlong jopt_handle, jstring jdb_path, - jobject jcfdesc_list, jint jcfdesc_count, jobject jttl_list, - jboolean jread_only) { - auto* opt = reinterpret_cast(jopt_handle); - rocksdb::DBWithTTL* db = nullptr; - const char* db_path = env->GetStringUTFChars(jdb_path, 0); - - std::vector cfnames_to_free; - std::vector jcfnames_for_free; + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options, + jintArray jttls, jboolean jread_only) { + auto* opt = reinterpret_cast(jopt_handle); + const char* db_path = env->GetStringUTFChars(jdb_path, NULL); std::vector column_families; + + jsize len_cols = env->GetArrayLength(jcolumn_names); + jlong* jco = env->GetLongArrayElements(jcolumn_options, NULL); + for(int i = 0; i < len_cols; i++) { + jobject jcn = env->GetObjectArrayElement(jcolumn_names, i); + jbyteArray jcn_ba = reinterpret_cast(jcn); + jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, NULL); + const int jcf_name_len = env->GetArrayLength(jcn_ba); + + //TODO(AR) do I need to make a copy of jco[i] ? 
+ + std::string cf_name (reinterpret_cast(jcf_name), jcf_name_len); + rocksdb::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[i]); + column_families.push_back( + rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options)); + + env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT); + env->DeleteLocalRef(jcn); + } + env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT); + + std::vector handles; + rocksdb::DBWithTTL* db = nullptr; + std::vector ttl_values; - std::vector handles; - // get iterator for ColumnFamilyDescriptors - jobject iteratorObj = env->CallObjectMethod( - jcfdesc_list, rocksdb::ListJni::getIteratorMethod(env)); - - // iterate over ColumnFamilyDescriptors - while (env->CallBooleanMethod( - iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) { - // get ColumnFamilyDescriptor - jobject jcf_descriptor = env->CallObjectMethod(iteratorObj, - rocksdb::ListJni::getNextMethod(env)); - // get ColumnFamilyName - jbyteArray byteArray = static_cast(env->CallObjectMethod( - jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyNameMethod( - env))); - // get CF Options - jobject jcf_opt_obj = env->CallObjectMethod(jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyOptionsMethod( - env)); - rocksdb::ColumnFamilyOptions* cfOptions = - rocksdb::ColumnFamilyOptionsJni::getHandle(env, jcf_opt_obj); - - jbyte* cfname = env->GetByteArrayElements(byteArray, 0); - const int len = env->GetArrayLength(byteArray); - - // free allocated cfnames after call to open - cfnames_to_free.push_back(cfname); - jcfnames_for_free.push_back(byteArray); - column_families.push_back(rocksdb::ColumnFamilyDescriptor( - std::string(reinterpret_cast(cfname), len), *cfOptions)); - } - // get iterator for TTL values - iteratorObj = env->CallObjectMethod( - jttl_list, rocksdb::ListJni::getIteratorMethod(env)); - // iterate over TTL values - while (env->CallBooleanMethod( - iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == 
JNI_TRUE) { - // get TTL object - jobject jttl_object = env->CallObjectMethod(iteratorObj, - rocksdb::ListJni::getNextMethod(env)); - // get Integer value - jclass jIntClazz = env->FindClass("java/lang/Integer"); - jmethodID getVal = env->GetMethodID(jIntClazz, "intValue", "()I"); - ttl_values.push_back(env->CallIntMethod(jttl_object, getVal)); + jint* jttlv = env->GetIntArrayElements(jttls, NULL); + jsize len_ttls = env->GetArrayLength(jttls); + for(int i = 0; i < len_ttls; i++) { + ttl_values.push_back(jttlv[i]); } + env->ReleaseIntArrayElements(jttls, jttlv, JNI_ABORT); + rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, column_families, &handles, &db, ttl_values, jread_only); - env->ReleaseStringUTFChars(jdb_path, db_path); - // free jbyte allocations - for (std::vector::size_type i = 0; - i != cfnames_to_free.size(); i++) { - // free cfnames - env->ReleaseByteArrayElements(jcfnames_for_free[i], cfnames_to_free[i], 0); - } - // check if open operation was successful if (s.ok()) { - rocksdb::RocksDBJni::setHandle(env, jdb, db); - jclass jListClazz = env->FindClass("java/util/ArrayList"); - jmethodID midList = rocksdb::ListJni::getArrayListConstructorMethodId( - env, jListClazz); - jobject jcfhandle_list = env->NewObject(jListClazz, - midList, handles.size()); - // insert in java list - for (std::vector::size_type i = 0; - i != handles.size(); i++) { - // jlong must be converted to Long due to collections restrictions - jclass jLongClazz = env->FindClass("java/lang/Long"); - jmethodID midLong = env->GetMethodID(jLongClazz, "", "(J)V"); - jobject obj = env->NewObject(jLongClazz, midLong, - reinterpret_cast(handles[i])); - env->CallBooleanMethod(jcfhandle_list, - rocksdb::ListJni::getListAddMethodId(env), obj); + jsize resultsLen = 1 + len_cols; //db handle + column family handles + jlong results[resultsLen]; + results[0] = reinterpret_cast(db); + for(int i = 1; i <= len_cols; i++) { + results[i] = reinterpret_cast(handles[i - 1]); } - return 
jcfhandle_list; + jlongArray jresults = env->NewLongArray(resultsLen); + env->SetLongArrayRegion(jresults, 0, resultsLen, results); + return jresults; + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return NULL; } - rocksdb::RocksDBExceptionJni::ThrowNew(env, s); - return nullptr; } /* diff --git a/java/rocksjni/write_batch.cc b/java/rocksjni/write_batch.cc index 83d2e6dfe..41690967c 100644 --- a/java/rocksjni/write_batch.cc +++ b/java/rocksjni/write_batch.cc @@ -27,14 +27,13 @@ /* * Class: org_rocksdb_WriteBatch * Method: newWriteBatch - * Signature: (I)V + * Signature: (I)J */ -void Java_org_rocksdb_WriteBatch_newWriteBatch( - JNIEnv* env, jobject jobj, jint jreserved_bytes) { +jlong Java_org_rocksdb_WriteBatch_newWriteBatch( + JNIEnv* env, jclass jcls, jint jreserved_bytes) { rocksdb::WriteBatch* wb = new rocksdb::WriteBatch( static_cast(jreserved_bytes)); - - rocksdb::WriteBatchJni::setHandle(env, jobj, wb); + return reinterpret_cast(wb); } /* @@ -218,13 +217,13 @@ void Java_org_rocksdb_WriteBatch_disposeInternal( /* * Class: org_rocksdb_WriteBatch_Handler * Method: createNewHandler0 - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0( +jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0( JNIEnv* env, jobject jobj) { const rocksdb::WriteBatchHandlerJniCallback* h = new rocksdb::WriteBatchHandlerJniCallback(env, jobj); - rocksdb::WriteBatchHandlerJni::setHandle(env, jobj, h); + return reinterpret_cast(h); } /* diff --git a/java/rocksjni/write_batch_with_index.cc b/java/rocksjni/write_batch_with_index.cc index 51296427e..1123517d9 100644 --- a/java/rocksjni/write_batch_with_index.cc +++ b/java/rocksjni/write_batch_with_index.cc @@ -15,40 +15,40 @@ /* * Class: org_rocksdb_WriteBatchWithIndex * Method: newWriteBatchWithIndex - * Signature: ()V + * Signature: ()J */ -void Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__( - JNIEnv* env, jobject jobj) { +jlong 
Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__( + JNIEnv* env, jclass jcls) { rocksdb::WriteBatchWithIndex* wbwi = new rocksdb::WriteBatchWithIndex(); - rocksdb::WriteBatchWithIndexJni::setHandle(env, jobj, wbwi); + return reinterpret_cast(wbwi); } /* * Class: org_rocksdb_WriteBatchWithIndex * Method: newWriteBatchWithIndex - * Signature: (Z)V + * Signature: (Z)J */ -void Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z( - JNIEnv* env, jobject jobj, jboolean joverwrite_key) { +jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z( + JNIEnv* env, jclass jcls, jboolean joverwrite_key) { rocksdb::WriteBatchWithIndex* wbwi = new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0, static_cast(joverwrite_key)); - rocksdb::WriteBatchWithIndexJni::setHandle(env, jobj, wbwi); + return reinterpret_cast(wbwi); } /* * Class: org_rocksdb_WriteBatchWithIndex * Method: newWriteBatchWithIndex - * Signature: (JIZ)V + * Signature: (JIZ)J */ -void Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ( - JNIEnv* env, jobject jobj, jlong jfallback_index_comparator_handle, +jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ( + JNIEnv* env, jclass jcls, jlong jfallback_index_comparator_handle, jint jreserved_bytes, jboolean joverwrite_key) { rocksdb::WriteBatchWithIndex* wbwi = new rocksdb::WriteBatchWithIndex( reinterpret_cast(jfallback_index_comparator_handle), static_cast(jreserved_bytes), static_cast(joverwrite_key)); - rocksdb::WriteBatchWithIndexJni::setHandle(env, jobj, wbwi); + return reinterpret_cast(wbwi); } /* diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java index 1ecedf156..6853f1d4b 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -13,6 +13,10 @@ package org.rocksdb; public abstract class 
AbstractCompactionFilter> extends RocksObject { + protected AbstractCompactionFilter(final long nativeHandle) { + super(nativeHandle); + } + /** * Deletes underlying C++ comparator pointer. * @@ -20,10 +24,6 @@ public abstract class AbstractCompactionFilter> * RocksDB instances referencing the comparator are closed. * Otherwise an undefined behavior will occur. */ - @Override protected void disposeInternal() { - assert(isInitialized()); - disposeInternal(nativeHandle_); - } - - private native void disposeInternal(long handle); + @Override + protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/AbstractComparator.java b/java/src/main/java/org/rocksdb/AbstractComparator.java index 04a26bfba..a459a9366 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparator.java +++ b/java/src/main/java/org/rocksdb/AbstractComparator.java @@ -14,8 +14,11 @@ package org.rocksdb; * @see org.rocksdb.Comparator * @see org.rocksdb.DirectComparator */ -public abstract class AbstractComparator> - extends RocksObject { +public abstract class AbstractComparator> extends NativeReference { + + protected AbstractComparator() { + super(true); + } /** * The name of the comparator. Used to check for comparator @@ -91,10 +94,12 @@ public abstract class AbstractComparator> * RocksDB instances referencing the comparator are closed. * Otherwise an undefined behavior will occur. 
*/ - @Override protected void disposeInternal() { - assert(isInitialized()); - disposeInternal(nativeHandle_); + @Override + protected void disposeInternal() { + disposeInternal(getNativeHandle()); } - private native void disposeInternal(long handle); + protected abstract long getNativeHandle(); + + private native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java index b7419cba9..a1547b3b3 100644 --- a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java +++ b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java @@ -25,8 +25,7 @@ public abstract class AbstractRocksIterator

protected AbstractRocksIterator(final P parent, final long nativeHandle) { - super(); - nativeHandle_ = nativeHandle; + super(nativeHandle); // parent must point to a valid RocksDB instance. assert (parent != null); // RocksIterator must hold a reference to the related parent instance @@ -37,43 +36,43 @@ public abstract class AbstractRocksIterator

@Override public boolean isValid() { - assert (isInitialized()); + assert (isOwningHandle()); return isValid0(nativeHandle_); } @Override public void seekToFirst() { - assert (isInitialized()); + assert (isOwningHandle()); seekToFirst0(nativeHandle_); } @Override public void seekToLast() { - assert (isInitialized()); + assert (isOwningHandle()); seekToLast0(nativeHandle_); } @Override public void seek(byte[] target) { - assert (isInitialized()); + assert (isOwningHandle()); seek0(nativeHandle_, target, target.length); } @Override public void next() { - assert (isInitialized()); + assert (isOwningHandle()); next0(nativeHandle_); } @Override public void prev() { - assert (isInitialized()); + assert (isOwningHandle()); prev0(nativeHandle_); } @Override public void status() throws RocksDBException { - assert (isInitialized()); + assert (isOwningHandle()); status0(nativeHandle_); } @@ -87,15 +86,11 @@ public abstract class AbstractRocksIterator

*/ @Override protected void disposeInternal() { - synchronized (parent_) { - assert (isInitialized()); - if (parent_.isInitialized()) { + if (parent_.isOwningHandle()) { disposeInternal(nativeHandle_); } - } } - abstract void disposeInternal(long handle); abstract boolean isValid0(long handle); abstract void seekToFirst0(long handle); abstract void seekToLast0(long handle); diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java index ea77f5384..b9e67d472 100644 --- a/java/src/main/java/org/rocksdb/AbstractSlice.java +++ b/java/src/main/java/org/rocksdb/AbstractSlice.java @@ -24,7 +24,15 @@ package org.rocksdb; * C++ BaseComparatorJniCallback subclass, which in turn destroys the * Java @see org.rocksdb.AbstractSlice subclass Objects. */ -abstract class AbstractSlice extends RocksObject { +abstract class AbstractSlice extends RocksMutableObject { + + protected AbstractSlice() { + super(); + } + + protected AbstractSlice(final long nativeHandle) { + super(nativeHandle); + } /** * Returns the data of the slice. @@ -34,7 +42,7 @@ abstract class AbstractSlice extends RocksObject { * @see org.rocksdb.AbstractSlice#data0(long) */ public T data() { - assert (isInitialized()); + assert (isOwningHandle()); return data0(nativeHandle_); } @@ -56,7 +64,7 @@ abstract class AbstractSlice extends RocksObject { * @return The length in bytes. */ public int size() { - assert (isInitialized()); + assert (isOwningHandle()); return size0(nativeHandle_); } @@ -67,7 +75,7 @@ abstract class AbstractSlice extends RocksObject { * @return true if there is no data, false otherwise. */ public boolean empty() { - assert (isInitialized()); + assert (isOwningHandle()); return empty0(nativeHandle_); } @@ -80,7 +88,7 @@ abstract class AbstractSlice extends RocksObject { * @return The string representation of the data. 
*/ public String toString(final boolean hex) { - assert (isInitialized()); + assert (isOwningHandle()); return toString0(nativeHandle_, hex); } @@ -101,7 +109,7 @@ abstract class AbstractSlice extends RocksObject { */ public int compare(final AbstractSlice other) { assert (other != null); - assert (isInitialized()); + assert (isOwningHandle()); return compare0(nativeHandle_, other.nativeHandle_); } @@ -141,13 +149,20 @@ abstract class AbstractSlice extends RocksObject { */ public boolean startsWith(final AbstractSlice prefix) { if (prefix != null) { - assert (isInitialized()); + assert (isOwningHandle()); return startsWith0(nativeHandle_, prefix.nativeHandle_); } else { return false; } } + protected native static long createNewSliceFromString(final String str); + private native int size0(long handle); + private native boolean empty0(long handle); + private native String toString0(long handle, boolean hex); + private native int compare0(long handle, long otherHandle); + private native boolean startsWith0(long handle, long otherHandle); + /** * Deletes underlying C++ slice pointer. * Note that this function should be called only after all @@ -155,17 +170,6 @@ abstract class AbstractSlice extends RocksObject { * Otherwise an undefined behavior will occur. 
*/ @Override - protected void disposeInternal() { - assert(isInitialized()); - disposeInternal(nativeHandle_); - } - - protected native void createNewSliceFromString(String str); - private native int size0(long handle); - private native boolean empty0(long handle); - private native String toString0(long handle, boolean hex); - private native int compare0(long handle, long otherHandle); - private native boolean startsWith0(long handle, long otherHandle); - private native void disposeInternal(long handle); + protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java index 984e400ab..b40e9461e 100644 --- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java +++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -7,71 +7,64 @@ package org.rocksdb; public abstract class AbstractWriteBatch extends RocksObject implements WriteBatchInterface { + protected AbstractWriteBatch(final long nativeHandle) { + super(nativeHandle); + } + @Override public int count() { - assert (isInitialized()); + assert (isOwningHandle()); return count0(); } @Override public void put(byte[] key, byte[] value) { - assert (isInitialized()); + assert (isOwningHandle()); put(key, key.length, value, value.length); } @Override public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) { - assert (isInitialized()); + assert (isOwningHandle()); put(key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); } @Override public void merge(byte[] key, byte[] value) { - assert (isInitialized()); + assert (isOwningHandle()); merge(key, key.length, value, value.length); } @Override public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) { - assert (isInitialized()); + assert (isOwningHandle()); merge(key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); } @Override public void remove(byte[] 
key) { - assert (isInitialized()); + assert (isOwningHandle()); remove(key, key.length); } @Override public void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key) { - assert (isInitialized()); + assert (isOwningHandle()); remove(key, key.length, columnFamilyHandle.nativeHandle_); } @Override public void putLogData(byte[] blob) { - assert (isInitialized()); + assert (isOwningHandle()); putLogData(blob, blob.length); } @Override public void clear() { - assert (isInitialized()); + assert (isOwningHandle()); clear0(); } - /** - * Delete the c++ side pointer. - */ - @Override - protected void disposeInternal() { - assert (isInitialized()); - disposeInternal(nativeHandle_); - } - - abstract void disposeInternal(long handle); - abstract int count0(); abstract void put(byte[] key, int keyLen, byte[] value, int valueLen); diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java index 4791719aa..776307a74 100644 --- a/java/src/main/java/org/rocksdb/BackupEngine.java +++ b/java/src/main/java/org/rocksdb/BackupEngine.java @@ -19,8 +19,8 @@ import java.util.List; */ public class BackupEngine extends RocksObject implements AutoCloseable { - protected BackupEngine() { - super(); + protected BackupEngine(final long nativeHandle) { + super(nativeHandle); } /** @@ -33,9 +33,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { */ public static BackupEngine open(final Env env, final BackupableDBOptions options) throws RocksDBException { - final BackupEngine be = new BackupEngine(); - be.open(env.nativeHandle_, options.nativeHandle_); - return be; + return new BackupEngine(open(env.nativeHandle_, options.nativeHandle_)); } /** @@ -74,7 +72,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { public void createNewBackup( final RocksDB db, final boolean flushBeforeBackup) throws RocksDBException { - assert (isInitialized()); + assert (isOwningHandle()); 
createNewBackup(nativeHandle_, db.nativeHandle_, flushBeforeBackup); } @@ -85,7 +83,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * @return A list of information about each available backup */ public List getBackupInfo() { - assert (isInitialized()); + assert (isOwningHandle()); return getBackupInfo(nativeHandle_); } @@ -97,7 +95,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * @return array of backup ids as int ids. */ public int[] getCorruptedBackups() { - assert(isInitialized()); + assert(isOwningHandle()); return getCorruptedBackups(nativeHandle_); } @@ -110,7 +108,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * native library. */ public void garbageCollect() throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); garbageCollect(nativeHandle_); } @@ -121,7 +119,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { */ public void purgeOldBackups( final int numBackupsToKeep) throws RocksDBException { - assert (isInitialized()); + assert (isOwningHandle()); purgeOldBackups(nativeHandle_, numBackupsToKeep); } @@ -131,7 +129,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * @param backupId The id of the backup to delete */ public void deleteBackup(final int backupId) throws RocksDBException { - assert (isInitialized()); + assert (isOwningHandle()); deleteBackup(nativeHandle_, backupId); } @@ -158,7 +156,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { public void restoreDbFromBackup( final int backupId, final String dbDir, final String walDir, final RestoreOptions restoreOptions) throws RocksDBException { - assert (isInitialized()); + assert (isOwningHandle()); restoreDbFromBackup(nativeHandle_, backupId, dbDir, walDir, restoreOptions.nativeHandle_); } @@ -173,7 +171,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { public void 
restoreDbFromLatestBackup( final String dbDir, final String walDir, final RestoreOptions restoreOptions) throws RocksDBException { - assert (isInitialized()); + assert (isOwningHandle()); restoreDbFromLatestBackup(nativeHandle_, dbDir, walDir, restoreOptions.nativeHandle_); } @@ -186,14 +184,8 @@ public class BackupEngine extends RocksObject implements AutoCloseable { dispose(); } - @Override - protected void disposeInternal() { - assert (isInitialized()); - disposeInternal(nativeHandle_); - } - - private native void open(final long env, final long backupableDbOptions) - throws RocksDBException; + private native static long open(final long env, + final long backupableDbOptions) throws RocksDBException; private native void createNewBackup(final long handle, final long dbHandle, final boolean flushBeforeBackup) throws RocksDBException; @@ -218,5 +210,5 @@ public class BackupEngine extends RocksObject implements AutoCloseable { final String dbDir, final String walDir, final long restoreOptionsHandle) throws RocksDBException; - private native void disposeInternal(final long handle); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/BackupableDB.java b/java/src/main/java/org/rocksdb/BackupableDB.java index 6de20736f..9bc29af81 100644 --- a/java/src/main/java/org/rocksdb/BackupableDB.java +++ b/java/src/main/java/org/rocksdb/BackupableDB.java @@ -33,9 +33,8 @@ public class BackupableDB extends RocksDB { final Options opt, final BackupableDBOptions bopt, final String db_path) throws RocksDBException { - RocksDB db = RocksDB.open(opt, db_path); - BackupableDB bdb = new BackupableDB(); - bdb.open(db.nativeHandle_, bopt.nativeHandle_); + final RocksDB db = RocksDB.open(opt, db_path); + final BackupableDB bdb = new BackupableDB(open(db.nativeHandle_, bopt.nativeHandle_)); // Prevent the RocksDB object from attempting to delete // the underly C++ DB object. 
@@ -56,7 +55,7 @@ public class BackupableDB extends RocksDB { */ public void createNewBackup(final boolean flushBeforeBackup) throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); createNewBackup(nativeHandle_, flushBeforeBackup); } @@ -70,7 +69,7 @@ public class BackupableDB extends RocksDB { */ public void purgeOldBackups(final int numBackupsToKeep) throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); purgeOldBackups(nativeHandle_, numBackupsToKeep); } @@ -83,7 +82,7 @@ public class BackupableDB extends RocksDB { * native library. */ public void deleteBackup(final int backupId) throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); deleteBackup0(nativeHandle_, backupId); } @@ -94,7 +93,7 @@ public class BackupableDB extends RocksDB { * @return List of {@link BackupInfo} instances. */ public List getBackupInfos() { - assert(isInitialized()); + assert(isOwningHandle()); return getBackupInfo(nativeHandle_); } @@ -106,7 +105,7 @@ public class BackupableDB extends RocksDB { * @return array of backup ids as int ids. */ public int[] getCorruptedBackups() { - assert(isInitialized()); + assert(isOwningHandle()); return getCorruptedBackups(nativeHandle_); } @@ -119,7 +118,7 @@ public class BackupableDB extends RocksDB { * native library. */ public void garbageCollect() throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); garbageCollect(nativeHandle_); } @@ -132,19 +131,19 @@ public class BackupableDB extends RocksDB { * of the c++ {@code rocksdb::BackupableDB} and should be transparent * to Java developers.

*/ - @Override public synchronized void close() { - if (isInitialized()) { + @Override public void close() { super.close(); - } } /** *

A protected construction that will be used in the static * factory method {@link #open(Options, BackupableDBOptions, String)}. *

+ * + * @param nativeHandle The native handle of the C++ BackupableDB object */ - protected BackupableDB() { - super(); + protected BackupableDB(final long nativeHandle) { + super(nativeHandle); } @Override protected void finalize() throws Throwable { @@ -152,7 +151,7 @@ public class BackupableDB extends RocksDB { super.finalize(); } - protected native void open(long rocksDBHandle, long backupDBOptionsHandle); + protected native static long open(final long rocksDBHandle, final long backupDBOptionsHandle); protected native void createNewBackup(long handle, boolean flag) throws RocksDBException; protected native void purgeOldBackups(long handle, int numBackupsToKeep) diff --git a/java/src/main/java/org/rocksdb/BackupableDBOptions.java b/java/src/main/java/org/rocksdb/BackupableDBOptions.java index d32f2db8c..ea5e51a7a 100644 --- a/java/src/main/java/org/rocksdb/BackupableDBOptions.java +++ b/java/src/main/java/org/rocksdb/BackupableDBOptions.java @@ -6,7 +6,6 @@ package org.rocksdb; import java.io.File; -import java.nio.file.Path; /** *

BackupableDBOptions to control the behavior of a backupable database. @@ -27,12 +26,16 @@ public class BackupableDBOptions extends RocksObject { * @throws java.lang.IllegalArgumentException if illegal path is used. */ public BackupableDBOptions(final String path) { - super(); - File backupPath = path == null ? null : new File(path); + super(newBackupableDBOptions(ensureWritableFile(path))); + } + + private static String ensureWritableFile(final String path) { + final File backupPath = path == null ? null : new File(path); if (backupPath == null || !backupPath.isDirectory() || !backupPath.canWrite()) { throw new IllegalArgumentException("Illegal path provided."); + } else { + return path; } - newBackupableDBOptions(path); } /** @@ -41,7 +44,7 @@ public class BackupableDBOptions extends RocksObject { * @return the path to the BackupableDB directory. */ public String backupDir() { - assert(isInitialized()); + assert(isOwningHandle()); return backupDir(nativeHandle_); } @@ -58,7 +61,7 @@ public class BackupableDBOptions extends RocksObject { * @return instance of current BackupableDBOptions. */ public BackupableDBOptions setShareTableFiles(final boolean shareTableFiles) { - assert(isInitialized()); + assert(isOwningHandle()); setShareTableFiles(nativeHandle_, shareTableFiles); return this; } @@ -70,7 +73,7 @@ public class BackupableDBOptions extends RocksObject { * backups. */ public boolean shareTableFiles() { - assert(isInitialized()); + assert(isOwningHandle()); return shareTableFiles(nativeHandle_); } @@ -87,7 +90,7 @@ public class BackupableDBOptions extends RocksObject { * @return instance of current BackupableDBOptions. */ public BackupableDBOptions setSync(final boolean sync) { - assert(isInitialized()); + assert(isOwningHandle()); setSync(nativeHandle_, sync); return this; } @@ -98,7 +101,7 @@ public class BackupableDBOptions extends RocksObject { * @return boolean value if synchronous backups are configured. 
*/ public boolean sync() { - assert(isInitialized()); + assert(isOwningHandle()); return sync(nativeHandle_); } @@ -112,7 +115,7 @@ public class BackupableDBOptions extends RocksObject { * @return instance of current BackupableDBOptions. */ public BackupableDBOptions setDestroyOldData(final boolean destroyOldData) { - assert(isInitialized()); + assert(isOwningHandle()); setDestroyOldData(nativeHandle_, destroyOldData); return this; } @@ -123,7 +126,7 @@ public class BackupableDBOptions extends RocksObject { * @return boolean value indicating if old data will be destroyed. */ public boolean destroyOldData() { - assert(isInitialized()); + assert(isOwningHandle()); return destroyOldData(nativeHandle_); } @@ -139,7 +142,7 @@ public class BackupableDBOptions extends RocksObject { * @return instance of current BackupableDBOptions. */ public BackupableDBOptions setBackupLogFiles(final boolean backupLogFiles) { - assert(isInitialized()); + assert(isOwningHandle()); setBackupLogFiles(nativeHandle_, backupLogFiles); return this; } @@ -150,7 +153,7 @@ public class BackupableDBOptions extends RocksObject { * @return boolean value indicating if log files will be persisted. */ public boolean backupLogFiles() { - assert(isInitialized()); + assert(isOwningHandle()); return backupLogFiles(nativeHandle_); } @@ -165,7 +168,7 @@ public class BackupableDBOptions extends RocksObject { * @return instance of current BackupableDBOptions. */ public BackupableDBOptions setBackupRateLimit(long backupRateLimit) { - assert(isInitialized()); + assert(isOwningHandle()); backupRateLimit = (backupRateLimit <= 0) ? 0 : backupRateLimit; setBackupRateLimit(nativeHandle_, backupRateLimit); return this; @@ -178,7 +181,7 @@ public class BackupableDBOptions extends RocksObject { * @return numerical value describing the backup transfer limit in bytes per second. 
*/ public long backupRateLimit() { - assert(isInitialized()); + assert(isOwningHandle()); return backupRateLimit(nativeHandle_); } @@ -193,7 +196,7 @@ public class BackupableDBOptions extends RocksObject { * @return instance of current BackupableDBOptions. */ public BackupableDBOptions setRestoreRateLimit(long restoreRateLimit) { - assert(isInitialized()); + assert(isOwningHandle()); restoreRateLimit = (restoreRateLimit <= 0) ? 0 : restoreRateLimit; setRestoreRateLimit(nativeHandle_, restoreRateLimit); return this; @@ -206,7 +209,7 @@ public class BackupableDBOptions extends RocksObject { * @return numerical value describing the restore transfer limit in bytes per second. */ public long restoreRateLimit() { - assert(isInitialized()); + assert(isOwningHandle()); return restoreRateLimit(nativeHandle_); } @@ -227,7 +230,7 @@ public class BackupableDBOptions extends RocksObject { */ public BackupableDBOptions setShareFilesWithChecksum( final boolean shareFilesWithChecksum) { - assert(isInitialized()); + assert(isOwningHandle()); setShareFilesWithChecksum(nativeHandle_, shareFilesWithChecksum); return this; } @@ -239,19 +242,11 @@ public class BackupableDBOptions extends RocksObject { * is active. */ public boolean shareFilesWithChecksum() { - assert(isInitialized()); + assert(isOwningHandle()); return shareFilesWithChecksum(nativeHandle_); } - /** - * Release the memory allocated for the current instance - * in the c++ side. 
- */ - @Override protected void disposeInternal() { - disposeInternal(nativeHandle_); - } - - private native void newBackupableDBOptions(String path); + private native static long newBackupableDBOptions(final String path); private native String backupDir(long handle); private native void setShareTableFiles(long handle, boolean flag); private native boolean shareTableFiles(long handle); @@ -267,5 +262,5 @@ public class BackupableDBOptions extends RocksObject { private native long restoreRateLimit(long handle); private native void setShareFilesWithChecksum(long handle, boolean flag); private native boolean shareFilesWithChecksum(long handle); - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/BloomFilter.java b/java/src/main/java/org/rocksdb/BloomFilter.java index 2c9585f71..a8c2f7e7f 100644 --- a/java/src/main/java/org/rocksdb/BloomFilter.java +++ b/java/src/main/java/org/rocksdb/BloomFilter.java @@ -22,8 +22,6 @@ public class BloomFilter extends Filter { private static final int DEFAULT_BITS_PER_KEY = 10; private static final boolean DEFAULT_MODE = true; - private final int bitsPerKey_; - private final boolean useBlockBasedMode_; /** * BloomFilter constructor @@ -73,17 +71,9 @@ public class BloomFilter extends Filter { * @param useBlockBasedMode use block based mode or full filter mode */ public BloomFilter(final int bitsPerKey, final boolean useBlockBasedMode) { - super(); - bitsPerKey_ = bitsPerKey; - useBlockBasedMode_ = useBlockBasedMode; - createNewFilter(); + super(createNewBloomFilter(bitsPerKey, useBlockBasedMode)); } - @Override - protected final void createNewFilter() { - createNewBloomFilter(bitsPerKey_, useBlockBasedMode_); - } - - private native void createNewBloomFilter(int bitsKeyKey, - boolean useBlockBasedMode); + private native static long createNewBloomFilter(final int bitsKeyKey, + final boolean useBlockBasedMode); } diff 
--git a/java/src/main/java/org/rocksdb/Checkpoint.java b/java/src/main/java/org/rocksdb/Checkpoint.java index 9faa355e1..d86722778 100644 --- a/java/src/main/java/org/rocksdb/Checkpoint.java +++ b/java/src/main/java/org/rocksdb/Checkpoint.java @@ -27,7 +27,7 @@ public class Checkpoint extends RocksObject { if (db == null) { throw new IllegalArgumentException( "RocksDB instance shall not be null."); - } else if (!db.isInitialized()) { + } else if (!db.isOwningHandle()) { throw new IllegalStateException( "RocksDB instance must be initialized."); } @@ -51,21 +51,15 @@ public class Checkpoint extends RocksObject { createCheckpoint(nativeHandle_, checkpointPath); } - @Override - protected void disposeInternal() { - disposeInternal(nativeHandle_); + private Checkpoint(final RocksDB db) { + super(newCheckpoint(db.nativeHandle_)); + this.db_ = db; } - private Checkpoint(RocksDB db) { - super(); - nativeHandle_ = newCheckpoint(db.nativeHandle_); - db_ = db; - } - - private RocksDB db_; + private final RocksDB db_; private static native long newCheckpoint(long dbHandle); - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); private native void createCheckpoint(long handle, String checkpointPath) throws RocksDBException; diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java index d414ee587..b9f6bd97e 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -12,14 +12,13 @@ package org.rocksdb; public class ColumnFamilyHandle extends RocksObject { ColumnFamilyHandle(final RocksDB rocksDB, final long nativeHandle) { - super(); - nativeHandle_ = nativeHandle; + super(nativeHandle); // rocksDB must point to a valid RocksDB instance; assert(rocksDB != null); // ColumnFamilyHandle must hold a reference to the related RocksDB instance // to guarantee that while a GC 
cycle starts ColumnFamilyHandle instances // are freed prior to RocksDB instances. - rocksDB_ = rocksDB; + this.rocksDB_ = rocksDB; } /** @@ -30,16 +29,14 @@ public class ColumnFamilyHandle extends RocksObject { * Therefore {@code disposeInternal()} checks if the RocksDB is initialized * before freeing the native handle.

*/ - @Override protected void disposeInternal() { - synchronized (rocksDB_) { - assert (isInitialized()); - if (rocksDB_.isInitialized()) { - disposeInternal(nativeHandle_); - } + @Override + protected void disposeInternal() { + if(rocksDB_.isOwningHandle()) { + disposeInternal(nativeHandle_); } } - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); private final RocksDB rocksDB_; } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index 612efbe7f..91457defa 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -29,8 +29,7 @@ public class ColumnFamilyOptions extends RocksObject * an {@code rocksdb::DBOptions} in the c++ side. */ public ColumnFamilyOptions() { - super(); - newColumnFamilyOptions(); + super(newColumnFamilyOptions()); } /** @@ -114,7 +113,7 @@ public class ColumnFamilyOptions extends RocksObject @Override public ColumnFamilyOptions setComparator(final BuiltinComparator builtinComparator) { - assert(isInitialized()); + assert(isOwningHandle()); setComparatorHandle(nativeHandle_, builtinComparator.ordinal()); return this; } @@ -122,15 +121,15 @@ public class ColumnFamilyOptions extends RocksObject @Override public ColumnFamilyOptions setComparator( final AbstractComparator> comparator) { - assert (isInitialized()); - setComparatorHandle(nativeHandle_, comparator.nativeHandle_); + assert (isOwningHandle()); + setComparatorHandle(nativeHandle_, comparator.getNativeHandle()); comparator_ = comparator; return this; } @Override public ColumnFamilyOptions setMergeOperatorName(final String name) { - assert (isInitialized()); + assert (isOwningHandle()); if (name == null) { throw new IllegalArgumentException( "Merge operator name must not be null."); @@ -154,28 +153,28 @@ public class ColumnFamilyOptions extends RocksObject @Override 
public ColumnFamilyOptions setWriteBufferSize(final long writeBufferSize) { - assert(isInitialized()); + assert(isOwningHandle()); setWriteBufferSize(nativeHandle_, writeBufferSize); return this; } @Override public long writeBufferSize() { - assert(isInitialized()); + assert(isOwningHandle()); return writeBufferSize(nativeHandle_); } @Override public ColumnFamilyOptions setMaxWriteBufferNumber( final int maxWriteBufferNumber) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber); return this; } @Override public int maxWriteBufferNumber() { - assert(isInitialized()); + assert(isOwningHandle()); return maxWriteBufferNumber(nativeHandle_); } @@ -193,14 +192,14 @@ public class ColumnFamilyOptions extends RocksObject @Override public ColumnFamilyOptions useFixedLengthPrefixExtractor(final int n) { - assert(isInitialized()); + assert(isOwningHandle()); useFixedLengthPrefixExtractor(nativeHandle_, n); return this; } @Override public ColumnFamilyOptions useCappedPrefixExtractor(final int n) { - assert(isInitialized()); + assert(isOwningHandle()); useCappedPrefixExtractor(nativeHandle_, n); return this; } @@ -485,7 +484,7 @@ public class ColumnFamilyOptions extends RocksObject public ColumnFamilyOptions setMaxTableFilesSizeFIFO( final long maxTableFilesSize) { assert(maxTableFilesSize > 0); // unsigned native type - assert(isInitialized()); + assert(isOwningHandle()); setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize); return this; } @@ -542,7 +541,7 @@ public class ColumnFamilyOptions extends RocksObject @Override public String memTableFactoryName() { - assert(isInitialized()); + assert(isOwningHandle()); return memTableFactoryName(nativeHandle_); } @@ -556,7 +555,7 @@ public class ColumnFamilyOptions extends RocksObject @Override public String tableFactoryName() { - assert(isInitialized()); + assert(isOwningHandle()); return tableFactoryName(nativeHandle_); } @@ -655,15 +654,6 @@ public class 
ColumnFamilyOptions extends RocksObject return optimizeFiltersForHits(nativeHandle_); } - /** - * Release the memory allocated for the current instance - * in the c++ side. - */ - @Override protected void disposeInternal() { - assert(isInitialized()); - disposeInternal(nativeHandle_); - } - /** *

Private constructor to be used by * {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}

@@ -671,15 +661,14 @@ public class ColumnFamilyOptions extends RocksObject * @param handle native handle to ColumnFamilyOptions instance. */ private ColumnFamilyOptions(final long handle) { - super(); - nativeHandle_ = handle; + super(handle); } private static native long getColumnFamilyOptionsFromProps( String optString); - private native void newColumnFamilyOptions(); - private native void disposeInternal(long handle); + private static native long newColumnFamilyOptions(); + @Override protected final native void disposeInternal(final long handle); private native void optimizeForPointLookup(long handle, long blockCacheSizeMb); diff --git a/java/src/main/java/org/rocksdb/Comparator.java b/java/src/main/java/org/rocksdb/Comparator.java index 41f7fbc93..009f2e51f 100644 --- a/java/src/main/java/org/rocksdb/Comparator.java +++ b/java/src/main/java/org/rocksdb/Comparator.java @@ -15,10 +15,18 @@ package org.rocksdb; * using @see org.rocksdb.DirectComparator */ public abstract class Comparator extends AbstractComparator { + + private final long nativeHandle_; + public Comparator(final ComparatorOptions copt) { super(); - createNewComparator0(copt.nativeHandle_); + this.nativeHandle_ = createNewComparator0(copt.nativeHandle_); } - private native void createNewComparator0(final long comparatorOptionsHandle); + @Override + protected final long getNativeHandle() { + return nativeHandle_; + } + + private native long createNewComparator0(final long comparatorOptionsHandle); } diff --git a/java/src/main/java/org/rocksdb/ComparatorOptions.java b/java/src/main/java/org/rocksdb/ComparatorOptions.java index f0ba520a3..3a05befa4 100644 --- a/java/src/main/java/org/rocksdb/ComparatorOptions.java +++ b/java/src/main/java/org/rocksdb/ComparatorOptions.java @@ -10,8 +10,7 @@ package org.rocksdb; */ public class ComparatorOptions extends RocksObject { public ComparatorOptions() { - super(); - newComparatorOptions(); + super(newComparatorOptions()); } /** @@ -24,7 +23,7 @@ public class 
ComparatorOptions extends RocksObject { * @return true if adaptive mutex is used. */ public boolean useAdaptiveMutex() { - assert(isInitialized()); + assert(isOwningHandle()); return useAdaptiveMutex(nativeHandle_); } @@ -39,19 +38,14 @@ public class ComparatorOptions extends RocksObject { * @return the reference to the current comparator options. */ public ComparatorOptions setUseAdaptiveMutex(final boolean useAdaptiveMutex) { - assert (isInitialized()); + assert (isOwningHandle()); setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex); return this; } - @Override protected void disposeInternal() { - assert(isInitialized()); - disposeInternal(nativeHandle_); - } - - private native void newComparatorOptions(); + private native static long newComparatorOptions(); private native boolean useAdaptiveMutex(final long handle); private native void setUseAdaptiveMutex(final long handle, final boolean useAdaptiveMutex); - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/DBOptions.java b/java/src/main/java/org/rocksdb/DBOptions.java index d2e1bf94c..a9ed2527a 100644 --- a/java/src/main/java/org/rocksdb/DBOptions.java +++ b/java/src/main/java/org/rocksdb/DBOptions.java @@ -26,9 +26,8 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { * an {@code rocksdb::DBOptions} in the c++ side. 
*/ public DBOptions() { - super(); + super(newDBOptions()); numShardBits_ = DEFAULT_NUM_SHARD_BITS; - newDBOptions(); } /** @@ -75,70 +74,70 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { @Override public DBOptions setIncreaseParallelism( final int totalThreads) { - assert (isInitialized()); + assert(isOwningHandle()); setIncreaseParallelism(nativeHandle_, totalThreads); return this; } @Override public DBOptions setCreateIfMissing(final boolean flag) { - assert(isInitialized()); + assert(isOwningHandle()); setCreateIfMissing(nativeHandle_, flag); return this; } @Override public boolean createIfMissing() { - assert(isInitialized()); + assert(isOwningHandle()); return createIfMissing(nativeHandle_); } @Override public DBOptions setCreateMissingColumnFamilies( final boolean flag) { - assert(isInitialized()); + assert(isOwningHandle()); setCreateMissingColumnFamilies(nativeHandle_, flag); return this; } @Override public boolean createMissingColumnFamilies() { - assert(isInitialized()); + assert(isOwningHandle()); return createMissingColumnFamilies(nativeHandle_); } @Override public DBOptions setErrorIfExists( final boolean errorIfExists) { - assert(isInitialized()); + assert(isOwningHandle()); setErrorIfExists(nativeHandle_, errorIfExists); return this; } @Override public boolean errorIfExists() { - assert(isInitialized()); + assert(isOwningHandle()); return errorIfExists(nativeHandle_); } @Override public DBOptions setParanoidChecks( final boolean paranoidChecks) { - assert(isInitialized()); + assert(isOwningHandle()); setParanoidChecks(nativeHandle_, paranoidChecks); return this; } @Override public boolean paranoidChecks() { - assert(isInitialized()); + assert(isOwningHandle()); return paranoidChecks(nativeHandle_); } @Override public DBOptions setRateLimiterConfig( final RateLimiterConfig config) { - assert(isInitialized()); + assert(isOwningHandle()); rateLimiterConfig_ = config; setRateLimiter(nativeHandle_, 
config.newRateLimiterHandle()); return this; @@ -146,7 +145,7 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { @Override public DBOptions setLogger(final Logger logger) { - assert(isInitialized()); + assert(isOwningHandle()); setLogger(nativeHandle_, logger.nativeHandle_); return this; } @@ -154,14 +153,14 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { @Override public DBOptions setInfoLogLevel( final InfoLogLevel infoLogLevel) { - assert(isInitialized()); + assert(isOwningHandle()); setInfoLogLevel(nativeHandle_, infoLogLevel.getValue()); return this; } @Override public InfoLogLevel infoLogLevel() { - assert(isInitialized()); + assert(isOwningHandle()); return InfoLogLevel.getInfoLogLevel( infoLogLevel(nativeHandle_)); } @@ -169,41 +168,41 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { @Override public DBOptions setMaxOpenFiles( final int maxOpenFiles) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxOpenFiles(nativeHandle_, maxOpenFiles); return this; } @Override public int maxOpenFiles() { - assert(isInitialized()); + assert(isOwningHandle()); return maxOpenFiles(nativeHandle_); } @Override public DBOptions setMaxTotalWalSize( final long maxTotalWalSize) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxTotalWalSize(nativeHandle_, maxTotalWalSize); return this; } @Override public long maxTotalWalSize() { - assert(isInitialized()); + assert(isOwningHandle()); return maxTotalWalSize(nativeHandle_); } @Override public DBOptions createStatistics() { - assert(isInitialized()); + assert(isOwningHandle()); createStatistics(nativeHandle_); return this; } @Override public Statistics statisticsPtr() { - assert(isInitialized()); + assert(isOwningHandle()); long statsPtr = statisticsPtr(nativeHandle_); if(statsPtr == 0) { @@ -217,287 +216,287 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { @Override public DBOptions 
setDisableDataSync( final boolean disableDataSync) { - assert(isInitialized()); + assert(isOwningHandle()); setDisableDataSync(nativeHandle_, disableDataSync); return this; } @Override public boolean disableDataSync() { - assert(isInitialized()); + assert(isOwningHandle()); return disableDataSync(nativeHandle_); } @Override public DBOptions setUseFsync( final boolean useFsync) { - assert(isInitialized()); + assert(isOwningHandle()); setUseFsync(nativeHandle_, useFsync); return this; } @Override public boolean useFsync() { - assert(isInitialized()); + assert(isOwningHandle()); return useFsync(nativeHandle_); } @Override public DBOptions setDbLogDir( final String dbLogDir) { - assert(isInitialized()); + assert(isOwningHandle()); setDbLogDir(nativeHandle_, dbLogDir); return this; } @Override public String dbLogDir() { - assert(isInitialized()); + assert(isOwningHandle()); return dbLogDir(nativeHandle_); } @Override public DBOptions setWalDir( final String walDir) { - assert(isInitialized()); + assert(isOwningHandle()); setWalDir(nativeHandle_, walDir); return this; } @Override public String walDir() { - assert(isInitialized()); + assert(isOwningHandle()); return walDir(nativeHandle_); } @Override public DBOptions setDeleteObsoleteFilesPeriodMicros( final long micros) { - assert(isInitialized()); + assert(isOwningHandle()); setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros); return this; } @Override public long deleteObsoleteFilesPeriodMicros() { - assert(isInitialized()); + assert(isOwningHandle()); return deleteObsoleteFilesPeriodMicros(nativeHandle_); } @Override public DBOptions setMaxBackgroundCompactions( final int maxBackgroundCompactions) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions); return this; } @Override public int maxBackgroundCompactions() { - assert(isInitialized()); + assert(isOwningHandle()); return maxBackgroundCompactions(nativeHandle_); } @Override public 
DBOptions setMaxBackgroundFlushes( final int maxBackgroundFlushes) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes); return this; } @Override public int maxBackgroundFlushes() { - assert(isInitialized()); + assert(isOwningHandle()); return maxBackgroundFlushes(nativeHandle_); } @Override public DBOptions setMaxLogFileSize( final long maxLogFileSize) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxLogFileSize(nativeHandle_, maxLogFileSize); return this; } @Override public long maxLogFileSize() { - assert(isInitialized()); + assert(isOwningHandle()); return maxLogFileSize(nativeHandle_); } @Override public DBOptions setLogFileTimeToRoll( final long logFileTimeToRoll) { - assert(isInitialized()); + assert(isOwningHandle()); setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll); return this; } @Override public long logFileTimeToRoll() { - assert(isInitialized()); + assert(isOwningHandle()); return logFileTimeToRoll(nativeHandle_); } @Override public DBOptions setKeepLogFileNum( final long keepLogFileNum) { - assert(isInitialized()); + assert(isOwningHandle()); setKeepLogFileNum(nativeHandle_, keepLogFileNum); return this; } @Override public long keepLogFileNum() { - assert(isInitialized()); + assert(isOwningHandle()); return keepLogFileNum(nativeHandle_); } @Override public DBOptions setMaxManifestFileSize( final long maxManifestFileSize) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxManifestFileSize(nativeHandle_, maxManifestFileSize); return this; } @Override public long maxManifestFileSize() { - assert(isInitialized()); + assert(isOwningHandle()); return maxManifestFileSize(nativeHandle_); } @Override public DBOptions setTableCacheNumshardbits( final int tableCacheNumshardbits) { - assert(isInitialized()); + assert(isOwningHandle()); setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits); return this; } @Override public int tableCacheNumshardbits() { - 
assert(isInitialized()); + assert(isOwningHandle()); return tableCacheNumshardbits(nativeHandle_); } @Override public DBOptions setWalTtlSeconds( final long walTtlSeconds) { - assert(isInitialized()); + assert(isOwningHandle()); setWalTtlSeconds(nativeHandle_, walTtlSeconds); return this; } @Override public long walTtlSeconds() { - assert(isInitialized()); + assert(isOwningHandle()); return walTtlSeconds(nativeHandle_); } @Override public DBOptions setWalSizeLimitMB( final long sizeLimitMB) { - assert(isInitialized()); + assert(isOwningHandle()); setWalSizeLimitMB(nativeHandle_, sizeLimitMB); return this; } @Override public long walSizeLimitMB() { - assert(isInitialized()); + assert(isOwningHandle()); return walSizeLimitMB(nativeHandle_); } @Override public DBOptions setManifestPreallocationSize( final long size) { - assert(isInitialized()); + assert(isOwningHandle()); setManifestPreallocationSize(nativeHandle_, size); return this; } @Override public long manifestPreallocationSize() { - assert(isInitialized()); + assert(isOwningHandle()); return manifestPreallocationSize(nativeHandle_); } @Override public DBOptions setAllowOsBuffer( final boolean allowOsBuffer) { - assert(isInitialized()); + assert(isOwningHandle()); setAllowOsBuffer(nativeHandle_, allowOsBuffer); return this; } @Override public boolean allowOsBuffer() { - assert(isInitialized()); + assert(isOwningHandle()); return allowOsBuffer(nativeHandle_); } @Override public DBOptions setAllowMmapReads( final boolean allowMmapReads) { - assert(isInitialized()); + assert(isOwningHandle()); setAllowMmapReads(nativeHandle_, allowMmapReads); return this; } @Override public boolean allowMmapReads() { - assert(isInitialized()); + assert(isOwningHandle()); return allowMmapReads(nativeHandle_); } @Override public DBOptions setAllowMmapWrites( final boolean allowMmapWrites) { - assert(isInitialized()); + assert(isOwningHandle()); setAllowMmapWrites(nativeHandle_, allowMmapWrites); return this; } @Override public 
boolean allowMmapWrites() { - assert(isInitialized()); + assert(isOwningHandle()); return allowMmapWrites(nativeHandle_); } @Override public DBOptions setIsFdCloseOnExec( final boolean isFdCloseOnExec) { - assert(isInitialized()); + assert(isOwningHandle()); setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec); return this; } @Override public boolean isFdCloseOnExec() { - assert(isInitialized()); + assert(isOwningHandle()); return isFdCloseOnExec(nativeHandle_); } @Override public DBOptions setStatsDumpPeriodSec( final int statsDumpPeriodSec) { - assert(isInitialized()); + assert(isOwningHandle()); setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec); return this; } @Override public int statsDumpPeriodSec() { - assert(isInitialized()); + assert(isOwningHandle()); return statsDumpPeriodSec(nativeHandle_); } @Override public DBOptions setAdviseRandomOnOpen( final boolean adviseRandomOnOpen) { - assert(isInitialized()); + assert(isOwningHandle()); setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen); return this; } @@ -510,21 +509,21 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { @Override public DBOptions setUseAdaptiveMutex( final boolean useAdaptiveMutex) { - assert(isInitialized()); + assert(isOwningHandle()); setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex); return this; } @Override public boolean useAdaptiveMutex() { - assert(isInitialized()); + assert(isOwningHandle()); return useAdaptiveMutex(nativeHandle_); } @Override public DBOptions setBytesPerSync( final long bytesPerSync) { - assert(isInitialized()); + assert(isOwningHandle()); setBytesPerSync(nativeHandle_, bytesPerSync); return this; } @@ -534,33 +533,23 @@ public class DBOptions extends RocksObject implements DBOptionsInterface { return bytesPerSync(nativeHandle_); } - /** - * Release the memory allocated for the current instance - * in the c++ side. 
- */ - @Override protected void disposeInternal() { - assert(isInitialized()); - disposeInternal(nativeHandle_); - } - static final int DEFAULT_NUM_SHARD_BITS = -1; /** *

Private constructor to be used by * {@link #getDBOptionsFromProps(java.util.Properties)}

* - * @param handle native handle to DBOptions instance. + * @param nativeHandle native handle to DBOptions instance. */ - private DBOptions(final long handle) { - super(); - nativeHandle_ = handle; + private DBOptions(final long nativeHandle) { + super(nativeHandle); } private static native long getDBOptionsFromProps( String optString); - private native void newDBOptions(); - private native void disposeInternal(long handle); + private native static long newDBOptions(); + @Override protected final native void disposeInternal(final long handle); private native void setIncreaseParallelism(long handle, int totalThreads); private native void setCreateIfMissing(long handle, boolean flag); diff --git a/java/src/main/java/org/rocksdb/DirectComparator.java b/java/src/main/java/org/rocksdb/DirectComparator.java index 68ad11f6c..ba3fce798 100644 --- a/java/src/main/java/org/rocksdb/DirectComparator.java +++ b/java/src/main/java/org/rocksdb/DirectComparator.java @@ -15,10 +15,18 @@ package org.rocksdb; * using @see org.rocksdb.Comparator */ public abstract class DirectComparator extends AbstractComparator { + + private final long nativeHandle_; + public DirectComparator(final ComparatorOptions copt) { super(); - createNewDirectComparator0(copt.nativeHandle_); + this.nativeHandle_ = createNewDirectComparator0(copt.nativeHandle_); } - private native void createNewDirectComparator0(final long comparatorOptionsHandle); + @Override + protected final long getNativeHandle() { + return nativeHandle_; + } + + private native long createNewDirectComparator0(final long comparatorOptionsHandle); } diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java index 7a59a3d82..9f8269105 100644 --- a/java/src/main/java/org/rocksdb/DirectSlice.java +++ b/java/src/main/java/org/rocksdb/DirectSlice.java @@ -16,7 +16,6 @@ import java.nio.ByteBuffer; * values consider using @see org.rocksdb.Slice */ public class DirectSlice extends AbstractSlice { 
- //TODO(AR) only needed by WriteBatchWithIndexTest until JDK8 public final static DirectSlice NONE = new DirectSlice(); /** @@ -24,9 +23,7 @@ public class DirectSlice extends AbstractSlice { * without an underlying C++ object set * at creation time. * - * Note: You should be aware that - * {@see org.rocksdb.RocksObject#disOwnNativeHandle()} is intentionally - * called from the default DirectSlice constructor, and that it is marked as + * Note: You should be aware that it is intentionally marked as * package-private. This is so that developers cannot construct their own default * DirectSlice objects (at present). As developers cannot construct their own * DirectSlice objects through this, they are not creating underlying C++ @@ -34,7 +31,6 @@ public class DirectSlice extends AbstractSlice { */ DirectSlice() { super(); - disOwnNativeHandle(); } /** @@ -45,8 +41,7 @@ public class DirectSlice extends AbstractSlice { * @param str The string */ public DirectSlice(final String str) { - super(); - createNewSliceFromString(str); + super(createNewSliceFromString(str)); } /** @@ -58,9 +53,7 @@ public class DirectSlice extends AbstractSlice { * @param length The length of the data to use for the slice */ public DirectSlice(final ByteBuffer data, final int length) { - super(); - assert(data.isDirect()); - createNewDirectSlice0(data, length); + super(createNewDirectSlice0(ensureDirect(data), length)); } /** @@ -71,9 +64,13 @@ public class DirectSlice extends AbstractSlice { * @param data The bugger containing the data */ public DirectSlice(final ByteBuffer data) { - super(); + super(createNewDirectSlice1(ensureDirect(data))); + } + + private static ByteBuffer ensureDirect(final ByteBuffer data) { + //TODO(AR) consider throwing a checked exception, as if it's not direct this can SIGSEGV assert(data.isDirect()); - createNewDirectSlice1(data); + return data; } /** @@ -85,7 +82,7 @@ public class DirectSlice extends AbstractSlice { * @return the requested byte */ public byte get(int 
offset) { - assert (isInitialized()); + assert (isOwningHandle()); return get0(nativeHandle_, offset); } @@ -93,7 +90,7 @@ public class DirectSlice extends AbstractSlice { * Clears the backing slice */ public void clear() { - assert (isInitialized()); + assert (isOwningHandle()); clear0(nativeHandle_); } @@ -105,12 +102,13 @@ public class DirectSlice extends AbstractSlice { * @param n The number of bytes to drop */ public void removePrefix(final int n) { - assert (isInitialized()); + assert (isOwningHandle()); removePrefix0(nativeHandle_, n); } - private native void createNewDirectSlice0(ByteBuffer data, int length); - private native void createNewDirectSlice1(ByteBuffer data); + private native static long createNewDirectSlice0(final ByteBuffer data, + final int length); + private native static long createNewDirectSlice1(final ByteBuffer data); @Override protected final native ByteBuffer data0(long handle); private native byte get0(long handle, int offset); private native void clear0(long handle); diff --git a/java/src/main/java/org/rocksdb/Env.java b/java/src/main/java/org/rocksdb/Env.java index 74088fd86..7d30ea5df 100644 --- a/java/src/main/java/org/rocksdb/Env.java +++ b/java/src/main/java/org/rocksdb/Env.java @@ -70,8 +70,8 @@ public abstract class Env extends RocksObject { } - protected Env() { - super(); + protected Env(final long nativeHandle) { + super(nativeHandle); } static { diff --git a/java/src/main/java/org/rocksdb/Filter.java b/java/src/main/java/org/rocksdb/Filter.java index 1cc0ccd4c..01853d969 100644 --- a/java/src/main/java/org/rocksdb/Filter.java +++ b/java/src/main/java/org/rocksdb/Filter.java @@ -13,7 +13,10 @@ package org.rocksdb; * DB::Get() call. */ public abstract class Filter extends RocksObject { - protected abstract void createNewFilter(); + + protected Filter(final long nativeHandle) { + super(nativeHandle); + } /** * Deletes underlying C++ filter pointer. 
@@ -22,10 +25,11 @@ public abstract class Filter extends RocksObject { * RocksDB instances referencing the filter are closed. * Otherwise an undefined behavior will occur. */ - @Override protected void disposeInternal() { - assert(isInitialized()); + @Override + protected void disposeInternal() { disposeInternal(nativeHandle_); } - private native void disposeInternal(long handle); + @Override + protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/FlushOptions.java b/java/src/main/java/org/rocksdb/FlushOptions.java index 9ddf95f1c..4931b5d85 100644 --- a/java/src/main/java/org/rocksdb/FlushOptions.java +++ b/java/src/main/java/org/rocksdb/FlushOptions.java @@ -10,8 +10,7 @@ public class FlushOptions extends RocksObject { * Construct a new instance of FlushOptions. */ public FlushOptions(){ - super(); - newFlushOptions(); + super(newFlushOptions()); } /** @@ -23,7 +22,7 @@ public class FlushOptions extends RocksObject { * @return instance of current FlushOptions. */ public FlushOptions setWaitForFlush(final boolean waitForFlush) { - assert(isInitialized()); + assert(isOwningHandle()); setWaitForFlush(nativeHandle_, waitForFlush); return this; } @@ -35,16 +34,12 @@ public class FlushOptions extends RocksObject { * waits for termination of the flush process. 
*/ public boolean waitForFlush() { - assert(isInitialized()); + assert(isOwningHandle()); return waitForFlush(nativeHandle_); } - @Override protected void disposeInternal() { - disposeInternal(nativeHandle_); - } - - private native void newFlushOptions(); - private native void disposeInternal(long handle); + private native static long newFlushOptions(); + @Override protected final native void disposeInternal(final long handle); private native void setWaitForFlush(long handle, boolean wait); private native boolean waitForFlush(long handle); diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java index 26359ff2e..868a43260 100644 --- a/java/src/main/java/org/rocksdb/Logger.java +++ b/java/src/main/java/org/rocksdb/Logger.java @@ -35,7 +35,9 @@ package org.rocksdb; * {@link org.rocksdb.InfoLogLevel#FATAL_LEVEL}. *

*/ -public abstract class Logger extends RocksObject { +public abstract class Logger extends NativeReference { + + final long nativeHandle_; /** *

AbstractLogger constructor.

@@ -47,7 +49,8 @@ public abstract class Logger extends RocksObject { * @param options {@link org.rocksdb.Options} instance. */ public Logger(final Options options) { - createNewLoggerOptions(options.nativeHandle_); + super(true); + this.nativeHandle_ = createNewLoggerOptions(options.nativeHandle_); } /** @@ -60,7 +63,8 @@ public abstract class Logger extends RocksObject { * @param dboptions {@link org.rocksdb.DBOptions} instance. */ public Logger(final DBOptions dboptions) { - createNewLoggerDbOptions(dboptions.nativeHandle_); + super(true); + this.nativeHandle_ = createNewLoggerDbOptions(dboptions.nativeHandle_); } /** @@ -93,16 +97,15 @@ public abstract class Logger extends RocksObject { */ @Override protected void disposeInternal() { - assert(isInitialized()); disposeInternal(nativeHandle_); } - protected native void createNewLoggerOptions( + protected native long createNewLoggerOptions( long options); - protected native void createNewLoggerDbOptions( + protected native long createNewLoggerDbOptions( long dbOptions); protected native void setInfoLogLevel(long handle, byte infoLogLevel); protected native byte infoLogLevel(long handle); - private native void disposeInternal(long handle); + private native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/NativeReference.java b/java/src/main/java/org/rocksdb/NativeReference.java new file mode 100644 index 000000000..0ca44be42 --- /dev/null +++ b/java/src/main/java/org/rocksdb/NativeReference.java @@ -0,0 +1,77 @@ +package org.rocksdb; + +import java.util.concurrent.atomic.AtomicBoolean; + +public abstract class NativeReference { + + /** + * A flag indicating whether the current {@code RocksObject} is responsible to + * release the c++ object stored in its {@code nativeHandle_}. 
+ */ + private final AtomicBoolean owningHandle_; + + protected NativeReference(final boolean owningHandle) { + this.owningHandle_ = new AtomicBoolean(owningHandle); + } + + public boolean isOwningHandle() { + return owningHandle_.get(); + } + + /** + * Revoke ownership of the native object. + *

+ * This will prevent the object from attempting to delete the underlying + * native object in its finalizer. This must be used when another object + * takes over ownership of the native object or both will attempt to delete + * the underlying object when garbage collected. + *

+ * When {@code disOwnNativeHandle()} is called, {@code dispose()} will simply set + * {@code nativeHandle_} to 0 without releasing its associated C++ resource. + * As a result, incorrectly using this function may cause a memory leak, and this + * function call will not affect the return value of {@code isInitialized()}. + *

+ * @see #dispose() + */ + protected final void disOwnNativeHandle() { + owningHandle_.set(false); + } + + /** + * Release the c++ object manually pointed by the native handle. + *

+ * Note that {@code dispose()} will also be called during the GC process + * if it was not called before its {@code RocksObject} went out-of-scope. + * However, Java may wrongly assume those objects are + * small in that they seem to only hold a long variable. As a result, + * they might have low priority in the GC process. To prevent this, + * it is suggested to call {@code dispose()} manually. + *

+ *

+ * Note that once an instance of {@code RocksObject} has been disposed, + * calling its functions will lead to undefined behavior. + *

+ */ + public final void dispose() { + if (owningHandle_.compareAndSet(true, false)) { + disposeInternal(); + } + } + + /** + * The helper function of {@code dispose()} which all subclasses of + * {@code RocksObject} must implement to release their associated + * C++ resource. + */ + protected abstract void disposeInternal(); + + /** + * Simply calls {@code dispose()} and release its c++ resource if it has not + * yet released. + */ + @Override + protected void finalize() throws Throwable { + dispose(); + super.finalize(); + } +} diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java index dfce746bf..2c6f2e81f 100644 --- a/java/src/main/java/org/rocksdb/Options.java +++ b/java/src/main/java/org/rocksdb/Options.java @@ -27,8 +27,7 @@ public class Options extends RocksObject * an {@code rocksdb::Options} in the c++ side. */ public Options() { - super(); - newOptions(); + super(newOptions()); env_ = Env.getDefault(); } @@ -42,28 +41,27 @@ public class Options extends RocksObject */ public Options(final DBOptions dbOptions, final ColumnFamilyOptions columnFamilyOptions) { - super(); - newOptions(dbOptions.nativeHandle_, columnFamilyOptions.nativeHandle_); + super(newOptions(dbOptions.nativeHandle_, columnFamilyOptions.nativeHandle_)); env_ = Env.getDefault(); } @Override public Options setIncreaseParallelism(final int totalThreads) { - assert(isInitialized()); + assert(isOwningHandle()); setIncreaseParallelism(nativeHandle_, totalThreads); return this; } @Override public Options setCreateIfMissing(final boolean flag) { - assert(isInitialized()); + assert(isOwningHandle()); setCreateIfMissing(nativeHandle_, flag); return this; } @Override public Options setCreateMissingColumnFamilies(final boolean flag) { - assert(isInitialized()); + assert(isOwningHandle()); setCreateMissingColumnFamilies(nativeHandle_, flag); return this; } @@ -77,7 +75,7 @@ public class Options extends RocksObject * @return the instance of the current 
Options. */ public Options setEnv(final Env env) { - assert(isInitialized()); + assert(isOwningHandle()); setEnv(nativeHandle_, env.nativeHandle_); env_ = env; return this; @@ -111,13 +109,13 @@ public class Options extends RocksObject @Override public boolean createIfMissing() { - assert(isInitialized()); + assert(isOwningHandle()); return createIfMissing(nativeHandle_); } @Override public boolean createMissingColumnFamilies() { - assert(isInitialized()); + assert(isOwningHandle()); return createMissingColumnFamilies(nativeHandle_); } @@ -161,7 +159,7 @@ public class Options extends RocksObject @Override public Options setComparator(final BuiltinComparator builtinComparator) { - assert(isInitialized()); + assert(isOwningHandle()); setComparatorHandle(nativeHandle_, builtinComparator.ordinal()); return this; } @@ -169,15 +167,15 @@ public class Options extends RocksObject @Override public Options setComparator( final AbstractComparator> comparator) { - assert (isInitialized()); - setComparatorHandle(nativeHandle_, comparator.nativeHandle_); + assert(isOwningHandle()); + setComparatorHandle(nativeHandle_, comparator.getNativeHandle()); comparator_ = comparator; return this; } @Override public Options setMergeOperatorName(final String name) { - assert (isInitialized()); + assert(isOwningHandle()); if (name == null) { throw new IllegalArgumentException( "Merge operator name must not be null."); @@ -194,164 +192,164 @@ public class Options extends RocksObject @Override public Options setWriteBufferSize(final long writeBufferSize) { - assert(isInitialized()); + assert(isOwningHandle()); setWriteBufferSize(nativeHandle_, writeBufferSize); return this; } @Override public long writeBufferSize() { - assert(isInitialized()); + assert(isOwningHandle()); return writeBufferSize(nativeHandle_); } @Override public Options setMaxWriteBufferNumber(final int maxWriteBufferNumber) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxWriteBufferNumber(nativeHandle_, 
maxWriteBufferNumber); return this; } @Override public int maxWriteBufferNumber() { - assert(isInitialized()); + assert(isOwningHandle()); return maxWriteBufferNumber(nativeHandle_); } @Override public boolean errorIfExists() { - assert(isInitialized()); + assert(isOwningHandle()); return errorIfExists(nativeHandle_); } @Override public Options setErrorIfExists(final boolean errorIfExists) { - assert(isInitialized()); + assert(isOwningHandle()); setErrorIfExists(nativeHandle_, errorIfExists); return this; } @Override public boolean paranoidChecks() { - assert(isInitialized()); + assert(isOwningHandle()); return paranoidChecks(nativeHandle_); } @Override public Options setParanoidChecks(final boolean paranoidChecks) { - assert(isInitialized()); + assert(isOwningHandle()); setParanoidChecks(nativeHandle_, paranoidChecks); return this; } @Override public int maxOpenFiles() { - assert(isInitialized()); + assert(isOwningHandle()); return maxOpenFiles(nativeHandle_); } @Override public Options setMaxTotalWalSize(final long maxTotalWalSize) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxTotalWalSize(nativeHandle_, maxTotalWalSize); return this; } @Override public long maxTotalWalSize() { - assert(isInitialized()); + assert(isOwningHandle()); return maxTotalWalSize(nativeHandle_); } @Override public Options setMaxOpenFiles(final int maxOpenFiles) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxOpenFiles(nativeHandle_, maxOpenFiles); return this; } @Override public boolean disableDataSync() { - assert(isInitialized()); + assert(isOwningHandle()); return disableDataSync(nativeHandle_); } @Override public Options setDisableDataSync(final boolean disableDataSync) { - assert(isInitialized()); + assert(isOwningHandle()); setDisableDataSync(nativeHandle_, disableDataSync); return this; } @Override public boolean useFsync() { - assert(isInitialized()); + assert(isOwningHandle()); return useFsync(nativeHandle_); } @Override public Options 
setUseFsync(final boolean useFsync) { - assert(isInitialized()); + assert(isOwningHandle()); setUseFsync(nativeHandle_, useFsync); return this; } @Override public String dbLogDir() { - assert(isInitialized()); + assert(isOwningHandle()); return dbLogDir(nativeHandle_); } @Override public Options setDbLogDir(final String dbLogDir) { - assert(isInitialized()); + assert(isOwningHandle()); setDbLogDir(nativeHandle_, dbLogDir); return this; } @Override public String walDir() { - assert(isInitialized()); + assert(isOwningHandle()); return walDir(nativeHandle_); } @Override public Options setWalDir(final String walDir) { - assert(isInitialized()); + assert(isOwningHandle()); setWalDir(nativeHandle_, walDir); return this; } @Override public long deleteObsoleteFilesPeriodMicros() { - assert(isInitialized()); + assert(isOwningHandle()); return deleteObsoleteFilesPeriodMicros(nativeHandle_); } @Override public Options setDeleteObsoleteFilesPeriodMicros( final long micros) { - assert(isInitialized()); + assert(isOwningHandle()); setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros); return this; } @Override public int maxBackgroundCompactions() { - assert(isInitialized()); + assert(isOwningHandle()); return maxBackgroundCompactions(nativeHandle_); } @Override public Options createStatistics() { - assert(isInitialized()); + assert(isOwningHandle()); createStatistics(nativeHandle_); return this; } @Override public Statistics statisticsPtr() { - assert(isInitialized()); + assert(isOwningHandle()); long statsPtr = statisticsPtr(nativeHandle_); if(statsPtr == 0) { @@ -365,74 +363,74 @@ public class Options extends RocksObject @Override public Options setMaxBackgroundCompactions( final int maxBackgroundCompactions) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions); return this; } @Override public int maxBackgroundFlushes() { - assert(isInitialized()); + assert(isOwningHandle()); return 
maxBackgroundFlushes(nativeHandle_); } @Override public Options setMaxBackgroundFlushes( final int maxBackgroundFlushes) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes); return this; } @Override public long maxLogFileSize() { - assert(isInitialized()); + assert(isOwningHandle()); return maxLogFileSize(nativeHandle_); } @Override public Options setMaxLogFileSize(final long maxLogFileSize) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxLogFileSize(nativeHandle_, maxLogFileSize); return this; } @Override public long logFileTimeToRoll() { - assert(isInitialized()); + assert(isOwningHandle()); return logFileTimeToRoll(nativeHandle_); } @Override public Options setLogFileTimeToRoll(final long logFileTimeToRoll) { - assert(isInitialized()); + assert(isOwningHandle()); setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll); return this; } @Override public long keepLogFileNum() { - assert(isInitialized()); + assert(isOwningHandle()); return keepLogFileNum(nativeHandle_); } @Override public Options setKeepLogFileNum(final long keepLogFileNum) { - assert(isInitialized()); + assert(isOwningHandle()); setKeepLogFileNum(nativeHandle_, keepLogFileNum); return this; } @Override public long maxManifestFileSize() { - assert(isInitialized()); + assert(isOwningHandle()); return maxManifestFileSize(nativeHandle_); } @Override public Options setMaxManifestFileSize( final long maxManifestFileSize) { - assert(isInitialized()); + assert(isOwningHandle()); setMaxManifestFileSize(nativeHandle_, maxManifestFileSize); return this; } @@ -441,7 +439,7 @@ public class Options extends RocksObject public Options setMaxTableFilesSizeFIFO( final long maxTableFilesSize) { assert(maxTableFilesSize > 0); // unsigned native type - assert(isInitialized()); + assert(isOwningHandle()); setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize); return this; } @@ -453,118 +451,118 @@ public class Options extends RocksObject 
@Override public int tableCacheNumshardbits() { - assert(isInitialized()); + assert(isOwningHandle()); return tableCacheNumshardbits(nativeHandle_); } @Override public Options setTableCacheNumshardbits( final int tableCacheNumshardbits) { - assert(isInitialized()); + assert(isOwningHandle()); setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits); return this; } @Override public long walTtlSeconds() { - assert(isInitialized()); + assert(isOwningHandle()); return walTtlSeconds(nativeHandle_); } @Override public Options setWalTtlSeconds(final long walTtlSeconds) { - assert(isInitialized()); + assert(isOwningHandle()); setWalTtlSeconds(nativeHandle_, walTtlSeconds); return this; } @Override public long walSizeLimitMB() { - assert(isInitialized()); + assert(isOwningHandle()); return walSizeLimitMB(nativeHandle_); } @Override public Options setWalSizeLimitMB(final long sizeLimitMB) { - assert(isInitialized()); + assert(isOwningHandle()); setWalSizeLimitMB(nativeHandle_, sizeLimitMB); return this; } @Override public long manifestPreallocationSize() { - assert(isInitialized()); + assert(isOwningHandle()); return manifestPreallocationSize(nativeHandle_); } @Override public Options setManifestPreallocationSize(final long size) { - assert(isInitialized()); + assert(isOwningHandle()); setManifestPreallocationSize(nativeHandle_, size); return this; } @Override public boolean allowOsBuffer() { - assert(isInitialized()); + assert(isOwningHandle()); return allowOsBuffer(nativeHandle_); } @Override public Options setAllowOsBuffer(final boolean allowOsBuffer) { - assert(isInitialized()); + assert(isOwningHandle()); setAllowOsBuffer(nativeHandle_, allowOsBuffer); return this; } @Override public boolean allowMmapReads() { - assert(isInitialized()); + assert(isOwningHandle()); return allowMmapReads(nativeHandle_); } @Override public Options setAllowMmapReads(final boolean allowMmapReads) { - assert(isInitialized()); + assert(isOwningHandle()); 
setAllowMmapReads(nativeHandle_, allowMmapReads); return this; } @Override public boolean allowMmapWrites() { - assert(isInitialized()); + assert(isOwningHandle()); return allowMmapWrites(nativeHandle_); } @Override public Options setAllowMmapWrites(final boolean allowMmapWrites) { - assert(isInitialized()); + assert(isOwningHandle()); setAllowMmapWrites(nativeHandle_, allowMmapWrites); return this; } @Override public boolean isFdCloseOnExec() { - assert(isInitialized()); + assert(isOwningHandle()); return isFdCloseOnExec(nativeHandle_); } @Override public Options setIsFdCloseOnExec(final boolean isFdCloseOnExec) { - assert(isInitialized()); + assert(isOwningHandle()); setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec); return this; } @Override public int statsDumpPeriodSec() { - assert(isInitialized()); + assert(isOwningHandle()); return statsDumpPeriodSec(nativeHandle_); } @Override public Options setStatsDumpPeriodSec(final int statsDumpPeriodSec) { - assert(isInitialized()); + assert(isOwningHandle()); setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec); return this; } @@ -576,20 +574,20 @@ public class Options extends RocksObject @Override public Options setAdviseRandomOnOpen(final boolean adviseRandomOnOpen) { - assert(isInitialized()); + assert(isOwningHandle()); setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen); return this; } @Override public boolean useAdaptiveMutex() { - assert(isInitialized()); + assert(isOwningHandle()); return useAdaptiveMutex(nativeHandle_); } @Override public Options setUseAdaptiveMutex(final boolean useAdaptiveMutex) { - assert(isInitialized()); + assert(isOwningHandle()); setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex); return this; } @@ -601,7 +599,7 @@ public class Options extends RocksObject @Override public Options setBytesPerSync(final long bytesPerSync) { - assert(isInitialized()); + assert(isOwningHandle()); setBytesPerSync(nativeHandle_, bytesPerSync); return this; } @@ -622,28 +620,28 @@ public class 
Options extends RocksObject @Override public Options setLogger(final Logger logger) { - assert(isInitialized()); + assert(isOwningHandle()); setLogger(nativeHandle_, logger.nativeHandle_); return this; } @Override public Options setInfoLogLevel(final InfoLogLevel infoLogLevel) { - assert(isInitialized()); + assert(isOwningHandle()); setInfoLogLevel(nativeHandle_, infoLogLevel.getValue()); return this; } @Override public InfoLogLevel infoLogLevel() { - assert(isInitialized()); + assert(isOwningHandle()); return InfoLogLevel.getInfoLogLevel( infoLogLevel(nativeHandle_)); } @Override public String memTableFactoryName() { - assert(isInitialized()); + assert(isOwningHandle()); return memTableFactoryName(nativeHandle_); } @@ -656,20 +654,20 @@ public class Options extends RocksObject @Override public String tableFactoryName() { - assert(isInitialized()); + assert(isOwningHandle()); return tableFactoryName(nativeHandle_); } @Override public Options useFixedLengthPrefixExtractor(final int n) { - assert(isInitialized()); + assert(isOwningHandle()); useFixedLengthPrefixExtractor(nativeHandle_, n); return this; } @Override public Options useCappedPrefixExtractor(final int n) { - assert(isInitialized()); + assert(isOwningHandle()); useCappedPrefixExtractor(nativeHandle_, n); return this; } @@ -1085,19 +1083,10 @@ public class Options extends RocksObject return optimizeFiltersForHits(nativeHandle_); } - /** - * Release the memory allocated for the current instance - * in the c++ side. 
- */ - @Override protected void disposeInternal() { - assert(isInitialized()); - disposeInternal(nativeHandle_); - } - - private native void newOptions(); - private native void newOptions(long dbOptHandle, + private native static long newOptions(); + private native static long newOptions(long dbOptHandle, long cfOptHandle); - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); private native void setEnv(long optHandle, long envHandle); private native void prepareForBulkLoad(long handle); diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java index 3baf8e808..37d9d2fe4 100644 --- a/java/src/main/java/org/rocksdb/ReadOptions.java +++ b/java/src/main/java/org/rocksdb/ReadOptions.java @@ -13,10 +13,9 @@ package org.rocksdb; */ public class ReadOptions extends RocksObject { public ReadOptions() { - super(); - newReadOptions(); + super(newReadOptions()); } - private native void newReadOptions(); + private native static long newReadOptions(); /** * If true, all data read from underlying storage will be @@ -26,7 +25,7 @@ public class ReadOptions extends RocksObject { * @return true if checksum verification is on. */ public boolean verifyChecksums() { - assert(isInitialized()); + assert(isOwningHandle()); return verifyChecksums(nativeHandle_); } private native boolean verifyChecksums(long handle); @@ -42,7 +41,7 @@ public class ReadOptions extends RocksObject { */ public ReadOptions setVerifyChecksums( final boolean verifyChecksums) { - assert(isInitialized()); + assert(isOwningHandle()); setVerifyChecksums(nativeHandle_, verifyChecksums); return this; } @@ -59,7 +58,7 @@ public class ReadOptions extends RocksObject { * @return true if the fill-cache behavior is on. 
*/ public boolean fillCache() { - assert(isInitialized()); + assert(isOwningHandle()); return fillCache(nativeHandle_); } private native boolean fillCache(long handle); @@ -74,7 +73,7 @@ public class ReadOptions extends RocksObject { * @return the reference to the current ReadOptions. */ public ReadOptions setFillCache(final boolean fillCache) { - assert(isInitialized()); + assert(isOwningHandle()); setFillCache(nativeHandle_, fillCache); return this; } @@ -92,7 +91,7 @@ public class ReadOptions extends RocksObject { * @return the reference to the current ReadOptions. */ public ReadOptions setSnapshot(final Snapshot snapshot) { - assert(isInitialized()); + assert(isOwningHandle()); if (snapshot != null) { setSnapshot(nativeHandle_, snapshot.nativeHandle_); } else { @@ -109,7 +108,7 @@ public class ReadOptions extends RocksObject { * is assigned null. */ public Snapshot snapshot() { - assert(isInitialized()); + assert(isOwningHandle()); long snapshotHandle = snapshot(nativeHandle_); if (snapshotHandle != 0) { return new Snapshot(snapshotHandle); @@ -130,7 +129,7 @@ public class ReadOptions extends RocksObject { * @return true if tailing iterator is enabled. */ public boolean tailing() { - assert(isInitialized()); + assert(isOwningHandle()); return tailing(nativeHandle_); } private native boolean tailing(long handle); @@ -147,17 +146,13 @@ public class ReadOptions extends RocksObject { * @return the reference to the current ReadOptions. 
*/ public ReadOptions setTailing(final boolean tailing) { - assert(isInitialized()); + assert(isOwningHandle()); setTailing(nativeHandle_, tailing); return this; } private native void setTailing( long handle, boolean tailing); - - @Override protected void disposeInternal() { - disposeInternal(nativeHandle_); - } - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java index 2f54cdf45..5bc5dbe72 100644 --- a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java @@ -10,9 +10,8 @@ package org.rocksdb; */ public class RemoveEmptyValueCompactionFilter extends AbstractCompactionFilter { public RemoveEmptyValueCompactionFilter() { - super(); - createNewRemoveEmptyValueCompactionFilter0(); + super(createNewRemoveEmptyValueCompactionFilter0()); } - private native void createNewRemoveEmptyValueCompactionFilter0(); + private native static long createNewRemoveEmptyValueCompactionFilter0(); } diff --git a/java/src/main/java/org/rocksdb/RestoreBackupableDB.java b/java/src/main/java/org/rocksdb/RestoreBackupableDB.java index 90592e845..86610cc31 100644 --- a/java/src/main/java/org/rocksdb/RestoreBackupableDB.java +++ b/java/src/main/java/org/rocksdb/RestoreBackupableDB.java @@ -23,8 +23,7 @@ public class RestoreBackupableDB extends RocksObject { * @param options {@link org.rocksdb.BackupableDBOptions} instance */ public RestoreBackupableDB(final BackupableDBOptions options) { - super(); - nativeHandle_ = newRestoreBackupableDB(options.nativeHandle_); + super(newRestoreBackupableDB(options.nativeHandle_)); } /** @@ -52,7 +51,7 @@ public class RestoreBackupableDB extends RocksObject { public void restoreDBFromBackup(final long backupId, final String dbDir, final String 
walDir, final RestoreOptions restoreOptions) throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); restoreDBFromBackup0(nativeHandle_, backupId, dbDir, walDir, restoreOptions.nativeHandle_); } @@ -70,7 +69,7 @@ public class RestoreBackupableDB extends RocksObject { public void restoreDBFromLatestBackup(final String dbDir, final String walDir, final RestoreOptions restoreOptions) throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); restoreDBFromLatestBackup0(nativeHandle_, dbDir, walDir, restoreOptions.nativeHandle_); } @@ -85,7 +84,7 @@ public class RestoreBackupableDB extends RocksObject { */ public void purgeOldBackups(final int numBackupsToKeep) throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); purgeOldBackups0(nativeHandle_, numBackupsToKeep); } @@ -99,7 +98,7 @@ public class RestoreBackupableDB extends RocksObject { */ public void deleteBackup(final int backupId) throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); deleteBackup0(nativeHandle_, backupId); } @@ -110,7 +109,7 @@ public class RestoreBackupableDB extends RocksObject { * @return List of {@link BackupInfo} instances. */ public List getBackupInfos() { - assert(isInitialized()); + assert(isOwningHandle()); return getBackupInfo(nativeHandle_); } @@ -122,7 +121,7 @@ public class RestoreBackupableDB extends RocksObject { * @return array of backup ids as int ids. */ public int[] getCorruptedBackups() { - assert(isInitialized()); + assert(isOwningHandle()); return getCorruptedBackups(nativeHandle_); } @@ -135,19 +134,11 @@ public class RestoreBackupableDB extends RocksObject { * native library. */ public void garbageCollect() throws RocksDBException { - assert(isInitialized()); + assert(isOwningHandle()); garbageCollect(nativeHandle_); } - /** - *

Release the memory allocated for the current instance - * in the c++ side.

- */ - @Override public synchronized void disposeInternal() { - dispose(nativeHandle_); - } - - private native long newRestoreBackupableDB(long options); + private native static long newRestoreBackupableDB(final long options); private native void restoreDBFromBackup0(long nativeHandle, long backupId, String dbDir, String walDir, long restoreOptions) throws RocksDBException; @@ -162,5 +153,5 @@ public class RestoreBackupableDB extends RocksObject { private native int[] getCorruptedBackups(long handle); private native void garbageCollect(long handle) throws RocksDBException; - private native void dispose(long nativeHandle); + @Override protected final native void disposeInternal(final long nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/RestoreOptions.java b/java/src/main/java/org/rocksdb/RestoreOptions.java index 8cfe56640..9eecbc8e1 100644 --- a/java/src/main/java/org/rocksdb/RestoreOptions.java +++ b/java/src/main/java/org/rocksdb/RestoreOptions.java @@ -23,19 +23,9 @@ public class RestoreOptions extends RocksObject { * Default: false */ public RestoreOptions(final boolean keepLogFiles) { - super(); - nativeHandle_ = newRestoreOptions(keepLogFiles); + super(newRestoreOptions(keepLogFiles)); } - /** - * Release the memory allocated for the current instance - * in the c++ side. 
- */ - @Override public synchronized void disposeInternal() { - assert(isInitialized()); - dispose(nativeHandle_); - } - - private native long newRestoreOptions(boolean keepLogFiles); - private native void dispose(long handle); + private native static long newRestoreOptions(boolean keepLogFiles); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index 786335745..87b7de026 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -179,9 +179,7 @@ public class RocksDB extends RocksObject { // when non-default Options is used, keeping an Options reference // in RocksDB can prevent Java to GC during the life-time of // the currently-created RocksDB. - RocksDB db = new RocksDB(); - db.open(options.nativeHandle_, path); - + final RocksDB db = new RocksDB(open(options.nativeHandle_, path)); db.storeOptionsInstance(options); return db; } @@ -225,13 +223,23 @@ public class RocksDB extends RocksObject { final List columnFamilyDescriptors, final List columnFamilyHandles) throws RocksDBException { - RocksDB db = new RocksDB(); - List cfReferences = db.open(options.nativeHandle_, path, - columnFamilyDescriptors, columnFamilyDescriptors.size()); + + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; for (int i = 0; i < columnFamilyDescriptors.size(); i++) { - columnFamilyHandles.add(new ColumnFamilyHandle(db, cfReferences.get(i))); + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors.get(i); + cfNames[i] = cfDescriptor.columnFamilyName(); + cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_; } + + final long[] handles = open(options.nativeHandle_, path, cfNames, cfOptionHandles); + final RocksDB db = new RocksDB(handles[0]); db.storeOptionsInstance(options); + + for (int i 
= 1; i < handles.length; i++) { + columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i])); + } + return db; } @@ -276,7 +284,7 @@ public class RocksDB extends RocksObject { throws RocksDBException { // This allows to use the rocksjni default Options instead of // the c++ one. - DBOptions options = new DBOptions(); + final DBOptions options = new DBOptions(); return openReadOnly(options, path, columnFamilyDescriptors, columnFamilyHandles); } @@ -303,9 +311,7 @@ public class RocksDB extends RocksObject { // when non-default Options is used, keeping an Options reference // in RocksDB can prevent Java to GC during the life-time of // the currently-created RocksDB. - RocksDB db = new RocksDB(); - db.openROnly(options.nativeHandle_, path); - + final RocksDB db = new RocksDB(openROnly(options.nativeHandle_, path)); db.storeOptionsInstance(options); return db; } @@ -339,14 +345,23 @@ public class RocksDB extends RocksObject { // when non-default Options is used, keeping an Options reference // in RocksDB can prevent Java to GC during the life-time of // the currently-created RocksDB. 
- RocksDB db = new RocksDB(); - List cfReferences = db.openROnly(options.nativeHandle_, path, - columnFamilyDescriptors, columnFamilyDescriptors.size()); - for (int i=0; i open(long optionsHandle, String path, - List columnFamilyDescriptors, - int columnFamilyDescriptorsLength) - throws RocksDBException; + protected native static long open(final long optionsHandle, + final String path) throws RocksDBException; + + /** + * @param optionsHandle Native handle pointing to an Options object + * @param path The directory path for the database files + * @param columnFamilyNames An array of column family names + * @param columnFamilyOptions An array of native handles pointing to ColumnFamilyOptions objects + * + * @return An array of native handles, [0] is the handle of the RocksDB object + * [1..1+n] are handles of the ColumnFamilyReferences + * + * @throws RocksDBException thrown if the database could not be opened + */ + protected native static long[] open(final long optionsHandle, + final String path, final byte[][] columnFamilyNames, + final long[] columnFamilyOptions) throws RocksDBException; + + protected native static long openROnly(final long optionsHandle, + final String path) throws RocksDBException; + + /** + * @param optionsHandle Native handle pointing to an Options object + * @param path The directory path for the database files + * @param columnFamilyNames An array of column family names + * @param columnFamilyOptions An array of native handles pointing to ColumnFamilyOptions objects + * + * @return An array of native handles, [0] is the handle of the RocksDB object + * [1..1+n] are handles of the ColumnFamilyReferences + * + * @throws RocksDBException thrown if the database could not be opened + */ + protected native static long[] openROnly(final long optionsHandle, + final String path, final byte[][] columnFamilyNames, + final long[] columnFamilyOptions + ) throws RocksDBException; + protected native static List listColumnFamilies( long optionsHandle, 
String path) throws RocksDBException; - protected native void openROnly( - long optionsHandle, String path) throws RocksDBException; - protected native List openROnly( - long optionsHandle, String path, - List columnFamilyDescriptors, - int columnFamilyDescriptorsLength) throws RocksDBException; protected native void put( long handle, byte[] key, int keyLen, byte[] value, int valueLen) throws RocksDBException; @@ -1793,7 +1828,7 @@ public class RocksDB extends RocksObject { protected native long getSnapshot(long nativeHandle); protected native void releaseSnapshot( long nativeHandle, long snapshotHandle); - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); private native long getDefaultColumnFamily(long handle); private native long createColumnFamily(long handle, ColumnFamilyDescriptor columnFamilyDescriptor) throws RocksDBException; diff --git a/java/src/main/java/org/rocksdb/RocksEnv.java b/java/src/main/java/org/rocksdb/RocksEnv.java index 4c34a9f4b..72dc22c42 100644 --- a/java/src/main/java/org/rocksdb/RocksEnv.java +++ b/java/src/main/java/org/rocksdb/RocksEnv.java @@ -24,8 +24,7 @@ public class RocksEnv extends Env { * {@code dispose()} of the created RocksEnv will be no-op.

*/ RocksEnv(final long handle) { - super(); - nativeHandle_ = handle; + super(handle); disOwnNativeHandle(); } @@ -38,6 +37,7 @@ public class RocksEnv extends Env { * RocksEnv with RocksJava. The default env allocation is managed * by C++.

*/ - @Override protected void disposeInternal() { + @Override + protected final void disposeInternal(final long handle) { } } diff --git a/java/src/main/java/org/rocksdb/RocksIterator.java b/java/src/main/java/org/rocksdb/RocksIterator.java index d93a96197..42e2460cf 100644 --- a/java/src/main/java/org/rocksdb/RocksIterator.java +++ b/java/src/main/java/org/rocksdb/RocksIterator.java @@ -33,7 +33,7 @@ public class RocksIterator extends AbstractRocksIterator { * @return key for the current entry. */ public byte[] key() { - assert(isInitialized()); + assert(isOwningHandle()); return key0(nativeHandle_); } @@ -46,11 +46,11 @@ public class RocksIterator extends AbstractRocksIterator { * @return value for the current entry. */ public byte[] value() { - assert(isInitialized()); + assert(isOwningHandle()); return value0(nativeHandle_); } - @Override final native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); @Override final native boolean isValid0(long handle); @Override final native void seekToFirst0(long handle); @Override final native void seekToLast0(long handle); diff --git a/java/src/main/java/org/rocksdb/RocksMemEnv.java b/java/src/main/java/org/rocksdb/RocksMemEnv.java index 4517577be..d7854eae1 100644 --- a/java/src/main/java/org/rocksdb/RocksMemEnv.java +++ b/java/src/main/java/org/rocksdb/RocksMemEnv.java @@ -19,15 +19,9 @@ public class RocksMemEnv extends Env { *

{@code *base_env} must remain live while the result is in use.

*/ public RocksMemEnv() { - super(); - nativeHandle_ = createMemEnv(); - } - - @Override - protected void disposeInternal() { - disposeInternal(nativeHandle_); + super(createMemEnv()); } private static native long createMemEnv(); - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksMutableObject.java b/java/src/main/java/org/rocksdb/RocksMutableObject.java new file mode 100644 index 000000000..f4ca3565d --- /dev/null +++ b/java/src/main/java/org/rocksdb/RocksMutableObject.java @@ -0,0 +1,33 @@ +package org.rocksdb; + +public abstract class RocksMutableObject extends NativeReference { + + private final boolean shouldOwnHandle; + protected volatile long nativeHandle_; + + protected RocksMutableObject() { + super(false); + this.shouldOwnHandle = false; + } + + protected RocksMutableObject(final long nativeHandle) { + super(true); + this.shouldOwnHandle = true; + this.nativeHandle_ = nativeHandle; + } + + @Override + public boolean isOwningHandle() { + return ((!shouldOwnHandle) || super.isOwningHandle()) && nativeHandle_ != 0; + } + + /** + * Deletes underlying C++ object pointer. + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + protected abstract void disposeInternal(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/RocksObject.java b/java/src/main/java/org/rocksdb/RocksObject.java index 2d645805a..d5c71aecb 100644 --- a/java/src/main/java/org/rocksdb/RocksObject.java +++ b/java/src/main/java/org/rocksdb/RocksObject.java @@ -22,104 +22,25 @@ package org.rocksdb; * as {@code dispose()} will be called in the finalizer during the * regular GC process.

*/ -public abstract class RocksObject { - protected RocksObject() { - nativeHandle_ = 0; - owningHandle_ = true; - } - - /** - * Release the c++ object manually pointed by the native handle. - *

- * Note that {@code dispose()} will also be called during the GC process - * if it was not called before its {@code RocksObject} went out-of-scope. - * However, since Java may wrongly wrongly assume those objects are - * small in that they seems to only hold a long variable. As a result, - * they might have low priority in the GC process. To prevent this, - * it is suggested to call {@code dispose()} manually. - *

- *

- * Note that once an instance of {@code RocksObject} has been disposed, - * calling its function will lead undefined behavior. - *

- */ - public final synchronized void dispose() { - if (isOwningNativeHandle() && isInitialized()) { - disposeInternal(); - } - nativeHandle_ = 0; - disOwnNativeHandle(); - } - - /** - * The helper function of {@code dispose()} which all subclasses of - * {@code RocksObject} must implement to release their associated - * C++ resource. - */ - protected abstract void disposeInternal(); - - /** - * Revoke ownership of the native object. - *

- * This will prevent the object from attempting to delete the underlying - * native object in its finalizer. This must be used when another object - * takes over ownership of the native object or both will attempt to delete - * the underlying object when garbage collected. - *

- * When {@code disOwnNativeHandle()} is called, {@code dispose()} will simply set - * {@code nativeHandle_} to 0 without releasing its associated C++ resource. - * As a result, incorrectly use this function may cause memory leak, and this - * function call will not affect the return value of {@code isInitialized()}. - *

- * @see #dispose() - * @see #isInitialized() - */ - protected void disOwnNativeHandle() { - owningHandle_ = false; - } - - /** - * Returns true if the current {@code RocksObject} is responsible to release - * its native handle. - * - * @return true if the current {@code RocksObject} is responsible to release - * its native handle. - * - * @see #disOwnNativeHandle() - * @see #dispose() - */ - protected boolean isOwningNativeHandle() { - return owningHandle_; - } - - /** - * Returns true if the associated native handle has been initialized. - * - * @return true if the associated native handle has been initialized. - * - * @see #dispose() - */ - protected boolean isInitialized() { - return (nativeHandle_ != 0); - } - - /** - * Simply calls {@code dispose()} and release its c++ resource if it has not - * yet released. - */ - @Override protected void finalize() throws Throwable { - dispose(); - super.finalize(); - } +public abstract class RocksObject extends NativeReference { /** * A long variable holding c++ pointer pointing to some RocksDB C++ object. */ - protected long nativeHandle_; + protected final long nativeHandle_; + + protected RocksObject(final long nativeHandle) { + super(true); + this.nativeHandle_ = nativeHandle; + } /** - * A flag indicating whether the current {@code RocksObject} is responsible to - * release the c++ object stored in its {@code nativeHandle_}. + * Deletes underlying C++ object pointer. 
*/ - private boolean owningHandle_; + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + protected abstract void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/Slice.java b/java/src/main/java/org/rocksdb/Slice.java index 2a1ae6fae..ae0815392 100644 --- a/java/src/main/java/org/rocksdb/Slice.java +++ b/java/src/main/java/org/rocksdb/Slice.java @@ -29,7 +29,6 @@ public class Slice extends AbstractSlice { */ private Slice() { super(); - disOwnNativeHandle(); } /** @@ -39,8 +38,7 @@ public class Slice extends AbstractSlice { * @param str String value. */ public Slice(final String str) { - super(); - createNewSliceFromString(str); + super(createNewSliceFromString(str)); } /** @@ -51,8 +49,7 @@ public class Slice extends AbstractSlice { * @param offset offset within the byte array. */ public Slice(final byte[] data, final int offset) { - super(); - createNewSlice0(data, offset); + super(createNewSlice0(data, offset)); } /** @@ -62,8 +59,7 @@ public class Slice extends AbstractSlice { * @param data byte array. 
*/ public Slice(final byte[] data) { - super(); - createNewSlice1(data); + super(createNewSlice1(data)); } /** @@ -82,7 +78,8 @@ public class Slice extends AbstractSlice { } @Override protected final native byte[] data0(long handle); - private native void createNewSlice0(byte[] data, int length); - private native void createNewSlice1(byte[] data); - private native void disposeInternalBuf(long handle); + private native static long createNewSlice0(final byte[] data, + final int length); + private native static long createNewSlice1(final byte[] data); + private native void disposeInternalBuf(final long handle); } diff --git a/java/src/main/java/org/rocksdb/Snapshot.java b/java/src/main/java/org/rocksdb/Snapshot.java index c71eac937..8475ec995 100644 --- a/java/src/main/java/org/rocksdb/Snapshot.java +++ b/java/src/main/java/org/rocksdb/Snapshot.java @@ -10,8 +10,7 @@ package org.rocksdb; */ public class Snapshot extends RocksObject { Snapshot(final long nativeHandle) { - super(); - nativeHandle_ = nativeHandle; + super(nativeHandle); } /** @@ -21,7 +20,7 @@ public class Snapshot extends RocksObject { * this snapshot. */ public long getSequenceNumber() { - assert(isInitialized()); + assert(isOwningHandle()); return getSequenceNumber(nativeHandle_); } @@ -30,7 +29,8 @@ public class Snapshot extends RocksObject { * to the snapshot is released by the database * instance. */ - @Override protected void disposeInternal() { + @Override + protected final void disposeInternal(final long handle) { } private native long getSequenceNumber(long handle); diff --git a/java/src/main/java/org/rocksdb/TransactionLogIterator.java b/java/src/main/java/org/rocksdb/TransactionLogIterator.java index 36f7e2cdf..f9684bd72 100644 --- a/java/src/main/java/org/rocksdb/TransactionLogIterator.java +++ b/java/src/main/java/org/rocksdb/TransactionLogIterator.java @@ -57,12 +57,7 @@ public class TransactionLogIterator extends RocksObject { * @param nativeHandle address to native address. 
*/ TransactionLogIterator(final long nativeHandle) { - super(); - nativeHandle_ = nativeHandle; - } - - @Override protected void disposeInternal() { - disposeInternal(nativeHandle_); + super(nativeHandle); } /** @@ -107,7 +102,7 @@ public class TransactionLogIterator extends RocksObject { private final WriteBatch writeBatch_; } - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); private native boolean isValid(long handle); private native void next(long handle); private native void status(long handle) diff --git a/java/src/main/java/org/rocksdb/TtlDB.java b/java/src/main/java/org/rocksdb/TtlDB.java index 351ab5c07..d0d2b9e6d 100644 --- a/java/src/main/java/org/rocksdb/TtlDB.java +++ b/java/src/main/java/org/rocksdb/TtlDB.java @@ -84,9 +84,7 @@ public class TtlDB extends RocksDB { */ public static TtlDB open(final Options options, final String db_path, final int ttl, final boolean readOnly) throws RocksDBException { - TtlDB ttldb = new TtlDB(); - ttldb.open(options.nativeHandle_, db_path, ttl, readOnly); - return ttldb; + return new TtlDB(open(options.nativeHandle_, db_path, ttl, readOnly)); } /** @@ -117,12 +115,25 @@ public class TtlDB extends RocksDB { throw new IllegalArgumentException("There must be a ttl value per column" + "family handle."); } - TtlDB ttlDB = new TtlDB(); - List cfReferences = ttlDB.openCF(options.nativeHandle_, db_path, - columnFamilyDescriptors, columnFamilyDescriptors.size(), - ttlValues, readOnly); - for (int i=0; i */ - @Override public synchronized void close() { - if (isInitialized()) { + @Override + public void close() { super.close(); - } } /** @@ -175,22 +184,25 @@ public class TtlDB extends RocksDB { * {@link #open(DBOptions, String, java.util.List, java.util.List, * java.util.List, boolean)}. *

+ * + * @param nativeHandle The native handle of the C++ TtlDB object */ - protected TtlDB() { - super(); + protected TtlDB(final long nativeHandle) { + super(nativeHandle); } @Override protected void finalize() throws Throwable { - close(); + close(); //TODO(AR) revisit here when implementing AutoCloseable super.finalize(); } - private native void open(long optionsHandle, String db_path, int ttl, - boolean readOnly) throws RocksDBException; - private native List openCF(long optionsHandle, String db_path, - List columnFamilyDescriptors, - int columnFamilyDescriptorsLength, List ttlValues, - boolean readOnly) throws RocksDBException; + private native static long open(final long optionsHandle, + final String db_path, final int ttl, final boolean readOnly) + throws RocksDBException; + private native static long[] openCF(final long optionsHandle, + final String db_path, final byte[][] columnFamilyNames, + final long[] columnFamilyOptions, final int[] ttlValues, + final boolean readOnly) throws RocksDBException; private native long createColumnFamilyWithTtl(long handle, ColumnFamilyDescriptor columnFamilyDescriptor, int ttl) throws RocksDBException; diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java index b807810dc..6d06c8bd3 100644 --- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java +++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java @@ -23,13 +23,13 @@ public class WBWIRocksIterator extends AbstractRocksIterator> fallbackIndexComparator, final int reservedBytes, final boolean overwriteKey) { - super(); - newWriteBatchWithIndex(fallbackIndexComparator.nativeHandle_, reservedBytes, overwriteKey); + super(newWriteBatchWithIndex(fallbackIndexComparator.getNativeHandle(), reservedBytes, overwriteKey)); } /** @@ -126,7 +123,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator); } - 
@Override final native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); @Override final native int count0(); @Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen); @Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen, @@ -139,10 +136,11 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { @Override final native void putLogData(byte[] blob, int blobLen); @Override final native void clear0(); - private native void newWriteBatchWithIndex(); - private native void newWriteBatchWithIndex(boolean overwriteKey); - private native void newWriteBatchWithIndex(long fallbackIndexComparatorHandle, int reservedBytes, - boolean overwriteKey); + private native static long newWriteBatchWithIndex(); + private native static long newWriteBatchWithIndex(final boolean overwriteKey); + private native static long newWriteBatchWithIndex( + final long fallbackIndexComparatorHandle, final int reservedBytes, + final boolean overwriteKey); private native long iterator0(); private native long iterator1(long cfHandle); private native long iteratorWithBase(long baseIteratorHandle, long cfHandle); diff --git a/java/src/main/java/org/rocksdb/WriteOptions.java b/java/src/main/java/org/rocksdb/WriteOptions.java index d6a32fb4f..4e7abd873 100644 --- a/java/src/main/java/org/rocksdb/WriteOptions.java +++ b/java/src/main/java/org/rocksdb/WriteOptions.java @@ -16,13 +16,8 @@ public class WriteOptions extends RocksObject { * Construct WriteOptions instance. 
*/ public WriteOptions() { - super(); - newWriteOptions(); - } + super(newWriteOptions()); - @Override protected void disposeInternal() { - assert(isInitialized()); - disposeInternal(nativeHandle_); } /** @@ -97,10 +92,10 @@ public class WriteOptions extends RocksObject { return disableWAL(nativeHandle_); } - private native void newWriteOptions(); + private native static long newWriteOptions(); private native void setSync(long handle, boolean flag); private native boolean sync(long handle); private native void setDisableWAL(long handle, boolean flag); private native boolean disableWAL(long handle); - private native void disposeInternal(long handle); + @Override protected final native void disposeInternal(final long handle); } diff --git a/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java b/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java index 837610d29..08cac9bce 100644 --- a/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java +++ b/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java @@ -209,7 +209,9 @@ public class WriteBatchWithIndexTest { it.seek(key); assertThat(it.isValid()).isTrue(); - assertThat(it.entry().equals(expected[testOffset])).isTrue(); + + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry.equals(expected[testOffset])).isTrue(); } //forward iterative access From 0e7e6f6e4b05350d71f1fa7aee3dde199e014676 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Sun, 24 Jan 2016 14:04:28 +0000 Subject: [PATCH 2/9] Improve Javadoc --- .../org/rocksdb/AbstractCompactionFilter.java | 7 ++++--- java/src/main/java/org/rocksdb/BackupEngine.java | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java index 6853f1d4b..7d3c5bcd9 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java +++ 
b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -8,7 +8,8 @@ package org.rocksdb; * A CompactionFilter allows an application to modify/delete a key-value at * the time of compaction. * - * At present we just permit an overriding Java class to wrap a C++ implementation + * At present we just permit an overriding Java class to wrap a C++ + * implementation */ public abstract class AbstractCompactionFilter> extends RocksObject { @@ -18,10 +19,10 @@ public abstract class AbstractCompactionFilter> } /** - * Deletes underlying C++ comparator pointer. + * Deletes underlying C++ compaction filter pointer. * * Note that this function should be called only after all - * RocksDB instances referencing the comparator are closed. + * RocksDB instances referencing the compaction filter are closed. * Otherwise an undefined behavior will occur. */ @Override diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java index 776307a74..606c5d951 100644 --- a/java/src/main/java/org/rocksdb/BackupEngine.java +++ b/java/src/main/java/org/rocksdb/BackupEngine.java @@ -30,6 +30,7 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * @param options Any options for the backup engine * * @return A new BackupEngine instance + * @throws RocksDBException thrown if the backup engine could not be opened */ public static BackupEngine open(final Env env, final BackupableDBOptions options) throws RocksDBException { @@ -45,6 +46,8 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * @param db The database to backup * * Note - This method is not thread safe + + * @throws RocksDBException thrown if a new backup could not be created */ public void createNewBackup(final RocksDB db) throws RocksDBException { createNewBackup(db, false); @@ -68,6 +71,8 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * parameter.
* * Note - This method is not thread safe + * + * @throws RocksDBException thrown if a new backup could not be created */ public void createNewBackup( final RocksDB db, final boolean flushBeforeBackup) @@ -116,6 +121,8 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * Deletes old backups, keeping just the latest numBackupsToKeep * * @param numBackupsToKeep The latest n backups to keep + * + * @throws RocksDBException thrown if the old backups could not be deleted */ public void purgeOldBackups( final int numBackupsToKeep) throws RocksDBException { @@ -127,6 +134,8 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * Deletes a backup * * @param backupId The id of the backup to delete + * + * @throws RocksDBException thrown if the backup could not be deleted */ public void deleteBackup(final int backupId) throws RocksDBException { assert (isOwningHandle()); @@ -152,6 +161,8 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * @param walDir The location of the log files for your database, * often the same as dbDir * @param restoreOptions Options for controlling the restore + * + * @throws RocksDBException thrown if the database could not be restored */ public void restoreDbFromBackup( final int backupId, final String dbDir, final String walDir, @@ -167,6 +178,8 @@ public class BackupEngine extends RocksObject implements AutoCloseable { * @param dbDir The directory to restore the backup to, i.e. 
where your database is * @param walDir The location of the log files for your database, often the same as dbDir * @param restoreOptions Options for controlling the restore + * + * @throws RocksDBException thrown if the database could not be restored */ public void restoreDbFromLatestBackup( final String dbDir, final String walDir, @@ -178,6 +191,8 @@ public class BackupEngine extends RocksObject implements AutoCloseable { /** * Close the Backup Engine + * + * @throws RocksDBException thrown if the backup engine could not be closed */ @Override public void close() throws RocksDBException { From 76e8beeeb9b3172a1e02853e4c5c7485894b7e04 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Mon, 1 Feb 2016 20:00:40 +0000 Subject: [PATCH 3/9] Pass by pointer from/to Java from JNI not by object --- java/rocksjni/portal.h | 6 - java/rocksjni/rocksjni.cc | 315 ++++++++---------- java/rocksjni/ttl.cc | 23 +- java/rocksjni/write_batch.cc | 75 +++-- java/rocksjni/write_batch_test.cc | 26 +- java/rocksjni/write_batch_with_index.cc | 113 +++---- .../java/org/rocksdb/AbstractWriteBatch.java | 43 ++- java/src/main/java/org/rocksdb/RocksDB.java | 121 +++---- java/src/main/java/org/rocksdb/TtlDB.java | 7 +- .../src/main/java/org/rocksdb/WriteBatch.java | 35 +- .../java/org/rocksdb/WriteBatchWithIndex.java | 44 ++- .../java/org/rocksdb/ColumnFamilyTest.java | 4 +- .../test/java/org/rocksdb/RocksDBTest.java | 14 +- .../test/java/org/rocksdb/WriteBatchTest.java | 24 +- 14 files changed, 422 insertions(+), 428 deletions(-) diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h index 9f93be4d1..ef7df8837 100644 --- a/java/rocksjni/portal.h +++ b/java/rocksjni/portal.h @@ -58,12 +58,6 @@ template class RocksDBNativeClass { assert(fid != nullptr); return fid; } - - // Get the pointer from Java - static PTR getHandle(JNIEnv* env, jobject jobj) { - return reinterpret_cast( - env->GetLongField(jobj, getHandleFieldID(env))); - } }; // Native class template for sub-classes of 
RocksMutableObject diff --git a/java/rocksjni/rocksjni.cc b/java/rocksjni/rocksjni.cc index eb704ad26..5004a27fc 100644 --- a/java/rocksjni/rocksjni.cc +++ b/java/rocksjni/rocksjni.cc @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -323,12 +324,12 @@ void Java_org_rocksdb_RocksDB_put__JJ_3BI_3BIJ( /* * Class: org_rocksdb_RocksDB * Method: write0 - * Signature: (JJ)V + * Signature: (JJJ)V */ void Java_org_rocksdb_RocksDB_write0( - JNIEnv* env, jobject jdb, + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jwrite_options_handle, jlong jwb_handle) { - rocksdb::DB* db = rocksdb::RocksDBJni::getHandle(env, jdb); + auto* db = reinterpret_cast(jdb_handle); auto* write_options = reinterpret_cast( jwrite_options_handle); auto* wb = reinterpret_cast(jwb_handle); @@ -343,12 +344,12 @@ void Java_org_rocksdb_RocksDB_write0( /* * Class: org_rocksdb_RocksDB * Method: write1 - * Signature: (JJ)V + * Signature: (JJJ)V */ void Java_org_rocksdb_RocksDB_write1( - JNIEnv* env, jobject jdb, + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jwrite_options_handle, jlong jwbwi_handle) { - rocksdb::DB* db = rocksdb::RocksDBJni::getHandle(env, jdb); + auto* db = reinterpret_cast(jdb_handle); auto* write_options = reinterpret_cast( jwrite_options_handle); auto* wbwi = reinterpret_cast(jwbwi_handle); @@ -392,52 +393,16 @@ jboolean key_may_exist_helper(JNIEnv* env, rocksdb::DB* db, return static_cast(keyMayExist); } -/* - * Class: org_rocksdb_RocksDB - * Method: keyMayExist - * Signature: ([BILjava/lang/StringBuffer;)Z - */ -jboolean Java_org_rocksdb_RocksDB_keyMayExist___3BILjava_lang_StringBuffer_2( - JNIEnv* env, jobject jdb, jbyteArray jkey, jint jkey_len, - jobject jstring_buffer) { - rocksdb::DB* db = rocksdb::RocksDBJni::getHandle(env, jdb); - return key_may_exist_helper(env, db, rocksdb::ReadOptions(), - nullptr, jkey, jkey_len, jstring_buffer); -} - -/* - * Class: org_rocksdb_RocksDB - * Method: keyMayExist - * Signature: ([BIJLjava/lang/StringBuffer;)Z 
- */ -jboolean Java_org_rocksdb_RocksDB_keyMayExist___3BIJLjava_lang_StringBuffer_2( - JNIEnv* env, jobject jdb, jbyteArray jkey, jint jkey_len, - jlong jcf_handle, jobject jstring_buffer) { - rocksdb::DB* db = rocksdb::RocksDBJni::getHandle(env, jdb); - auto cf_handle = reinterpret_cast( - jcf_handle); - if (cf_handle != nullptr) { - return key_may_exist_helper(env, db, rocksdb::ReadOptions(), - cf_handle, jkey, jkey_len, jstring_buffer); - } else { - rocksdb::RocksDBExceptionJni::ThrowNew(env, - rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); - } - return true; -} - /* * Class: org_rocksdb_RocksDB * Method: keyMayExist * Signature: (J[BILjava/lang/StringBuffer;)Z */ jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BILjava_lang_StringBuffer_2( - JNIEnv* env, jobject jdb, jlong jread_options_handle, - jbyteArray jkey, jint jkey_len, jobject jstring_buffer) { - rocksdb::DB* db = rocksdb::RocksDBJni::getHandle(env, jdb); - auto& read_options = *reinterpret_cast( - jread_options_handle); - return key_may_exist_helper(env, db, read_options, + JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jkey, jint jkey_len, + jobject jstring_buffer) { + auto* db = reinterpret_cast(jdb_handle); + return key_may_exist_helper(env, db, rocksdb::ReadOptions(), nullptr, jkey, jkey_len, jstring_buffer); } @@ -447,12 +412,48 @@ jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BILjava_lang_StringBuffer_2( * Signature: (J[BIJLjava/lang/StringBuffer;)Z */ jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIJLjava_lang_StringBuffer_2( - JNIEnv* env, jobject jdb, jlong jread_options_handle, - jbyteArray jkey, jint jkey_len, jlong jcf_handle, jobject jstring_buffer) { - rocksdb::DB* db = rocksdb::RocksDBJni::getHandle(env, jdb); + JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jkey, jint jkey_len, + jlong jcf_handle, jobject jstring_buffer) { + auto* db = reinterpret_cast(jdb_handle); + auto* cf_handle = reinterpret_cast( + jcf_handle); + if (cf_handle != 
nullptr) { + return key_may_exist_helper(env, db, rocksdb::ReadOptions(), + cf_handle, jkey, jkey_len, jstring_buffer); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + return true; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExist + * Signature: (JJ[BILjava/lang/StringBuffer;)Z + */ +jboolean Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BILjava_lang_StringBuffer_2( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jread_options_handle, + jbyteArray jkey, jint jkey_len, jobject jstring_buffer) { + auto* db = reinterpret_cast(jdb_handle); auto& read_options = *reinterpret_cast( jread_options_handle); - auto cf_handle = reinterpret_cast( + return key_may_exist_helper(env, db, read_options, + nullptr, jkey, jkey_len, jstring_buffer); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExist + * Signature: (JJ[BIJLjava/lang/StringBuffer;)Z + */ +jboolean Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIJLjava_lang_StringBuffer_2( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jread_options_handle, + jbyteArray jkey, jint jkey_len, jlong jcf_handle, jobject jstring_buffer) { + auto* db = reinterpret_cast(jdb_handle); + auto& read_options = *reinterpret_cast( + jread_options_handle); + auto* cf_handle = reinterpret_cast( jcf_handle); if (cf_handle != nullptr) { return key_may_exist_helper(env, db, read_options, cf_handle, @@ -460,8 +461,8 @@ jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIJLjava_lang_StringBuffer_2( } else { rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); - } - return true; + return true; + } } ////////////////////////////////////////////////////////////////////////////// @@ -628,49 +629,38 @@ jint rocksdb_get_helper( } // cf multi get -jobject multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db, - const rocksdb::ReadOptions& rOpt, jobject jkey_list, jint jkeys_count, - jobject 
jcfhandle_list) { - std::vector keys; - std::vector keys_to_free; +jobjectArray multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db, + const rocksdb::ReadOptions& rOpt, jobjectArray jkeys, + jlongArray jcolumn_family_handles) { std::vector cf_handles; - - if (jcfhandle_list != nullptr) { - // get cf iterator - jobject cfIteratorObj = env->CallObjectMethod( - jcfhandle_list, rocksdb::ListJni::getIteratorMethod(env)); - - // iterate over keys and convert java byte array to slice - while (env->CallBooleanMethod( - cfIteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) { - jobject jobj = (jbyteArray) env->CallObjectMethod( - cfIteratorObj, rocksdb::ListJni::getNextMethod(env)); - rocksdb::ColumnFamilyHandle* cfHandle = - rocksdb::ColumnFamilyHandleJni::getHandle(env, jobj); - cf_handles.push_back(cfHandle); + if (jcolumn_family_handles != nullptr) { + jsize len_cols = env->GetArrayLength(jcolumn_family_handles); + jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, NULL); + for (int i = 0; i < len_cols; i++) { + auto* cf_handle = + reinterpret_cast(jcfh[i]); + cf_handles.push_back(cf_handle); } + env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT); } - // Process key list - // get iterator - jobject iteratorObj = env->CallObjectMethod( - jkey_list, rocksdb::ListJni::getIteratorMethod(env)); + std::vector keys; + std::vector> keys_to_free; + jsize len_keys = env->GetArrayLength(jkeys); + if(env->EnsureLocalCapacity(len_keys) != 0) { + // out of memory + return NULL; + } + for (int i = 0; i < len_keys; i++) { + jobject jk = env->GetObjectArrayElement(jkeys, i); + jbyteArray jk_ba = reinterpret_cast(jk); + jsize len_key = env->GetArrayLength(jk_ba); + jbyte* jk_val = env->GetByteArrayElements(jk_ba, NULL); - // iterate over keys and convert java byte array to slice - while (env->CallBooleanMethod( - iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) { - jbyteArray jkey = (jbyteArray) 
env->CallObjectMethod( - iteratorObj, rocksdb::ListJni::getNextMethod(env)); - jint key_length = env->GetArrayLength(jkey); - - jbyte* key = new jbyte[key_length]; - env->GetByteArrayRegion(jkey, 0, key_length, key); - // store allocated jbyte to free it after multiGet call - keys_to_free.push_back(key); - - rocksdb::Slice key_slice( - reinterpret_cast(key), key_length); + rocksdb::Slice key_slice(reinterpret_cast(jk_val), len_key); keys.push_back(key_slice); + + keys_to_free.push_back(std::make_tuple(jk_ba, jk_val, jk)); } std::vector values; @@ -681,13 +671,23 @@ jobject multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db, s = db->MultiGet(rOpt, cf_handles, keys, &values); } - // Don't reuse class pointer - jclass jclazz = env->FindClass("java/util/ArrayList"); - jmethodID mid = rocksdb::ListJni::getArrayListConstructorMethodId( - env, jclazz); - jobject jvalue_list = env->NewObject(jclazz, mid, jkeys_count); + // free up allocated byte arrays + for (std::vector>::size_type i = 0; + i < keys_to_free.size(); i++) { + jobject jk; + jbyteArray jk_ba; + jbyte* jk_val; + std::tie(jk_ba, jk_val, jk) = keys_to_free[i]; + env->ReleaseByteArrayElements(jk_ba, jk_val, JNI_ABORT); + env->DeleteLocalRef(jk); + } - // insert in java list + // prepare the results + jclass jcls_ba = env->FindClass("[B"); + jobjectArray jresults = + env->NewObjectArray(static_cast(s.size()), jcls_ba, NULL); + + // add to the jresults for (std::vector::size_type i = 0; i != s.size(); i++) { if (s[i].ok()) { jbyteArray jentry_value = @@ -695,73 +695,60 @@ jobject multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db, env->SetByteArrayRegion( jentry_value, 0, static_cast(values[i].size()), reinterpret_cast(values[i].c_str())); - env->CallBooleanMethod( - jvalue_list, rocksdb::ListJni::getListAddMethodId(env), - jentry_value); - } else { - env->CallBooleanMethod( - jvalue_list, rocksdb::ListJni::getListAddMethodId(env), nullptr); + env->SetObjectArrayElement(jresults, static_cast(i), 
jentry_value); + env->DeleteLocalRef(jentry_value); } } - // free up allocated byte arrays - for (std::vector::size_type i = 0; i != keys_to_free.size(); i++) { - delete[] keys_to_free[i]; - } - keys_to_free.clear(); - return jvalue_list; + + return jresults; } /* * Class: org_rocksdb_RocksDB * Method: multiGet - * Signature: (JLjava/util/List;I)Ljava/util/List; + * Signature: (J[[B)[[B */ -jobject Java_org_rocksdb_RocksDB_multiGet__JLjava_util_List_2I( - JNIEnv* env, jobject jdb, jlong jdb_handle, - jobject jkey_list, jint jkeys_count) { +jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B( + JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys) { return multi_get_helper(env, jdb, reinterpret_cast(jdb_handle), - rocksdb::ReadOptions(), jkey_list, jkeys_count, nullptr); + rocksdb::ReadOptions(), jkeys, nullptr); } /* * Class: org_rocksdb_RocksDB * Method: multiGet - * Signature: (JLjava/util/List;ILjava/util/List;)Ljava/util/List; + * Signature: (J[[B[J)[[B */ -jobject - Java_org_rocksdb_RocksDB_multiGet__JLjava_util_List_2ILjava_util_List_2( - JNIEnv* env, jobject jdb, jlong jdb_handle, - jobject jkey_list, jint jkeys_count, jobject jcfhandle_list) { +jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3J( + JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys, + jlongArray jcolumn_family_handles) { return multi_get_helper(env, jdb, reinterpret_cast(jdb_handle), - rocksdb::ReadOptions(), jkey_list, jkeys_count, jcfhandle_list); + rocksdb::ReadOptions(), jkeys, jcolumn_family_handles); } /* * Class: org_rocksdb_RocksDB * Method: multiGet - * Signature: (JJLjava/util/List;I)Ljava/util/List; + * Signature: (JJ[[B)[[B */ -jobject Java_org_rocksdb_RocksDB_multiGet__JJLjava_util_List_2I( - JNIEnv* env, jobject jdb, jlong jdb_handle, - jlong jropt_handle, jobject jkey_list, jint jkeys_count) { +jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle, + jobjectArray jkeys) { return 
multi_get_helper(env, jdb, reinterpret_cast(jdb_handle), - *reinterpret_cast(jropt_handle), jkey_list, - jkeys_count, nullptr); + *reinterpret_cast(jropt_handle), jkeys, nullptr); } /* * Class: org_rocksdb_RocksDB * Method: multiGet - * Signature: (JJLjava/util/List;ILjava/util/List;)Ljava/util/List; + * Signature: (JJ[[B[J)[[B */ -jobject - Java_org_rocksdb_RocksDB_multiGet__JJLjava_util_List_2ILjava_util_List_2( - JNIEnv* env, jobject jdb, jlong jdb_handle, - jlong jropt_handle, jobject jkey_list, jint jkeys_count, - jobject jcfhandle_list) { +jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3J( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle, + jobjectArray jkeys, jlongArray jcolumn_family_handles) { return multi_get_helper(env, jdb, reinterpret_cast(jdb_handle), - *reinterpret_cast(jropt_handle), jkey_list, - jkeys_count, jcfhandle_list); + *reinterpret_cast(jropt_handle), jkeys, + jcolumn_family_handles); } /* @@ -1129,47 +1116,42 @@ jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ( /* * Class: org_rocksdb_RocksDB * Method: iterators - * Signature: (JLjava/util/List;J)[J + * Signature: (J[JJ)[J */ jlongArray Java_org_rocksdb_RocksDB_iterators( - JNIEnv* env, jobject jdb, jlong db_handle, jobject jcfhandle_list, - jlong jread_options_handle) { - auto db = reinterpret_cast(db_handle); + JNIEnv* env, jobject jdb, jlong db_handle, + jlongArray jcolumn_family_handles, jlong jread_options_handle) { + auto* db = reinterpret_cast(db_handle); auto& read_options = *reinterpret_cast( jread_options_handle); std::vector cf_handles; - std::vector iterators; - - if (jcfhandle_list != nullptr) { - // get cf iterator - jobject cfIteratorObj = env->CallObjectMethod( - jcfhandle_list, rocksdb::ListJni::getIteratorMethod(env)); - - // iterate over keys and convert java byte array to slice - while (env->CallBooleanMethod( - cfIteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) { - jobject jobj = (jbyteArray) env->CallObjectMethod( - 
cfIteratorObj, rocksdb::ListJni::getNextMethod(env)); - rocksdb::ColumnFamilyHandle* cfHandle = - rocksdb::ColumnFamilyHandleJni::getHandle(env, jobj); - cf_handles.push_back(cfHandle); + if (jcolumn_family_handles != nullptr) { + jsize len_cols = env->GetArrayLength(jcolumn_family_handles); + jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, NULL); + for (int i = 0; i < len_cols; i++) { + auto* cf_handle = + reinterpret_cast(jcfh[i]); + cf_handles.push_back(cf_handle); } + env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT); } + std::vector iterators; rocksdb::Status s = db->NewIterators(read_options, cf_handles, &iterators); if (s.ok()) { jlongArray jLongArray = env->NewLongArray(static_cast(iterators.size())); - for (std::vector::size_type i = 0; i < iterators.size(); - i++) { + for (std::vector::size_type i = 0; + i < iterators.size(); i++) { env->SetLongArrayRegion(jLongArray, static_cast(i), 1, reinterpret_cast(&iterators[i])); } return jLongArray; + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return NULL; } - rocksdb::RocksDBExceptionJni::ThrowNew(env, s); - return env->NewLongArray(0); } /* @@ -1187,32 +1169,23 @@ jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily( /* * Class: org_rocksdb_RocksDB * Method: createColumnFamily - * Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;)J; + * Signature: (J[BJ)J */ jlong Java_org_rocksdb_RocksDB_createColumnFamily( JNIEnv* env, jobject jdb, jlong jdb_handle, - jobject jcf_descriptor) { + jbyteArray jcolumn_name, jlong jcolumn_options) { rocksdb::ColumnFamilyHandle* handle; auto db_handle = reinterpret_cast(jdb_handle); - // get ColumnFamilyName - jbyteArray byteArray = static_cast(env->CallObjectMethod( - jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyNameMethod( - env))); - // get CF Options - jobject jcf_opt_obj = env->CallObjectMethod(jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyOptionsMethod( - env)); - 
rocksdb::ColumnFamilyOptions* cfOptions = - rocksdb::ColumnFamilyOptionsJni::getHandle(env, jcf_opt_obj); + jbyte* cfname = env->GetByteArrayElements(jcolumn_name, 0); + const int len = env->GetArrayLength(jcolumn_name); - jbyte* cfname = env->GetByteArrayElements(byteArray, 0); - const int len = env->GetArrayLength(byteArray); + auto* cfOptions = + reinterpret_cast(jcolumn_options); rocksdb::Status s = db_handle->CreateColumnFamily( *cfOptions, std::string(reinterpret_cast(cfname), len), &handle); - env->ReleaseByteArrayElements(byteArray, cfname, 0); + env->ReleaseByteArrayElements(jcolumn_name, cfname, 0); if (s.ok()) { return reinterpret_cast(handle); diff --git a/java/rocksjni/ttl.cc b/java/rocksjni/ttl.cc index 91d3dca9a..535bc10a0 100644 --- a/java/rocksjni/ttl.cc +++ b/java/rocksjni/ttl.cc @@ -113,33 +113,24 @@ jlongArray /* * Class: org_rocksdb_TtlDB * Method: createColumnFamilyWithTtl - * Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;I)J; + * Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;[BJI)J; */ jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl( JNIEnv* env, jobject jobj, jlong jdb_handle, - jobject jcf_descriptor, jint jttl) { + jbyteArray jcolumn_name, jlong jcolumn_options, jint jttl) { rocksdb::ColumnFamilyHandle* handle; auto* db_handle = reinterpret_cast(jdb_handle); - // get ColumnFamilyName - jbyteArray byteArray = static_cast(env->CallObjectMethod( - jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyNameMethod( - env))); - // get CF Options - jobject jcf_opt_obj = env->CallObjectMethod(jcf_descriptor, - rocksdb::ColumnFamilyDescriptorJni::getColumnFamilyOptionsMethod( - env)); - rocksdb::ColumnFamilyOptions* cfOptions = - rocksdb::ColumnFamilyOptionsJni::getHandle(env, jcf_opt_obj); + jbyte* cfname = env->GetByteArrayElements(jcolumn_name, 0); + const int len = env->GetArrayLength(jcolumn_name); - jbyte* cfname = env->GetByteArrayElements(byteArray, 0); - const int len = env->GetArrayLength(byteArray); + auto* 
cfOptions = + reinterpret_cast(jcolumn_options); rocksdb::Status s = db_handle->CreateColumnFamilyWithTtl( *cfOptions, std::string(reinterpret_cast(cfname), len), &handle, jttl); - env->ReleaseByteArrayElements(byteArray, cfname, 0); + env->ReleaseByteArrayElements(jcolumn_name, cfname, 0); if (s.ok()) { return reinterpret_cast(handle); diff --git a/java/rocksjni/write_batch.cc b/java/rocksjni/write_batch.cc index 41690967c..4a73c4a79 100644 --- a/java/rocksjni/write_batch.cc +++ b/java/rocksjni/write_batch.cc @@ -39,10 +39,11 @@ jlong Java_org_rocksdb_WriteBatch_newWriteBatch( /* * Class: org_rocksdb_WriteBatch * Method: count0 - * Signature: ()I + * Signature: (J)I */ -jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* env, jobject jobj) { - rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); +jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* env, jobject jobj, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); return static_cast(wb->Count()); @@ -51,10 +52,11 @@ jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* env, jobject jobj) { /* * Class: org_rocksdb_WriteBatch * Method: clear0 - * Signature: ()V + * Signature: (J)V */ -void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* env, jobject jobj) { - rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); +void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* env, jobject jobj, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); wb->Clear(); @@ -63,13 +65,13 @@ void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* env, jobject jobj) { /* * Class: org_rocksdb_WriteBatch * Method: put - * Signature: ([BI[BI)V + * Signature: (J[BI[BI)V */ -void Java_org_rocksdb_WriteBatch_put___3BI_3BI( - JNIEnv* env, jobject jobj, +void Java_org_rocksdb_WriteBatch_put__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) { - auto* wb = 
rocksdb::WriteBatchJni::getHandle(env, jobj); + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); auto put = [&wb] (rocksdb::Slice key, rocksdb::Slice value) { wb->Put(key, value); @@ -81,13 +83,13 @@ void Java_org_rocksdb_WriteBatch_put___3BI_3BI( /* * Class: org_rocksdb_WriteBatch * Method: put - * Signature: ([BI[BIJ)V + * Signature: (J[BI[BIJ)V */ -void Java_org_rocksdb_WriteBatch_put___3BI_3BIJ( - JNIEnv* env, jobject jobj, +void Java_org_rocksdb_WriteBatch_put__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len, jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { - auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); auto* cf_handle = reinterpret_cast(jcf_handle); assert(cf_handle != nullptr); @@ -101,13 +103,13 @@ void Java_org_rocksdb_WriteBatch_put___3BI_3BIJ( /* * Class: org_rocksdb_WriteBatch * Method: merge - * Signature: ([BI[BI)V + * Signature: (J[BI[BI)V */ -void Java_org_rocksdb_WriteBatch_merge___3BI_3BI( - JNIEnv* env, jobject jobj, +void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) { - auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); auto merge = [&wb] (rocksdb::Slice key, rocksdb::Slice value) { wb->Merge(key, value); @@ -119,13 +121,13 @@ void Java_org_rocksdb_WriteBatch_merge___3BI_3BI( /* * Class: org_rocksdb_WriteBatch * Method: merge - * Signature: ([BI[BIJ)V + * Signature: (J[BI[BIJ)V */ -void Java_org_rocksdb_WriteBatch_merge___3BI_3BIJ( - JNIEnv* env, jobject jobj, +void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len, jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { - auto* wb = 
rocksdb::WriteBatchJni::getHandle(env, jobj); + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); auto* cf_handle = reinterpret_cast(jcf_handle); assert(cf_handle != nullptr); @@ -139,12 +141,12 @@ void Java_org_rocksdb_WriteBatch_merge___3BI_3BIJ( /* * Class: org_rocksdb_WriteBatch * Method: remove - * Signature: ([BI)V + * Signature: (J[BI)V */ -void Java_org_rocksdb_WriteBatch_remove___3BI( - JNIEnv* env, jobject jobj, +void Java_org_rocksdb_WriteBatch_remove__J_3BI( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len) { - auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); auto remove = [&wb] (rocksdb::Slice key) { wb->Delete(key); @@ -155,12 +157,12 @@ void Java_org_rocksdb_WriteBatch_remove___3BI( /* * Class: org_rocksdb_WriteBatch * Method: remove - * Signature: ([BIJ)V + * Signature: (J[BIJ)V */ -void Java_org_rocksdb_WriteBatch_remove___3BIJ( - JNIEnv* env, jobject jobj, +void Java_org_rocksdb_WriteBatch_remove__J_3BIJ( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len, jlong jcf_handle) { - auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); auto* cf_handle = reinterpret_cast(jcf_handle); assert(cf_handle != nullptr); @@ -173,11 +175,12 @@ void Java_org_rocksdb_WriteBatch_remove___3BIJ( /* * Class: org_rocksdb_WriteBatch * Method: putLogData - * Signature: ([BI)V + * Signature: (J[BI)V */ void Java_org_rocksdb_WriteBatch_putLogData( - JNIEnv* env, jobject jobj, jbyteArray jblob, jint jblob_len) { - auto* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jblob, + jint jblob_len) { + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); auto putLogData = [&wb] (rocksdb::Slice blob) { wb->PutLogData(blob); @@ -188,11 +191,11 @@ void Java_org_rocksdb_WriteBatch_putLogData( /* * Class: 
org_rocksdb_WriteBatch * Method: iterate - * Signature: (J)V + * Signature: (JJ)V */ void Java_org_rocksdb_WriteBatch_iterate( - JNIEnv* env , jobject jobj, jlong handlerHandle) { - rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); + JNIEnv* env , jobject jobj, jlong jwb_handle, jlong handlerHandle) { + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); rocksdb::Status s = wb->Iterate( diff --git a/java/rocksjni/write_batch_test.cc b/java/rocksjni/write_batch_test.cc index 9b4c7fd61..4e8705967 100644 --- a/java/rocksjni/write_batch_test.cc +++ b/java/rocksjni/write_batch_test.cc @@ -28,11 +28,11 @@ /* * Class: org_rocksdb_WriteBatchTest * Method: getContents - * Signature: (Lorg/rocksdb/WriteBatch;)[B + * Signature: (J)[B */ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents( - JNIEnv* env, jclass jclazz, jobject jobj) { - rocksdb::WriteBatch* b = rocksdb::WriteBatchJni::getHandle(env, jobj); + JNIEnv* env, jclass jclazz, jlong jwb_handle) { + auto* b = reinterpret_cast(jwb_handle); assert(b != nullptr); // todo: Currently the following code is directly copied from @@ -109,11 +109,11 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents( /* * Class: org_rocksdb_WriteBatchTestInternalHelper * Method: setSequence - * Signature: (Lorg/rocksdb/WriteBatch;J)V + * Signature: (JJ)V */ void Java_org_rocksdb_WriteBatchTestInternalHelper_setSequence( - JNIEnv* env, jclass jclazz, jobject jobj, jlong jsn) { - rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); + JNIEnv* env, jclass jclazz, jlong jwb_handle, jlong jsn) { + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); rocksdb::WriteBatchInternal::SetSequence( @@ -123,11 +123,11 @@ void Java_org_rocksdb_WriteBatchTestInternalHelper_setSequence( /* * Class: org_rocksdb_WriteBatchTestInternalHelper * Method: sequence - * Signature: (Lorg/rocksdb/WriteBatch;)J + * Signature: (J)J */ jlong Java_org_rocksdb_WriteBatchTestInternalHelper_sequence( - 
JNIEnv* env, jclass jclazz, jobject jobj) { - rocksdb::WriteBatch* wb = rocksdb::WriteBatchJni::getHandle(env, jobj); + JNIEnv* env, jclass jclazz, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); assert(wb != nullptr); return static_cast(rocksdb::WriteBatchInternal::Sequence(wb)); @@ -136,13 +136,13 @@ jlong Java_org_rocksdb_WriteBatchTestInternalHelper_sequence( /* * Class: org_rocksdb_WriteBatchTestInternalHelper * Method: append - * Signature: (Lorg/rocksdb/WriteBatch;Lorg/rocksdb/WriteBatch;)V + * Signature: (JJ)V */ void Java_org_rocksdb_WriteBatchTestInternalHelper_append( - JNIEnv* env, jclass jclazz, jobject jwb1, jobject jwb2) { - rocksdb::WriteBatch* wb1 = rocksdb::WriteBatchJni::getHandle(env, jwb1); + JNIEnv* env, jclass jclazz, jlong jwb_handle_1, jlong jwb_handle_2) { + auto* wb1 = reinterpret_cast(jwb_handle_1); assert(wb1 != nullptr); - rocksdb::WriteBatch* wb2 = rocksdb::WriteBatchJni::getHandle(env, jwb2); + auto* wb2 = reinterpret_cast(jwb_handle_2); assert(wb2 != nullptr); rocksdb::WriteBatchInternal::Append(wb1, wb2); diff --git a/java/rocksjni/write_batch_with_index.cc b/java/rocksjni/write_batch_with_index.cc index 1123517d9..ade91b63a 100644 --- a/java/rocksjni/write_batch_with_index.cc +++ b/java/rocksjni/write_batch_with_index.cc @@ -53,13 +53,12 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ( /* * Class: org_rocksdb_WriteBatchWithIndex - * Method: count - * Signature: ()I + * Method: count0 + * Signature: (J)I */ jint Java_org_rocksdb_WriteBatchWithIndex_count0( - JNIEnv* env, jobject jobj) { - rocksdb::WriteBatchWithIndex* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); + JNIEnv* env, jobject jobj, jlong jwbwi_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); assert(wbwi != nullptr); return static_cast(wbwi->GetWriteBatch()->Count()); @@ -68,13 +67,12 @@ jint Java_org_rocksdb_WriteBatchWithIndex_count0( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: put - * 
Signature: ([BI[BI)V + * Signature: (J[BI[BI)V */ -void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BI( - JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len, - jbyteArray jentry_value, jint jentry_value_len) { - auto* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); +void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) { + auto* wbwi = reinterpret_cast(jwbwi_handle); assert(wbwi != nullptr); auto put = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) { wbwi->Put(key, value); @@ -86,13 +84,13 @@ void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BI( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: put - * Signature: ([BI[BIJ)V + * Signature: (J[BI[BIJ)V */ -void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BIJ( - JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len, - jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { - auto* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); +void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len, + jlong jcf_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); assert(wbwi != nullptr); auto* cf_handle = reinterpret_cast(jcf_handle); assert(cf_handle != nullptr); @@ -106,13 +104,12 @@ void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BIJ( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: merge - * Signature: ([BI[BI)V + * Signature: (J[BI[BI)V */ -void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BI( - JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len, - jbyteArray jentry_value, jint jentry_value_len) { - auto* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); +void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong 
jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) { + auto* wbwi = reinterpret_cast(jwbwi_handle); assert(wbwi != nullptr); auto merge = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) { wbwi->Merge(key, value); @@ -124,13 +121,13 @@ void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BI( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: merge - * Signature: ([BI[BIJ)V + * Signature: (J[BI[BIJ)V */ -void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BIJ( - JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len, - jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { - auto* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); +void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len, + jlong jcf_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); assert(wbwi != nullptr); auto* cf_handle = reinterpret_cast(jcf_handle); assert(cf_handle != nullptr); @@ -144,12 +141,12 @@ void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BIJ( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: remove - * Signature: ([BI)V + * Signature: (J[BI)V */ -void Java_org_rocksdb_WriteBatchWithIndex_remove___3BI( - JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len) { - auto* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); +void Java_org_rocksdb_WriteBatchWithIndex_remove__J_3BI( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len) { + auto* wbwi = reinterpret_cast(jwbwi_handle); assert(wbwi != nullptr); auto remove = [&wbwi] (rocksdb::Slice key) { wbwi->Delete(key); @@ -160,13 +157,12 @@ void Java_org_rocksdb_WriteBatchWithIndex_remove___3BI( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: remove - * Signature: ([BIJ)V + * Signature: (J[BIJ)V */ -void Java_org_rocksdb_WriteBatchWithIndex_remove___3BIJ( - 
JNIEnv* env, jobject jobj, - jbyteArray jkey, jint jkey_len, jlong jcf_handle) { - auto* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); +void Java_org_rocksdb_WriteBatchWithIndex_remove__J_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jlong jcf_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); assert(wbwi != nullptr); auto* cf_handle = reinterpret_cast(jcf_handle); assert(cf_handle != nullptr); @@ -179,12 +175,12 @@ void Java_org_rocksdb_WriteBatchWithIndex_remove___3BIJ( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: putLogData - * Signature: ([BI)V + * Signature: (J[BI)V */ void Java_org_rocksdb_WriteBatchWithIndex_putLogData( - JNIEnv* env, jobject jobj, jbyteArray jblob, jint jblob_len) { - auto* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jblob, + jint jblob_len) { + auto* wbwi = reinterpret_cast(jwbwi_handle); assert(wbwi != nullptr); auto putLogData = [&wbwi] (rocksdb::Slice blob) { wbwi->PutLogData(blob); @@ -195,12 +191,11 @@ void Java_org_rocksdb_WriteBatchWithIndex_putLogData( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: clear - * Signature: ()V + * Signature: (J)V */ void Java_org_rocksdb_WriteBatchWithIndex_clear0( - JNIEnv* env, jobject jobj) { - rocksdb::WriteBatchWithIndex* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); + JNIEnv* env, jobject jobj, jlong jwbwi_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); assert(wbwi != nullptr); wbwi->GetWriteBatch()->Clear(); @@ -209,12 +204,11 @@ void Java_org_rocksdb_WriteBatchWithIndex_clear0( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: iterator0 - * Signature: ()J + * Signature: (J)J */ jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0( - JNIEnv* env, jobject jobj) { - rocksdb::WriteBatchWithIndex* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); + JNIEnv* env, jobject jobj, jlong jwbwi_handle) 
{ + auto* wbwi = reinterpret_cast(jwbwi_handle); rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator(); return reinterpret_cast(wbwi_iterator); } @@ -222,12 +216,11 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: iterator1 - * Signature: (J)J + * Signature: (JJ)J */ jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1( - JNIEnv* env, jobject jobj, jlong jcf_handle) { - rocksdb::WriteBatchWithIndex* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); auto* cf_handle = reinterpret_cast(jcf_handle); rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator(cf_handle); return reinterpret_cast(wbwi_iterator); @@ -236,12 +229,12 @@ jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1( /* * Class: org_rocksdb_WriteBatchWithIndex * Method: iteratorWithBase - * Signature: (JJ)J + * Signature: (JJJ)J */ jlong Java_org_rocksdb_WriteBatchWithIndex_iteratorWithBase( - JNIEnv* env, jobject jobj, jlong jcf_handle, jlong jbi_handle) { - rocksdb::WriteBatchWithIndex* wbwi = - rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj); + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle, + jlong jbi_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); auto* cf_handle = reinterpret_cast(jcf_handle); auto* base_iterator = reinterpret_cast(jbi_handle); auto* iterator = wbwi->NewIteratorWithBase(cf_handle, base_iterator); diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java index b40e9461e..73d9876fb 100644 --- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java +++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -14,72 +14,79 @@ public abstract class AbstractWriteBatch extends RocksObject implements WriteBat @Override public int count() { assert (isOwningHandle()); - return count0(); + return 
count0(nativeHandle_); } @Override public void put(byte[] key, byte[] value) { assert (isOwningHandle()); - put(key, key.length, value, value.length); + put(nativeHandle_, key, key.length, value, value.length); } @Override public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) { assert (isOwningHandle()); - put(key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); + put(nativeHandle_, key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); } @Override public void merge(byte[] key, byte[] value) { assert (isOwningHandle()); - merge(key, key.length, value, value.length); + merge(nativeHandle_, key, key.length, value, value.length); } @Override public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) { assert (isOwningHandle()); - merge(key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); + merge(nativeHandle_, key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); } @Override public void remove(byte[] key) { assert (isOwningHandle()); - remove(key, key.length); + remove(nativeHandle_, key, key.length); } @Override public void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key) { assert (isOwningHandle()); - remove(key, key.length, columnFamilyHandle.nativeHandle_); + remove(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); } @Override public void putLogData(byte[] blob) { assert (isOwningHandle()); - putLogData(blob, blob.length); + putLogData(nativeHandle_, blob, blob.length); } @Override public void clear() { assert (isOwningHandle()); - clear0(); + clear0(nativeHandle_); } - abstract int count0(); + abstract int count0(final long handle); - abstract void put(byte[] key, int keyLen, byte[] value, int valueLen); + abstract void put(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen); - abstract void put(byte[] key, int keyLen, byte[] value, int valueLen, long cfHandle); + 
abstract void put(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen, final long cfHandle); - abstract void merge(byte[] key, int keyLen, byte[] value, int valueLen); + abstract void merge(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen); - abstract void merge(byte[] key, int keyLen, byte[] value, int valueLen, long cfHandle); + abstract void merge(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen, final long cfHandle); - abstract void remove(byte[] key, int keyLen); + abstract void remove(final long handle, final byte[] key, + final int keyLen); - abstract void remove(byte[] key, int keyLen, long cfHandle); + abstract void remove(final long handle, final byte[] key, + final int keyLen, final long cfHandle); - abstract void putLogData(byte[] blob, int blobLen); + abstract void putLogData(final long handle, final byte[] blob, + final int blobLen); - abstract void clear0(); + abstract void clear0(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index 87b7de026..fc98ac2b0 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -477,7 +477,7 @@ public class RocksDB extends RocksObject { * @return boolean value indicating if key does not exist or might exist. 
*/ public boolean keyMayExist(final byte[] key, final StringBuffer value){ - return keyMayExist(key, key.length, value); + return keyMayExist(nativeHandle_, key, key.length, value); } /** @@ -495,7 +495,7 @@ public class RocksDB extends RocksObject { */ public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final StringBuffer value){ - return keyMayExist(key, key.length, columnFamilyHandle.nativeHandle_, + return keyMayExist(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_, value); } @@ -514,7 +514,7 @@ public class RocksDB extends RocksObject { */ public boolean keyMayExist(final ReadOptions readOptions, final byte[] key, final StringBuffer value){ - return keyMayExist(readOptions.nativeHandle_, + return keyMayExist(nativeHandle_, readOptions.nativeHandle_, key, key.length, value); } @@ -535,7 +535,7 @@ public class RocksDB extends RocksObject { public boolean keyMayExist(final ReadOptions readOptions, final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final StringBuffer value){ - return keyMayExist(readOptions.nativeHandle_, + return keyMayExist(nativeHandle_, readOptions.nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_, value); } @@ -551,7 +551,7 @@ public class RocksDB extends RocksObject { */ public void write(final WriteOptions writeOpts, final WriteBatch updates) throws RocksDBException { - write0(writeOpts.nativeHandle_, updates.nativeHandle_); + write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); } /** @@ -565,7 +565,7 @@ public class RocksDB extends RocksObject { */ public void write(final WriteOptions writeOpts, final WriteBatchWithIndex updates) throws RocksDBException { - write1(writeOpts.nativeHandle_, updates.nativeHandle_); + write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); } /** @@ -811,16 +811,15 @@ public class RocksDB extends RocksObject { throws RocksDBException { assert(keys.size() != 0); - List values = multiGet( - 
nativeHandle_, keys, keys.size()); + final byte[][] values = multiGet(nativeHandle_, keys.toArray(new byte[keys.size()][])); Map keyValueMap = new HashMap<>(); - for(int i = 0; i < values.size(); i++) { - if(values.get(i) == null) { + for(int i = 0; i < values.length; i++) { + if(values[i] == null) { continue; } - keyValueMap.put(keys.get(i), values.get(i)); + keyValueMap.put(keys.get(i), values[i]); } return keyValueMap; @@ -849,19 +848,23 @@ public class RocksDB extends RocksObject { assert(keys.size() != 0); // Check if key size equals cfList size. If not a exception must be // thrown. If not a Segmentation fault happens. - if (keys.size()!=columnFamilyHandleList.size()) { + if (keys.size() != columnFamilyHandleList.size()) { throw new IllegalArgumentException( "For each key there must be a ColumnFamilyHandle."); } - List values = multiGet(nativeHandle_, keys, keys.size(), - columnFamilyHandleList); + final long[] cfHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + final byte[][] values = multiGet(nativeHandle_, keys.toArray(new byte[keys.size()][]), + cfHandles); Map keyValueMap = new HashMap<>(); - for(int i = 0; i < values.size(); i++) { - if (values.get(i) == null) { + for(int i = 0; i < values.length; i++) { + if (values[i] == null) { continue; } - keyValueMap.put(keys.get(i), values.get(i)); + keyValueMap.put(keys.get(i), values[i]); } return keyValueMap; } @@ -881,16 +884,15 @@ public class RocksDB extends RocksObject { final List keys) throws RocksDBException { assert(keys.size() != 0); - List values = multiGet( - nativeHandle_, opt.nativeHandle_, keys, keys.size()); + final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_, keys.toArray(new byte[keys.size()][])); Map keyValueMap = new HashMap<>(); - for(int i = 0; i < values.size(); i++) { - if(values.get(i) == null) { + for(int i = 0; i < values.length; i++) { + 
if(values[i] == null) { continue; } - keyValueMap.put(keys.get(i), values.get(i)); + keyValueMap.put(keys.get(i), values[i]); } return keyValueMap; @@ -925,16 +927,18 @@ public class RocksDB extends RocksObject { throw new IllegalArgumentException( "For each key there must be a ColumnFamilyHandle."); } - - List values = multiGet(nativeHandle_, opt.nativeHandle_, - keys, keys.size(), columnFamilyHandleList); + final long[] cfHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_, keys.toArray(new byte[keys.size()][]), cfHandles); Map keyValueMap = new HashMap<>(); - for(int i = 0; i < values.size(); i++) { - if(values.get(i) == null) { + for(int i = 0; i < values.length; i++) { + if(values[i] == null) { continue; } - keyValueMap.put(keys.get(i), values.get(i)); + keyValueMap.put(keys.get(i), values[i]); } return keyValueMap; @@ -1261,11 +1265,16 @@ public class RocksDB extends RocksObject { public List newIterators( final List columnFamilyHandleList, final ReadOptions readOptions) throws RocksDBException { - List iterators = - new ArrayList<>(columnFamilyHandleList.size()); - long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandleList, + final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + + final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles, readOptions.nativeHandle_); + + final List iterators = new ArrayList<>(columnFamilyHandleList.size()); for (int i=0; i multiGet( - long dbHandle, List keys, int keysCount); - protected native List multiGet( - long dbHandle, List keys, int keysCount, List - cfHandles); - protected native List multiGet( - long dbHandle, long rOptHandle, List keys, int 
keysCount); - protected native List multiGet( - long dbHandle, long rOptHandle, List keys, int keysCount, - List cfHandles); + protected native byte[][] multiGet(final long dbHandle, final byte[][] keys); + protected native byte[][] multiGet(final long dbHandle, final byte[][] keys, final long[] columnFamilyHandles); + protected native byte[][] multiGet(final long dbHandle, final long rOptHandle, final byte[][] keys); + protected native byte[][] multiGet(final long dbHandle, final long rOptHandle, final byte[][] keys, final long[] columnFamilyHandles); protected native byte[] get( long handle, byte[] key, int keyLen) throws RocksDBException; protected native byte[] get( @@ -1822,16 +1827,16 @@ public class RocksDB extends RocksObject { protected native long iteratorCF(long handle, long cfHandle); protected native long iteratorCF(long handle, long cfHandle, long readOptHandle); - protected native long[] iterators(long handle, - List columnFamilyNames, long readOptHandle) + protected native long[] iterators(final long handle, + final long[] columnFamilyHandles, final long readOptHandle) throws RocksDBException; protected native long getSnapshot(long nativeHandle); protected native void releaseSnapshot( long nativeHandle, long snapshotHandle); @Override protected final native void disposeInternal(final long handle); private native long getDefaultColumnFamily(long handle); - private native long createColumnFamily(long handle, - ColumnFamilyDescriptor columnFamilyDescriptor) throws RocksDBException; + private native long createColumnFamily(final long handle, + final byte[] columnFamilyName, final long columnFamilyOptions) throws RocksDBException; private native void dropColumnFamily(long handle, long cfHandle) throws RocksDBException; private native void flush(long handle, long flushOptHandle) throws RocksDBException; diff --git a/java/src/main/java/org/rocksdb/TtlDB.java b/java/src/main/java/org/rocksdb/TtlDB.java index d0d2b9e6d..8589478fe 100644 --- 
a/java/src/main/java/org/rocksdb/TtlDB.java +++ b/java/src/main/java/org/rocksdb/TtlDB.java @@ -159,7 +159,8 @@ public class TtlDB extends RocksDB { final int ttl) throws RocksDBException { return new ColumnFamilyHandle(this, createColumnFamilyWithTtl(nativeHandle_, - columnFamilyDescriptor, ttl)); + columnFamilyDescriptor.columnFamilyName(), + columnFamilyDescriptor.columnFamilyOptions().nativeHandle_, ttl)); } /** @@ -203,7 +204,7 @@ public class TtlDB extends RocksDB { final String db_path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions, final int[] ttlValues, final boolean readOnly) throws RocksDBException; - private native long createColumnFamilyWithTtl(long handle, - ColumnFamilyDescriptor columnFamilyDescriptor, int ttl) + private native long createColumnFamilyWithTtl(final long handle, + final byte[] columnFamilyName, final long columnFamilyOptions, int ttl) throws RocksDBException; } diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java index 182ef87d3..760e5dea7 100644 --- a/java/src/main/java/org/rocksdb/WriteBatch.java +++ b/java/src/main/java/org/rocksdb/WriteBatch.java @@ -48,7 +48,7 @@ public class WriteBatch extends AbstractWriteBatch { * @throws RocksDBException If we cannot iterate over the batch */ public void iterate(final Handler handler) throws RocksDBException { - iterate(handler.nativeHandle_); + iterate(nativeHandle_, handler.nativeHandle_); } /** @@ -64,20 +64,27 @@ public class WriteBatch extends AbstractWriteBatch { } @Override protected final native void disposeInternal(final long handle); - @Override final native int count0(); - @Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen); - @Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen, - long cfHandle); - @Override final native void merge(byte[] key, int keyLen, byte[] value, int valueLen); - @Override final native void merge(byte[] key, int keyLen, 
byte[] value, int valueLen, - long cfHandle); - @Override final native void remove(byte[] key, int keyLen); - @Override final native void remove(byte[] key, int keyLen, long cfHandle); - @Override final native void putLogData(byte[] blob, int blobLen); - @Override final native void clear0(); + @Override final native int count0(final long handle); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override final native void remove(final long handle, final byte[] key, + final int keyLen); + @Override final native void remove(final long handle, final byte[] key, + final int keyLen, final long cfHandle); + @Override final native void putLogData(final long handle, + final byte[] blob, final int blobLen); + @Override final native void clear0(final long handle); - private native static long newWriteBatch(int reserved_bytes); - private native void iterate(long handlerHandle) throws RocksDBException; + private native static long newWriteBatch(final int reserved_bytes); + private native void iterate(final long handle, final long handlerHandle) throws RocksDBException; /** diff --git a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java index c8cf7ff0b..469945f06 100644 --- a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java +++ b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java @@ -73,7 +73,8 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { * @return An iterator for 
the Write Batch contents, restricted to the column family */ public WBWIRocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle) { - return new WBWIRocksIterator(this, iterator1(columnFamilyHandle.nativeHandle_)); + return new WBWIRocksIterator(this, iterator1(nativeHandle_, + columnFamilyHandle.nativeHandle_)); } /** @@ -87,7 +88,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { * @return An iterator for the Write Batch contents */ public WBWIRocksIterator newIterator() { - return new WBWIRocksIterator(this, iterator0()); + return new WBWIRocksIterator(this, iterator0(nativeHandle_)); } /** @@ -104,7 +105,9 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { final RocksIterator baseIterator) { RocksIterator iterator = new RocksIterator( baseIterator.parent_, - iteratorWithBase(columnFamilyHandle.nativeHandle_, baseIterator.nativeHandle_)); + iteratorWithBase(nativeHandle_, + columnFamilyHandle.nativeHandle_, + baseIterator.nativeHandle_)); //when the iterator is deleted it will also delete the baseIterator baseIterator.disOwnNativeHandle(); return iterator; @@ -124,24 +127,31 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { } @Override protected final native void disposeInternal(final long handle); - @Override final native int count0(); - @Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen); - @Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen, - long cfHandle); - @Override final native void merge(byte[] key, int keyLen, byte[] value, int valueLen); - @Override final native void merge(byte[] key, int keyLen, byte[] value, int valueLen, - long cfHandle); - @Override final native void remove(byte[] key, int keyLen); - @Override final native void remove(byte[] key, int keyLen, long cfHandle); - @Override final native void putLogData(byte[] blob, int blobLen); - @Override final native void clear0(); + @Override final native int count0(final long 
handle); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override final native void remove(final long handle, final byte[] key, + final int keyLen); + @Override final native void remove(final long handle, final byte[] key, + final int keyLen, final long cfHandle); + @Override final native void putLogData(final long handle, final byte[] blob, + final int blobLen); + @Override final native void clear0(final long handle); private native static long newWriteBatchWithIndex(); private native static long newWriteBatchWithIndex(final boolean overwriteKey); private native static long newWriteBatchWithIndex( final long fallbackIndexComparatorHandle, final int reservedBytes, final boolean overwriteKey); - private native long iterator0(); - private native long iterator1(long cfHandle); - private native long iteratorWithBase(long baseIteratorHandle, long cfHandle); + private native long iterator0(final long handle); + private native long iterator1(final long handle, final long cfHandle); + private native long iteratorWithBase(final long handle, final long baseIteratorHandle, final long cfHandle); } diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java index 5c62cca73..af0d71e70 100644 --- a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java +++ b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java @@ -383,9 +383,7 @@ public class ColumnFamilyTest { db.put(columnFamilyHandleList.get(0), 
"key".getBytes(), "value".getBytes()); db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), "value".getBytes()); - List keys = new ArrayList<>(); - keys.add("key".getBytes()); - keys.add("newcfkey".getBytes()); + List keys = Arrays.asList(new byte[][]{"key".getBytes(), "newcfkey".getBytes()}); Map retValues = db.multiGet(columnFamilyHandleList, keys); assertThat(retValues.size()).isEqualTo(2); assertThat(new String(retValues.get(keys.get(0)))) diff --git a/java/src/test/java/org/rocksdb/RocksDBTest.java b/java/src/test/java/org/rocksdb/RocksDBTest.java index c8e59a5b3..cb1c1f77d 100644 --- a/java/src/test/java/org/rocksdb/RocksDBTest.java +++ b/java/src/test/java/org/rocksdb/RocksDBTest.java @@ -9,10 +9,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Random; +import java.util.*; import static org.assertj.core.api.Assertions.assertThat; @@ -174,7 +171,7 @@ public class RocksDBTest { } @Test - public void multiGet() throws RocksDBException { + public void multiGet() throws RocksDBException, InterruptedException { RocksDB db = null; ReadOptions rOpt = null; try { @@ -182,10 +179,9 @@ public class RocksDBTest { rOpt = new ReadOptions(); db.put("key1".getBytes(), "value".getBytes()); db.put("key2".getBytes(), "12345678".getBytes()); - List lookupKeys = new ArrayList() {{ - add("key1".getBytes()); - add("key2".getBytes()); - }}; + List lookupKeys = new ArrayList<>(); + lookupKeys.add("key1".getBytes()); + lookupKeys.add("key2".getBytes()); Map results = db.multiGet(lookupKeys); assertThat(results).isNotNull(); assertThat(results.values()).isNotNull(); diff --git a/java/src/test/java/org/rocksdb/WriteBatchTest.java b/java/src/test/java/org/rocksdb/WriteBatchTest.java index 0cdfb7b1d..b7fb50533 100644 --- a/java/src/test/java/org/rocksdb/WriteBatchTest.java +++ b/java/src/test/java/org/rocksdb/WriteBatchTest.java @@ -109,7 +109,11 
@@ public class WriteBatchTest { .equals(new String(getContents(batch), "US-ASCII"))); } - static native byte[] getContents(WriteBatch batch); + static byte[] getContents(final WriteBatch wb) { + return getContents(wb.nativeHandle_); + } + + private static native byte[] getContents(final long writeBatchHandle); } /** @@ -117,7 +121,19 @@ public class WriteBatchTest { * c++ WriteBatchInternal. */ class WriteBatchTestInternalHelper { - static native void setSequence(WriteBatch batch, long sn); - static native long sequence(WriteBatch batch); - static native void append(WriteBatch b1, WriteBatch b2); + static void setSequence(final WriteBatch wb, final long sn) { + setSequence(wb.nativeHandle_, sn); + } + + static long sequence(final WriteBatch wb) { + return sequence(wb.nativeHandle_); + } + + static void append(final WriteBatch wb1, final WriteBatch wb2) { + append(wb1.nativeHandle_, wb2.nativeHandle_); + } + + private static native void setSequence(final long writeBatchHandle, final long sn); + private static native long sequence(final long writeBatchHandle); + private static native void append(final long writeBatchHandle1, final long writeBatchHandle2); } From e84137c8ae84a71ddd83fae981760046dbd7ae44 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Wed, 3 Feb 2016 17:47:38 +0000 Subject: [PATCH 4/9] Remove unnecessary java.util.List expense in JNI --- java/rocksjni/options.cc | 61 ++++++++----------- java/rocksjni/rocksjni.cc | 24 +++----- .../java/org/rocksdb/ColumnFamilyOptions.java | 14 ++--- java/src/main/java/org/rocksdb/Options.java | 14 ++--- java/src/main/java/org/rocksdb/RocksDB.java | 4 +- 5 files changed, 53 insertions(+), 64 deletions(-) diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc index b8d7684ec..018d8bbe4 100644 --- a/java/rocksjni/options.cc +++ b/java/rocksjni/options.cc @@ -1081,21 +1081,19 @@ jbyte Java_org_rocksdb_Options_compressionType( * vector. 
*/ std::vector rocksdb_compression_vector_helper( - JNIEnv* env, jobject jcompressionLevels) { + JNIEnv* env, jbyteArray jcompressionLevels) { std::vector compressionLevels; - // iterate over compressionLevels - jobject iteratorObj = env->CallObjectMethod( - jcompressionLevels, rocksdb::ListJni::getIteratorMethod(env)); - while (env->CallBooleanMethod( - iteratorObj, rocksdb::ListJni::getHasNextMethod(env)) == JNI_TRUE) { - // get compression - jobject jcompression_obj = env->CallObjectMethod(iteratorObj, - rocksdb::ListJni::getNextMethod(env)); - jbyte jcompression = env->CallByteMethod(jcompression_obj, - rocksdb::ByteJni::getByteValueMethod(env)); - compressionLevels.push_back(static_cast( - jcompression)); + + jsize len = env->GetArrayLength(jcompressionLevels); + jbyte* jcompressionLevel = env->GetByteArrayElements(jcompressionLevels, NULL); + for(int i = 0; i < len; i++) { + jbyte jcl; + jcl = jcompressionLevel[i]; + compressionLevels.push_back(static_cast(jcl)); } + env->ReleaseByteArrayElements(jcompressionLevels, jcompressionLevel, + JNI_ABORT); + return compressionLevels; } @@ -1103,34 +1101,29 @@ std::vector rocksdb_compression_vector_helper( * Helper method to convert a CompressionType vector to a Java * List. 
*/ -jobject rocksdb_compression_list_helper(JNIEnv* env, +jbyteArray rocksdb_compression_list_helper(JNIEnv* env, std::vector compressionLevels) { - jclass jListClazz = env->FindClass("java/util/ArrayList"); - jmethodID midList = rocksdb::ListJni::getArrayListConstructorMethodId( - env, jListClazz); - jobject jcompressionLevels = env->NewObject(jListClazz, - midList, compressionLevels.size()); - // insert in java list + jbyte jbuf[compressionLevels.size()]; for (std::vector::size_type i = 0; i != compressionLevels.size(); i++) { - jclass jByteClazz = env->FindClass("java/lang/Byte"); - jmethodID midByte = env->GetMethodID(jByteClazz, "", "(B)V"); - jobject obj = env->NewObject(jByteClazz, midByte, - compressionLevels[i]); - env->CallBooleanMethod(jcompressionLevels, - rocksdb::ListJni::getListAddMethodId(env), obj); + jbuf[i] = compressionLevels[i]; } + // insert in java array + jbyteArray jcompressionLevels = env->NewByteArray( + static_cast(compressionLevels.size())); + env->SetByteArrayRegion(jcompressionLevels, 0, + static_cast(compressionLevels.size()), jbuf); return jcompressionLevels; } /* * Class: org_rocksdb_Options * Method: setCompressionPerLevel - * Signature: (JLjava/util/List;)V + * Signature: (J[B)V */ void Java_org_rocksdb_Options_setCompressionPerLevel( JNIEnv* env, jobject jobj, jlong jhandle, - jobject jcompressionLevels) { + jbyteArray jcompressionLevels) { auto* options = reinterpret_cast(jhandle); std::vector compressionLevels = rocksdb_compression_vector_helper(env, jcompressionLevels); @@ -1140,9 +1133,9 @@ void Java_org_rocksdb_Options_setCompressionPerLevel( /* * Class: org_rocksdb_Options * Method: compressionPerLevel - * Signature: (J)Ljava/util/List; + * Signature: (J)[B */ -jobject Java_org_rocksdb_Options_compressionPerLevel( +jbyteArray Java_org_rocksdb_Options_compressionPerLevel( JNIEnv* env, jobject jobj, jlong jhandle) { auto* options = reinterpret_cast(jhandle); return rocksdb_compression_list_helper(env, @@ -2285,11 +2278,11 @@ 
jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType( /* * Class: org_rocksdb_ColumnFamilyOptions * Method: setCompressionPerLevel - * Signature: (JLjava/util/List;)V + * Signature: (J[B)V */ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel( JNIEnv* env, jobject jobj, jlong jhandle, - jobject jcompressionLevels) { + jbyteArray jcompressionLevels) { auto* options = reinterpret_cast(jhandle); std::vector compressionLevels = rocksdb_compression_vector_helper(env, jcompressionLevels); @@ -2299,9 +2292,9 @@ void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel( /* * Class: org_rocksdb_ColumnFamilyOptions * Method: compressionPerLevel - * Signature: (J)Ljava/util/List; + * Signature: (J)[B */ -jobject Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel( +jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel( JNIEnv* env, jobject jobj, jlong jhandle) { auto* options = reinterpret_cast(jhandle); return rocksdb_compression_list_helper(env, diff --git a/java/rocksjni/rocksjni.cc b/java/rocksjni/rocksjni.cc index 5004a27fc..7ef416618 100644 --- a/java/rocksjni/rocksjni.cc +++ b/java/rocksjni/rocksjni.cc @@ -171,25 +171,21 @@ jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J( /* * Class: org_rocksdb_RocksDB * Method: listColumnFamilies - * Signature: (JLjava/lang/String;)Ljava/util/List; + * Signature: (JLjava/lang/String;)[[B */ -jobject Java_org_rocksdb_RocksDB_listColumnFamilies( +jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies( JNIEnv* env, jclass jclazz, jlong jopt_handle, jstring jdb_path) { std::vector column_family_names; - auto opt = reinterpret_cast(jopt_handle); + auto* opt = reinterpret_cast(jopt_handle); const char* db_path = env->GetStringUTFChars(jdb_path, 0); - jobject jvalue_list = nullptr; - rocksdb::Status s = rocksdb::DB::ListColumnFamilies(*opt, db_path, &column_family_names); env->ReleaseStringUTFChars(jdb_path, db_path); - if (s.ok()) { - // Don't reuse class pointer - 
jclass jListClazz = env->FindClass("java/util/ArrayList"); - jmethodID mid = rocksdb::ListJni::getArrayListConstructorMethodId(env, - jListClazz); - jvalue_list = env->NewObject(jListClazz, mid, column_family_names.size()); + jclass jcls_ba = env->FindClass("[B"); + jobjectArray jresults = env->NewObjectArray( + static_cast(column_family_names.size()), jcls_ba, NULL); + if (s.ok()) { for (std::vector::size_type i = 0; i < column_family_names.size(); i++) { jbyteArray jcf_value = @@ -197,11 +193,11 @@ jobject Java_org_rocksdb_RocksDB_listColumnFamilies( env->SetByteArrayRegion( jcf_value, 0, static_cast(column_family_names[i].size()), reinterpret_cast(column_family_names[i].data())); - env->CallBooleanMethod(jvalue_list, - rocksdb::ListJni::getListAddMethodId(env), jcf_value); + env->SetObjectArrayElement(jresults, static_cast(i), jcf_value); + env->DeleteLocalRef(jcf_value); } } - return jvalue_list; + return jresults; } ////////////////////////////////////////////////////////////////////////////// diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index 91457defa..500935cf4 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -218,10 +218,10 @@ public class ColumnFamilyOptions extends RocksObject @Override public ColumnFamilyOptions setCompressionPerLevel( final List compressionLevels) { - final List byteCompressionTypes = new ArrayList<>( - compressionLevels.size()); - for (final CompressionType compressionLevel : compressionLevels) { - byteCompressionTypes.add(compressionLevel.getValue()); + final byte[] byteCompressionTypes = new byte[ + compressionLevels.size()]; + for (int i = 0; i < compressionLevels.size(); i++) { + byteCompressionTypes[i] = compressionLevels.get(i).getValue(); } setCompressionPerLevel(nativeHandle_, byteCompressionTypes); return this; @@ -229,7 +229,7 @@ public class 
ColumnFamilyOptions extends RocksObject @Override public List compressionPerLevel() { - final List byteCompressionTypes = + final byte[] byteCompressionTypes = compressionPerLevel(nativeHandle_); final List compressionLevels = new ArrayList<>(); for (final Byte byteCompressionType : byteCompressionTypes) { @@ -695,8 +695,8 @@ public class ColumnFamilyOptions extends RocksObject private native void setCompressionType(long handle, byte compressionType); private native byte compressionType(long handle); private native void setCompressionPerLevel(long handle, - List compressionLevels); - private native List compressionPerLevel(long handle); + byte[] compressionLevels); + private native byte[] compressionPerLevel(long handle); private native void useFixedLengthPrefixExtractor( long handle, int prefixLength); private native void useCappedPrefixExtractor( diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java index 2c6f2e81f..1bc45ed1a 100644 --- a/java/src/main/java/org/rocksdb/Options.java +++ b/java/src/main/java/org/rocksdb/Options.java @@ -679,10 +679,10 @@ public class Options extends RocksObject @Override public Options setCompressionPerLevel(final List compressionLevels) { - final List byteCompressionTypes = new ArrayList<>( - compressionLevels.size()); - for (final CompressionType compressionLevel : compressionLevels) { - byteCompressionTypes.add(compressionLevel.getValue()); + final byte[] byteCompressionTypes = new byte[ + compressionLevels.size()]; + for (int i = 0; i < compressionLevels.size(); i++) { + byteCompressionTypes[i] = compressionLevels.get(i).getValue(); } setCompressionPerLevel(nativeHandle_, byteCompressionTypes); return this; @@ -690,7 +690,7 @@ public class Options extends RocksObject @Override public List compressionPerLevel() { - final List byteCompressionTypes = + final byte[] byteCompressionTypes = compressionPerLevel(nativeHandle_); final List compressionLevels = new ArrayList<>(); for (final 
Byte byteCompressionType : byteCompressionTypes) { @@ -1206,8 +1206,8 @@ public class Options extends RocksObject private native void setCompressionType(long handle, byte compressionType); private native byte compressionType(long handle); private native void setCompressionPerLevel(long handle, - List compressionLevels); - private native List compressionPerLevel(long handle); + byte[] compressionLevels); + private native byte[] compressionPerLevel(long handle); private native void useFixedLengthPrefixExtractor( long handle, int prefixLength); private native void useCappedPrefixExtractor( diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index fc98ac2b0..143649347 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -377,7 +377,7 @@ public class RocksDB extends RocksObject { */ public static List listColumnFamilies(final Options options, final String path) throws RocksDBException { - return RocksDB.listColumnFamilies(options.nativeHandle_, path); + return Arrays.asList(RocksDB.listColumnFamilies(options.nativeHandle_, path)); } private void storeOptionsInstance(DBOptionsInterface options) { @@ -1734,7 +1734,7 @@ public class RocksDB extends RocksObject { final long[] columnFamilyOptions ) throws RocksDBException; - protected native static List listColumnFamilies( + protected native static byte[][] listColumnFamilies( long optionsHandle, String path) throws RocksDBException; protected native void put( long handle, byte[] key, int keyLen, From c5af85ecadb8149ddb06ae473b47ca783e018eb0 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 4 Feb 2016 14:28:18 +0000 Subject: [PATCH 5/9] Fix a memory leak of Slice objects from org.rocksdb.WBWIRocksIterator#entry1 --- java/rocksjni/comparatorjnicallback.cc | 6 +- java/rocksjni/portal.h | 84 +++---------------- java/rocksjni/write_batch_with_index.cc | 46 ++++++++-- .../main/java/org/rocksdb/AbstractSlice.java | 26 
+++--- .../main/java/org/rocksdb/DirectSlice.java | 21 +++-- .../java/org/rocksdb/RocksMutableObject.java | 43 +++++++--- java/src/main/java/org/rocksdb/Slice.java | 5 +- .../java/org/rocksdb/WBWIRocksIterator.java | 43 +++++++--- 8 files changed, 141 insertions(+), 133 deletions(-) diff --git a/java/rocksjni/comparatorjnicallback.cc b/java/rocksjni/comparatorjnicallback.cc index 1c0317003..57ee0f95c 100644 --- a/java/rocksjni/comparatorjnicallback.cc +++ b/java/rocksjni/comparatorjnicallback.cc @@ -60,8 +60,8 @@ int BaseComparatorJniCallback::Compare(const Slice& a, const Slice& b) const { // performance. mtx_compare->Lock(); - AbstractSliceJni::setHandle(m_env, m_jSliceA, &a); - AbstractSliceJni::setHandle(m_env, m_jSliceB, &b); + AbstractSliceJni::setHandle(m_env, m_jSliceA, &a, JNI_FALSE); + AbstractSliceJni::setHandle(m_env, m_jSliceB, &b, JNI_FALSE); jint result = m_env->CallIntMethod(m_jComparator, m_jCompareMethodId, m_jSliceA, m_jSliceB); @@ -89,7 +89,7 @@ void BaseComparatorJniCallback::FindShortestSeparator( // performance. 
mtx_findShortestSeparator->Lock(); - AbstractSliceJni::setHandle(m_env, m_jSliceLimit, &limit); + AbstractSliceJni::setHandle(m_env, m_jSliceLimit, &limit, JNI_FALSE); jstring jsResultStart = (jstring)m_env->CallObjectMethod(m_jComparator, m_jFindShortestSeparatorMethodId, jsStart, m_jSliceLimit); diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h index ef7df8837..f4ad29af5 100644 --- a/java/rocksjni/portal.h +++ b/java/rocksjni/portal.h @@ -49,25 +49,22 @@ template class RocksDBNativeClass { assert(jclazz != nullptr); return jclazz; } - - // Get the field id of the member variable to store - // the ptr - static jfieldID getHandleFieldID(JNIEnv* env) { - static jfieldID fid = env->GetFieldID( - DERIVED::getJClass(env), "nativeHandle_", "J"); - assert(fid != nullptr); - return fid; - } }; // Native class template for sub-classes of RocksMutableObject template class NativeRocksMutableObject : public RocksDBNativeClass { public: + + static jmethodID getSetNativeHandleMethod(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + DERIVED::getJClass(env), "setNativeHandle", "(JZ)V"); + assert(mid != nullptr); + return mid; + } + // Pass the pointer to the java side. 
- static void setHandle(JNIEnv* env, jobject jdb, PTR ptr) { - env->SetLongField( - jdb, RocksDBNativeClass::getHandleFieldID(env), - reinterpret_cast(ptr)); + static void setHandle(JNIEnv* env, jobject jobj, PTR ptr, jboolean java_owns_handle) { + env->CallVoidMethod(jobj, getSetNativeHandleMethod(env), reinterpret_cast(ptr), java_owns_handle); } }; @@ -647,67 +644,6 @@ class WriteEntryJni { assert(jclazz != nullptr); return jclazz; } - - static void setWriteType(JNIEnv* env, jobject jwrite_entry, - WriteType write_type) { - jobject jwrite_type; - switch (write_type) { - case kPutRecord: - jwrite_type = WriteTypeJni::PUT(env); - break; - - case kMergeRecord: - jwrite_type = WriteTypeJni::MERGE(env); - break; - - case kDeleteRecord: - jwrite_type = WriteTypeJni::DELETE(env); - break; - - case kLogDataRecord: - jwrite_type = WriteTypeJni::LOG(env); - break; - - default: - jwrite_type = nullptr; - } - assert(jwrite_type != nullptr); - env->SetObjectField(jwrite_entry, getWriteTypeField(env), jwrite_type); - } - - static void setKey(JNIEnv* env, jobject jwrite_entry, - const rocksdb::Slice* slice) { - jobject jkey = env->GetObjectField(jwrite_entry, getKeyField(env)); - AbstractSliceJni::setHandle(env, jkey, slice); - } - - static void setValue(JNIEnv* env, jobject jwrite_entry, - const rocksdb::Slice* slice) { - jobject jvalue = env->GetObjectField(jwrite_entry, getValueField(env)); - AbstractSliceJni::setHandle(env, jvalue, slice); - } - - private: - static jfieldID getWriteTypeField(JNIEnv* env) { - static jfieldID fid = env->GetFieldID( - getJClass(env), "type", "Lorg/rocksdb/WBWIRocksIterator$WriteType;"); - assert(fid != nullptr); - return fid; - } - - static jfieldID getKeyField(JNIEnv* env) { - static jfieldID fid = env->GetFieldID( - getJClass(env), "key", "Lorg/rocksdb/DirectSlice;"); - assert(fid != nullptr); - return fid; - } - - static jfieldID getValueField(JNIEnv* env) { - static jfieldID fid = env->GetFieldID( - getJClass(env), "value", 
"Lorg/rocksdb/DirectSlice;"); - assert(fid != nullptr); - return fid; - } }; class InfoLogLevelJni { diff --git a/java/rocksjni/write_batch_with_index.cc b/java/rocksjni/write_batch_with_index.cc index ade91b63a..1c7b6711c 100644 --- a/java/rocksjni/write_batch_with_index.cc +++ b/java/rocksjni/write_batch_with_index.cc @@ -353,27 +353,57 @@ void Java_org_rocksdb_WBWIRocksIterator_status0( /* * Class: org_rocksdb_WBWIRocksIterator * Method: entry1 - * Signature: (JLorg/rocksdb/WBWIRocksIterator/WriteEntry;)V + * Signature: (J)[J */ -void Java_org_rocksdb_WBWIRocksIterator_entry1( - JNIEnv* env, jobject jobj, jlong handle, jobject jwrite_entry) { +jlongArray Java_org_rocksdb_WBWIRocksIterator_entry1( + JNIEnv* env, jobject jobj, jlong handle) { auto* it = reinterpret_cast(handle); const rocksdb::WriteEntry& we = it->Entry(); - jobject jwe = rocksdb::WBWIRocksIteratorJni::getWriteEntry(env, jobj); - rocksdb::WriteEntryJni::setWriteType(env, jwe, we.type); + jlong results[3]; + + //set the type of the write entry + switch (we.type) { + case rocksdb::kPutRecord: + results[0] = 0x1; + break; + + case rocksdb::kMergeRecord: + results[0] = 0x2; + break; + + case rocksdb::kDeleteRecord: + results[0] = 0x4; + break; + + case rocksdb::kLogDataRecord: + results[0] = 0x8; + break; + + default: + results[0] = 0x0; + } + + //TODO(AR) do we leak buf and value_buf? 
+ + //set the pointer to the key slice char* buf = new char[we.key.size()]; memcpy(buf, we.key.data(), we.key.size()); auto* key_slice = new rocksdb::Slice(buf, we.key.size()); - rocksdb::WriteEntryJni::setKey(env, jwe, key_slice); + results[1] = reinterpret_cast(key_slice); + //set the pointer to the value slice if (we.type == rocksdb::kDeleteRecord || we.type == rocksdb::kLogDataRecord) { // set native handle of value slice to null if no value available - rocksdb::WriteEntryJni::setValue(env, jwe, nullptr); + results[2] = 0; } else { char* value_buf = new char[we.value.size()]; memcpy(value_buf, we.value.data(), we.value.size()); auto* value_slice = new rocksdb::Slice(value_buf, we.value.size()); - rocksdb::WriteEntryJni::setValue(env, jwe, value_slice); + results[2] = reinterpret_cast(value_slice); } + + jlongArray jresults = env->NewLongArray(3); + env->SetLongArrayRegion(jresults, 0, 3, results); + return jresults; } diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java index b9e67d472..c5fd2f58c 100644 --- a/java/src/main/java/org/rocksdb/AbstractSlice.java +++ b/java/src/main/java/org/rocksdb/AbstractSlice.java @@ -42,8 +42,7 @@ abstract class AbstractSlice extends RocksMutableObject { * @see org.rocksdb.AbstractSlice#data0(long) */ public T data() { - assert (isOwningHandle()); - return data0(nativeHandle_); + return data0(getNativeHandle()); } /** @@ -64,8 +63,7 @@ abstract class AbstractSlice extends RocksMutableObject { * @return The length in bytes. */ public int size() { - assert (isOwningHandle()); - return size0(nativeHandle_); + return size0(getNativeHandle()); } /** @@ -75,8 +73,7 @@ abstract class AbstractSlice extends RocksMutableObject { * @return true if there is no data, false otherwise. 
*/ public boolean empty() { - assert (isOwningHandle()); - return empty0(nativeHandle_); + return empty0(getNativeHandle()); } /** @@ -88,8 +85,7 @@ abstract class AbstractSlice extends RocksMutableObject { * @return The string representation of the data. */ public String toString(final boolean hex) { - assert (isOwningHandle()); - return toString0(nativeHandle_, hex); + return toString0(getNativeHandle(), hex); } @Override @@ -109,8 +105,15 @@ abstract class AbstractSlice extends RocksMutableObject { */ public int compare(final AbstractSlice other) { assert (other != null); - assert (isOwningHandle()); - return compare0(nativeHandle_, other.nativeHandle_); + if(!isOwningHandle()) { + return other.isOwningHandle() ? -1 : 0; + } else { + if(!other.isOwningHandle()) { + return 1; + } else { + return compare0(getNativeHandle(), other.getNativeHandle()); + } + } } @Override @@ -149,8 +152,7 @@ abstract class AbstractSlice extends RocksMutableObject { */ public boolean startsWith(final AbstractSlice prefix) { if (prefix != null) { - assert (isOwningHandle()); - return startsWith0(nativeHandle_, prefix.nativeHandle_); + return startsWith0(getNativeHandle(), prefix.getNativeHandle()); } else { return false; } diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java index 9f8269105..8f96eb49f 100644 --- a/java/src/main/java/org/rocksdb/DirectSlice.java +++ b/java/src/main/java/org/rocksdb/DirectSlice.java @@ -24,10 +24,11 @@ public class DirectSlice extends AbstractSlice { * at creation time. * * Note: You should be aware that it is intentionally marked as - * package-private. This is so that developers cannot construct their own default - * DirectSlice objects (at present). As developers cannot construct their own - * DirectSlice objects through this, they are not creating underlying C++ - * DirectSlice objects, and so there is nothing to free (dispose) from Java. + * package-private. 
This is so that developers cannot construct their own + * default DirectSlice objects (at present). As developers cannot construct + * their own DirectSlice objects through this, they are not creating + * underlying C++ DirectSlice objects, and so there is nothing to free + * (dispose) from Java. */ DirectSlice() { super(); @@ -68,7 +69,8 @@ public class DirectSlice extends AbstractSlice { } private static ByteBuffer ensureDirect(final ByteBuffer data) { - //TODO(AR) consider throwing a checked exception, as if it's not direct this can SIGSEGV + // TODO(AR) consider throwing a checked exception, as if it's not direct + // this can SIGSEGV assert(data.isDirect()); return data; } @@ -82,16 +84,14 @@ public class DirectSlice extends AbstractSlice { * @return the requested byte */ public byte get(int offset) { - assert (isOwningHandle()); - return get0(nativeHandle_, offset); + return get0(getNativeHandle(), offset); } /** * Clears the backing slice */ public void clear() { - assert (isOwningHandle()); - clear0(nativeHandle_); + clear0(getNativeHandle()); } /** @@ -102,8 +102,7 @@ public class DirectSlice extends AbstractSlice { * @param n The number of bytes to drop */ public void removePrefix(final int n) { - assert (isOwningHandle()); - removePrefix0(nativeHandle_, n); + removePrefix0(getNativeHandle(), n); } private native static long createNewDirectSlice0(final ByteBuffer data, diff --git a/java/src/main/java/org/rocksdb/RocksMutableObject.java b/java/src/main/java/org/rocksdb/RocksMutableObject.java index f4ca3565d..823711742 100644 --- a/java/src/main/java/org/rocksdb/RocksMutableObject.java +++ b/java/src/main/java/org/rocksdb/RocksMutableObject.java @@ -1,30 +1,47 @@ package org.rocksdb; -public abstract class RocksMutableObject extends NativeReference { +public abstract class RocksMutableObject /*extends NativeReference*/ { - private final boolean shouldOwnHandle; - protected volatile long nativeHandle_; + private long nativeHandle_; + private boolean 
owningHandle_; protected RocksMutableObject() { - super(false); - this.shouldOwnHandle = false; } protected RocksMutableObject(final long nativeHandle) { - super(true); - this.shouldOwnHandle = true; this.nativeHandle_ = nativeHandle; + this.owningHandle_ = true; + } + + public synchronized void setNativeHandle(final long nativeHandle, final boolean owningNativeHandle) { + this.nativeHandle_ = nativeHandle; + this.owningHandle_ = owningNativeHandle; + } + + //@Override + protected synchronized boolean isOwningHandle() { + return this.owningHandle_; + } + + protected synchronized long getNativeHandle() { + assert(this.nativeHandle_ != 0); + return this.nativeHandle_; + } + + public synchronized final void dispose() { + if(isOwningHandle()) { + disposeInternal(); + this.owningHandle_ = false; + this.nativeHandle_ = 0; + } } @Override - public boolean isOwningHandle() { - return ((!shouldOwnHandle) || super.isOwningHandle()) && nativeHandle_ != 0; + protected void finalize() throws Throwable { + dispose(); + super.finalize(); } - /** - * Deletes underlying C++ object pointer. 
- */ - @Override protected void disposeInternal() { disposeInternal(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/Slice.java b/java/src/main/java/org/rocksdb/Slice.java index ae0815392..cbb9742a3 100644 --- a/java/src/main/java/org/rocksdb/Slice.java +++ b/java/src/main/java/org/rocksdb/Slice.java @@ -73,8 +73,9 @@ public class Slice extends AbstractSlice { */ @Override protected void disposeInternal() { - disposeInternalBuf(nativeHandle_); - super.disposeInternal(); + final long nativeHandle = getNativeHandle(); + disposeInternalBuf(nativeHandle); + super.disposeInternal(nativeHandle); } @Override protected final native byte[] data0(long handle); diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java index 6d06c8bd3..7eb019d0b 100644 --- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java +++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java @@ -5,10 +5,12 @@ package org.rocksdb; -public class WBWIRocksIterator extends AbstractRocksIterator { +public class WBWIRocksIterator + extends AbstractRocksIterator { private final WriteEntry entry = new WriteEntry(); - protected WBWIRocksIterator(final WriteBatchWithIndex wbwi, final long nativeHandle) { + protected WBWIRocksIterator(final WriteBatchWithIndex wbwi, + final long nativeHandle) { super(wbwi, nativeHandle); } @@ -20,12 +22,20 @@ public class WBWIRocksIterator extends AbstractRocksIteratortry-with-resources + * statement */ @Override + @Deprecated protected void finalize() throws Throwable { + if(isOwningHandle()) { + //TODO(AR) log a warning message... 
developer should have called close() + } dispose(); super.finalize(); } diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java index 606c5d951..22f1d359e 100644 --- a/java/src/main/java/org/rocksdb/BackupEngine.java +++ b/java/src/main/java/org/rocksdb/BackupEngine.java @@ -175,8 +175,10 @@ public class BackupEngine extends RocksObject implements AutoCloseable { /** * Restore the database from the latest backup * - * @param dbDir The directory to restore the backup to, i.e. where your database is - * @param walDir The location of the log files for your database, often the same as dbDir + * @param dbDir The directory to restore the backup to, i.e. where your + * database is + * @param walDir The location of the log files for your database, often the + * same as dbDir * @param restoreOptions Options for controlling the restore * * @throws RocksDBException thrown if the database could not be restored @@ -189,16 +191,6 @@ public class BackupEngine extends RocksObject implements AutoCloseable { restoreOptions.nativeHandle_); } - /** - * Close the Backup Engine - * - * @throws RocksDBException thrown if the backup engine could not be closed - */ - @Override - public void close() throws RocksDBException { - dispose(); - } - private native static long open(final long env, final long backupableDbOptions) throws RocksDBException; diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index 143649347..ba5e18b47 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -48,7 +48,8 @@ public class RocksDB extends RocksObject { } catch (IOException e) { - throw new RuntimeException("Unable to load the RocksDB shared library" + e); + throw new RuntimeException("Unable to load the RocksDB shared library" + + e); } } @@ -78,7 +79,8 @@ public class RocksDB extends RocksObject { UnsatisfiedLinkError err = null; for (String path 
: paths) { try { - System.load(path + "/" + Environment.getJniLibraryFileName("rocksdbjni")); + System.load(path + "/" + + Environment.getJniLibraryFileName("rocksdbjni")); success = true; break; } catch (UnsatisfiedLinkError e) { @@ -116,8 +118,8 @@ public class RocksDB extends RocksObject { * the path to the database using the specified options and db path and a list * of column family names. *

- * If opened in read write mode every existing column family name must be passed - * within the list to this method.

+ * If opened in read write mode every existing column family name must be + * passed within the list to this method.

*

* If opened in read-only mode only a subset of existing column families must * be passed to this method.

@@ -189,8 +191,8 @@ public class RocksDB extends RocksObject { * the path to the database using the specified options and db path and a list * of column family names. *

- * If opened in read write mode every existing column family name must be passed - * within the list to this method.

+ * If opened in read write mode every existing column family name must be + * passed within the list to this method.

*

* If opened in read-only mode only a subset of existing column families must * be passed to this method.

@@ -204,7 +206,8 @@ public class RocksDB extends RocksObject { * with new Options instance as underlying native statistics instance does not * use any locks to prevent concurrent updates.

*

- * ColumnFamily handles are disposed when the RocksDB instance is disposed.

+ * ColumnFamily handles are disposed when the RocksDB instance is disposed. + *

* * @param options {@link org.rocksdb.DBOptions} instance. * @param path the path to the rocksdb. @@ -227,12 +230,14 @@ public class RocksDB extends RocksObject { final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; for (int i = 0; i < columnFamilyDescriptors.size(); i++) { - final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors.get(i); + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors + .get(i); cfNames[i] = cfDescriptor.columnFamilyName(); cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_; } - final long[] handles = open(options.nativeHandle_, path, cfNames, cfOptionHandles); + final long[] handles = open(options.nativeHandle_, path, cfNames, + cfOptionHandles); final RocksDB db = new RocksDB(handles[0]); db.storeOptionsInstance(options); @@ -349,12 +354,14 @@ public class RocksDB extends RocksObject { final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; for (int i = 0; i < columnFamilyDescriptors.size(); i++) { - final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors.get(i); + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors + .get(i); cfNames[i] = cfDescriptor.columnFamilyName(); cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_; } - final long[] handles = openROnly(options.nativeHandle_, path, cfNames, cfOptionHandles); + final long[] handles = openROnly(options.nativeHandle_, path, cfNames, + cfOptionHandles); final RocksDB db = new RocksDB(handles[0]); db.storeOptionsInstance(options); @@ -377,21 +384,14 @@ public class RocksDB extends RocksObject { */ public static List listColumnFamilies(final Options options, final String path) throws RocksDBException { - return Arrays.asList(RocksDB.listColumnFamilies(options.nativeHandle_, path)); + return 
Arrays.asList(RocksDB.listColumnFamilies(options.nativeHandle_, + path)); } private void storeOptionsInstance(DBOptionsInterface options) { options_ = options; } - /** - * Close the RocksDB instance. - * This function is equivalent to dispose(). - */ - public void close() { - dispose(); - } - /** * Set the database entry for "key" to "value". * @@ -401,7 +401,8 @@ public class RocksDB extends RocksObject { * @throws RocksDBException thrown if error happens in underlying * native library. */ - public void put(final byte[] key, final byte[] value) throws RocksDBException { + public void put(final byte[] key, final byte[] value) + throws RocksDBException { put(nativeHandle_, key, key.length, value, value.length); } @@ -460,8 +461,8 @@ public class RocksDB extends RocksObject { public void put(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts, final byte[] key, final byte[] value) throws RocksDBException { - put(nativeHandle_, writeOpts.nativeHandle_, key, key.length, value, value.length, - columnFamilyHandle.nativeHandle_); + put(nativeHandle_, writeOpts.nativeHandle_, key, key.length, value, + value.length, columnFamilyHandle.nativeHandle_); } /** @@ -495,8 +496,8 @@ public class RocksDB extends RocksObject { */ public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final StringBuffer value){ - return keyMayExist(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_, - value); + return keyMayExist(nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_, value); } /** @@ -578,7 +579,8 @@ public class RocksDB extends RocksObject { * @throws RocksDBException thrown if error happens in underlying * native library. 
*/ - public void merge(final byte[] key, final byte[] value) throws RocksDBException { + public void merge(final byte[] key, final byte[] value) + throws RocksDBException { merge(nativeHandle_, key, key.length, value, value.length); } @@ -753,9 +755,10 @@ public class RocksDB extends RocksObject { * @throws RocksDBException thrown if error happens in underlying * native library. */ - public byte[] get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key) - throws RocksDBException { - return get(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); + public byte[] get(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { + return get(nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); } /** @@ -811,7 +814,8 @@ public class RocksDB extends RocksObject { throws RocksDBException { assert(keys.size() != 0); - final byte[][] values = multiGet(nativeHandle_, keys.toArray(new byte[keys.size()][])); + final byte[][] values = multiGet(nativeHandle_, + keys.toArray(new byte[keys.size()][])); Map keyValueMap = new HashMap<>(); for(int i = 0; i < values.length; i++) { @@ -843,8 +847,10 @@ public class RocksDB extends RocksObject { * @throws IllegalArgumentException thrown if the size of passed keys is not * equal to the amount of passed column family handles. */ - public Map multiGet(final List columnFamilyHandleList, - final List keys) throws RocksDBException, IllegalArgumentException { + public Map multiGet( + final List columnFamilyHandleList, + final List keys) throws RocksDBException, + IllegalArgumentException { assert(keys.size() != 0); // Check if key size equals cfList size. If not a exception must be // thrown. If not a Segmentation fault happens. 
@@ -856,8 +862,8 @@ public class RocksDB extends RocksObject { for (int i = 0; i < columnFamilyHandleList.size(); i++) { cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; } - final byte[][] values = multiGet(nativeHandle_, keys.toArray(new byte[keys.size()][]), - cfHandles); + final byte[][] values = multiGet(nativeHandle_, + keys.toArray(new byte[keys.size()][]), cfHandles); Map keyValueMap = new HashMap<>(); for(int i = 0; i < values.length; i++) { @@ -884,7 +890,8 @@ public class RocksDB extends RocksObject { final List keys) throws RocksDBException { assert(keys.size() != 0); - final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_, keys.toArray(new byte[keys.size()][])); + final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_, + keys.toArray(new byte[keys.size()][])); Map keyValueMap = new HashMap<>(); for(int i = 0; i < values.length; i++) { @@ -931,7 +938,8 @@ public class RocksDB extends RocksObject { for (int i = 0; i < columnFamilyHandleList.size(); i++) { cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; } - final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_, keys.toArray(new byte[keys.size()][]), cfHandles); + final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_, + keys.toArray(new byte[keys.size()][]), cfHandles); Map keyValueMap = new HashMap<>(); for(int i = 0; i < values.length; i++) { @@ -970,8 +978,8 @@ public class RocksDB extends RocksObject { * @throws RocksDBException thrown if error happens in underlying * native library. */ - public void remove(final ColumnFamilyHandle columnFamilyHandle, final byte[] key) - throws RocksDBException { + public void remove(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { remove(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); } @@ -1021,8 +1029,9 @@ public class RocksDB extends RocksObject { * *

Valid property names include: *

    - *
  • "rocksdb.num-files-at-level<N>" - return the number of files at level <N>, - * where <N> is an ASCII representation of a level number (e.g. "0").
  • + *
  • "rocksdb.num-files-at-level<N>" - return the number of files at + * level <N>, where <N> is an ASCII representation of a level + * number (e.g. "0").
  • *
  • "rocksdb.stats" - returns a multi-line string that describes statistics * about the internal operation of the DB.
  • *
  • "rocksdb.sstables" - returns a multi-line string that describes all @@ -1039,8 +1048,8 @@ public class RocksDB extends RocksObject { */ public String getProperty(final ColumnFamilyHandle columnFamilyHandle, final String property) throws RocksDBException { - return getProperty0(nativeHandle_, columnFamilyHandle.nativeHandle_, property, - property.length()); + return getProperty0(nativeHandle_, columnFamilyHandle.nativeHandle_, + property, property.length()); } /** @@ -1051,8 +1060,9 @@ public class RocksDB extends RocksObject { * *

    Valid property names include: *

      - *
    • "rocksdb.num-files-at-level<N>" - return the number of files at level <N>, - * where <N> is an ASCII representation of a level number (e.g. "0").
    • + *
    • "rocksdb.num-files-at-level<N>" - return the number of files at + * level <N>, where <N> is an ASCII representation of a level + * number (e.g. "0").
    • *
    • "rocksdb.stats" - returns a multi-line string that describes statistics * about the internal operation of the DB.
    • *
    • "rocksdb.sstables" - returns a multi-line string that describes all @@ -1070,8 +1080,8 @@ public class RocksDB extends RocksObject { } /** - *

      Similar to GetProperty(), but only works for a subset of properties whose - * return value is a numerical value. Return the value as long.

      + *

      Similar to GetProperty(), but only works for a subset of properties + * whose return value is a numerical value. Return the value as long.

      * *

      Note: As the returned property is of type * {@code uint64_t} on C++ side the returning value can be negative @@ -1096,8 +1106,8 @@ public class RocksDB extends RocksObject { } /** - *

      Similar to GetProperty(), but only works for a subset of properties whose - * return value is a numerical value. Return the value as long.

      + *

      Similar to GetProperty(), but only works for a subset of properties + * whose return value is a numerical value. Return the value as long.

      * *

      Note: As the returned property is of type * {@code uint64_t} on C++ side the returning value can be negative @@ -1121,8 +1131,8 @@ public class RocksDB extends RocksObject { */ public long getLongProperty(final ColumnFamilyHandle columnFamilyHandle, final String property) throws RocksDBException { - return getLongProperty(nativeHandle_, columnFamilyHandle.nativeHandle_, property, - property.length()); + return getLongProperty(nativeHandle_, columnFamilyHandle.nativeHandle_, + property, property.length()); } /** @@ -1204,7 +1214,8 @@ public class RocksDB extends RocksObject { * instance * @return instance of iterator object. */ - public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle) { + public RocksIterator newIterator( + final ColumnFamilyHandle columnFamilyHandle) { return new RocksIterator(this, iteratorCF(nativeHandle_, columnFamilyHandle.nativeHandle_)); } @@ -1244,7 +1255,8 @@ public class RocksDB extends RocksObject { * native library. */ public List newIterators( - final List columnFamilyHandleList) throws RocksDBException { + final List columnFamilyHandleList) + throws RocksDBException { return newIterators(columnFamilyHandleList, new ReadOptions()); } @@ -1274,7 +1286,8 @@ public class RocksDB extends RocksObject { final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles, readOptions.nativeHandle_); - final List iterators = new ArrayList<>(columnFamilyHandleList.size()); + final List iterators = new ArrayList<>( + columnFamilyHandleList.size()); for (int i=0; i Date: Thu, 4 Feb 2016 18:43:16 +0000 Subject: [PATCH 8/9] Refactored tests to use try-with-resources --- .../main/java/RocksDBColumnFamilySample.java | 82 +- java/samples/src/main/java/RocksDBSample.java | 514 ++++---- .../org/rocksdb/AbstractComparatorTest.java | 196 ++-- .../java/org/rocksdb/BackupEngineTest.java | 229 ++-- .../org/rocksdb/BackupableDBOptionsTest.java | 230 ++-- .../java/org/rocksdb/BackupableDBTest.java | 457 +++----- 
.../rocksdb/BlockBasedTableConfigTest.java | 24 +- .../test/java/org/rocksdb/CheckPointTest.java | 91 +- .../org/rocksdb/ColumnFamilyOptionsTest.java | 459 ++------ .../java/org/rocksdb/ColumnFamilyTest.java | 1043 +++++++---------- .../org/rocksdb/ComparatorOptionsTest.java | 9 +- .../test/java/org/rocksdb/ComparatorTest.java | 213 ++-- .../org/rocksdb/CompressionOptionsTest.java | 5 +- .../test/java/org/rocksdb/DBOptionsTest.java | 410 ++----- .../java/org/rocksdb/DirectSliceTest.java | 70 +- .../src/test/java/org/rocksdb/FilterTest.java | 40 +- java/src/test/java/org/rocksdb/FlushTest.java | 56 +- .../java/org/rocksdb/InfoLogLevelTest.java | 66 +- .../java/org/rocksdb/KeyMayExistTest.java | 110 +- .../src/test/java/org/rocksdb/LoggerTest.java | 256 ++-- .../test/java/org/rocksdb/MemTableTest.java | 34 +- java/src/test/java/org/rocksdb/MergeTest.java | 351 +++--- .../java/org/rocksdb/MixedOptionsTest.java | 57 +- .../org/rocksdb/NativeLibraryLoaderTest.java | 2 +- .../test/java/org/rocksdb/OptionsTest.java | 743 +++--------- .../org/rocksdb/PlainTableConfigTest.java | 10 +- .../org/rocksdb/PlatformRandomHelper.java | 2 +- .../test/java/org/rocksdb/ReadOnlyTest.java | 488 ++++---- .../java/org/rocksdb/ReadOptionsTest.java | 128 +- .../test/java/org/rocksdb/RocksDBTest.java | 835 ++++++------- .../test/java/org/rocksdb/RocksEnvTest.java | 35 +- .../java/org/rocksdb/RocksIteratorTest.java | 68 +- .../java/org/rocksdb/RocksMemEnvTest.java | 160 +-- .../java/org/rocksdb/RocksMemoryResource.java | 4 + java/src/test/java/org/rocksdb/SliceTest.java | 68 +- .../test/java/org/rocksdb/SnapshotTest.java | 276 ++--- .../org/rocksdb/StatisticsCollectorTest.java | 28 +- .../rocksdb/TransactionLogIteratorTest.java | 208 ++-- java/src/test/java/org/rocksdb/TtlDBTest.java | 154 +-- .../org/rocksdb/WriteBatchHandlerTest.java | 81 +- .../test/java/org/rocksdb/WriteBatchTest.java | 130 +- .../org/rocksdb/WriteBatchWithIndexTest.java | 210 ++-- 
.../java/org/rocksdb/WriteOptionsTest.java | 21 +- 43 files changed, 3284 insertions(+), 5369 deletions(-) diff --git a/java/samples/src/main/java/RocksDBColumnFamilySample.java b/java/samples/src/main/java/RocksDBColumnFamilySample.java index 8d682928c..7dd0de9cc 100644 --- a/java/samples/src/main/java/RocksDBColumnFamilySample.java +++ b/java/samples/src/main/java/RocksDBColumnFamilySample.java @@ -22,73 +22,57 @@ public class RocksDBColumnFamilySample { String db_path = args[0]; System.out.println("RocksDBColumnFamilySample"); - RocksDB db = null; - Options options = null; - ColumnFamilyHandle columnFamilyHandle = null; - WriteBatch wb = null; - try { - options = new Options().setCreateIfMissing(true); - db = RocksDB.open(options, db_path); + try(final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, db_path)) { + assert(db != null); // create column family - columnFamilyHandle = db.createColumnFamily( + try(final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily( new ColumnFamilyDescriptor("new_cf".getBytes(), - new ColumnFamilyOptions())); - assert(columnFamilyHandle != null); - - } finally { - if (columnFamilyHandle != null) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - db = null; - } - if (options != null) { - options.dispose(); + new ColumnFamilyOptions()))) { + assert (columnFamilyHandle != null); } } // open DB with two column families - List columnFamilyDescriptors = new ArrayList<>(); + final List columnFamilyDescriptors = + new ArrayList<>(); // have to open default column family columnFamilyDescriptors.add(new ColumnFamilyDescriptor( RocksDB.DEFAULT_COLUMN_FAMILY, new ColumnFamilyOptions())); // open the new one, too columnFamilyDescriptors.add(new ColumnFamilyDescriptor( "new_cf".getBytes(), new ColumnFamilyOptions())); - List columnFamilyHandles = new ArrayList<>(); - try { - db = RocksDB.open(new DBOptions(), db_path, - columnFamilyDescriptors, 
columnFamilyHandles); + final List columnFamilyHandles = new ArrayList<>(); + try(final DBOptions options = new DBOptions(); + final RocksDB db = RocksDB.open(options, db_path, + columnFamilyDescriptors, columnFamilyHandles)) { assert(db != null); - // put and get from non-default column family - db.put(columnFamilyHandles.get(0), new WriteOptions(), - "key".getBytes(), "value".getBytes()); - String value = new String(db.get(columnFamilyHandles.get(0), - "key".getBytes())); + try { + // put and get from non-default column family + db.put(columnFamilyHandles.get(0), new WriteOptions(), + "key".getBytes(), "value".getBytes()); + String value = new String(db.get(columnFamilyHandles.get(0), + "key".getBytes())); - // atomic write - wb = new WriteBatch(); - wb.put(columnFamilyHandles.get(0), "key2".getBytes(), "value2".getBytes()); - wb.put(columnFamilyHandles.get(1), "key3".getBytes(), "value3".getBytes()); - wb.remove(columnFamilyHandles.get(0), "key".getBytes()); - db.write(new WriteOptions(), wb); + // atomic write + try (final WriteBatch wb = new WriteBatch()) { + wb.put(columnFamilyHandles.get(0), "key2".getBytes(), + "value2".getBytes()); + wb.put(columnFamilyHandles.get(1), "key3".getBytes(), + "value3".getBytes()); + wb.remove(columnFamilyHandles.get(0), "key".getBytes()); + db.write(new WriteOptions(), wb); + } - // drop column family - db.dropColumnFamily(columnFamilyHandles.get(1)); - - } finally { - for (ColumnFamilyHandle handle : columnFamilyHandles){ - handle.dispose(); - } - if (db != null) { - db.close(); - } - if (wb != null) { - wb.dispose(); + // drop column family + db.dropColumnFamily(columnFamilyHandles.get(1)); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } } } } diff --git a/java/samples/src/main/java/RocksDBSample.java b/java/samples/src/main/java/RocksDBSample.java index 3ac17777d..de5bc26d5 100644 --- a/java/samples/src/main/java/RocksDBSample.java +++ 
b/java/samples/src/main/java/RocksDBSample.java @@ -8,8 +8,10 @@ import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.ArrayList; + import org.rocksdb.*; import org.rocksdb.util.SizeUnit; + import java.io.IOException; public class RocksDBSample { @@ -26,287 +28,273 @@ public class RocksDBSample { String db_path_not_found = db_path + "_not_found"; System.out.println("RocksDBSample"); - RocksDB db = null; - Options options = new Options(); - try { - db = RocksDB.open(options, db_path_not_found); - assert(false); - } catch (RocksDBException e) { - System.out.format("caught the expceted exception -- %s\n", e); - assert(db == null); - } + try (final Options options = new Options(); + final Filter bloomFilter = new BloomFilter(10); + final ReadOptions readOptions = new ReadOptions() + .setFillCache(false)) { - try { - options.setCreateIfMissing(true) - .createStatistics() - .setWriteBufferSize(8 * SizeUnit.KB) - .setMaxWriteBufferNumber(3) - .setMaxBackgroundCompactions(10) - .setCompressionType(CompressionType.SNAPPY_COMPRESSION) - .setCompactionStyle(CompactionStyle.UNIVERSAL); - } catch (IllegalArgumentException e) { - assert(false); - } - - Statistics stats = options.statisticsPtr(); - - assert(options.createIfMissing() == true); - assert(options.writeBufferSize() == 8 * SizeUnit.KB); - assert(options.maxWriteBufferNumber() == 3); - assert(options.maxBackgroundCompactions() == 10); - assert(options.compressionType() == CompressionType.SNAPPY_COMPRESSION); - assert(options.compactionStyle() == CompactionStyle.UNIVERSAL); - - assert(options.memTableFactoryName().equals("SkipListFactory")); - options.setMemTableConfig( - new HashSkipListMemTableConfig() - .setHeight(4) - .setBranchingFactor(4) - .setBucketCount(2000000)); - assert(options.memTableFactoryName().equals("HashSkipListRepFactory")); - - options.setMemTableConfig( - new HashLinkedListMemTableConfig() - .setBucketCount(100000)); - 
assert(options.memTableFactoryName().equals("HashLinkedListRepFactory")); - - options.setMemTableConfig( - new VectorMemTableConfig().setReservedSize(10000)); - assert(options.memTableFactoryName().equals("VectorRepFactory")); - - options.setMemTableConfig(new SkipListMemTableConfig()); - assert(options.memTableFactoryName().equals("SkipListFactory")); - - options.setTableFormatConfig(new PlainTableConfig()); - // Plain-Table requires mmap read - options.setAllowMmapReads(true); - assert(options.tableFactoryName().equals("PlainTable")); - - options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000, - 10000, 10)); - options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000)); - - - Filter bloomFilter = new BloomFilter(10); - BlockBasedTableConfig table_options = new BlockBasedTableConfig(); - table_options.setBlockCacheSize(64 * SizeUnit.KB) - .setFilter(bloomFilter) - .setCacheNumShardBits(6) - .setBlockSizeDeviation(5) - .setBlockRestartInterval(10) - .setCacheIndexAndFilterBlocks(true) - .setHashIndexAllowCollision(false) - .setBlockCacheCompressedSize(64 * SizeUnit.KB) - .setBlockCacheCompressedNumShardBits(10); - - assert(table_options.blockCacheSize() == 64 * SizeUnit.KB); - assert(table_options.cacheNumShardBits() == 6); - assert(table_options.blockSizeDeviation() == 5); - assert(table_options.blockRestartInterval() == 10); - assert(table_options.cacheIndexAndFilterBlocks() == true); - assert(table_options.hashIndexAllowCollision() == false); - assert(table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB); - assert(table_options.blockCacheCompressedNumShardBits() == 10); - - options.setTableFormatConfig(table_options); - assert(options.tableFactoryName().equals("BlockBasedTable")); - - try { - db = RocksDB.open(options, db_path); - db.put("hello".getBytes(), "world".getBytes()); - byte[] value = db.get("hello".getBytes()); - assert("world".equals(new String(value))); - String str = db.getProperty("rocksdb.stats"); - assert(str != null 
&& !str.equals("")); - } catch (RocksDBException e) { - System.out.format("[ERROR] caught the unexpceted exception -- %s\n", e); - assert(db == null); - assert(false); - } - // be sure to release the c++ pointer - db.close(); - - ReadOptions readOptions = new ReadOptions(); - readOptions.setFillCache(false); - - try { - db = RocksDB.open(options, db_path); - db.put("hello".getBytes(), "world".getBytes()); - byte[] value = db.get("hello".getBytes()); - System.out.format("Get('hello') = %s\n", - new String(value)); - - for (int i = 1; i <= 9; ++i) { - for (int j = 1; j <= 9; ++j) { - db.put(String.format("%dx%d", i, j).getBytes(), - String.format("%d", i * j).getBytes()); - } + try (final RocksDB db = RocksDB.open(options, db_path_not_found)) { + assert (false); + } catch (RocksDBException e) { + System.out.format("caught the expected exception -- %s\n", e); } - for (int i = 1; i <= 9; ++i) { - for (int j = 1; j <= 9; ++j) { - System.out.format("%s ", new String(db.get( - String.format("%dx%d", i, j).getBytes()))); - } - System.out.println(""); + try { + options.setCreateIfMissing(true) + .createStatistics() + .setWriteBufferSize(8 * SizeUnit.KB) + .setMaxWriteBufferNumber(3) + .setMaxBackgroundCompactions(10) + .setCompressionType(CompressionType.SNAPPY_COMPRESSION) + .setCompactionStyle(CompactionStyle.UNIVERSAL); + } catch (IllegalArgumentException e) { + assert (false); } - // write batch test - WriteOptions writeOpt = new WriteOptions(); - for (int i = 10; i <= 19; ++i) { - WriteBatch batch = new WriteBatch(); - for (int j = 10; j <= 19; ++j) { - batch.put(String.format("%dx%d", i, j).getBytes(), + Statistics stats = options.statisticsPtr(); + + assert (options.createIfMissing() == true); + assert (options.writeBufferSize() == 8 * SizeUnit.KB); + assert (options.maxWriteBufferNumber() == 3); + assert (options.maxBackgroundCompactions() == 10); + assert (options.compressionType() == CompressionType.SNAPPY_COMPRESSION); + assert (options.compactionStyle() == 
CompactionStyle.UNIVERSAL); + + assert (options.memTableFactoryName().equals("SkipListFactory")); + options.setMemTableConfig( + new HashSkipListMemTableConfig() + .setHeight(4) + .setBranchingFactor(4) + .setBucketCount(2000000)); + assert (options.memTableFactoryName().equals("HashSkipListRepFactory")); + + options.setMemTableConfig( + new HashLinkedListMemTableConfig() + .setBucketCount(100000)); + assert (options.memTableFactoryName().equals("HashLinkedListRepFactory")); + + options.setMemTableConfig( + new VectorMemTableConfig().setReservedSize(10000)); + assert (options.memTableFactoryName().equals("VectorRepFactory")); + + options.setMemTableConfig(new SkipListMemTableConfig()); + assert (options.memTableFactoryName().equals("SkipListFactory")); + + options.setTableFormatConfig(new PlainTableConfig()); + // Plain-Table requires mmap read + options.setAllowMmapReads(true); + assert (options.tableFactoryName().equals("PlainTable")); + + options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000, + 10000, 10)); + options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000)); + + final BlockBasedTableConfig table_options = new BlockBasedTableConfig(); + table_options.setBlockCacheSize(64 * SizeUnit.KB) + .setFilter(bloomFilter) + .setCacheNumShardBits(6) + .setBlockSizeDeviation(5) + .setBlockRestartInterval(10) + .setCacheIndexAndFilterBlocks(true) + .setHashIndexAllowCollision(false) + .setBlockCacheCompressedSize(64 * SizeUnit.KB) + .setBlockCacheCompressedNumShardBits(10); + + assert (table_options.blockCacheSize() == 64 * SizeUnit.KB); + assert (table_options.cacheNumShardBits() == 6); + assert (table_options.blockSizeDeviation() == 5); + assert (table_options.blockRestartInterval() == 10); + assert (table_options.cacheIndexAndFilterBlocks() == true); + assert (table_options.hashIndexAllowCollision() == false); + assert (table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB); + assert (table_options.blockCacheCompressedNumShardBits() 
== 10); + + options.setTableFormatConfig(table_options); + assert (options.tableFactoryName().equals("BlockBasedTable")); + + try (final RocksDB db = RocksDB.open(options, db_path)) { + db.put("hello".getBytes(), "world".getBytes()); + byte[] value = db.get("hello".getBytes()); + assert ("world".equals(new String(value))); + String str = db.getProperty("rocksdb.stats"); + assert (str != null && !str.equals("")); + } catch (RocksDBException e) { + System.out.format("[ERROR] caught the unexpceted exception -- %s\n", e); + assert (false); + } + + try (final RocksDB db = RocksDB.open(options, db_path)) { + db.put("hello".getBytes(), "world".getBytes()); + byte[] value = db.get("hello".getBytes()); + System.out.format("Get('hello') = %s\n", + new String(value)); + + for (int i = 1; i <= 9; ++i) { + for (int j = 1; j <= 9; ++j) { + db.put(String.format("%dx%d", i, j).getBytes(), + String.format("%d", i * j).getBytes()); + } + } + + for (int i = 1; i <= 9; ++i) { + for (int j = 1; j <= 9; ++j) { + System.out.format("%s ", new String(db.get( + String.format("%dx%d", i, j).getBytes()))); + } + System.out.println(""); + } + + // write batch test + try (final WriteOptions writeOpt = new WriteOptions()) { + for (int i = 10; i <= 19; ++i) { + try (final WriteBatch batch = new WriteBatch()) { + for (int j = 10; j <= 19; ++j) { + batch.put(String.format("%dx%d", i, j).getBytes(), String.format("%d", i * j).getBytes()); + } + db.write(writeOpt, batch); + } + } } - db.write(writeOpt, batch); - batch.dispose(); - } - for (int i = 10; i <= 19; ++i) { - for (int j = 10; j <= 19; ++j) { - assert(new String( - db.get(String.format("%dx%d", i, j).getBytes())).equals( - String.format("%d", i * j))); - System.out.format("%s ", new String(db.get( - String.format("%dx%d", i, j).getBytes()))); + for (int i = 10; i <= 19; ++i) { + for (int j = 10; j <= 19; ++j) { + assert (new String( + db.get(String.format("%dx%d", i, j).getBytes())).equals( + String.format("%d", i * j))); + 
System.out.format("%s ", new String(db.get( + String.format("%dx%d", i, j).getBytes()))); + } + System.out.println(""); } - System.out.println(""); - } - writeOpt.dispose(); - value = db.get("1x1".getBytes()); - assert(value != null); - value = db.get("world".getBytes()); - assert(value == null); - value = db.get(readOptions, "world".getBytes()); - assert(value == null); + value = db.get("1x1".getBytes()); + assert (value != null); + value = db.get("world".getBytes()); + assert (value == null); + value = db.get(readOptions, "world".getBytes()); + assert (value == null); - byte[] testKey = "asdf".getBytes(); - byte[] testValue = - "asdfghjkl;'?> insufficientArray.length); - len = db.get("asdfjkl;".getBytes(), enoughArray); - assert(len == RocksDB.NOT_FOUND); - len = db.get(testKey, enoughArray); - assert(len == testValue.length); + byte[] insufficientArray = new byte[10]; + byte[] enoughArray = new byte[50]; + int len; + len = db.get(testKey, insufficientArray); + assert (len > insufficientArray.length); + len = db.get("asdfjkl;".getBytes(), enoughArray); + assert (len == RocksDB.NOT_FOUND); + len = db.get(testKey, enoughArray); + assert (len == testValue.length); - len = db.get(readOptions, testKey, insufficientArray); - assert(len > insufficientArray.length); - len = db.get(readOptions, "asdfjkl;".getBytes(), enoughArray); - assert(len == RocksDB.NOT_FOUND); - len = db.get(readOptions, testKey, enoughArray); - assert(len == testValue.length); + len = db.get(readOptions, testKey, insufficientArray); + assert (len > insufficientArray.length); + len = db.get(readOptions, "asdfjkl;".getBytes(), enoughArray); + assert (len == RocksDB.NOT_FOUND); + len = db.get(readOptions, testKey, enoughArray); + assert (len == testValue.length); - db.remove(testKey); - len = db.get(testKey, enoughArray); - assert(len == RocksDB.NOT_FOUND); + db.remove(testKey); + len = db.get(testKey, enoughArray); + assert (len == RocksDB.NOT_FOUND); - // repeat the test with WriteOptions - 
WriteOptions writeOpts = new WriteOptions(); - writeOpts.setSync(true); - writeOpts.setDisableWAL(true); - db.put(writeOpts, testKey, testValue); - len = db.get(testKey, enoughArray); - assert(len == testValue.length); - assert(new String(testValue).equals( - new String(enoughArray, 0, len))); - writeOpts.dispose(); - - try { - for (TickerType statsType : TickerType.values()) { - stats.getTickerCount(statsType); + // repeat the test with WriteOptions + try (final WriteOptions writeOpts = new WriteOptions()) { + writeOpts.setSync(true); + writeOpts.setDisableWAL(true); + db.put(writeOpts, testKey, testValue); + len = db.get(testKey, enoughArray); + assert (len == testValue.length); + assert (new String(testValue).equals( + new String(enoughArray, 0, len))); } - System.out.println("getTickerCount() passed."); - } catch (Exception e) { - System.out.println("Failed in call to getTickerCount()"); - assert(false); //Should never reach here. - } - try { - for (HistogramType histogramType : HistogramType.values()) { - HistogramData data = stats.geHistogramData(histogramType); + try { + for (TickerType statsType : TickerType.values()) { + stats.getTickerCount(statsType); + } + System.out.println("getTickerCount() passed."); + } catch (Exception e) { + System.out.println("Failed in call to getTickerCount()"); + assert (false); //Should never reach here. } - System.out.println("geHistogramData() passed."); - } catch (Exception e) { - System.out.println("Failed in call to geHistogramData()"); - assert(false); //Should never reach here. + + try { + for (HistogramType histogramType : HistogramType.values()) { + HistogramData data = stats.geHistogramData(histogramType); + } + System.out.println("geHistogramData() passed."); + } catch (Exception e) { + System.out.println("Failed in call to geHistogramData()"); + assert (false); //Should never reach here. 
+ } + + try (final RocksIterator iterator = db.newIterator()) { + + boolean seekToFirstPassed = false; + for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) { + iterator.status(); + assert (iterator.key() != null); + assert (iterator.value() != null); + seekToFirstPassed = true; + } + if (seekToFirstPassed) { + System.out.println("iterator seekToFirst tests passed."); + } + + boolean seekToLastPassed = false; + for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) { + iterator.status(); + assert (iterator.key() != null); + assert (iterator.value() != null); + seekToLastPassed = true; + } + + if (seekToLastPassed) { + System.out.println("iterator seekToLastPassed tests passed."); + } + + iterator.seekToFirst(); + iterator.seek(iterator.key()); + assert (iterator.key() != null); + assert (iterator.value() != null); + + System.out.println("iterator seek test passed."); + + } + System.out.println("iterator tests passed."); + + final List keys = new ArrayList<>(); + try (final RocksIterator iterator = db.newIterator()) { + for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) { + keys.add(iterator.key()); + } + } + + Map values = db.multiGet(keys); + assert (values.size() == keys.size()); + for (byte[] value1 : values.values()) { + assert (value1 != null); + } + + values = db.multiGet(new ReadOptions(), keys); + assert (values.size() == keys.size()); + for (byte[] value1 : values.values()) { + assert (value1 != null); + } + } catch (RocksDBException e) { + System.err.println(e); } - - RocksIterator iterator = db.newIterator(); - - boolean seekToFirstPassed = false; - for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) { - iterator.status(); - assert(iterator.key() != null); - assert(iterator.value() != null); - seekToFirstPassed = true; - } - if(seekToFirstPassed) { - System.out.println("iterator seekToFirst tests passed."); - } - - boolean seekToLastPassed = false; - for (iterator.seekToLast(); iterator.isValid(); 
iterator.prev()) { - iterator.status(); - assert(iterator.key() != null); - assert(iterator.value() != null); - seekToLastPassed = true; - } - - if(seekToLastPassed) { - System.out.println("iterator seekToLastPassed tests passed."); - } - - iterator.seekToFirst(); - iterator.seek(iterator.key()); - assert(iterator.key() != null); - assert(iterator.value() != null); - - System.out.println("iterator seek test passed."); - - iterator.dispose(); - System.out.println("iterator tests passed."); - - iterator = db.newIterator(); - List keys = new ArrayList(); - for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) { - keys.add(iterator.key()); - } - iterator.dispose(); - - Map values = db.multiGet(keys); - assert(values.size() == keys.size()); - for(byte[] value1 : values.values()) { - assert(value1 != null); - } - - values = db.multiGet(new ReadOptions(), keys); - assert(values.size() == keys.size()); - for(byte[] value1 : values.values()) { - assert(value1 != null); - } - } catch (RocksDBException e) { - System.err.println(e); } - if (db != null) { - db.close(); - } - // be sure to dispose c++ pointers - options.dispose(); - readOptions.dispose(); } } diff --git a/java/src/test/java/org/rocksdb/AbstractComparatorTest.java b/java/src/test/java/org/rocksdb/AbstractComparatorTest.java index bf8b3c0f7..db4b4d7d0 100644 --- a/java/src/test/java/org/rocksdb/AbstractComparatorTest.java +++ b/java/src/test/java/org/rocksdb/AbstractComparatorTest.java @@ -8,6 +8,7 @@ package org.rocksdb; import java.io.IOException; import java.nio.file.*; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Random; @@ -39,57 +40,43 @@ public abstract class AbstractComparatorTest { * * @throws java.io.IOException if IO error happens. 
*/ - public void testRoundtrip(final Path db_path) throws IOException, RocksDBException { - - Options opt = null; - RocksDB db = null; - - try { - opt = new Options(); - opt.setCreateIfMissing(true); - opt.setComparator(getAscendingIntKeyComparator()); + public void testRoundtrip(final Path db_path) throws IOException, + RocksDBException { + try (final AbstractComparator comparator = getAscendingIntKeyComparator(); + final Options opt = new Options() + .setCreateIfMissing(true) + .setComparator(comparator)) { // store 10,000 random integer keys final int ITERATIONS = 10000; - - db = RocksDB.open(opt, db_path.toString()); - final Random random = new Random(); - for (int i = 0; i < ITERATIONS; i++) { - final byte key[] = intToByte(random.nextInt()); - if (i > 0 && db.get(key) != null) { // does key already exist (avoid duplicates) - i--; // generate a different key - } else { - db.put(key, "value".getBytes()); + try (final RocksDB db = RocksDB.open(opt, db_path.toString())) { + final Random random = new Random(); + for (int i = 0; i < ITERATIONS; i++) { + final byte key[] = intToByte(random.nextInt()); + // does key already exist (avoid duplicates) + if (i > 0 && db.get(key) != null) { + i--; // generate a different key + } else { + db.put(key, "value".getBytes()); + } } } - db.close(); // re-open db and read from start to end // integer keys should be in ascending // order as defined by SimpleIntComparator - db = RocksDB.open(opt, db_path.toString()); - final RocksIterator it = db.newIterator(); - it.seekToFirst(); - int lastKey = Integer.MIN_VALUE; - int count = 0; - for (it.seekToFirst(); it.isValid(); it.next()) { - final int thisKey = byteToInt(it.key()); - assertThat(thisKey).isGreaterThan(lastKey); - lastKey = thisKey; - count++; - } - it.dispose(); - db.close(); - - assertThat(count).isEqualTo(ITERATIONS); - - } finally { - if (db != null) { - db.close(); - } - - if (opt != null) { - opt.dispose(); + try (final RocksDB db = RocksDB.open(opt, 
db_path.toString()); + final RocksIterator it = db.newIterator()) { + it.seekToFirst(); + int lastKey = Integer.MIN_VALUE; + int count = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + final int thisKey = byteToInt(it.key()); + assertThat(thisKey).isGreaterThan(lastKey); + lastKey = thisKey; + count++; + } + assertThat(count).isEqualTo(ITERATIONS); } } } @@ -109,80 +96,75 @@ public abstract class AbstractComparatorTest { public void testRoundtripCf(final Path db_path) throws IOException, RocksDBException { - DBOptions opt = null; - RocksDB db = null; - List cfDescriptors = - new ArrayList<>(); - cfDescriptors.add(new ColumnFamilyDescriptor( - RocksDB.DEFAULT_COLUMN_FAMILY)); - cfDescriptors.add(new ColumnFamilyDescriptor("new_cf".getBytes(), - new ColumnFamilyOptions().setComparator( - getAscendingIntKeyComparator()))); - List cfHandles = new ArrayList<>(); - try { - opt = new DBOptions(). + try(final AbstractComparator comparator = getAscendingIntKeyComparator()) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), + new ColumnFamilyOptions().setComparator(comparator)) + ); + + final List cfHandles = new ArrayList<>(); + + try (final DBOptions opt = new DBOptions(). setCreateIfMissing(true). 
- setCreateMissingColumnFamilies(true); + setCreateMissingColumnFamilies(true)) { - // store 10,000 random integer keys - final int ITERATIONS = 10000; + // store 10,000 random integer keys + final int ITERATIONS = 10000; - db = RocksDB.open(opt, db_path.toString(), cfDescriptors, cfHandles); - assertThat(cfDescriptors.size()).isEqualTo(2); - assertThat(cfHandles.size()).isEqualTo(2); + try (final RocksDB db = RocksDB.open(opt, db_path.toString(), + cfDescriptors, cfHandles)) { + try { + assertThat(cfDescriptors.size()).isEqualTo(2); + assertThat(cfHandles.size()).isEqualTo(2); - final Random random = new Random(); - for (int i = 0; i < ITERATIONS; i++) { - final byte key[] = intToByte(random.nextInt()); - if (i > 0 && db.get(cfHandles.get(1), key) != null) { - // does key already exist (avoid duplicates) - i--; // generate a different key - } else { - db.put(cfHandles.get(1), key, "value".getBytes()); + final Random random = new Random(); + for (int i = 0; i < ITERATIONS; i++) { + final byte key[] = intToByte(random.nextInt()); + if (i > 0 && db.get(cfHandles.get(1), key) != null) { + // does key already exist (avoid duplicates) + i--; // generate a different key + } else { + db.put(cfHandles.get(1), key, "value".getBytes()); + } + } + } finally { + for (final ColumnFamilyHandle handle : cfHandles) { + handle.close(); + } + } + cfHandles.clear(); } - } - for (ColumnFamilyHandle handle : cfHandles) { - handle.dispose(); - } - cfHandles.clear(); - db.close(); - // re-open db and read from start to end - // integer keys should be in ascending - // order as defined by SimpleIntComparator - db = RocksDB.open(opt, db_path.toString(), cfDescriptors, cfHandles); - assertThat(cfDescriptors.size()).isEqualTo(2); - assertThat(cfHandles.size()).isEqualTo(2); - final RocksIterator it = db.newIterator(cfHandles.get(1)); - it.seekToFirst(); - int lastKey = Integer.MIN_VALUE; - int count = 0; - for (it.seekToFirst(); it.isValid(); it.next()) { - final int thisKey = 
byteToInt(it.key()); - assertThat(thisKey).isGreaterThan(lastKey); - lastKey = thisKey; - count++; - } + // re-open db and read from start to end + // integer keys should be in ascending + // order as defined by SimpleIntComparator + try (final RocksDB db = RocksDB.open(opt, db_path.toString(), + cfDescriptors, cfHandles); + final RocksIterator it = db.newIterator(cfHandles.get(1))) { + try { + assertThat(cfDescriptors.size()).isEqualTo(2); + assertThat(cfHandles.size()).isEqualTo(2); - it.dispose(); - for (ColumnFamilyHandle handle : cfHandles) { - handle.dispose(); - } - cfHandles.clear(); - db.close(); - assertThat(count).isEqualTo(ITERATIONS); + it.seekToFirst(); + int lastKey = Integer.MIN_VALUE; + int count = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + final int thisKey = byteToInt(it.key()); + assertThat(thisKey).isGreaterThan(lastKey); + lastKey = thisKey; + count++; + } - } finally { - for (ColumnFamilyHandle handle : cfHandles) { - handle.dispose(); - } + assertThat(count).isEqualTo(ITERATIONS); - if (db != null) { - db.close(); - } - - if (opt != null) { - opt.dispose(); + } finally { + for (final ColumnFamilyHandle handle : cfHandles) { + handle.close(); + } + } + cfHandles.clear(); + } } } } diff --git a/java/src/test/java/org/rocksdb/BackupEngineTest.java b/java/src/test/java/org/rocksdb/BackupEngineTest.java index f010ff3ac..b50ddf499 100644 --- a/java/src/test/java/org/rocksdb/BackupEngineTest.java +++ b/java/src/test/java/org/rocksdb/BackupEngineTest.java @@ -28,148 +28,96 @@ public class BackupEngineTest { @Test public void backupDb() throws RocksDBException { - Options opt = null; - RocksDB db = null; - try { - opt = new Options().setCreateIfMissing(true); - // Open empty database. - db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); + // Open empty database. 
+ try(final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values prepareDatabase(db); + // Create two backups - BackupableDBOptions bopt = null; - try { - bopt = new BackupableDBOptions( + try(final BackupableDBOptions bopt = new BackupableDBOptions( backupFolder.getRoot().getAbsolutePath()); - try(final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { - be.createNewBackup(db, false); - be.createNewBackup(db, true); - verifyNumberOfValidBackups(be, 2); - } - } finally { - if(bopt != null) { - bopt.dispose(); - } - } - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { + be.createNewBackup(db, false); + be.createNewBackup(db, true); + verifyNumberOfValidBackups(be, 2); } } } @Test public void deleteBackup() throws RocksDBException { - Options opt = null; - RocksDB db = null; - try { - opt = new Options().setCreateIfMissing(true); - // Open empty database. - db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); + // Open empty database. 
+ try(final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { // Fill database with some test values prepareDatabase(db); // Create two backups - BackupableDBOptions bopt = null; - try { - bopt = new BackupableDBOptions( - backupFolder.getRoot().getAbsolutePath()); - try(final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { - be.createNewBackup(db, false); - be.createNewBackup(db, true); - final List backupInfo = - verifyNumberOfValidBackups(be, 2); - // Delete the first backup - be.deleteBackup(backupInfo.get(0).backupId()); - final List newBackupInfo = - verifyNumberOfValidBackups(be, 1); + try(final BackupableDBOptions bopt = new BackupableDBOptions( + backupFolder.getRoot().getAbsolutePath()); + final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { + be.createNewBackup(db, false); + be.createNewBackup(db, true); + final List backupInfo = + verifyNumberOfValidBackups(be, 2); + // Delete the first backup + be.deleteBackup(backupInfo.get(0).backupId()); + final List newBackupInfo = + verifyNumberOfValidBackups(be, 1); - // The second backup must remain. - assertThat(newBackupInfo.get(0).backupId()). - isEqualTo(backupInfo.get(1).backupId()); - } - } finally { - if(bopt != null) { - bopt.dispose(); - } - } - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + // The second backup must remain. + assertThat(newBackupInfo.get(0).backupId()). + isEqualTo(backupInfo.get(1).backupId()); } } } @Test public void purgeOldBackups() throws RocksDBException { - Options opt = null; - RocksDB db = null; - try { - opt = new Options().setCreateIfMissing(true); - // Open empty database. - db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); + // Open empty database. 
+ try(final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { // Fill database with some test values prepareDatabase(db); // Create four backups - BackupableDBOptions bopt = null; - try { - bopt = new BackupableDBOptions( - backupFolder.getRoot().getAbsolutePath()); - try(final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { - be.createNewBackup(db, false); - be.createNewBackup(db, true); - be.createNewBackup(db, true); - be.createNewBackup(db, true); - final List backupInfo = - verifyNumberOfValidBackups(be, 4); - // Delete everything except the latest backup - be.purgeOldBackups(1); - final List newBackupInfo = - verifyNumberOfValidBackups(be, 1); - // The latest backup must remain. - assertThat(newBackupInfo.get(0).backupId()). - isEqualTo(backupInfo.get(3).backupId()); - } - } finally { - if(bopt != null) { - bopt.dispose(); - } - } - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + try(final BackupableDBOptions bopt = new BackupableDBOptions( + backupFolder.getRoot().getAbsolutePath()); + final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { + be.createNewBackup(db, false); + be.createNewBackup(db, true); + be.createNewBackup(db, true); + be.createNewBackup(db, true); + final List backupInfo = + verifyNumberOfValidBackups(be, 4); + // Delete everything except the latest backup + be.purgeOldBackups(1); + final List newBackupInfo = + verifyNumberOfValidBackups(be, 1); + // The latest backup must remain. + assertThat(newBackupInfo.get(0).backupId()). + isEqualTo(backupInfo.get(3).backupId()); } } } @Test - public void restoreLatestBackup() - throws RocksDBException { - Options opt = null; - RocksDB db = null; - try { - opt = new Options().setCreateIfMissing(true); + public void restoreLatestBackup() throws RocksDBException { + try(final Options opt = new Options().setCreateIfMissing(true)) { // Open empty database. 
- db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); - // Fill database with some test values - prepareDatabase(db); - BackupableDBOptions bopt = null; + RocksDB db = null; try { - bopt = new BackupableDBOptions( + db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath()); + // Fill database with some test values + prepareDatabase(db); + + try (final BackupableDBOptions bopt = new BackupableDBOptions( backupFolder.getRoot().getAbsolutePath()); - try (final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { + final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { be.createNewBackup(db, true); verifyNumberOfValidBackups(be, 1); db.put("key1".getBytes(), "valueV2".getBytes()); @@ -182,51 +130,44 @@ public class BackupEngineTest { assertThat(new String(db.get("key2".getBytes()))).endsWith("V3"); db.close(); + db = null; verifyNumberOfValidBackups(be, 2); // restore db from latest backup - be.restoreDbFromLatestBackup(dbFolder.getRoot().getAbsolutePath(), - dbFolder.getRoot().getAbsolutePath(), - new RestoreOptions(false)); + try(final RestoreOptions ropts = new RestoreOptions(false)) { + be.restoreDbFromLatestBackup(dbFolder.getRoot().getAbsolutePath(), + dbFolder.getRoot().getAbsolutePath(), ropts); + } + // Open database again. - db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); + db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath()); + // Values must have suffix V2 because of restoring latest backup. assertThat(new String(db.get("key1".getBytes()))).endsWith("V2"); assertThat(new String(db.get("key2".getBytes()))).endsWith("V2"); } } finally { - if(bopt != null) { - bopt.dispose(); + if(db != null) { + db.close(); } } - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } @Test public void restoreFromBackup() throws RocksDBException { - Options opt = null; - RocksDB db = null; - try { - opt = new Options().setCreateIfMissing(true); - // Open empty database. 
- db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); - // Fill database with some test values - prepareDatabase(db); - BackupableDBOptions bopt = null; + try(final Options opt = new Options().setCreateIfMissing(true)) { + RocksDB db = null; try { - bopt = new BackupableDBOptions( + // Open empty database. + db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath()); + // Fill database with some test values + prepareDatabase(db); + try (final BackupableDBOptions bopt = new BackupableDBOptions( backupFolder.getRoot().getAbsolutePath()); - try (final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { + final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { be.createNewBackup(db, true); verifyNumberOfValidBackups(be, 1); db.put("key1".getBytes(), "valueV2".getBytes()); @@ -240,9 +181,10 @@ public class BackupEngineTest { //close the database db.close(); + db = null; //restore the backup - List backupInfo = verifyNumberOfValidBackups(be, 2); + final List backupInfo = verifyNumberOfValidBackups(be, 2); // restore db from first backup be.restoreDbFromBackup(backupInfo.get(0).backupId(), dbFolder.getRoot().getAbsolutePath(), @@ -256,17 +198,10 @@ public class BackupEngineTest { assertThat(new String(db.get("key2".getBytes()))).endsWith("V1"); } } finally { - if(bopt != null) { - bopt.dispose(); + if(db != null) { + db.close(); } } - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } diff --git a/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java b/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java index 44dc5b578..c3836ac9b 100644 --- a/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java +++ b/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java @@ -16,7 +16,8 @@ import org.junit.rules.ExpectedException; public class BackupableDBOptionsTest { - private final static String ARBITRARY_PATH = System.getProperty("java.io.tmpdir"); + private final static String 
ARBITRARY_PATH = + System.getProperty("java.io.tmpdir"); @ClassRule public static final RocksMemoryResource rocksMemoryResource = @@ -30,87 +31,61 @@ public class BackupableDBOptionsTest { @Test public void backupDir() { - BackupableDBOptions backupableDBOptions = null; - try { - backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH); + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { assertThat(backupableDBOptions.backupDir()). isEqualTo(ARBITRARY_PATH); - } finally { - if (backupableDBOptions != null) { - backupableDBOptions.dispose(); - } } } @Test public void shareTableFiles() { - BackupableDBOptions backupableDBOptions = null; - try { - backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH); - boolean value = rand.nextBoolean(); + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + final boolean value = rand.nextBoolean(); backupableDBOptions.setShareTableFiles(value); assertThat(backupableDBOptions.shareTableFiles()). 
isEqualTo(value); - } finally { - if (backupableDBOptions != null) { - backupableDBOptions.dispose(); - } } } @Test public void sync() { - BackupableDBOptions backupableDBOptions = null; - try { - backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH); - boolean value = rand.nextBoolean(); + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + final boolean value = rand.nextBoolean(); backupableDBOptions.setSync(value); assertThat(backupableDBOptions.sync()).isEqualTo(value); - } finally { - if (backupableDBOptions != null) { - backupableDBOptions.dispose(); - } } } @Test public void destroyOldData() { - BackupableDBOptions backupableDBOptions = null; - try { - backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH); - boolean value = rand.nextBoolean(); + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH);) { + final boolean value = rand.nextBoolean(); backupableDBOptions.setDestroyOldData(value); assertThat(backupableDBOptions.destroyOldData()). isEqualTo(value); - } finally { - if (backupableDBOptions != null) { - backupableDBOptions.dispose(); - } } } @Test public void backupLogFiles() { - BackupableDBOptions backupableDBOptions = null; - try { - backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH); - boolean value = rand.nextBoolean(); + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + final boolean value = rand.nextBoolean(); backupableDBOptions.setBackupLogFiles(value); assertThat(backupableDBOptions.backupLogFiles()). 
isEqualTo(value); - } finally { - if (backupableDBOptions != null) { - backupableDBOptions.dispose(); - } } } @Test public void backupRateLimit() { - BackupableDBOptions backupableDBOptions = null; - try { - backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH); - long value = Math.abs(rand.nextLong()); + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + final long value = Math.abs(rand.nextLong()); backupableDBOptions.setBackupRateLimit(value); assertThat(backupableDBOptions.backupRateLimit()). isEqualTo(value); @@ -118,19 +93,14 @@ public class BackupableDBOptionsTest { backupableDBOptions.setBackupRateLimit(-1); assertThat(backupableDBOptions.backupRateLimit()). isEqualTo(0); - } finally { - if (backupableDBOptions != null) { - backupableDBOptions.dispose(); - } } } @Test public void restoreRateLimit() { - BackupableDBOptions backupableDBOptions = null; - try { - backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH); - long value = Math.abs(rand.nextLong()); + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + final long value = Math.abs(rand.nextLong()); backupableDBOptions.setRestoreRateLimit(value); assertThat(backupableDBOptions.restoreRateLimit()). isEqualTo(value); @@ -138,145 +108,153 @@ public class BackupableDBOptionsTest { backupableDBOptions.setRestoreRateLimit(-1); assertThat(backupableDBOptions.restoreRateLimit()). isEqualTo(0); - } finally { - if (backupableDBOptions != null) { - backupableDBOptions.dispose(); - } } } @Test public void shareFilesWithChecksum() { - BackupableDBOptions backupableDBOptions = null; - try { - backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH); + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { boolean value = rand.nextBoolean(); backupableDBOptions.setShareFilesWithChecksum(value); assertThat(backupableDBOptions.shareFilesWithChecksum()). 
isEqualTo(value); - } finally { - if (backupableDBOptions != null) { - backupableDBOptions.dispose(); - } } } @Test public void failBackupDirIsNull() { exception.expect(IllegalArgumentException.class); - new BackupableDBOptions(null); + try (final BackupableDBOptions opts = new BackupableDBOptions(null)) { + //no-op + } } @Test - public void failBackupDirIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.backupDir(); + public void failBackupDirIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.backupDir(); + } } @Test - public void failSetShareTableFilesIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.setShareTableFiles(true); + public void failSetShareTableFilesIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setShareTableFiles(true); + } } @Test - public void failShareTableFilesIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.shareTableFiles(); + public void failShareTableFilesIfDisposed() { + try (BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.shareTableFiles(); + } } @Test - public void failSetSyncIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.setSync(true); + public void failSetSyncIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setSync(true); + } } @Test - public void failSyncIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.sync(); + public void failSyncIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.sync(); + } } @Test - public 
void failSetDestroyOldDataIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.setDestroyOldData(true); + public void failSetDestroyOldDataIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setDestroyOldData(true); + } } @Test - public void failDestroyOldDataIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.destroyOldData(); + public void failDestroyOldDataIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.destroyOldData(); + } } @Test - public void failSetBackupLogFilesIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.setBackupLogFiles(true); + public void failSetBackupLogFilesIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setBackupLogFiles(true); + } } @Test - public void failBackupLogFilesIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.backupLogFiles(); + public void failBackupLogFilesIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.backupLogFiles(); + } } @Test - public void failSetBackupRateLimitIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.setBackupRateLimit(1); + public void failSetBackupRateLimitIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setBackupRateLimit(1); + } } @Test - public void failBackupRateLimitIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.backupRateLimit(); + public void failBackupRateLimitIfDisposed() { + try (final 
BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.backupRateLimit(); + } } @Test - public void failSetRestoreRateLimitIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.setRestoreRateLimit(1); + public void failSetRestoreRateLimitIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setRestoreRateLimit(1); + } } @Test - public void failRestoreRateLimitIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.restoreRateLimit(); + public void failRestoreRateLimitIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.restoreRateLimit(); + } } @Test - public void failSetShareFilesWithChecksumIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.setShareFilesWithChecksum(true); + public void failSetShareFilesWithChecksumIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setShareFilesWithChecksum(true); + } } @Test - public void failShareFilesWithChecksumIfDisposed(){ - BackupableDBOptions options = setupUninitializedBackupableDBOptions( - exception); - options.shareFilesWithChecksum(); + public void failShareFilesWithChecksumIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.shareFilesWithChecksum(); + } } private BackupableDBOptions setupUninitializedBackupableDBOptions( ExpectedException exception) { - BackupableDBOptions backupableDBOptions = + final BackupableDBOptions backupableDBOptions = new BackupableDBOptions(ARBITRARY_PATH); - backupableDBOptions.dispose(); + backupableDBOptions.close(); exception.expect(AssertionError.class); return backupableDBOptions; } diff --git 
a/java/src/test/java/org/rocksdb/BackupableDBTest.java b/java/src/test/java/org/rocksdb/BackupableDBTest.java index b5e2f129c..4675f4198 100644 --- a/java/src/test/java/org/rocksdb/BackupableDBTest.java +++ b/java/src/test/java/org/rocksdb/BackupableDBTest.java @@ -28,74 +28,48 @@ public class BackupableDBTest { @Test public void backupDb() throws RocksDBException { - Options opt = null; - BackupableDBOptions bopt = null; - BackupableDB bdb = null; - try { - opt = new Options().setCreateIfMissing(true); - bopt = new BackupableDBOptions( - backupFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options().setCreateIfMissing(true); + final BackupableDBOptions bopt = new BackupableDBOptions( + backupFolder.getRoot().getAbsolutePath())) { assertThat(bopt.backupDir()).isEqualTo( backupFolder.getRoot().getAbsolutePath()); // Open empty database. - bdb = BackupableDB.open(opt, bopt, - dbFolder.getRoot().getAbsolutePath()); - // Fill database with some test values - prepareDatabase(bdb); - // Create two backups - bdb.createNewBackup(false); - bdb.createNewBackup(true); - verifyNumberOfValidBackups(bdb, 2); - } finally { - if (bdb != null) { - bdb.close(); - } - if (bopt != null) { - bopt.dispose(); - } - if (opt != null) { - opt.dispose(); + try (final BackupableDB bdb = BackupableDB.open(opt, bopt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values + prepareDatabase(bdb); + // Create two backups + bdb.createNewBackup(false); + bdb.createNewBackup(true); + verifyNumberOfValidBackups(bdb, 2); } } } @Test public void deleteBackup() throws RocksDBException { - Options opt = null; - BackupableDBOptions bopt = null; - BackupableDB bdb = null; - try { - opt = new Options().setCreateIfMissing(true); - bopt = new BackupableDBOptions( - backupFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options().setCreateIfMissing(true); + final BackupableDBOptions bopt = new BackupableDBOptions( + 
backupFolder.getRoot().getAbsolutePath())) { assertThat(bopt.backupDir()).isEqualTo( backupFolder.getRoot().getAbsolutePath()); // Open empty database. - bdb = BackupableDB.open(opt, bopt, - dbFolder.getRoot().getAbsolutePath()); - // Fill database with some test values - prepareDatabase(bdb); - // Create two backups - bdb.createNewBackup(false); - bdb.createNewBackup(true); - List backupInfo = - verifyNumberOfValidBackups(bdb, 2); - // Delete the first backup - bdb.deleteBackup(backupInfo.get(0).backupId()); - List newBackupInfo = - verifyNumberOfValidBackups(bdb, 1); - // The second backup must remain. - assertThat(newBackupInfo.get(0).backupId()). - isEqualTo(backupInfo.get(1).backupId()); - } finally { - if (bdb != null) { - bdb.close(); - } - if (bopt != null) { - bopt.dispose(); - } - if (opt != null) { - opt.dispose(); + try (final BackupableDB bdb = BackupableDB.open(opt, bopt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values + prepareDatabase(bdb); + // Create two backups + bdb.createNewBackup(false); + bdb.createNewBackup(true); + List backupInfo = + verifyNumberOfValidBackups(bdb, 2); + // Delete the first backup + bdb.deleteBackup(backupInfo.get(0).backupId()); + final List newBackupInfo = + verifyNumberOfValidBackups(bdb, 1); + // The second backup must remain. + assertThat(newBackupInfo.get(0).backupId()). 
+ isEqualTo(backupInfo.get(1).backupId()); } } } @@ -103,90 +77,61 @@ public class BackupableDBTest { @Test public void deleteBackupWithRestoreBackupableDB() throws RocksDBException { - Options opt = null; - BackupableDBOptions bopt = null; - BackupableDB bdb = null; - RestoreBackupableDB rdb = null; - try { - opt = new Options().setCreateIfMissing(true); - bopt = new BackupableDBOptions( - backupFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options().setCreateIfMissing(true); + final BackupableDBOptions bopt = new BackupableDBOptions( + backupFolder.getRoot().getAbsolutePath())) { assertThat(bopt.backupDir()).isEqualTo( backupFolder.getRoot().getAbsolutePath()); // Open empty database. - bdb = BackupableDB.open(opt, bopt, - dbFolder.getRoot().getAbsolutePath()); - // Fill database with some test values - prepareDatabase(bdb); - // Create two backups - bdb.createNewBackup(false); - bdb.createNewBackup(true); - List backupInfo = - verifyNumberOfValidBackups(bdb, 2); - // init RestoreBackupableDB - rdb = new RestoreBackupableDB(bopt); - // Delete the first backup - rdb.deleteBackup(backupInfo.get(0).backupId()); - // Fetch backup info using RestoreBackupableDB - List newBackupInfo = verifyNumberOfValidBackups(rdb, 1); - // The second backup must remain. - assertThat(newBackupInfo.get(0).backupId()). 
- isEqualTo(backupInfo.get(1).backupId()); - } finally { - if (bdb != null) { - bdb.close(); - } - if (rdb != null) { - rdb.dispose(); - } - if (bopt != null) { - bopt.dispose(); - } - if (opt != null) { - opt.dispose(); + try (final BackupableDB bdb = BackupableDB.open(opt, bopt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values + prepareDatabase(bdb); + // Create two backups + bdb.createNewBackup(false); + bdb.createNewBackup(true); + final List backupInfo = + verifyNumberOfValidBackups(bdb, 2); + // init RestoreBackupableDB + try (final RestoreBackupableDB rdb = new RestoreBackupableDB(bopt)) { + // Delete the first backup + rdb.deleteBackup(backupInfo.get(0).backupId()); + // Fetch backup info using RestoreBackupableDB + List newBackupInfo = verifyNumberOfValidBackups(rdb, 1); + // The second backup must remain. + assertThat(newBackupInfo.get(0).backupId()). + isEqualTo(backupInfo.get(1).backupId()); + } } } } @Test public void purgeOldBackups() throws RocksDBException { - Options opt = null; - BackupableDBOptions bopt = null; - BackupableDB bdb = null; - try { - opt = new Options().setCreateIfMissing(true); - bopt = new BackupableDBOptions( - backupFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options().setCreateIfMissing(true); + final BackupableDBOptions bopt = new BackupableDBOptions( + backupFolder.getRoot().getAbsolutePath())) { assertThat(bopt.backupDir()).isEqualTo( backupFolder.getRoot().getAbsolutePath()); // Open empty database. 
- bdb = BackupableDB.open(opt, bopt, - dbFolder.getRoot().getAbsolutePath()); - // Fill database with some test values - prepareDatabase(bdb); - // Create two backups - bdb.createNewBackup(false); - bdb.createNewBackup(true); - bdb.createNewBackup(true); - bdb.createNewBackup(true); - List backupInfo = - verifyNumberOfValidBackups(bdb, 4); - // Delete everything except the latest backup - bdb.purgeOldBackups(1); - List newBackupInfo = - verifyNumberOfValidBackups(bdb, 1); - // The latest backup must remain. - assertThat(newBackupInfo.get(0).backupId()). - isEqualTo(backupInfo.get(3).backupId()); - } finally { - if (bdb != null) { - bdb.close(); - } - if (bopt != null) { - bopt.dispose(); - } - if (opt != null) { - opt.dispose(); + try (final BackupableDB bdb = BackupableDB.open(opt, bopt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values + prepareDatabase(bdb); + // Create two backups + bdb.createNewBackup(false); + bdb.createNewBackup(true); + bdb.createNewBackup(true); + bdb.createNewBackup(true); + final List backupInfo = + verifyNumberOfValidBackups(bdb, 4); + // Delete everything except the latest backup + bdb.purgeOldBackups(1); + final List newBackupInfo = + verifyNumberOfValidBackups(bdb, 1); + // The latest backup must remain. + assertThat(newBackupInfo.get(0).backupId()). 
+ isEqualTo(backupInfo.get(3).backupId()); } } } @@ -194,58 +139,43 @@ public class BackupableDBTest { @Test public void purgeOldBackupsWithRestoreBackupableDb() throws RocksDBException { - Options opt = null; - BackupableDBOptions bopt = null; - BackupableDB bdb = null; - RestoreBackupableDB rdb = null; - try { - opt = new Options().setCreateIfMissing(true); - bopt = new BackupableDBOptions( - backupFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options().setCreateIfMissing(true); + final BackupableDBOptions bopt = + new BackupableDBOptions(backupFolder.getRoot().getAbsolutePath()) + ) { assertThat(bopt.backupDir()).isEqualTo( backupFolder.getRoot().getAbsolutePath()); // Open empty database. - bdb = BackupableDB.open(opt, bopt, - dbFolder.getRoot().getAbsolutePath()); - // Fill database with some test values - prepareDatabase(bdb); - // Create two backups - bdb.createNewBackup(false); - bdb.createNewBackup(true); - bdb.createNewBackup(true); - bdb.createNewBackup(true); - List infos = verifyNumberOfValidBackups(bdb, 4); - assertThat(infos.get(1).size()). - isEqualTo(infos.get(2).size()); - assertThat(infos.get(1).numberFiles()). - isEqualTo(infos.get(2).numberFiles()); - long maxTimeBeforePurge = Long.MIN_VALUE; - for (BackupInfo backupInfo : infos) { - if (maxTimeBeforePurge < backupInfo.timestamp()) { - maxTimeBeforePurge = backupInfo.timestamp(); + try (final BackupableDB bdb = BackupableDB.open(opt, bopt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values + prepareDatabase(bdb); + // Create two backups + bdb.createNewBackup(false); + bdb.createNewBackup(true); + bdb.createNewBackup(true); + bdb.createNewBackup(true); + List infos = verifyNumberOfValidBackups(bdb, 4); + assertThat(infos.get(1).size()). + isEqualTo(infos.get(2).size()); + assertThat(infos.get(1).numberFiles()). 
+ isEqualTo(infos.get(2).numberFiles()); + long maxTimeBeforePurge = Long.MIN_VALUE; + for (BackupInfo backupInfo : infos) { + if (maxTimeBeforePurge < backupInfo.timestamp()) { + maxTimeBeforePurge = backupInfo.timestamp(); + } + } + // init RestoreBackupableDB + try (final RestoreBackupableDB rdb = new RestoreBackupableDB(bopt)) { + // the same number of backups must + // exist using RestoreBackupableDB. + verifyNumberOfValidBackups(rdb, 4); + rdb.purgeOldBackups(1); + infos = verifyNumberOfValidBackups(rdb, 1); + assertThat(infos.get(0).timestamp()). + isEqualTo(maxTimeBeforePurge); } - } - // init RestoreBackupableDB - rdb = new RestoreBackupableDB(bopt); - // the same number of backups must - // exist using RestoreBackupableDB. - verifyNumberOfValidBackups(rdb, 4); - rdb.purgeOldBackups(1); - infos = verifyNumberOfValidBackups(rdb, 1); - assertThat(infos.get(0).timestamp()). - isEqualTo(maxTimeBeforePurge); - } finally { - if (bdb != null) { - bdb.close(); - } - if (rdb != null) { - rdb.dispose(); - } - if (bopt != null) { - bopt.dispose(); - } - if (opt != null) { - opt.dispose(); } } } @@ -253,58 +183,44 @@ public class BackupableDBTest { @Test public void restoreLatestBackup() throws RocksDBException { - Options opt = null; - BackupableDBOptions bopt = null; - BackupableDB bdb = null; - RestoreBackupableDB rdb = null; - try { - opt = new Options().setCreateIfMissing(true); - bopt = new BackupableDBOptions( - backupFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options().setCreateIfMissing(true); + final BackupableDBOptions bopt = + new BackupableDBOptions( + backupFolder.getRoot().getAbsolutePath())) { assertThat(bopt.backupDir()).isEqualTo( backupFolder.getRoot().getAbsolutePath()); // Open empty database. 
- bdb = BackupableDB.open(opt, bopt, - dbFolder.getRoot().getAbsolutePath()); - // Fill database with some test values - prepareDatabase(bdb); - bdb.createNewBackup(true); - verifyNumberOfValidBackups(bdb, 1); - bdb.put("key1".getBytes(), "valueV2".getBytes()); - bdb.put("key2".getBytes(), "valueV2".getBytes()); - bdb.createNewBackup(true); - verifyNumberOfValidBackups(bdb, 2); - bdb.put("key1".getBytes(), "valueV3".getBytes()); - bdb.put("key2".getBytes(), "valueV3".getBytes()); - assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V3"); - assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V3"); - bdb.close(); + try (final BackupableDB bdb = BackupableDB.open(opt, bopt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values + prepareDatabase(bdb); + bdb.createNewBackup(true); + verifyNumberOfValidBackups(bdb, 1); + bdb.put("key1".getBytes(), "valueV2".getBytes()); + bdb.put("key2".getBytes(), "valueV2".getBytes()); + bdb.createNewBackup(true); + verifyNumberOfValidBackups(bdb, 2); + bdb.put("key1".getBytes(), "valueV3".getBytes()); + bdb.put("key2".getBytes(), "valueV3".getBytes()); + assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V3"); + assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V3"); + } // init RestoreBackupableDB - rdb = new RestoreBackupableDB(bopt); - verifyNumberOfValidBackups(rdb, 2); - // restore db from latest backup - rdb.restoreDBFromLatestBackup(dbFolder.getRoot().getAbsolutePath(), - dbFolder.getRoot().getAbsolutePath(), - new RestoreOptions(false)); + try (final RestoreBackupableDB rdb = new RestoreBackupableDB(bopt)) { + verifyNumberOfValidBackups(rdb, 2); + // restore db from latest backup + rdb.restoreDBFromLatestBackup(dbFolder.getRoot().getAbsolutePath(), + dbFolder.getRoot().getAbsolutePath(), + new RestoreOptions(false)); + } + // Open database again. 
- bdb = BackupableDB.open(opt, bopt, - dbFolder.getRoot().getAbsolutePath()); - // Values must have suffix V2 because of restoring latest backup. - assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V2"); - assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V2"); - } finally { - if (bdb != null) { - bdb.close(); - } - if (rdb != null) { - rdb.dispose(); - } - if (bopt != null) { - bopt.dispose(); - } - if (opt != null) { - opt.dispose(); + try (final BackupableDB bdb = BackupableDB.open(opt, bopt, + dbFolder.getRoot().getAbsolutePath())) { + // Values must have suffix V2 because of restoring latest backup. + assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V2"); + assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V2"); } } } @@ -312,59 +228,44 @@ public class BackupableDBTest { @Test public void restoreFromBackup() throws RocksDBException { - Options opt = null; - BackupableDBOptions bopt = null; - BackupableDB bdb = null; - RestoreBackupableDB rdb = null; - try { - opt = new Options().setCreateIfMissing(true); - bopt = new BackupableDBOptions( - backupFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options().setCreateIfMissing(true); + final BackupableDBOptions bopt = new BackupableDBOptions( + backupFolder.getRoot().getAbsolutePath())) { assertThat(bopt.backupDir()).isEqualTo( backupFolder.getRoot().getAbsolutePath()); // Open empty database. 
- bdb = BackupableDB.open(opt, bopt, - dbFolder.getRoot().getAbsolutePath()); - // Fill database with some test values - prepareDatabase(bdb); - bdb.createNewBackup(true); - verifyNumberOfValidBackups(bdb, 1); - bdb.put("key1".getBytes(), "valueV2".getBytes()); - bdb.put("key2".getBytes(), "valueV2".getBytes()); - bdb.createNewBackup(true); - verifyNumberOfValidBackups(bdb, 2); - bdb.put("key1".getBytes(), "valueV3".getBytes()); - bdb.put("key2".getBytes(), "valueV3".getBytes()); - assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V3"); - assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V3"); - bdb.close(); + try (final BackupableDB bdb = BackupableDB.open(opt, bopt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values + prepareDatabase(bdb); + bdb.createNewBackup(true); + verifyNumberOfValidBackups(bdb, 1); + bdb.put("key1".getBytes(), "valueV2".getBytes()); + bdb.put("key2".getBytes(), "valueV2".getBytes()); + bdb.createNewBackup(true); + verifyNumberOfValidBackups(bdb, 2); + bdb.put("key1".getBytes(), "valueV3".getBytes()); + bdb.put("key2".getBytes(), "valueV3".getBytes()); + assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V3"); + assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V3"); + } // init RestoreBackupableDB - rdb = new RestoreBackupableDB(bopt); - List backupInfo = verifyNumberOfValidBackups(rdb, 2); - // restore db from first backup - rdb.restoreDBFromBackup(backupInfo.get(0).backupId(), - dbFolder.getRoot().getAbsolutePath(), - dbFolder.getRoot().getAbsolutePath(), - new RestoreOptions(false)); + try (final RestoreBackupableDB rdb = new RestoreBackupableDB(bopt)) { + final List backupInfo = verifyNumberOfValidBackups(rdb, 2); + // restore db from first backup + rdb.restoreDBFromBackup(backupInfo.get(0).backupId(), + dbFolder.getRoot().getAbsolutePath(), + dbFolder.getRoot().getAbsolutePath(), + new RestoreOptions(false)); + } + // Open database again. 
- bdb = BackupableDB.open(opt, bopt, - dbFolder.getRoot().getAbsolutePath()); - // Values must have suffix V2 because of restoring latest backup. - assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V1"); - assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V1"); - } finally { - if (bdb != null) { - bdb.close(); - } - if (rdb != null) { - rdb.dispose(); - } - if (bopt != null) { - bopt.dispose(); - } - if (opt != null) { - opt.dispose(); + try (final BackupableDB bdb = BackupableDB.open(opt, bopt, + dbFolder.getRoot().getAbsolutePath())) { + // Values must have suffix V2 because of restoring latest backup. + assertThat(new String(bdb.get("key1".getBytes()))).endsWith("V1"); + assertThat(new String(bdb.get("key2".getBytes()))).endsWith("V1"); } } } @@ -372,13 +273,13 @@ public class BackupableDBTest { /** * Verify backups. * - * @param bdb {@link BackupableDB} instance. + * @param bdb {@link BackupableDB} instance. * @param expectedNumberOfBackups numerical value * @throws RocksDBException thrown if an error occurs within the native - * part of the library. + * part of the library. */ - private List verifyNumberOfValidBackups(BackupableDB bdb, - int expectedNumberOfBackups) throws RocksDBException { + private List verifyNumberOfValidBackups(final BackupableDB bdb, + final int expectedNumberOfBackups) throws RocksDBException { // Verify that backups exist assertThat(bdb.getCorruptedBackups().length). isEqualTo(0); @@ -392,13 +293,13 @@ public class BackupableDBTest { /** * Verify backups. * - * @param rdb {@link RestoreBackupableDB} instance. + * @param rdb {@link RestoreBackupableDB} instance. * @param expectedNumberOfBackups numerical value * @throws RocksDBException thrown if an error occurs within the native - * part of the library. + * part of the library. 
*/ private List verifyNumberOfValidBackups( - RestoreBackupableDB rdb, int expectedNumberOfBackups) + final RestoreBackupableDB rdb, final int expectedNumberOfBackups) throws RocksDBException { // Verify that backups exist assertThat(rdb.getCorruptedBackups().length). @@ -415,9 +316,9 @@ public class BackupableDBTest { * * @param db {@link RocksDB} instance. * @throws RocksDBException thrown if an error occurs within the native - * part of the library. + * part of the library. */ - private void prepareDatabase(RocksDB db) + private void prepareDatabase(final RocksDB db) throws RocksDBException { db.put("key1".getBytes(), "valueV1".getBytes()); db.put("key2".getBytes(), "valueV1".getBytes()); diff --git a/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java b/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java index 2b1ce5ffa..ccb7b7625 100644 --- a/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java +++ b/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java @@ -131,34 +131,20 @@ public class BlockBasedTableConfigTest { @Test public void blockBasedTableWithFilter() { - Options options = null; - try { - options = new Options(); - options.setTableFormatConfig( - new BlockBasedTableConfig().setFilter( - new BloomFilter(10))); + try(final Options options = new Options() + .setTableFormatConfig(new BlockBasedTableConfig() + .setFilter(new BloomFilter(10)))) { assertThat(options.tableFactoryName()). isEqualTo("BlockBasedTable"); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void blockBasedTableWithoutFilter() { - Options options = null; - try { - options = new Options(); - options.setTableFormatConfig( - new BlockBasedTableConfig().setFilter(null)); + try(final Options options = new Options().setTableFormatConfig( + new BlockBasedTableConfig().setFilter(null))) { assertThat(options.tableFactoryName()). 
isEqualTo("BlockBasedTable"); - } finally { - if (options != null) { - options.dispose(); - } } } diff --git a/java/src/test/java/org/rocksdb/CheckPointTest.java b/java/src/test/java/org/rocksdb/CheckPointTest.java index 3081e585a..e79569fb8 100644 --- a/java/src/test/java/org/rocksdb/CheckPointTest.java +++ b/java/src/test/java/org/rocksdb/CheckPointTest.java @@ -22,76 +22,61 @@ public class CheckPointTest { @Test public void checkPoint() throws RocksDBException { - RocksDB db = null; - Options options = null; - Checkpoint checkpoint = null; - try { - options = new Options(). - setCreateIfMissing(true); - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); - db.put("key".getBytes(), "value".getBytes()); - checkpoint = Checkpoint.create(db); - checkpoint.createCheckpoint(checkpointFolder. - getRoot().getAbsolutePath() + "/snapshot1"); - db.put("key2".getBytes(), "value2".getBytes()); - checkpoint.createCheckpoint(checkpointFolder. - getRoot().getAbsolutePath() + "/snapshot2"); - db.close(); - db = RocksDB.open(options, - checkpointFolder.getRoot().getAbsolutePath() + - "/snapshot1"); - assertThat(new String(db.get("key".getBytes()))). - isEqualTo("value"); - assertThat(db.get("key2".getBytes())).isNull(); - db.close(); - db = RocksDB.open(options, - checkpointFolder.getRoot().getAbsolutePath() + - "/snapshot2"); - assertThat(new String(db.get("key".getBytes()))). - isEqualTo("value"); - assertThat(new String(db.get("key2".getBytes()))). - isEqualTo("value2"); - } finally { - if (db != null) { - db.close(); + try (final Options options = new Options(). + setCreateIfMissing(true)) { + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + try (final Checkpoint checkpoint = Checkpoint.create(db)) { + checkpoint.createCheckpoint(checkpointFolder. 
+ getRoot().getAbsolutePath() + "/snapshot1"); + db.put("key2".getBytes(), "value2".getBytes()); + checkpoint.createCheckpoint(checkpointFolder. + getRoot().getAbsolutePath() + "/snapshot2"); + } } - if (options != null) { - options.dispose(); + + try (final RocksDB db = RocksDB.open(options, + checkpointFolder.getRoot().getAbsolutePath() + + "/snapshot1")) { + assertThat(new String(db.get("key".getBytes()))). + isEqualTo("value"); + assertThat(db.get("key2".getBytes())).isNull(); } - if (checkpoint != null) { - checkpoint.dispose(); + + try (final RocksDB db = RocksDB.open(options, + checkpointFolder.getRoot().getAbsolutePath() + + "/snapshot2")) { + assertThat(new String(db.get("key".getBytes()))). + isEqualTo("value"); + assertThat(new String(db.get("key2".getBytes()))). + isEqualTo("value2"); } } } @Test(expected = IllegalArgumentException.class) public void failIfDbIsNull() { - Checkpoint.create(null); + try (final Checkpoint checkpoint = Checkpoint.create(null)) { + + } } @Test(expected = IllegalStateException.class) public void failIfDbNotInitialized() throws RocksDBException { - RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); - db.dispose(); - Checkpoint.create(db); + try (final RocksDB db = RocksDB.open( + dbFolder.getRoot().getAbsolutePath())) { + db.close(); + Checkpoint.create(db); + } } @Test(expected = RocksDBException.class) public void failWithIllegalPath() throws RocksDBException { - RocksDB db = null; - Checkpoint checkpoint = null; - try { - db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); - checkpoint = Checkpoint.create(db); + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final Checkpoint checkpoint = Checkpoint.create(db)) { checkpoint.createCheckpoint("/Z:///:\\C:\\TZ/-"); - } finally { - if (db != null) { - db.close(); - } - if (checkpoint != null) { - checkpoint.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java 
b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java index e0ebd67ac..e2ebf13d8 100644 --- a/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java +++ b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java @@ -26,616 +26,386 @@ public class ColumnFamilyOptionsTest { @Test public void getColumnFamilyOptionsFromProps() { - ColumnFamilyOptions opt = null; - try { + Properties properties = new Properties(); + properties.put("write_buffer_size", "112"); + properties.put("max_write_buffer_number", "13"); + + try (final ColumnFamilyOptions opt = ColumnFamilyOptions. + getColumnFamilyOptionsFromProps(properties)) { // setup sample properties - Properties properties = new Properties(); - properties.put("write_buffer_size", "112"); - properties.put("max_write_buffer_number", "13"); - opt = ColumnFamilyOptions. - getColumnFamilyOptionsFromProps(properties); assertThat(opt).isNotNull(); assertThat(String.valueOf(opt.writeBufferSize())). isEqualTo(properties.get("write_buffer_size")); assertThat(String.valueOf(opt.maxWriteBufferNumber())). isEqualTo(properties.get("max_write_buffer_number")); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void failColumnFamilyOptionsFromPropsWithIllegalValue() { - ColumnFamilyOptions opt = null; - try { - // setup sample properties - Properties properties = new Properties(); - properties.put("tomato", "1024"); - properties.put("burger", "2"); - opt = ColumnFamilyOptions. 
- getColumnFamilyOptionsFromProps(properties); + // setup sample properties + final Properties properties = new Properties(); + properties.put("tomato", "1024"); + properties.put("burger", "2"); + + try (final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps(properties)) { assertThat(opt).isNull(); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test(expected = IllegalArgumentException.class) public void failColumnFamilyOptionsFromPropsWithNullValue() { - ColumnFamilyOptions.getColumnFamilyOptionsFromProps(null); + try (final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps(null)) { + } } @Test(expected = IllegalArgumentException.class) public void failColumnFamilyOptionsFromPropsWithEmptyProps() { - ColumnFamilyOptions.getColumnFamilyOptionsFromProps( - new Properties()); + try (final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps( + new Properties())) { + } } @Test public void writeBufferSize() throws RocksDBException { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - long longValue = rand.nextLong(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); opt.setWriteBufferSize(longValue); assertThat(opt.writeBufferSize()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxWriteBufferNumber() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setMaxWriteBufferNumber(intValue); assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void minWriteBufferNumberToMerge() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + 
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setMinWriteBufferNumberToMerge(intValue); assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void numLevels() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setNumLevels(intValue); assertThat(opt.numLevels()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void levelZeroFileNumCompactionTrigger() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setLevelZeroFileNumCompactionTrigger(intValue); assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void levelZeroSlowdownWritesTrigger() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setLevelZeroSlowdownWritesTrigger(intValue); assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void levelZeroStopWritesTrigger() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setLevelZeroStopWritesTrigger(intValue); assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); 
- } } } @Test public void targetFileSizeBase() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - long longValue = rand.nextLong(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); opt.setTargetFileSizeBase(longValue); assertThat(opt.targetFileSizeBase()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void targetFileSizeMultiplier() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setTargetFileSizeMultiplier(intValue); assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxBytesForLevelBase() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - long longValue = rand.nextLong(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); opt.setMaxBytesForLevelBase(longValue); assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void levelCompactionDynamicLevelBytes() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { final boolean boolValue = rand.nextBoolean(); opt.setLevelCompactionDynamicLevelBytes(boolValue); assertThat(opt.levelCompactionDynamicLevelBytes()) .isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxBytesForLevelMultiplier() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); 
opt.setMaxBytesForLevelMultiplier(intValue); assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void expandedCompactionFactor() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setExpandedCompactionFactor(intValue); assertThat(opt.expandedCompactionFactor()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void sourceCompactionFactor() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setSourceCompactionFactor(intValue); assertThat(opt.sourceCompactionFactor()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxGrandparentOverlapFactor() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setMaxGrandparentOverlapFactor(intValue); assertThat(opt.maxGrandparentOverlapFactor()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void softRateLimit() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - double doubleValue = rand.nextDouble(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final double doubleValue = rand.nextDouble(); opt.setSoftRateLimit(doubleValue); assertThat(opt.softRateLimit()).isEqualTo(doubleValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void hardRateLimit() { - ColumnFamilyOptions opt = null; - try { - opt = new 
ColumnFamilyOptions(); - double doubleValue = rand.nextDouble(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final double doubleValue = rand.nextDouble(); opt.setHardRateLimit(doubleValue); assertThat(opt.hardRateLimit()).isEqualTo(doubleValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void rateLimitDelayMaxMilliseconds() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setRateLimitDelayMaxMilliseconds(intValue); assertThat(opt.rateLimitDelayMaxMilliseconds()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void arenaBlockSize() throws RocksDBException { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - long longValue = rand.nextLong(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); opt.setArenaBlockSize(longValue); assertThat(opt.arenaBlockSize()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void disableAutoCompactions() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - boolean boolValue = rand.nextBoolean(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setDisableAutoCompactions(boolValue); assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void purgeRedundantKvsWhileFlush() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - boolean boolValue = rand.nextBoolean(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setPurgeRedundantKvsWhileFlush(boolValue); 
assertThat(opt.purgeRedundantKvsWhileFlush()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void verifyChecksumsInCompaction() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - boolean boolValue = rand.nextBoolean(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setVerifyChecksumsInCompaction(boolValue); assertThat(opt.verifyChecksumsInCompaction()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void filterDeletes() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - boolean boolValue = rand.nextBoolean(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setFilterDeletes(boolValue); assertThat(opt.filterDeletes()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxSequentialSkipInIterations() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - long longValue = rand.nextLong(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); opt.setMaxSequentialSkipInIterations(longValue); assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void inplaceUpdateSupport() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - boolean boolValue = rand.nextBoolean(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setInplaceUpdateSupport(boolValue); assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void inplaceUpdateNumLocks() throws RocksDBException { - ColumnFamilyOptions opt = 
null; - try { - opt = new ColumnFamilyOptions(); - long longValue = rand.nextLong(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); opt.setInplaceUpdateNumLocks(longValue); assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void memtablePrefixBloomBits() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); - int intValue = rand.nextInt(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setMemtablePrefixBloomBits(intValue); assertThat(opt.memtablePrefixBloomBits()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void memtablePrefixBloomProbes() { - ColumnFamilyOptions opt = null; - try { - int intValue = rand.nextInt(); - opt = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setMemtablePrefixBloomProbes(intValue); assertThat(opt.memtablePrefixBloomProbes()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void bloomLocality() { - ColumnFamilyOptions opt = null; - try { - int intValue = rand.nextInt(); - opt = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setBloomLocality(intValue); assertThat(opt.bloomLocality()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxSuccessiveMerges() throws RocksDBException { - ColumnFamilyOptions opt = null; - try { - long longValue = rand.nextLong(); - opt = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); opt.setMaxSuccessiveMerges(longValue); 
assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void minPartialMergeOperands() { - ColumnFamilyOptions opt = null; - try { - int intValue = rand.nextInt(); - opt = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); opt.setMinPartialMergeOperands(intValue); assertThat(opt.minPartialMergeOperands()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void optimizeFiltersForHits() { - ColumnFamilyOptions opt = null; - try { - boolean aBoolean = rand.nextBoolean(); - opt = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean aBoolean = rand.nextBoolean(); opt.setOptimizeFiltersForHits(aBoolean); assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void memTable() throws RocksDBException { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { opt.setMemTableConfig(new HashLinkedListMemTableConfig()); assertThat(opt.memTableFactoryName()). 
isEqualTo("HashLinkedListRepFactory"); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void comparator() throws RocksDBException { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { opt.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void linkageOfPrepMethods() { - ColumnFamilyOptions options = null; - try { - options = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { options.optimizeUniversalStyleCompaction(); options.optimizeUniversalStyleCompaction(4000); options.optimizeLevelStyleCompaction(); options.optimizeLevelStyleCompaction(3000); options.optimizeForPointLookup(10); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void shouldSetTestPrefixExtractor() { - ColumnFamilyOptions options = null; - try { - options = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { options.useFixedLengthPrefixExtractor(100); options.useFixedLengthPrefixExtractor(10); - } finally { - if (options != null) { - options.dispose(); - } } } - @Test public void shouldSetTestCappedPrefixExtractor() { - ColumnFamilyOptions options = null; - try { - options = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { options.useCappedPrefixExtractor(100); options.useCappedPrefixExtractor(10); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void compressionTypes() { - ColumnFamilyOptions columnFamilyOptions = null; - try { - columnFamilyOptions = new ColumnFamilyOptions(); - for (CompressionType compressionType : + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { + for (final CompressionType compressionType : CompressionType.values()) { 
columnFamilyOptions.setCompressionType(compressionType); assertThat(columnFamilyOptions.compressionType()). @@ -643,21 +413,16 @@ public class ColumnFamilyOptionsTest { assertThat(CompressionType.valueOf("NO_COMPRESSION")). isEqualTo(CompressionType.NO_COMPRESSION); } - } finally { - if (columnFamilyOptions != null) { - columnFamilyOptions.dispose(); - } } } @Test public void compressionPerLevel() { - ColumnFamilyOptions columnFamilyOptions = null; - try { - columnFamilyOptions = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty(); List compressionTypeList = new ArrayList<>(); - for (int i=0; i < columnFamilyOptions.numLevels(); i++) { + for (int i = 0; i < columnFamilyOptions.numLevels(); i++) { compressionTypeList.add(CompressionType.NO_COMPRESSION); } columnFamilyOptions.setCompressionPerLevel(compressionTypeList); @@ -666,18 +431,13 @@ public class ColumnFamilyOptionsTest { assertThat(compressionType).isEqualTo( CompressionType.NO_COMPRESSION); } - } finally { - if (columnFamilyOptions != null) { - columnFamilyOptions.dispose(); - } } } @Test public void differentCompressionsPerLevel() { - ColumnFamilyOptions columnFamilyOptions = null; - try { - columnFamilyOptions = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { columnFamilyOptions.setNumLevels(3); assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty(); @@ -697,38 +457,27 @@ public class ColumnFamilyOptionsTest { CompressionType.SNAPPY_COMPRESSION, CompressionType.LZ4_COMPRESSION); - } finally { - if (columnFamilyOptions != null) { - columnFamilyOptions.dispose(); - } } } @Test public void compactionStyles() { - ColumnFamilyOptions ColumnFamilyOptions = null; - try { - ColumnFamilyOptions = new ColumnFamilyOptions(); - for (CompactionStyle compactionStyle : + try (final ColumnFamilyOptions columnFamilyOptions + = 
new ColumnFamilyOptions()) { + for (final CompactionStyle compactionStyle : CompactionStyle.values()) { - ColumnFamilyOptions.setCompactionStyle(compactionStyle); - assertThat(ColumnFamilyOptions.compactionStyle()). + columnFamilyOptions.setCompactionStyle(compactionStyle); + assertThat(columnFamilyOptions.compactionStyle()). isEqualTo(compactionStyle); assertThat(CompactionStyle.valueOf("FIFO")). isEqualTo(CompactionStyle.FIFO); } - } finally { - if (ColumnFamilyOptions != null) { - ColumnFamilyOptions.dispose(); - } } } @Test public void maxTableFilesSizeFIFO() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { long longValue = rand.nextLong(); // Size has to be positive longValue = (longValue < 0) ? -longValue : longValue; @@ -736,10 +485,6 @@ public class ColumnFamilyOptionsTest { opt.setMaxTableFilesSizeFIFO(longValue); assertThat(opt.maxTableFilesSizeFIFO()). isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java index af0d71e70..c5b4fe96a 100644 --- a/java/src/test/java/org/rocksdb/ColumnFamilyTest.java +++ b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java @@ -25,430 +25,350 @@ public class ColumnFamilyTest { @Test public void listColumnFamilies() throws RocksDBException { - RocksDB db = null; - Options options = null; - try { - options = new Options(); - options.setCreateIfMissing(true); - - DBOptions dbOptions = new DBOptions(); - dbOptions.setCreateIfMissing(true); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { // Test listColumnFamilies - List columnFamilyNames; - columnFamilyNames = RocksDB.listColumnFamilies(options, 
dbFolder.getRoot().getAbsolutePath()); + final List columnFamilyNames = RocksDB.listColumnFamilies(options, + dbFolder.getRoot().getAbsolutePath()); assertThat(columnFamilyNames).isNotNull(); assertThat(columnFamilyNames.size()).isGreaterThan(0); assertThat(columnFamilyNames.size()).isEqualTo(1); assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default"); - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } } } @Test public void defaultColumnFamily() throws RocksDBException { - RocksDB db = null; - Options options = null; - ColumnFamilyHandle cfh; - try { - options = new Options().setCreateIfMissing(true); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + final ColumnFamilyHandle cfh = db.getDefaultColumnFamily(); + try { + assertThat(cfh).isNotNull(); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - cfh = db.getDefaultColumnFamily(); - assertThat(cfh).isNotNull(); + final byte[] key = "key".getBytes(); + final byte[] value = "value".getBytes(); - final byte[] key = "key".getBytes(); - final byte[] value = "value".getBytes(); + db.put(cfh, key, value); - db.put(cfh, key, value); + final byte[] actualValue = db.get(cfh, key); - final byte[] actualValue = db.get(cfh, key); - - assertThat(cfh).isNotNull(); - assertThat(actualValue).isEqualTo(value); - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + assertThat(cfh).isNotNull(); + assertThat(actualValue).isEqualTo(value); + } finally { + cfh.close(); } } } @Test public void createColumnFamily() throws RocksDBException { - RocksDB db = null; - Options options = null; - ColumnFamilyHandle columnFamilyHandle = null; - try { - options = new Options(); - options.setCreateIfMissing(true); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - columnFamilyHandle = 
db.createColumnFamily( - new ColumnFamilyDescriptor("new_cf".getBytes(), new ColumnFamilyOptions())); - - List columnFamilyNames; - columnFamilyNames = RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath()); - assertThat(columnFamilyNames).isNotNull(); - assertThat(columnFamilyNames.size()).isGreaterThan(0); - assertThat(columnFamilyNames.size()).isEqualTo(2); - assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default"); - assertThat(new String(columnFamilyNames.get(1))).isEqualTo("new_cf"); - } finally { - if (columnFamilyHandle != null) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf".getBytes(), + new ColumnFamilyOptions())); + try { + final List columnFamilyNames = RocksDB.listColumnFamilies( + options, dbFolder.getRoot().getAbsolutePath()); + assertThat(columnFamilyNames).isNotNull(); + assertThat(columnFamilyNames.size()).isGreaterThan(0); + assertThat(columnFamilyNames.size()).isEqualTo(2); + assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default"); + assertThat(new String(columnFamilyNames.get(1))).isEqualTo("new_cf"); + } finally { + columnFamilyHandle.close(); } } } @Test public void openWithColumnFamilies() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - // Test open database with column family names - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); + final 
List cfNames = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes()) + ); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - assertThat(columnFamilyHandleList.size()).isEqualTo(2); - db.put("dfkey1".getBytes(), "dfvalue".getBytes()); - db.put(columnFamilyHandleList.get(0), "dfkey2".getBytes(), - "dfvalue".getBytes()); - db.put(columnFamilyHandleList.get(1), "newcfkey1".getBytes(), - "newcfvalue".getBytes()); + final List columnFamilyHandleList = + new ArrayList<>(); - String retVal = new String(db.get(columnFamilyHandleList.get(1), - "newcfkey1".getBytes())); - assertThat(retVal).isEqualTo("newcfvalue"); - assertThat((db.get(columnFamilyHandleList.get(1), - "dfkey1".getBytes()))).isNull(); - db.remove(columnFamilyHandleList.get(1), "newcfkey1".getBytes()); - assertThat((db.get(columnFamilyHandleList.get(1), - "newcfkey1".getBytes()))).isNull(); - db.remove(columnFamilyHandleList.get(0), new WriteOptions(), - "dfkey2".getBytes()); - assertThat(db.get(columnFamilyHandleList.get(0), new ReadOptions(), - "dfkey2".getBytes())).isNull(); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + // Test open database with column family names + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfNames, + columnFamilyHandleList)) { + + try { + assertThat(columnFamilyHandleList.size()).isEqualTo(2); + db.put("dfkey1".getBytes(), "dfvalue".getBytes()); + db.put(columnFamilyHandleList.get(0), "dfkey2".getBytes(), + "dfvalue".getBytes()); + db.put(columnFamilyHandleList.get(1), "newcfkey1".getBytes(), + "newcfvalue".getBytes()); + + String retVal = new 
String(db.get(columnFamilyHandleList.get(1), + "newcfkey1".getBytes())); + assertThat(retVal).isEqualTo("newcfvalue"); + assertThat((db.get(columnFamilyHandleList.get(1), + "dfkey1".getBytes()))).isNull(); + db.remove(columnFamilyHandleList.get(1), "newcfkey1".getBytes()); + assertThat((db.get(columnFamilyHandleList.get(1), + "newcfkey1".getBytes()))).isNull(); + db.remove(columnFamilyHandleList.get(0), new WriteOptions(), + "dfkey2".getBytes()); + assertThat(db.get(columnFamilyHandleList.get(0), new ReadOptions(), + "dfkey2".getBytes())).isNull(); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @Test public void getWithOutValueAndCf() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfDescriptors = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - // Test open database with column family names - cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfDescriptors, columnFamilyHandleList); - db.put(columnFamilyHandleList.get(0), new WriteOptions(), - "key1".getBytes(), "value".getBytes()); - db.put("key2".getBytes(), "12345678".getBytes()); - byte[] outValue = new byte[5]; - // not found value - int getResult = db.get("keyNotFound".getBytes(), outValue); - assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND); - // found value which fits in outValue - getResult = db.get(columnFamilyHandleList.get(0), "key1".getBytes(), outValue); - assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); - assertThat(outValue).isEqualTo("value".getBytes()); - // found value which fits partially - getResult = db.get(columnFamilyHandleList.get(0), new ReadOptions(), - "key2".getBytes(), outValue); - 
assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); - assertThat(outValue).isEqualTo("12345".getBytes()); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); + final List columnFamilyHandleList = new ArrayList<>(); + + // Test open database with column family names + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + try { + db.put(columnFamilyHandleList.get(0), new WriteOptions(), + "key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + final byte[] outValue = new byte[5]; + // not found value + int getResult = db.get("keyNotFound".getBytes(), outValue); + assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND); + // found value which fits in outValue + getResult = db.get(columnFamilyHandleList.get(0), "key1".getBytes(), + outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("value".getBytes()); + // found value which fits partially + getResult = db.get(columnFamilyHandleList.get(0), new ReadOptions(), + "key2".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("12345".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @Test public void createWriteDropColumnFamily() throws RocksDBException { - RocksDB db = null; - DBOptions opt = null; - ColumnFamilyHandle tmpColumnFamilyHandle = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - 
try { - opt = new DBOptions(); - opt.setCreateIfMissing(true); - opt.setCreateMissingColumnFamilies(true); - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - tmpColumnFamilyHandle = db.createColumnFamily( - new ColumnFamilyDescriptor("tmpCF".getBytes(), new ColumnFamilyOptions())); - db.put(tmpColumnFamilyHandle, "key".getBytes(), "value".getBytes()); - db.dropColumnFamily(tmpColumnFamilyHandle); - tmpColumnFamilyHandle.dispose(); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (tmpColumnFamilyHandle != null) { - tmpColumnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + ColumnFamilyHandle tmpColumnFamilyHandle = null; + try { + tmpColumnFamilyHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("tmpCF".getBytes(), + new ColumnFamilyOptions())); + db.put(tmpColumnFamilyHandle, "key".getBytes(), "value".getBytes()); + db.dropColumnFamily(tmpColumnFamilyHandle); + } finally { + if (tmpColumnFamilyHandle != null) { + tmpColumnFamilyHandle.close(); + } + for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @Test public void writeBatch() throws RocksDBException { - RocksDB db = null; - DBOptions opt = null; - List cfNames = - new 
ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - opt = new DBOptions(); - opt.setCreateIfMissing(true); - opt.setCreateMissingColumnFamilies(true); + try (final ColumnFamilyOptions defaultCfOptions = new ColumnFamilyOptions() + .setMergeOperator(new StringAppendOperator())) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, + defaultCfOptions), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList); + final WriteBatch writeBatch = new WriteBatch(); + final WriteOptions writeOpt = new WriteOptions()) { + try { + writeBatch.put("key".getBytes(), "value".getBytes()); + writeBatch.put(db.getDefaultColumnFamily(), + "mergeKey".getBytes(), "merge".getBytes()); + writeBatch.merge(db.getDefaultColumnFamily(), "mergeKey".getBytes(), + "merge".getBytes()); + writeBatch.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), + "value".getBytes()); + writeBatch.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(), + "value2".getBytes()); + writeBatch.remove("xyz".getBytes()); + writeBatch.remove(columnFamilyHandleList.get(1), "xyz".getBytes()); + db.write(writeOpt, writeBatch); - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions().setMergeOperator(new StringAppendOperator()))); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - - WriteBatch writeBatch = new WriteBatch(); - WriteOptions writeOpt = new WriteOptions(); - writeBatch.put("key".getBytes(), "value".getBytes()); - writeBatch.put(db.getDefaultColumnFamily(), - 
"mergeKey".getBytes(), "merge".getBytes()); - writeBatch.merge(db.getDefaultColumnFamily(), "mergeKey".getBytes(), - "merge".getBytes()); - writeBatch.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), - "value".getBytes()); - writeBatch.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(), - "value2".getBytes()); - writeBatch.remove("xyz".getBytes()); - writeBatch.remove(columnFamilyHandleList.get(1), "xyz".getBytes()); - db.write(writeOpt, writeBatch); - writeBatch.dispose(); - assertThat(db.get(columnFamilyHandleList.get(1), - "xyz".getBytes()) == null); - assertThat(new String(db.get(columnFamilyHandleList.get(1), - "newcfkey".getBytes()))).isEqualTo("value"); - assertThat(new String(db.get(columnFamilyHandleList.get(1), - "newcfkey2".getBytes()))).isEqualTo("value2"); - assertThat(new String(db.get("key".getBytes()))).isEqualTo("value"); - // check if key is merged - assertThat(new String(db.get(db.getDefaultColumnFamily(), - "mergeKey".getBytes()))).isEqualTo("merge,merge"); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + assertThat(db.get(columnFamilyHandleList.get(1), + "xyz".getBytes()) == null); + assertThat(new String(db.get(columnFamilyHandleList.get(1), + "newcfkey".getBytes()))).isEqualTo("value"); + assertThat(new String(db.get(columnFamilyHandleList.get(1), + "newcfkey2".getBytes()))).isEqualTo("value2"); + assertThat(new String(db.get("key".getBytes()))).isEqualTo("value"); + // check if key is merged + assertThat(new String(db.get(db.getDefaultColumnFamily(), + "mergeKey".getBytes()))).isEqualTo("merge,merge"); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } } } } @Test public void iteratorOnColumnFamily() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - RocksIterator 
rocksIterator = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), - "value".getBytes()); - db.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(), - "value2".getBytes()); - rocksIterator = db.newIterator( - columnFamilyHandleList.get(1)); - rocksIterator.seekToFirst(); - Map refMap = new HashMap<>(); - refMap.put("newcfkey", "value"); - refMap.put("newcfkey2", "value2"); - int i = 0; - while (rocksIterator.isValid()) { - i++; - assertThat(refMap.get(new String(rocksIterator.key()))). 
- isEqualTo(new String(rocksIterator.value())); - rocksIterator.next(); - } - assertThat(i).isEqualTo(2); - rocksIterator.dispose(); - } finally { - if (rocksIterator != null) { - rocksIterator.dispose(); - } - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), + "value".getBytes()); + db.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(), + "value2".getBytes()); + try (final RocksIterator rocksIterator = + db.newIterator(columnFamilyHandleList.get(1))) { + rocksIterator.seekToFirst(); + Map refMap = new HashMap<>(); + refMap.put("newcfkey", "value"); + refMap.put("newcfkey2", "value2"); + int i = 0; + while (rocksIterator.isValid()) { + i++; + assertThat(refMap.get(new String(rocksIterator.key()))). + isEqualTo(new String(rocksIterator.value())); + rocksIterator.next(); + } + assertThat(i).isEqualTo(2); + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @Test public void multiGet() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfDescriptors = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { + db.put(columnFamilyHandleList.get(0), 
"key".getBytes(), + "value".getBytes()); + db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), + "value".getBytes()); - cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfDescriptors.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfDescriptors, columnFamilyHandleList); - db.put(columnFamilyHandleList.get(0), "key".getBytes(), "value".getBytes()); - db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), "value".getBytes()); - - List keys = Arrays.asList(new byte[][]{"key".getBytes(), "newcfkey".getBytes()}); - Map retValues = db.multiGet(columnFamilyHandleList, keys); - assertThat(retValues.size()).isEqualTo(2); - assertThat(new String(retValues.get(keys.get(0)))) - .isEqualTo("value"); - assertThat(new String(retValues.get(keys.get(1)))) - .isEqualTo("value"); - retValues = db.multiGet(new ReadOptions(), columnFamilyHandleList, keys); - assertThat(retValues.size()).isEqualTo(2); - assertThat(new String(retValues.get(keys.get(0)))) - .isEqualTo("value"); - assertThat(new String(retValues.get(keys.get(1)))) - .isEqualTo("value"); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + final List keys = Arrays.asList(new byte[][]{ + "key".getBytes(), "newcfkey".getBytes() + }); + Map retValues = db.multiGet(columnFamilyHandleList, + keys); + assertThat(retValues.size()).isEqualTo(2); + assertThat(new String(retValues.get(keys.get(0)))) + .isEqualTo("value"); + assertThat(new String(retValues.get(keys.get(1)))) + .isEqualTo("value"); + retValues = db.multiGet(new ReadOptions(), columnFamilyHandleList, + keys); + assertThat(retValues.size()).isEqualTo(2); + assertThat(new String(retValues.get(keys.get(0)))) + .isEqualTo("value"); + assertThat(new String(retValues.get(keys.get(1)))) + 
.isEqualTo("value"); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @Test public void properties() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - assertThat(db.getProperty("rocksdb.estimate-num-keys")). - isNotNull(); - assertThat(db.getLongProperty(columnFamilyHandleList.get(0), - "rocksdb.estimate-num-keys")).isGreaterThanOrEqualTo(0); - assertThat(db.getProperty("rocksdb.stats")).isNotNull(); - assertThat(db.getProperty(columnFamilyHandleList.get(0), - "rocksdb.sstables")).isNotNull(); - assertThat(db.getProperty(columnFamilyHandleList.get(1), - "rocksdb.estimate-num-keys")).isNotNull(); - assertThat(db.getProperty(columnFamilyHandleList.get(1), - "rocksdb.stats")).isNotNull(); - assertThat(db.getProperty(columnFamilyHandleList.get(1), - "rocksdb.sstables")).isNotNull(); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, 
columnFamilyHandleList)) { + try { + assertThat(db.getProperty("rocksdb.estimate-num-keys")). + isNotNull(); + assertThat(db.getLongProperty(columnFamilyHandleList.get(0), + "rocksdb.estimate-num-keys")).isGreaterThanOrEqualTo(0); + assertThat(db.getProperty("rocksdb.stats")).isNotNull(); + assertThat(db.getProperty(columnFamilyHandleList.get(0), + "rocksdb.sstables")).isNotNull(); + assertThat(db.getProperty(columnFamilyHandleList.get(1), + "rocksdb.estimate-num-keys")).isNotNull(); + assertThat(db.getProperty(columnFamilyHandleList.get(1), + "rocksdb.stats")).isNotNull(); + assertThat(db.getProperty(columnFamilyHandleList.get(1), + "rocksdb.sstables")).isNotNull(); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @@ -456,289 +376,230 @@ public class ColumnFamilyTest { @Test public void iterators() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - List iterators = null; - try { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - iterators = db.newIterators(columnFamilyHandleList); - assertThat(iterators.size()).isEqualTo(2); - RocksIterator iter = iterators.get(0); - iter.seekToFirst(); - Map defRefMap = new HashMap<>(); - defRefMap.put("dfkey1", "dfvalue"); - defRefMap.put("key", "value"); - while (iter.isValid()) { - assertThat(defRefMap.get(new String(iter.key()))). 
- isEqualTo(new String(iter.value())); - iter.next(); - } - // iterate over new_cf key/value pairs - Map cfRefMap = new HashMap<>(); - cfRefMap.put("newcfkey", "value"); - cfRefMap.put("newcfkey2", "value2"); - iter = iterators.get(1); - iter.seekToFirst(); - while (iter.isValid()) { - assertThat(cfRefMap.get(new String(iter.key()))). - isEqualTo(new String(iter.value())); - iter.next(); - } - } finally { - if (iterators != null) { - for (RocksIterator rocksIterator : iterators) { - rocksIterator.dispose(); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + List iterators = null; + try { + iterators = db.newIterators(columnFamilyHandleList); + assertThat(iterators.size()).isEqualTo(2); + RocksIterator iter = iterators.get(0); + iter.seekToFirst(); + final Map defRefMap = new HashMap<>(); + defRefMap.put("dfkey1", "dfvalue"); + defRefMap.put("key", "value"); + while (iter.isValid()) { + assertThat(defRefMap.get(new String(iter.key()))). + isEqualTo(new String(iter.value())); + iter.next(); + } + // iterate over new_cf key/value pairs + final Map cfRefMap = new HashMap<>(); + cfRefMap.put("newcfkey", "value"); + cfRefMap.put("newcfkey2", "value2"); + iter = iterators.get(1); + iter.seekToFirst(); + while (iter.isValid()) { + assertThat(cfRefMap.get(new String(iter.key()))). 
+ isEqualTo(new String(iter.value())); + iter.next(); + } + } finally { + if (iterators != null) { + for (final RocksIterator rocksIterator : iterators) { + rocksIterator.close(); + } + } + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); } - } - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); } } } @Test(expected = RocksDBException.class) public void failPutDisposedCF() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true); - - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - db.dropColumnFamily(columnFamilyHandleList.get(1)); - db.put(columnFamilyHandleList.get(1), "key".getBytes(), "value".getBytes()); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { + db.dropColumnFamily(columnFamilyHandleList.get(1)); + db.put(columnFamilyHandleList.get(1), "key".getBytes(), + "value".getBytes()); + } finally { + for 
(ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @Test(expected = RocksDBException.class) public void failRemoveDisposedCF() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true); - - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - db.dropColumnFamily(columnFamilyHandleList.get(1)); - db.remove(columnFamilyHandleList.get(1), "key".getBytes()); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { + db.dropColumnFamily(columnFamilyHandleList.get(1)); + db.remove(columnFamilyHandleList.get(1), "key".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @Test(expected = RocksDBException.class) public void failGetDisposedCF() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true); - - cfNames.add(new 
ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - db.dropColumnFamily(columnFamilyHandleList.get(1)); - db.get(columnFamilyHandleList.get(1), "key".getBytes()); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + try { + db.dropColumnFamily(columnFamilyHandleList.get(1)); + db.get(columnFamilyHandleList.get(1), "key".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @Test(expected = RocksDBException.class) public void failMultiGetWithoutCorrectNumberOfCF() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true); + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + try { + final List keys = 
new ArrayList<>(); + keys.add("key".getBytes()); + keys.add("newcfkey".getBytes()); + final List cfCustomList = new ArrayList<>(); + db.multiGet(cfCustomList, keys); - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList); - List keys = new ArrayList<>(); - keys.add("key".getBytes()); - keys.add("newcfkey".getBytes()); - List cfCustomList = new ArrayList<>(); - db.multiGet(cfCustomList, keys); - - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @Test public void testByteCreateFolumnFamily() throws RocksDBException { - RocksDB db = null; - Options options = null; - ColumnFamilyHandle cf1 = null, cf2 = null, cf3 = null; - try { - options = new Options().setCreateIfMissing(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - byte[] b0 = new byte[] { (byte)0x00 }; - byte[] b1 = new byte[] { (byte)0x01 }; - byte[] b2 = new byte[] { (byte)0x02 }; - cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0)); - cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1)); - List families = RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath()); - assertThat(families).contains("default".getBytes(), b0, b1); - cf3 = db.createColumnFamily(new ColumnFamilyDescriptor(b2)); - } finally { - if (cf1 != null) { - cf1.dispose(); - } - if (cf2 != null) { - cf2.dispose(); - } - if (cf3 != null) { - cf3.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + try (final Options options = new Options().setCreateIfMissing(true); + 
final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + final byte[] b0 = new byte[]{(byte) 0x00}; + final byte[] b1 = new byte[]{(byte) 0x01}; + final byte[] b2 = new byte[]{(byte) 0x02}; + ColumnFamilyHandle cf1 = null, cf2 = null, cf3 = null; + try { + cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0)); + cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1)); + final List families = RocksDB.listColumnFamilies(options, + dbFolder.getRoot().getAbsolutePath()); + assertThat(families).contains("default".getBytes(), b0, b1); + cf3 = db.createColumnFamily(new ColumnFamilyDescriptor(b2)); + } finally { + if (cf1 != null) { + cf1.close(); + } + if (cf2 != null) { + cf2.close(); + } + if (cf3 != null) { + cf3.close(); + } } } } @Test public void testCFNamesWithZeroBytes() throws RocksDBException { - RocksDB db = null; - Options options = null; ColumnFamilyHandle cf1 = null, cf2 = null; - try { - options = new Options().setCreateIfMissing(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - - byte[] b0 = new byte[] { 0, 0 }; - byte[] b1 = new byte[] { 0, 1 }; - cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0)); - cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1)); - List families = RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath()); - assertThat(families).contains("default".getBytes(), b0, b1); - } finally { - if (cf1 != null) { - cf1.dispose(); - } - if (cf2 != null) { - cf2.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + ) { + try { + final byte[] b0 = new byte[]{0, 0}; + final byte[] b1 = new byte[]{0, 1}; + cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0)); + cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1)); + final List families = 
RocksDB.listColumnFamilies(options, + dbFolder.getRoot().getAbsolutePath()); + assertThat(families).contains("default".getBytes(), b0, b1); + } finally { + if (cf1 != null) { + cf1.close(); + } + if (cf2 != null) { + cf2.close(); + } } } } @Test public void testCFNameSimplifiedChinese() throws RocksDBException { - RocksDB db = null; - Options options = null; ColumnFamilyHandle columnFamilyHandle = null; - try { - options = new Options().setCreateIfMissing(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - final String simplifiedChinese = "\u7b80\u4f53\u5b57"; - columnFamilyHandle = db.createColumnFamily( - new ColumnFamilyDescriptor(simplifiedChinese.getBytes())); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + ) { + try { + final String simplifiedChinese = "\u7b80\u4f53\u5b57"; + columnFamilyHandle = db.createColumnFamily( + new ColumnFamilyDescriptor(simplifiedChinese.getBytes())); - List families = RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath()); - assertThat(families).contains("default".getBytes(), simplifiedChinese.getBytes()); - } finally { - if (columnFamilyHandle != null) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + final List families = RocksDB.listColumnFamilies(options, + dbFolder.getRoot().getAbsolutePath()); + assertThat(families).contains("default".getBytes(), + simplifiedChinese.getBytes()); + } finally { + if (columnFamilyHandle != null) { + columnFamilyHandle.close(); + } } } - - } } diff --git a/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java b/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java index 2a86515e3..fcdd09acb 100644 --- a/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java +++ b/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java @@ -18,18 +18,15 @@ public class 
ComparatorOptionsTest { @Test public void comparatorOptions() { - final ComparatorOptions copt = new ComparatorOptions(); + try(final ComparatorOptions copt = new ComparatorOptions()) { - assertThat(copt).isNotNull(); - - { // UseAdaptiveMutex test + assertThat(copt).isNotNull(); + // UseAdaptiveMutex test copt.setUseAdaptiveMutex(true); assertThat(copt.useAdaptiveMutex()).isTrue(); copt.setUseAdaptiveMutex(false); assertThat(copt.useAdaptiveMutex()).isFalse(); } - - copt.dispose(); } } diff --git a/java/src/test/java/org/rocksdb/ComparatorTest.java b/java/src/test/java/org/rocksdb/ComparatorTest.java index d4cea0cb8..b34821844 100644 --- a/java/src/test/java/org/rocksdb/ComparatorTest.java +++ b/java/src/test/java/org/rocksdb/ComparatorTest.java @@ -79,66 +79,52 @@ public class ComparatorTest { @Test public void builtinForwardComparator() throws RocksDBException { - Options options = null; - RocksDB rocksDB = null; - RocksIterator rocksIterator = null; - try { - options = new Options(); - options.setCreateIfMissing(true); - options.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR); - rocksDB = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(BuiltinComparator.BYTEWISE_COMPARATOR); + final RocksDB rocksDb = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + rocksDb.put("abc1".getBytes(), "abc1".getBytes()); + rocksDb.put("abc2".getBytes(), "abc2".getBytes()); + rocksDb.put("abc3".getBytes(), "abc3".getBytes()); - rocksDB.put("abc1".getBytes(), "abc1".getBytes()); - rocksDB.put("abc2".getBytes(), "abc2".getBytes()); - rocksDB.put("abc3".getBytes(), "abc3".getBytes()); - - rocksIterator = rocksDB.newIterator(); - // Iterate over keys using a iterator - rocksIterator.seekToFirst(); - assertThat(rocksIterator.isValid()).isTrue(); - assertThat(rocksIterator.key()).isEqualTo( - "abc1".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - 
"abc1".getBytes()); - rocksIterator.next(); - assertThat(rocksIterator.isValid()).isTrue(); - assertThat(rocksIterator.key()).isEqualTo( - "abc2".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - "abc2".getBytes()); - rocksIterator.next(); - assertThat(rocksIterator.isValid()).isTrue(); - assertThat(rocksIterator.key()).isEqualTo( - "abc3".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - "abc3".getBytes()); - rocksIterator.next(); - assertThat(rocksIterator.isValid()).isFalse(); - // Get last one - rocksIterator.seekToLast(); - assertThat(rocksIterator.isValid()).isTrue(); - assertThat(rocksIterator.key()).isEqualTo( - "abc3".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - "abc3".getBytes()); - // Seek for abc - rocksIterator.seek("abc".getBytes()); - assertThat(rocksIterator.isValid()).isTrue(); - assertThat(rocksIterator.key()).isEqualTo( - "abc1".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - "abc1".getBytes()); - - } finally { - if (rocksIterator != null) { - rocksIterator.dispose(); - } - if (rocksDB != null) { - rocksDB.close(); - } - if (options != null) { - options.dispose(); + try(final RocksIterator rocksIterator = rocksDb.newIterator()) { + // Iterate over keys using a iterator + rocksIterator.seekToFirst(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc1".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc1".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc2".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc2".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc3".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc3".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isFalse(); + // Get last one + 
rocksIterator.seekToLast(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc3".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc3".getBytes()); + // Seek for abc + rocksIterator.seek("abc".getBytes()); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc1".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc1".getBytes()); } } } @@ -146,69 +132,56 @@ public class ComparatorTest { @Test public void builtinReverseComparator() throws RocksDBException { - Options options = null; - RocksDB rocksDB = null; - RocksIterator rocksIterator = null; - try { - options = new Options(); - options.setCreateIfMissing(true); - options.setComparator( - BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR); - rocksDB = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR); + final RocksDB rocksDb = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { - rocksDB.put("abc1".getBytes(), "abc1".getBytes()); - rocksDB.put("abc2".getBytes(), "abc2".getBytes()); - rocksDB.put("abc3".getBytes(), "abc3".getBytes()); + rocksDb.put("abc1".getBytes(), "abc1".getBytes()); + rocksDb.put("abc2".getBytes(), "abc2".getBytes()); + rocksDb.put("abc3".getBytes(), "abc3".getBytes()); - rocksIterator = rocksDB.newIterator(); - // Iterate over keys using a iterator - rocksIterator.seekToFirst(); - assertThat(rocksIterator.isValid()).isTrue(); - assertThat(rocksIterator.key()).isEqualTo( - "abc3".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - "abc3".getBytes()); - rocksIterator.next(); - assertThat(rocksIterator.isValid()).isTrue(); - assertThat(rocksIterator.key()).isEqualTo( - "abc2".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - "abc2".getBytes()); - rocksIterator.next(); - 
assertThat(rocksIterator.isValid()).isTrue(); - assertThat(rocksIterator.key()).isEqualTo( - "abc1".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - "abc1".getBytes()); - rocksIterator.next(); - assertThat(rocksIterator.isValid()).isFalse(); - // Get last one - rocksIterator.seekToLast(); - assertThat(rocksIterator.isValid()).isTrue(); - assertThat(rocksIterator.key()).isEqualTo( - "abc1".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - "abc1".getBytes()); - // Will be invalid because abc is after abc1 - rocksIterator.seek("abc".getBytes()); - assertThat(rocksIterator.isValid()).isFalse(); - // Will be abc3 because the next one after abc999 - // is abc3 - rocksIterator.seek("abc999".getBytes()); - assertThat(rocksIterator.key()).isEqualTo( - "abc3".getBytes()); - assertThat(rocksIterator.value()).isEqualTo( - "abc3".getBytes()); - } finally { - if (rocksIterator != null) { - rocksIterator.dispose(); - } - if (rocksDB != null) { - rocksDB.close(); - } - if (options != null) { - options.dispose(); + try (final RocksIterator rocksIterator = rocksDb.newIterator()) { + // Iterate over keys using a iterator + rocksIterator.seekToFirst(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc3".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc3".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc2".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc2".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc1".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc1".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isFalse(); + // Get last one + rocksIterator.seekToLast(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + 
"abc1".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc1".getBytes()); + // Will be invalid because abc is after abc1 + rocksIterator.seek("abc".getBytes()); + assertThat(rocksIterator.isValid()).isFalse(); + // Will be abc3 because the next one after abc999 + // is abc3 + rocksIterator.seek("abc999".getBytes()); + assertThat(rocksIterator.key()).isEqualTo( + "abc3".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc3".getBytes()); } } } diff --git a/java/src/test/java/org/rocksdb/CompressionOptionsTest.java b/java/src/test/java/org/rocksdb/CompressionOptionsTest.java index 2e2633524..51b7259f6 100644 --- a/java/src/test/java/org/rocksdb/CompressionOptionsTest.java +++ b/java/src/test/java/org/rocksdb/CompressionOptionsTest.java @@ -8,11 +8,10 @@ package org.rocksdb; import org.junit.Test; -public class CompressionOptionsTest -{ +public class CompressionOptionsTest { @Test public void getCompressionType() { - for (CompressionType compressionType : CompressionType.values()) { + for (final CompressionType compressionType : CompressionType.values()) { String libraryName = compressionType.getLibraryName(); compressionType.equals(CompressionType.getCompressionType( libraryName)); diff --git a/java/src/test/java/org/rocksdb/DBOptionsTest.java b/java/src/test/java/org/rocksdb/DBOptionsTest.java index 7cb29a4a5..523e53784 100644 --- a/java/src/test/java/org/rocksdb/DBOptionsTest.java +++ b/java/src/test/java/org/rocksdb/DBOptionsTest.java @@ -24,547 +24,339 @@ public class DBOptionsTest { @Test public void getDBOptionsFromProps() { - DBOptions opt = null; - try { - // setup sample properties - Properties properties = new Properties(); - properties.put("allow_mmap_reads", "true"); - properties.put("bytes_per_sync", "13"); - opt = DBOptions.getDBOptionsFromProps(properties); + // setup sample properties + final Properties properties = new Properties(); + properties.put("allow_mmap_reads", "true"); + properties.put("bytes_per_sync", "13"); + 
try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) { assertThat(opt).isNotNull(); assertThat(String.valueOf(opt.allowMmapReads())). isEqualTo(properties.get("allow_mmap_reads")); assertThat(String.valueOf(opt.bytesPerSync())). isEqualTo(properties.get("bytes_per_sync")); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void failDBOptionsFromPropsWithIllegalValue() { - DBOptions opt = null; - try { - // setup sample properties - Properties properties = new Properties(); - properties.put("tomato", "1024"); - properties.put("burger", "2"); - opt = DBOptions. - getDBOptionsFromProps(properties); + // setup sample properties + final Properties properties = new Properties(); + properties.put("tomato", "1024"); + properties.put("burger", "2"); + try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) { assertThat(opt).isNull(); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test(expected = IllegalArgumentException.class) public void failDBOptionsFromPropsWithNullValue() { - DBOptions.getDBOptionsFromProps(null); + try(final DBOptions opt = DBOptions.getDBOptionsFromProps(null)) { + //no-op + } } @Test(expected = IllegalArgumentException.class) public void failDBOptionsFromPropsWithEmptyProps() { - DBOptions.getDBOptionsFromProps( - new Properties()); + try(final DBOptions opt = DBOptions.getDBOptionsFromProps( + new Properties())) { + //no-op + } } @Test public void setIncreaseParallelism() { - DBOptions opt = null; - try { - opt = new DBOptions(); + try(final DBOptions opt = new DBOptions()) { final int threads = Runtime.getRuntime().availableProcessors() * 2; opt.setIncreaseParallelism(threads); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void createIfMissing() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); 
opt.setCreateIfMissing(boolValue); - assertThat(opt.createIfMissing()). - isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.createIfMissing()).isEqualTo(boolValue); } } @Test public void createMissingColumnFamilies() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setCreateMissingColumnFamilies(boolValue); - assertThat(opt.createMissingColumnFamilies()). - isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.createMissingColumnFamilies()).isEqualTo(boolValue); } } @Test public void errorIfExists() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setErrorIfExists(boolValue); assertThat(opt.errorIfExists()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void paranoidChecks() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setParanoidChecks(boolValue); - assertThat(opt.paranoidChecks()). - isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.paranoidChecks()).isEqualTo(boolValue); } } @Test public void maxTotalWalSize() { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setMaxTotalWalSize(longValue); - assertThat(opt.maxTotalWalSize()). 
- isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.maxTotalWalSize()).isEqualTo(longValue); } } @Test public void maxOpenFiles() { - DBOptions opt = null; - try { - opt = new DBOptions(); - int intValue = rand.nextInt(); + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); opt.setMaxOpenFiles(intValue); assertThat(opt.maxOpenFiles()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void disableDataSync() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setDisableDataSync(boolValue); - assertThat(opt.disableDataSync()). - isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.disableDataSync()).isEqualTo(boolValue); } } @Test public void useFsync() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setUseFsync(boolValue); assertThat(opt.useFsync()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void dbLogDir() { - DBOptions opt = null; - try { - opt = new DBOptions(); - String str = "path/to/DbLogDir"; + try(final DBOptions opt = new DBOptions()) { + final String str = "path/to/DbLogDir"; opt.setDbLogDir(str); assertThat(opt.dbLogDir()).isEqualTo(str); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void walDir() { - DBOptions opt = null; - try { - opt = new DBOptions(); - String str = "path/to/WalDir"; + try(final DBOptions opt = new DBOptions()) { + final String str = "path/to/WalDir"; opt.setWalDir(str); assertThat(opt.walDir()).isEqualTo(str); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void 
deleteObsoleteFilesPeriodMicros() { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setDeleteObsoleteFilesPeriodMicros(longValue); - assertThat(opt.deleteObsoleteFilesPeriodMicros()). - isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.deleteObsoleteFilesPeriodMicros()).isEqualTo(longValue); } } @Test public void maxBackgroundCompactions() { - DBOptions opt = null; - try { - opt = new DBOptions(); - int intValue = rand.nextInt(); + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); opt.setMaxBackgroundCompactions(intValue); - assertThat(opt.maxBackgroundCompactions()). - isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.maxBackgroundCompactions()).isEqualTo(intValue); } } @Test public void maxBackgroundFlushes() { - DBOptions opt = null; - try { - opt = new DBOptions(); - int intValue = rand.nextInt(); + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); opt.setMaxBackgroundFlushes(intValue); - assertThat(opt.maxBackgroundFlushes()). 
- isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.maxBackgroundFlushes()).isEqualTo(intValue); } } @Test public void maxLogFileSize() throws RocksDBException { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setMaxLogFileSize(longValue); assertThat(opt.maxLogFileSize()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void logFileTimeToRoll() throws RocksDBException { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setLogFileTimeToRoll(longValue); - assertThat(opt.logFileTimeToRoll()). - isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.logFileTimeToRoll()).isEqualTo(longValue); } } @Test public void keepLogFileNum() throws RocksDBException { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setKeepLogFileNum(longValue); assertThat(opt.keepLogFileNum()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxManifestFileSize() { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setMaxManifestFileSize(longValue); - assertThat(opt.maxManifestFileSize()). 
- isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.maxManifestFileSize()).isEqualTo(longValue); } } @Test public void tableCacheNumshardbits() { - DBOptions opt = null; - try { - opt = new DBOptions(); - int intValue = rand.nextInt(); + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); opt.setTableCacheNumshardbits(intValue); - assertThat(opt.tableCacheNumshardbits()). - isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.tableCacheNumshardbits()).isEqualTo(intValue); } } @Test public void walSizeLimitMB() { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setWalSizeLimitMB(longValue); assertThat(opt.walSizeLimitMB()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void walTtlSeconds() { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setWalTtlSeconds(longValue); assertThat(opt.walTtlSeconds()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void manifestPreallocationSize() throws RocksDBException { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setManifestPreallocationSize(longValue); - assertThat(opt.manifestPreallocationSize()). 
- isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } + assertThat(opt.manifestPreallocationSize()).isEqualTo(longValue); } } @Test public void allowOsBuffer() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setAllowOsBuffer(boolValue); assertThat(opt.allowOsBuffer()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void allowMmapReads() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setAllowMmapReads(boolValue); assertThat(opt.allowMmapReads()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void allowMmapWrites() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setAllowMmapWrites(boolValue); assertThat(opt.allowMmapWrites()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void isFdCloseOnExec() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setIsFdCloseOnExec(boolValue); assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void statsDumpPeriodSec() { - DBOptions opt = null; - try { - opt = new DBOptions(); - int intValue = rand.nextInt(); + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); opt.setStatsDumpPeriodSec(intValue); assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue); - } finally 
{ - if (opt != null) { - opt.dispose(); - } } } @Test public void adviseRandomOnOpen() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setAdviseRandomOnOpen(boolValue); assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void useAdaptiveMutex() { - DBOptions opt = null; - try { - opt = new DBOptions(); - boolean boolValue = rand.nextBoolean(); + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); opt.setUseAdaptiveMutex(boolValue); assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void bytesPerSync() { - DBOptions opt = null; - try { - opt = new DBOptions(); - long longValue = rand.nextLong(); + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); opt.setBytesPerSync(longValue); assertThat(opt.bytesPerSync()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void rateLimiterConfig() { - DBOptions options = null; - DBOptions anotherOptions = null; - try { - options = new DBOptions(); - RateLimiterConfig rateLimiterConfig = + try(final DBOptions options = new DBOptions(); + final DBOptions anotherOptions = new DBOptions()) { + final RateLimiterConfig rateLimiterConfig = new GenericRateLimiterConfig(1000, 100 * 1000, 1); options.setRateLimiterConfig(rateLimiterConfig); // Test with parameter initialization - anotherOptions = new DBOptions(); + anotherOptions.setRateLimiterConfig( new GenericRateLimiterConfig(1000)); - } finally { - if (options != null) { - options.dispose(); - } - if (anotherOptions != null) { - anotherOptions.dispose(); - } } } @Test public void statistics() { - DBOptions options = new DBOptions(); - Statistics statistics = 
options.createStatistics(). - statisticsPtr(); - assertThat(statistics).isNotNull(); + try(final DBOptions options = new DBOptions()) { + Statistics statistics = options.createStatistics(). + statisticsPtr(); + assertThat(statistics).isNotNull(); - DBOptions anotherOptions = new DBOptions(); - statistics = anotherOptions.statisticsPtr(); - assertThat(statistics).isNotNull(); + try(final DBOptions anotherOptions = new DBOptions()) { + statistics = anotherOptions.statisticsPtr(); + assertThat(statistics).isNotNull(); + } + } } } diff --git a/java/src/test/java/org/rocksdb/DirectSliceTest.java b/java/src/test/java/org/rocksdb/DirectSliceTest.java index 615adab38..2d3abea45 100644 --- a/java/src/test/java/org/rocksdb/DirectSliceTest.java +++ b/java/src/test/java/org/rocksdb/DirectSliceTest.java @@ -18,11 +18,8 @@ public class DirectSliceTest { @Test public void directSlice() { - DirectSlice directSlice = null; - DirectSlice otherSlice = null; - try { - directSlice = new DirectSlice("abc"); - otherSlice = new DirectSlice("abc"); + try(final DirectSlice directSlice = new DirectSlice("abc"); + final DirectSlice otherSlice = new DirectSlice("abc")) { assertThat(directSlice.toString()).isEqualTo("abc"); // clear first slice directSlice.clear(); @@ -32,75 +29,46 @@ public class DirectSliceTest { // remove prefix otherSlice.removePrefix(1); assertThat(otherSlice.toString()).isEqualTo("bc"); - } finally { - if (directSlice != null) { - directSlice.dispose(); - } - if (otherSlice != null) { - otherSlice.dispose(); - } } } @Test public void directSliceWithByteBuffer() { - DirectSlice directSlice = null; - try { - byte[] data = "Some text".getBytes(); - ByteBuffer buffer = ByteBuffer.allocateDirect(data.length + 1); - buffer.put(data); - buffer.put(data.length, (byte)0); + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length + 1); + buffer.put(data); + buffer.put(data.length, (byte)0); - directSlice = new 
DirectSlice(buffer); + try(final DirectSlice directSlice = new DirectSlice(buffer)) { assertThat(directSlice.toString()).isEqualTo("Some text"); - } finally { - if (directSlice != null) { - directSlice.dispose(); - } } } @Test public void directSliceWithByteBufferAndLength() { - DirectSlice directSlice = null; - try { - byte[] data = "Some text".getBytes(); - ByteBuffer buffer = ByteBuffer.allocateDirect(data.length); - buffer.put(data); - directSlice = new DirectSlice(buffer, 4); + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length); + buffer.put(data); + try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) { assertThat(directSlice.toString()).isEqualTo("Some"); - } finally { - if (directSlice != null) { - directSlice.dispose(); - } } } @Test(expected = AssertionError.class) public void directSliceInitWithoutDirectAllocation() { - DirectSlice directSlice = null; - try { - byte[] data = "Some text".getBytes(); - ByteBuffer buffer = ByteBuffer.wrap(data); - directSlice = new DirectSlice(buffer); - } finally { - if (directSlice != null) { - directSlice.dispose(); - } + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.wrap(data); + try(final DirectSlice directSlice = new DirectSlice(buffer)) { + //no-op } } @Test(expected = AssertionError.class) public void directSlicePrefixInitWithoutDirectAllocation() { - DirectSlice directSlice = null; - try { - byte[] data = "Some text".getBytes(); - ByteBuffer buffer = ByteBuffer.wrap(data); - directSlice = new DirectSlice(buffer, 4); - } finally { - if (directSlice != null) { - directSlice.dispose(); - } + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.wrap(data); + try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) { + //no-op } } } diff --git a/java/src/test/java/org/rocksdb/FilterTest.java b/java/src/test/java/org/rocksdb/FilterTest.java index d5a1830b3..e5bb60fda 100644 --- 
a/java/src/test/java/org/rocksdb/FilterTest.java +++ b/java/src/test/java/org/rocksdb/FilterTest.java @@ -16,31 +16,23 @@ public class FilterTest { @Test public void filter() { - Options options = null; - try { - options = new Options(); - // test table config - options.setTableFormatConfig(new BlockBasedTableConfig(). - setFilter(new BloomFilter())); - options.dispose(); - System.gc(); - System.runFinalization(); - // new Bloom filter - options = new Options(); - BlockBasedTableConfig blockConfig = new BlockBasedTableConfig(); - blockConfig.setFilter(new BloomFilter()); - options.setTableFormatConfig(blockConfig); - BloomFilter bloomFilter = new BloomFilter(10); - blockConfig.setFilter(bloomFilter); - options.setTableFormatConfig(blockConfig); - System.gc(); - System.runFinalization(); - blockConfig.setFilter(new BloomFilter(10, false)); - options.setTableFormatConfig(blockConfig); + // new Bloom filter + final BlockBasedTableConfig blockConfig = new BlockBasedTableConfig(); + try(final Options options = new Options()) { - } finally { - if (options != null) { - options.dispose(); + try(final Filter bloomFilter = new BloomFilter()) { + blockConfig.setFilter(bloomFilter); + options.setTableFormatConfig(blockConfig); + } + + try(final Filter bloomFilter = new BloomFilter(10)) { + blockConfig.setFilter(bloomFilter); + options.setTableFormatConfig(blockConfig); + } + + try(final Filter bloomFilter = new BloomFilter(10, false)) { + blockConfig.setFilter(bloomFilter); + options.setTableFormatConfig(blockConfig); } } } diff --git a/java/src/test/java/org/rocksdb/FlushTest.java b/java/src/test/java/org/rocksdb/FlushTest.java index 094910f27..f3530292a 100644 --- a/java/src/test/java/org/rocksdb/FlushTest.java +++ b/java/src/test/java/org/rocksdb/FlushTest.java @@ -22,44 +22,28 @@ public class FlushTest { @Test public void flush() throws RocksDBException { - RocksDB db = null; - Options options = null; - WriteOptions wOpt = null; - FlushOptions flushOptions = null; - try { 
- options = new Options(); - // Setup options - options.setCreateIfMissing(true); - options.setMaxWriteBufferNumber(10); - options.setMinWriteBufferNumberToMerge(10); - wOpt = new WriteOptions(); - flushOptions = new FlushOptions(); - flushOptions.setWaitForFlush(true); + try(final Options options = new Options() + .setCreateIfMissing(true) + .setMaxWriteBufferNumber(10) + .setMinWriteBufferNumberToMerge(10); + final WriteOptions wOpt = new WriteOptions() + .setDisableWAL(true); + final FlushOptions flushOptions = new FlushOptions() + .setWaitForFlush(true)) { assertThat(flushOptions.waitForFlush()).isTrue(); - wOpt.setDisableWAL(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - db.put(wOpt, "key1".getBytes(), "value1".getBytes()); - db.put(wOpt, "key2".getBytes(), "value2".getBytes()); - db.put(wOpt, "key3".getBytes(), "value3".getBytes()); - db.put(wOpt, "key4".getBytes(), "value4".getBytes()); - assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")).isEqualTo("4"); - db.flush(flushOptions); - assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")). 
- isEqualTo("0"); - } finally { - if (flushOptions != null) { - flushOptions.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } - if (wOpt != null) { - wOpt.dispose(); - } + try(final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put(wOpt, "key1".getBytes(), "value1".getBytes()); + db.put(wOpt, "key2".getBytes(), "value2".getBytes()); + db.put(wOpt, "key3".getBytes(), "value3".getBytes()); + db.put(wOpt, "key4".getBytes(), "value4".getBytes()); + assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")) + .isEqualTo("4"); + db.flush(flushOptions); + assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")) + .isEqualTo("0"); + } } } } diff --git a/java/src/test/java/org/rocksdb/InfoLogLevelTest.java b/java/src/test/java/org/rocksdb/InfoLogLevelTest.java index 71a032a0b..48ecfa16a 100644 --- a/java/src/test/java/org/rocksdb/InfoLogLevelTest.java +++ b/java/src/test/java/org/rocksdb/InfoLogLevelTest.java @@ -24,81 +24,52 @@ public class InfoLogLevelTest { @Test public void testInfoLogLevel() throws RocksDBException, IOException { - RocksDB db = null; - try { - db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + try (final RocksDB db = + RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { db.put("key".getBytes(), "value".getBytes()); assertThat(getLogContentsWithoutHeader()).isNotEmpty(); - } finally { - if (db != null) { - db.close(); - } } } @Test - public void testFatalLogLevel() throws RocksDBException, + public void testFatalLogLevel() throws RocksDBException, IOException { - RocksDB db = null; - Options options = null; - try { - options = new Options(). - setCreateIfMissing(true). - setInfoLogLevel(InfoLogLevel.FATAL_LEVEL); + try (final Options options = new Options(). + setCreateIfMissing(true). 
+ setInfoLogLevel(InfoLogLevel.FATAL_LEVEL); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { assertThat(options.infoLogLevel()). isEqualTo(InfoLogLevel.FATAL_LEVEL); - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); db.put("key".getBytes(), "value".getBytes()); // As InfoLogLevel is set to FATAL_LEVEL, here we expect the log // content to be empty. assertThat(getLogContentsWithoutHeader()).isEmpty(); - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } } } @Test public void testFatalLogLevelWithDBOptions() throws RocksDBException, IOException { - RocksDB db = null; - Options options = null; - DBOptions dbOptions = null; - try { - dbOptions = new DBOptions(). - setInfoLogLevel(InfoLogLevel.FATAL_LEVEL); - options = new Options(dbOptions, - new ColumnFamilyOptions()). - setCreateIfMissing(true); + try (final DBOptions dbOptions = new DBOptions(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL); + final Options options = new Options(dbOptions, + new ColumnFamilyOptions()). + setCreateIfMissing(true); + final RocksDB db = + RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { assertThat(dbOptions.infoLogLevel()). isEqualTo(InfoLogLevel.FATAL_LEVEL); assertThat(options.infoLogLevel()). isEqualTo(InfoLogLevel.FATAL_LEVEL); - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); db.put("key".getBytes(), "value".getBytes()); assertThat(getLogContentsWithoutHeader()).isEmpty(); - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } - if (dbOptions != null) { - dbOptions.dispose(); - } } } @Test(expected = IllegalArgumentException.class) public void failIfIllegalByteValueProvided() { - InfoLogLevel.getInfoLogLevel((byte)-1); + InfoLogLevel.getInfoLogLevel((byte) -1); } @Test @@ -114,9 +85,10 @@ public class InfoLogLevelTest { * @throws IOException if file is not found. 
*/ private String getLogContentsWithoutHeader() throws IOException { - final String separator = Environment.isWindows() ? "\n" : System.getProperty("line.separator"); + final String separator = Environment.isWindows() ? + "\n" : System.getProperty("line.separator"); final String[] lines = new String(readAllBytes(get( - dbFolder.getRoot().getAbsolutePath()+ "/LOG"))).split(separator); + dbFolder.getRoot().getAbsolutePath() + "/LOG"))).split(separator); int first_non_header = lines.length; // Identify the last line of the header diff --git a/java/src/test/java/org/rocksdb/KeyMayExistTest.java b/java/src/test/java/org/rocksdb/KeyMayExistTest.java index a39ddbb21..bc341c9d2 100644 --- a/java/src/test/java/org/rocksdb/KeyMayExistTest.java +++ b/java/src/test/java/org/rocksdb/KeyMayExistTest.java @@ -10,6 +10,7 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import static org.assertj.core.api.Assertions.assertThat; @@ -25,70 +26,61 @@ public class KeyMayExistTest { @Test public void keyMayExist() throws RocksDBException { - RocksDB db = null; - DBOptions options = null; - List cfDescriptors = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - try { - options = new DBOptions(); - options.setCreateIfMissing(true) - .setCreateMissingColumnFamilies(true); - // open database using cf names + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes()) + ); - cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfDescriptors.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath(), - cfDescriptors, columnFamilyHandleList); - assertThat(columnFamilyHandleList.size()). 
- isEqualTo(2); - db.put("key".getBytes(), "value".getBytes()); - // Test without column family - StringBuffer retValue = new StringBuffer(); - boolean exists = db.keyMayExist("key".getBytes(), retValue); - assertThat(exists).isTrue(); - assertThat(retValue.toString()). - isEqualTo("value"); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { + assertThat(columnFamilyHandleList.size()). + isEqualTo(2); + db.put("key".getBytes(), "value".getBytes()); + // Test without column family + StringBuffer retValue = new StringBuffer(); + boolean exists = db.keyMayExist("key".getBytes(), retValue); + assertThat(exists).isTrue(); + assertThat(retValue.toString()).isEqualTo("value"); - // Test without column family but with readOptions - retValue = new StringBuffer(); - exists = db.keyMayExist(new ReadOptions(), "key".getBytes(), - retValue); - assertThat(exists).isTrue(); - assertThat(retValue.toString()). - isEqualTo("value"); + // Test without column family but with readOptions + try (final ReadOptions readOptions = new ReadOptions()) { + retValue = new StringBuffer(); + exists = db.keyMayExist(readOptions, "key".getBytes(), retValue); + assertThat(exists).isTrue(); + assertThat(retValue.toString()).isEqualTo("value"); + } - // Test with column family - retValue = new StringBuffer(); - exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(), - retValue); - assertThat(exists).isTrue(); - assertThat(retValue.toString()). 
- isEqualTo("value"); + // Test with column family + retValue = new StringBuffer(); + exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(), + retValue); + assertThat(exists).isTrue(); + assertThat(retValue.toString()).isEqualTo("value"); - // Test with column family and readOptions - retValue = new StringBuffer(); - exists = db.keyMayExist(new ReadOptions(), - columnFamilyHandleList.get(0), "key".getBytes(), - retValue); - assertThat(exists).isTrue(); - assertThat(retValue.toString()). - isEqualTo("value"); + // Test with column family and readOptions + try (final ReadOptions readOptions = new ReadOptions()) { + retValue = new StringBuffer(); + exists = db.keyMayExist(readOptions, + columnFamilyHandleList.get(0), "key".getBytes(), + retValue); + assertThat(exists).isTrue(); + assertThat(retValue.toString()).isEqualTo("value"); + } - // KeyMayExist in CF1 must return false - assertThat(db.keyMayExist(columnFamilyHandleList.get(1), - "key".getBytes(), retValue)).isFalse(); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + // KeyMayExist in CF1 must return false + assertThat(db.keyMayExist(columnFamilyHandleList.get(1), + "key".getBytes(), retValue)).isFalse(); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } diff --git a/java/src/test/java/org/rocksdb/LoggerTest.java b/java/src/test/java/org/rocksdb/LoggerTest.java index 2eff3191a..3e6d359be 100644 --- a/java/src/test/java/org/rocksdb/LoggerTest.java +++ b/java/src/test/java/org/rocksdb/LoggerTest.java @@ -6,6 +6,7 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; @@ -19,202 +20,165 @@ public class LoggerTest { @Rule 
public TemporaryFolder dbFolder = new TemporaryFolder(); - private AtomicInteger logMessageCounter = new AtomicInteger(); - @Test public void customLogger() throws RocksDBException { - RocksDB db = null; - logMessageCounter.set(0); - try { - - // Setup options - final Options options = new Options(). - setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL). - setCreateIfMissing(true); - - // Create new logger with max log level passed by options - Logger logger = new Logger(options) { - @Override - protected void log(InfoLogLevel infoLogLevel, String logMsg) { - assertThat(logMsg).isNotNull(); - assertThat(logMsg.length()).isGreaterThan(0); - logMessageCounter.incrementAndGet(); - } - }; - + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). + setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL). + setCreateIfMissing(true); + final Logger logger = new Logger(options) { + // Create new logger with max log level passed by options + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { // Set custom logger to options options.setLogger(logger); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - - // there should be more than zero received log messages in - // debug level. - assertThat(logMessageCounter.get()).isGreaterThan(0); - } finally { - if (db != null) { - db.close(); + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + // there should be more than zero received log messages in + // debug level. + assertThat(logMessageCounter.get()).isGreaterThan(0); } } - logMessageCounter.set(0); } @Test public void fatalLogger() throws RocksDBException { - RocksDB db = null; - logMessageCounter.set(0); + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). 
+ setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). + setCreateIfMissing(true); - try { - // Setup options - final Options options = new Options(). - setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). - setCreateIfMissing(true); - - // Create new logger with max log level passed by options - Logger logger = new Logger(options) { - @Override - protected void log(InfoLogLevel infoLogLevel, String logMsg) { - assertThat(logMsg).isNotNull(); - assertThat(logMsg.length()).isGreaterThan(0); - logMessageCounter.incrementAndGet(); - } - }; + final Logger logger = new Logger(options) { + // Create new logger with max log level passed by options + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { // Set custom logger to options options.setLogger(logger); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - - // there should be zero messages - // using fatal level as log level. - assertThat(logMessageCounter.get()).isEqualTo(0); - } finally { - if (db != null) { - db.close(); + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + // there should be zero messages + // using fatal level as log level. + assertThat(logMessageCounter.get()).isEqualTo(0); } } - logMessageCounter.set(0); } @Test public void dbOptionsLogger() throws RocksDBException { - RocksDB db = null; - Logger logger = null; - List cfHandles = new ArrayList<>(); - List cfDescriptors = new ArrayList<>(); - cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - - logMessageCounter.set(0); - try { - // Setup options - final DBOptions options = new DBOptions(). - setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). 
- setCreateIfMissing(true); - - // Create new logger with max log level passed by options - logger = new Logger(options) { - @Override - protected void log(InfoLogLevel infoLogLevel, String logMsg) { - assertThat(logMsg).isNotNull(); - assertThat(logMsg.length()).isGreaterThan(0); - logMessageCounter.incrementAndGet(); - } - }; - + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final DBOptions options = new DBOptions(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). + setCreateIfMissing(true); + final Logger logger = new Logger(options) { + // Create new logger with max log level passed by options + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { // Set custom logger to options options.setLogger(logger); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), - cfDescriptors, cfHandles); - // there should be zero messages - // using fatal level as log level. - assertThat(logMessageCounter.get()).isEqualTo(0); - logMessageCounter.set(0); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : cfHandles) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (logger != null) { - logger.dispose(); + + final List cfDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); + final List cfHandles = new ArrayList<>(); + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, cfHandles)) { + try { + // there should be zero messages + // using fatal level as log level. + assertThat(logMessageCounter.get()).isEqualTo(0); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : cfHandles) { + columnFamilyHandle.close(); + } + } } } } @Test public void setInfoLogLevel() { - Logger logger = null; - try { - // Setup options - final Options options = new Options(). 
- setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). - setCreateIfMissing(true); - - // Create new logger with max log level passed by options - logger = new Logger(options) { - @Override - protected void log(InfoLogLevel infoLogLevel, String logMsg) { - assertThat(logMsg).isNotNull(); - assertThat(logMsg.length()).isGreaterThan(0); - logMessageCounter.incrementAndGet(); - } - }; + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). + setCreateIfMissing(true); + final Logger logger = new Logger(options) { + // Create new logger with max log level passed by options + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { assertThat(logger.infoLogLevel()). isEqualTo(InfoLogLevel.FATAL_LEVEL); logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL); assertThat(logger.infoLogLevel()). isEqualTo(InfoLogLevel.DEBUG_LEVEL); - } finally { - if (logger != null) { - logger.dispose(); - } } } @Test public void changeLogLevelAtRuntime() throws RocksDBException { - RocksDB db = null; - logMessageCounter.set(0); - - try { - // Setup options - final Options options = new Options(). - setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). - setCreateIfMissing(true); - - // Create new logger with max log level passed by options - Logger logger = new Logger(options) { - @Override - protected void log(InfoLogLevel infoLogLevel, String logMsg) { - assertThat(logMsg).isNotNull(); - assertThat(logMsg.length()).isGreaterThan(0); - logMessageCounter.incrementAndGet(); - } - }; + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). 
+ setCreateIfMissing(true); + // Create new logger with max log level passed by options + final Logger logger = new Logger(options) { + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { // Set custom logger to options options.setLogger(logger); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - // there should be zero messages - // using fatal level as log level. - assertThat(logMessageCounter.get()).isEqualTo(0); + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { - // change log level to debug level - logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL); + // there should be zero messages + // using fatal level as log level. + assertThat(logMessageCounter.get()).isEqualTo(0); - db.put("key".getBytes(), "value".getBytes()); - db.flush(new FlushOptions().setWaitForFlush(true)); + // change log level to debug level + logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL); - // messages shall be received due to previous actions. - assertThat(logMessageCounter.get()).isNotEqualTo(0); + db.put("key".getBytes(), "value".getBytes()); + db.flush(new FlushOptions().setWaitForFlush(true)); - } finally { - if (db != null) { - db.close(); + // messages shall be received due to previous actions. 
+ assertThat(logMessageCounter.get()).isNotEqualTo(0); } } - logMessageCounter.set(0); } } diff --git a/java/src/test/java/org/rocksdb/MemTableTest.java b/java/src/test/java/org/rocksdb/MemTableTest.java index b54f583d0..bbd5e2055 100644 --- a/java/src/test/java/org/rocksdb/MemTableTest.java +++ b/java/src/test/java/org/rocksdb/MemTableTest.java @@ -18,9 +18,7 @@ public class MemTableTest { @Test public void hashSkipListMemTable() throws RocksDBException { - Options options = null; - try { - options = new Options(); + try(final Options options = new Options()) { // Test HashSkipListMemTableConfig HashSkipListMemTableConfig memTableConfig = new HashSkipListMemTableConfig(); @@ -40,18 +38,12 @@ public class MemTableTest { assertThat(memTableConfig.branchingFactor()). isEqualTo(6); options.setMemTableConfig(memTableConfig); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void skipListMemTable() throws RocksDBException { - Options options = null; - try { - options = new Options(); + try(final Options options = new Options()) { SkipListMemTableConfig skipMemTableConfig = new SkipListMemTableConfig(); assertThat(skipMemTableConfig.lookahead()). @@ -60,19 +52,12 @@ public class MemTableTest { assertThat(skipMemTableConfig.lookahead()). isEqualTo(20); options.setMemTableConfig(skipMemTableConfig); - options.dispose(); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void hashLinkedListMemTable() throws RocksDBException { - Options options = null; - try { - options = new Options(); + try(final Options options = new Options()) { HashLinkedListMemTableConfig hashLinkedListMemTableConfig = new HashLinkedListMemTableConfig(); assertThat(hashLinkedListMemTableConfig.bucketCount()). @@ -107,18 +92,12 @@ public class MemTableTest { thresholdUseSkiplist()). 
isEqualTo(29); options.setMemTableConfig(hashLinkedListMemTableConfig); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void vectorMemTable() throws RocksDBException { - Options options = null; - try { - options = new Options(); + try(final Options options = new Options()) { VectorMemTableConfig vectorMemTableConfig = new VectorMemTableConfig(); assertThat(vectorMemTableConfig.reservedSize()). @@ -127,11 +106,6 @@ public class MemTableTest { assertThat(vectorMemTableConfig.reservedSize()). isEqualTo(123); options.setMemTableConfig(vectorMemTableConfig); - options.dispose(); - } finally { - if (options != null) { - options.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/MergeTest.java b/java/src/test/java/org/rocksdb/MergeTest.java index 9eec4e1eb..d38df3195 100644 --- a/java/src/test/java/org/rocksdb/MergeTest.java +++ b/java/src/test/java/org/rocksdb/MergeTest.java @@ -5,6 +5,7 @@ package org.rocksdb; +import java.util.Arrays; import java.util.List; import java.util.ArrayList; @@ -27,78 +28,60 @@ public class MergeTest { @Test public void stringOption() throws InterruptedException, RocksDBException { - RocksDB db = null; - Options opt = null; - try { - String db_path_string = - dbFolder.getRoot().getAbsolutePath(); - opt = new Options(); - opt.setCreateIfMissing(true); - opt.setMergeOperatorName("stringappend"); - - db = RocksDB.open(opt, db_path_string); + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperatorName("stringappend"); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { // writing aa under key db.put("key".getBytes(), "aa".getBytes()); // merge bb under key db.merge("key".getBytes(), "bb".getBytes()); - byte[] value = db.get("key".getBytes()); - String strValue = new String(value); + final byte[] value = db.get("key".getBytes()); + final String strValue = new String(value); assertThat(strValue).isEqualTo("aa,bb"); - } finally { - if (db != 
null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } @Test public void cFStringOption() throws InterruptedException, RocksDBException { - RocksDB db = null; - DBOptions opt = null; - List columnFamilyHandleList = - new ArrayList<>(); - try { - String db_path_string = - dbFolder.getRoot().getAbsolutePath(); - opt = new DBOptions(); - opt.setCreateIfMissing(true); - opt.setCreateMissingColumnFamilies(true); - List cfDescriptors = - new ArrayList<>(); - cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions().setMergeOperatorName( - "stringappend"))); - cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions().setMergeOperatorName( - "stringappend"))); - db = RocksDB.open(opt, db_path_string, - cfDescriptors, columnFamilyHandleList); + try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions() + .setMergeOperatorName("stringappend"); + final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions() + .setMergeOperatorName("stringappend") + ) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1), + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt2) + ); - // writing aa under key - db.put(columnFamilyHandleList.get(1), - "cfkey".getBytes(), "aa".getBytes()); - // merge bb under key - db.merge(columnFamilyHandleList.get(1), - "cfkey".getBytes(), "bb".getBytes()); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions opt = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + try { + // writing aa under key + db.put(columnFamilyHandleList.get(1), + "cfkey".getBytes(), "aa".getBytes()); + // merge bb under key + db.merge(columnFamilyHandleList.get(1), + "cfkey".getBytes(), "bb".getBytes()); - byte[] value = 
db.get(columnFamilyHandleList.get(1), "cfkey".getBytes()); - String strValue = new String(value); - assertThat(strValue).isEqualTo("aa,bb"); - } finally { - for (ColumnFamilyHandle handle : columnFamilyHandleList) { - handle.dispose(); - } - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + byte[] value = db.get(columnFamilyHandleList.get(1), + "cfkey".getBytes()); + String strValue = new String(value); + assertThat(strValue).isEqualTo("aa,bb"); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandleList) { + handle.close(); + } + } } } } @@ -106,99 +89,85 @@ public class MergeTest { @Test public void operatorOption() throws InterruptedException, RocksDBException { - RocksDB db = null; - Options opt = null; - try { - String db_path_string = - dbFolder.getRoot().getAbsolutePath(); - opt = new Options(); - opt.setCreateIfMissing(true); - - StringAppendOperator stringAppendOperator = new StringAppendOperator(); - opt.setMergeOperator(stringAppendOperator); - - db = RocksDB.open(opt, db_path_string); + final StringAppendOperator stringAppendOperator = + new StringAppendOperator(); + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperator(stringAppendOperator); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { // Writing aa under key db.put("key".getBytes(), "aa".getBytes()); // Writing bb under key db.merge("key".getBytes(), "bb".getBytes()); - byte[] value = db.get("key".getBytes()); - String strValue = new String(value); + final byte[] value = db.get("key".getBytes()); + final String strValue = new String(value); assertThat(strValue).isEqualTo("aa,bb"); - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } @Test public void cFOperatorOption() throws InterruptedException, RocksDBException { - RocksDB db = null; - DBOptions opt = null; - ColumnFamilyHandle cfHandle = null; - List cfDescriptors = - new ArrayList<>(); - 
List columnFamilyHandleList = - new ArrayList<>(); - try { - String db_path_string = - dbFolder.getRoot().getAbsolutePath(); - opt = new DBOptions(); - opt.setCreateIfMissing(true); - opt.setCreateMissingColumnFamilies(true); - StringAppendOperator stringAppendOperator = new StringAppendOperator(); + final StringAppendOperator stringAppendOperator = + new StringAppendOperator(); + try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions() + .setMergeOperator(stringAppendOperator); + final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions() + .setMergeOperator(stringAppendOperator) + ) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1), + new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpt2) + ); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions opt = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList) + ) { + try { + // writing aa under key + db.put(columnFamilyHandleList.get(1), + "cfkey".getBytes(), "aa".getBytes()); + // merge bb under key + db.merge(columnFamilyHandleList.get(1), + "cfkey".getBytes(), "bb".getBytes()); + byte[] value = db.get(columnFamilyHandleList.get(1), + "cfkey".getBytes()); + String strValue = new String(value); - cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions().setMergeOperator( - stringAppendOperator))); - cfDescriptors.add(new ColumnFamilyDescriptor("new_cf".getBytes(), - new ColumnFamilyOptions().setMergeOperator( - stringAppendOperator))); - db = RocksDB.open(opt, db_path_string, - cfDescriptors, columnFamilyHandleList); + // Test also with createColumnFamily + try (final ColumnFamilyOptions cfHandleOpts = + new ColumnFamilyOptions() + .setMergeOperator(stringAppendOperator); + final ColumnFamilyHandle cfHandle = + 
db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf2".getBytes(), + cfHandleOpts)) + ) { + // writing xx under cfkey2 + db.put(cfHandle, "cfkey2".getBytes(), "xx".getBytes()); + // merge yy under cfkey2 + db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(), + "yy".getBytes()); + value = db.get(cfHandle, "cfkey2".getBytes()); + String strValueTmpCf = new String(value); - // writing aa under key - db.put(columnFamilyHandleList.get(1), - "cfkey".getBytes(), "aa".getBytes()); - // merge bb under key - db.merge(columnFamilyHandleList.get(1), - "cfkey".getBytes(), "bb".getBytes()); - byte[] value = db.get(columnFamilyHandleList.get(1), "cfkey".getBytes()); - String strValue = new String(value); - - // Test also with createColumnFamily - cfHandle = db.createColumnFamily( - new ColumnFamilyDescriptor("new_cf2".getBytes(), - new ColumnFamilyOptions().setMergeOperator(stringAppendOperator))); - // writing xx under cfkey2 - db.put(cfHandle, "cfkey2".getBytes(), "xx".getBytes()); - // merge yy under cfkey2 - db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(), "yy".getBytes()); - value = db.get(cfHandle, "cfkey2".getBytes()); - String strValueTmpCf = new String(value); - - assertThat(strValue).isEqualTo("aa,bb"); - assertThat(strValueTmpCf).isEqualTo("xx,yy"); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (cfHandle != null) { - cfHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + assertThat(strValue).isEqualTo("aa,bb"); + assertThat(strValueTmpCf).isEqualTo("xx,yy"); + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } } } } @@ -206,97 +175,67 @@ public class MergeTest { @Test public void operatorGcBehaviour() throws RocksDBException { - Options opt = null; - RocksDB db = null; - try { - String db_path_string = - 
dbFolder.getRoot().getAbsolutePath(); - opt = new Options(); - opt.setCreateIfMissing(true); - StringAppendOperator stringAppendOperator = new StringAppendOperator(); - opt.setMergeOperator(stringAppendOperator); - db = RocksDB.open(opt, db_path_string); - db.close(); - opt.dispose(); - System.gc(); - System.runFinalization(); - // test reuse - opt = new Options(); - opt.setMergeOperator(stringAppendOperator); - db = RocksDB.open(opt, db_path_string); - db.close(); - opt.dispose(); - System.gc(); - System.runFinalization(); - // test param init - opt = new Options(); - opt.setMergeOperator(new StringAppendOperator()); - db = RocksDB.open(opt, db_path_string); - db.close(); - opt.dispose(); - System.gc(); - System.runFinalization(); - // test replace one with another merge operator instance - opt = new Options(); - opt.setMergeOperator(stringAppendOperator); - StringAppendOperator newStringAppendOperator = new StringAppendOperator(); + final StringAppendOperator stringAppendOperator + = new StringAppendOperator(); + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperator(stringAppendOperator); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + // test reuse + try (final Options opt = new Options() + .setMergeOperator(stringAppendOperator); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + // test param init + try (final Options opt = new Options() + .setMergeOperator(new StringAppendOperator()); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + // test replace one with another merge operator instance + try (final Options opt = new Options() + .setMergeOperator(stringAppendOperator)) { + final StringAppendOperator newStringAppendOperator + = new StringAppendOperator(); opt.setMergeOperator(newStringAppendOperator); - db = RocksDB.open(opt, db_path_string); - db.close(); - opt.dispose(); - } 
finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + //no-op } } } @Test public void emptyStringInSetMergeOperatorByName() { - Options opt = null; - ColumnFamilyOptions cOpt = null; - try { - opt = new Options(); - cOpt = new ColumnFamilyOptions(); - opt.setMergeOperatorName(""); - cOpt.setMergeOperatorName(""); - } finally { - if (opt != null) { - opt.dispose(); - } - if (cOpt != null) { - cOpt.dispose(); - } + try (final Options opt = new Options() + .setMergeOperatorName(""); + final ColumnFamilyOptions cOpt = new ColumnFamilyOptions() + .setMergeOperatorName("")) { + //no-op } } @Test(expected = IllegalArgumentException.class) public void nullStringInSetMergeOperatorByNameOptions() { - Options opt = null; - try { - opt = new Options(); + try (final Options opt = new Options()) { opt.setMergeOperatorName(null); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test(expected = IllegalArgumentException.class) public void - nullStringInSetMergeOperatorByNameColumnFamilyOptions() { - ColumnFamilyOptions opt = null; - try { - opt = new ColumnFamilyOptions(); + nullStringInSetMergeOperatorByNameColumnFamilyOptions() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { opt.setMergeOperatorName(null); - } finally { - if (opt != null) { - opt.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/MixedOptionsTest.java b/java/src/test/java/org/rocksdb/MixedOptionsTest.java index a3090a1b1..bbe295719 100644 --- a/java/src/test/java/org/rocksdb/MixedOptionsTest.java +++ b/java/src/test/java/org/rocksdb/MixedOptionsTest.java @@ -19,38 +19,37 @@ public class MixedOptionsTest { @Test public void mixedOptionsTest(){ // Set a table factory and check the names - ColumnFamilyOptions cfOptions = new ColumnFamilyOptions(); - cfOptions.setTableFormatConfig(new BlockBasedTableConfig(). 
- setFilter(new BloomFilter())); - assertThat(cfOptions.tableFactoryName()).isEqualTo( - "BlockBasedTable"); - cfOptions.setTableFormatConfig(new PlainTableConfig()); - assertThat(cfOptions.tableFactoryName()).isEqualTo("PlainTable"); - // Initialize a dbOptions object from cf options and - // db options - DBOptions dbOptions = new DBOptions(); - Options options = new Options(dbOptions, cfOptions); - assertThat(options.tableFactoryName()).isEqualTo("PlainTable"); - // Free instances - options.dispose(); - options = null; - cfOptions.dispose(); - cfOptions = null; - dbOptions.dispose(); - dbOptions = null; - System.gc(); - System.runFinalization(); + try(final Filter bloomFilter = new BloomFilter(); + final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions() + .setTableFormatConfig( + new BlockBasedTableConfig().setFilter(bloomFilter)) + ) { + assertThat(cfOptions.tableFactoryName()).isEqualTo( + "BlockBasedTable"); + cfOptions.setTableFormatConfig(new PlainTableConfig()); + assertThat(cfOptions.tableFactoryName()).isEqualTo("PlainTable"); + // Initialize a dbOptions object from cf options and + // db options + try (final DBOptions dbOptions = new DBOptions(); + final Options options = new Options(dbOptions, cfOptions)) { + assertThat(options.tableFactoryName()).isEqualTo("PlainTable"); + // Free instances + } + } + // Test Optimize for statements - cfOptions = new ColumnFamilyOptions(); + try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) { cfOptions.optimizeUniversalStyleCompaction(); cfOptions.optimizeLevelStyleCompaction(); cfOptions.optimizeForPointLookup(1024); - options = new Options(); - options.optimizeLevelStyleCompaction(); - options.optimizeLevelStyleCompaction(400); - options.optimizeUniversalStyleCompaction(); - options.optimizeUniversalStyleCompaction(400); - options.optimizeForPointLookup(1024); - options.prepareForBulkLoad(); + try(final Options options = new Options()) { + options.optimizeLevelStyleCompaction(); + 
options.optimizeLevelStyleCompaction(400); + options.optimizeUniversalStyleCompaction(); + options.optimizeUniversalStyleCompaction(400); + options.optimizeForPointLookup(1024); + options.prepareForBulkLoad(); + } + } } } diff --git a/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java b/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java index 4e9ad27a2..519a28be4 100644 --- a/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java +++ b/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java @@ -23,7 +23,7 @@ public class NativeLibraryLoaderTest { public void tempFolder() throws IOException { NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( temporaryFolder.getRoot().getAbsolutePath()); - Path path = Paths.get(temporaryFolder.getRoot().getAbsolutePath(), + final Path path = Paths.get(temporaryFolder.getRoot().getAbsolutePath(), Environment.getJniLibraryFileName("rocksdb")); assertThat(Files.exists(path)).isTrue(); assertThat(Files.isReadable(path)).isTrue(); diff --git a/java/src/test/java/org/rocksdb/OptionsTest.java b/java/src/test/java/org/rocksdb/OptionsTest.java index 6d11e6fa7..a588b5de7 100644 --- a/java/src/test/java/org/rocksdb/OptionsTest.java +++ b/java/src/test/java/org/rocksdb/OptionsTest.java @@ -8,6 +8,7 @@ package org.rocksdb; import java.util.ArrayList; import java.util.List; import java.util.Random; + import org.junit.ClassRule; import org.junit.Test; @@ -25,985 +26,599 @@ public class OptionsTest { @Test public void setIncreaseParallelism() { - Options opt = null; - try { - opt = new Options(); + try (final Options opt = new Options()) { final int threads = Runtime.getRuntime().availableProcessors() * 2; opt.setIncreaseParallelism(threads); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void writeBufferSize() throws RocksDBException { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long 
longValue = rand.nextLong(); opt.setWriteBufferSize(longValue); assertThat(opt.writeBufferSize()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxWriteBufferNumber() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMaxWriteBufferNumber(intValue); assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void minWriteBufferNumberToMerge() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMinWriteBufferNumberToMerge(intValue); assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void numLevels() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setNumLevels(intValue); assertThat(opt.numLevels()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void levelZeroFileNumCompactionTrigger() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setLevelZeroFileNumCompactionTrigger(intValue); assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void levelZeroSlowdownWritesTrigger() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setLevelZeroSlowdownWritesTrigger(intValue); 
assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void levelZeroStopWritesTrigger() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setLevelZeroStopWritesTrigger(intValue); assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void targetFileSizeBase() { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setTargetFileSizeBase(longValue); assertThat(opt.targetFileSizeBase()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void targetFileSizeMultiplier() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setTargetFileSizeMultiplier(intValue); assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxBytesForLevelBase() { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setMaxBytesForLevelBase(longValue); assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void levelCompactionDynamicLevelBytes() { - Options opt = null; - try { - opt = new Options(); + try (final Options opt = new Options()) { final boolean boolValue = rand.nextBoolean(); opt.setLevelCompactionDynamicLevelBytes(boolValue); assertThat(opt.levelCompactionDynamicLevelBytes()) .isEqualTo(boolValue); - } finally { - if 
(opt != null) { - opt.dispose(); - } } } @Test public void maxBytesForLevelMultiplier() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMaxBytesForLevelMultiplier(intValue); assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void expandedCompactionFactor() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setExpandedCompactionFactor(intValue); assertThat(opt.expandedCompactionFactor()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void sourceCompactionFactor() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setSourceCompactionFactor(intValue); assertThat(opt.sourceCompactionFactor()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxGrandparentOverlapFactor() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMaxGrandparentOverlapFactor(intValue); assertThat(opt.maxGrandparentOverlapFactor()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void softRateLimit() { - Options opt = null; - try { - opt = new Options(); - double doubleValue = rand.nextDouble(); + try (final Options opt = new Options()) { + final double doubleValue = rand.nextDouble(); opt.setSoftRateLimit(doubleValue); assertThat(opt.softRateLimit()).isEqualTo(doubleValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void hardRateLimit() { - 
Options opt = null; - try { - opt = new Options(); - double doubleValue = rand.nextDouble(); + try (final Options opt = new Options()) { + final double doubleValue = rand.nextDouble(); opt.setHardRateLimit(doubleValue); assertThat(opt.hardRateLimit()).isEqualTo(doubleValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void rateLimitDelayMaxMilliseconds() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setRateLimitDelayMaxMilliseconds(intValue); assertThat(opt.rateLimitDelayMaxMilliseconds()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void arenaBlockSize() throws RocksDBException { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setArenaBlockSize(longValue); assertThat(opt.arenaBlockSize()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void disableAutoCompactions() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setDisableAutoCompactions(boolValue); assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void purgeRedundantKvsWhileFlush() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setPurgeRedundantKvsWhileFlush(boolValue); assertThat(opt.purgeRedundantKvsWhileFlush()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void verifyChecksumsInCompaction() { - Options opt = null; 
- try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setVerifyChecksumsInCompaction(boolValue); assertThat(opt.verifyChecksumsInCompaction()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void filterDeletes() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setFilterDeletes(boolValue); assertThat(opt.filterDeletes()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxSequentialSkipInIterations() { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setMaxSequentialSkipInIterations(longValue); assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void inplaceUpdateSupport() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setInplaceUpdateSupport(boolValue); assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void inplaceUpdateNumLocks() throws RocksDBException { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setInplaceUpdateNumLocks(longValue); assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void memtablePrefixBloomBits() { - Options opt = null; - try { - opt = 
new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMemtablePrefixBloomBits(intValue); assertThat(opt.memtablePrefixBloomBits()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void memtablePrefixBloomProbes() { - Options opt = null; - try { - int intValue = rand.nextInt(); - opt = new Options(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMemtablePrefixBloomProbes(intValue); assertThat(opt.memtablePrefixBloomProbes()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void bloomLocality() { - Options opt = null; - try { - int intValue = rand.nextInt(); - opt = new Options(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setBloomLocality(intValue); assertThat(opt.bloomLocality()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxSuccessiveMerges() throws RocksDBException { - Options opt = null; - try { - long longValue = rand.nextLong(); - opt = new Options(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setMaxSuccessiveMerges(longValue); assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void minPartialMergeOperands() { - Options opt = null; - try { - int intValue = rand.nextInt(); - opt = new Options(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMinPartialMergeOperands(intValue); assertThat(opt.minPartialMergeOperands()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void optimizeFiltersForHits() { - Options opt = null; - try { - boolean aBoolean = rand.nextBoolean(); - opt = new Options(); + try (final Options opt = new Options()) { + 
final boolean aBoolean = rand.nextBoolean(); opt.setOptimizeFiltersForHits(aBoolean); assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void createIfMissing() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setCreateIfMissing(boolValue); assertThat(opt.createIfMissing()). isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void createMissingColumnFamilies() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setCreateMissingColumnFamilies(boolValue); assertThat(opt.createMissingColumnFamilies()). isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void errorIfExists() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setErrorIfExists(boolValue); assertThat(opt.errorIfExists()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void paranoidChecks() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setParanoidChecks(boolValue); assertThat(opt.paranoidChecks()). 
isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxTotalWalSize() { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setMaxTotalWalSize(longValue); assertThat(opt.maxTotalWalSize()). isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxOpenFiles() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMaxOpenFiles(intValue); assertThat(opt.maxOpenFiles()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void disableDataSync() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setDisableDataSync(boolValue); assertThat(opt.disableDataSync()). 
isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void useFsync() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setUseFsync(boolValue); assertThat(opt.useFsync()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void dbLogDir() { - Options opt = null; - try { - opt = new Options(); - String str = "path/to/DbLogDir"; + try (final Options opt = new Options()) { + final String str = "path/to/DbLogDir"; opt.setDbLogDir(str); assertThat(opt.dbLogDir()).isEqualTo(str); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void walDir() { - Options opt = null; - try { - opt = new Options(); - String str = "path/to/WalDir"; + try (final Options opt = new Options()) { + final String str = "path/to/WalDir"; opt.setWalDir(str); assertThat(opt.walDir()).isEqualTo(str); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void deleteObsoleteFilesPeriodMicros() { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setDeleteObsoleteFilesPeriodMicros(longValue); assertThat(opt.deleteObsoleteFilesPeriodMicros()). isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxBackgroundCompactions() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMaxBackgroundCompactions(intValue); assertThat(opt.maxBackgroundCompactions()). 
isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxBackgroundFlushes() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setMaxBackgroundFlushes(intValue); assertThat(opt.maxBackgroundFlushes()). isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxLogFileSize() throws RocksDBException { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setMaxLogFileSize(longValue); assertThat(opt.maxLogFileSize()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void logFileTimeToRoll() throws RocksDBException { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setLogFileTimeToRoll(longValue); assertThat(opt.logFileTimeToRoll()). isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void keepLogFileNum() throws RocksDBException { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setKeepLogFileNum(longValue); assertThat(opt.keepLogFileNum()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void maxManifestFileSize() { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setMaxManifestFileSize(longValue); assertThat(opt.maxManifestFileSize()). 
isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void tableCacheNumshardbits() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setTableCacheNumshardbits(intValue); assertThat(opt.tableCacheNumshardbits()). isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void walSizeLimitMB() { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setWalSizeLimitMB(longValue); assertThat(opt.walSizeLimitMB()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void walTtlSeconds() { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setWalTtlSeconds(longValue); assertThat(opt.walTtlSeconds()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void manifestPreallocationSize() throws RocksDBException { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setManifestPreallocationSize(longValue); assertThat(opt.manifestPreallocationSize()). 
isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void allowOsBuffer() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setAllowOsBuffer(boolValue); assertThat(opt.allowOsBuffer()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void allowMmapReads() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setAllowMmapReads(boolValue); assertThat(opt.allowMmapReads()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void allowMmapWrites() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setAllowMmapWrites(boolValue); assertThat(opt.allowMmapWrites()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void isFdCloseOnExec() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setIsFdCloseOnExec(boolValue); assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void statsDumpPeriodSec() { - Options opt = null; - try { - opt = new Options(); - int intValue = rand.nextInt(); + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); opt.setStatsDumpPeriodSec(intValue); assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void adviseRandomOnOpen() { - Options opt = 
null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setAdviseRandomOnOpen(boolValue); assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void useAdaptiveMutex() { - Options opt = null; - try { - opt = new Options(); - boolean boolValue = rand.nextBoolean(); + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); opt.setUseAdaptiveMutex(boolValue); assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void bytesPerSync() { - Options opt = null; - try { - opt = new Options(); - long longValue = rand.nextLong(); + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); opt.setBytesPerSync(longValue); assertThat(opt.bytesPerSync()).isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void env() { - Options options = null; - try { - options = new Options(); - Env env = Env.getDefault(); + try (final Options options = new Options(); + final Env env = Env.getDefault()) { options.setEnv(env); assertThat(options.getEnv()).isSameAs(env); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void linkageOfPrepMethods() { - Options options = null; - try { - options = new Options(); + try (final Options options = new Options()) { options.optimizeUniversalStyleCompaction(); options.optimizeUniversalStyleCompaction(4000); options.optimizeLevelStyleCompaction(); options.optimizeLevelStyleCompaction(3000); options.optimizeForPointLookup(10); options.prepareForBulkLoad(); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void compressionTypes() { - Options options = null; - try { - options = new Options(); - for (CompressionType compressionType : 
+ try (final Options options = new Options()) { + for (final CompressionType compressionType : CompressionType.values()) { options.setCompressionType(compressionType); assertThat(options.compressionType()). @@ -1011,22 +626,17 @@ public class OptionsTest { assertThat(CompressionType.valueOf("NO_COMPRESSION")). isEqualTo(CompressionType.NO_COMPRESSION); } - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void compressionPerLevel() { - ColumnFamilyOptions columnFamilyOptions = null; - try { - columnFamilyOptions = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions columnFamilyOptions = + new ColumnFamilyOptions()) { assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty(); List compressionTypeList = new ArrayList<>(); - for (int i=0; i < columnFamilyOptions.numLevels(); i++) { + for (int i = 0; i < columnFamilyOptions.numLevels(); i++) { compressionTypeList.add(CompressionType.NO_COMPRESSION); } columnFamilyOptions.setCompressionPerLevel(compressionTypeList); @@ -1035,18 +645,13 @@ public class OptionsTest { assertThat(compressionType).isEqualTo( CompressionType.NO_COMPRESSION); } - } finally { - if (columnFamilyOptions != null) { - columnFamilyOptions.dispose(); - } } } @Test public void differentCompressionsPerLevel() { - ColumnFamilyOptions columnFamilyOptions = null; - try { - columnFamilyOptions = new ColumnFamilyOptions(); + try (final ColumnFamilyOptions columnFamilyOptions = + new ColumnFamilyOptions()) { columnFamilyOptions.setNumLevels(3); assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty(); @@ -1066,19 +671,13 @@ public class OptionsTest { CompressionType.SNAPPY_COMPRESSION, CompressionType.LZ4_COMPRESSION); - } finally { - if (columnFamilyOptions != null) { - columnFamilyOptions.dispose(); - } } } @Test public void compactionStyles() { - Options options = null; - try { - options = new Options(); - for (CompactionStyle compactionStyle : + try (final Options options = new Options()) { + for (final 
CompactionStyle compactionStyle : CompactionStyle.values()) { options.setCompactionStyle(compactionStyle); assertThat(options.compactionStyle()). @@ -1086,18 +685,12 @@ public class OptionsTest { assertThat(CompactionStyle.valueOf("FIFO")). isEqualTo(CompactionStyle.FIFO); } - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void maxTableFilesSizeFIFO() { - Options opt = null; - try { - opt = new Options(); + try (final Options opt = new Options()) { long longValue = rand.nextLong(); // Size has to be positive longValue = (longValue < 0) ? -longValue : longValue; @@ -1105,61 +698,36 @@ public class OptionsTest { opt.setMaxTableFilesSizeFIFO(longValue); assertThat(opt.maxTableFilesSizeFIFO()). isEqualTo(longValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test public void rateLimiterConfig() { - Options options = null; - Options anotherOptions = null; - RateLimiterConfig rateLimiterConfig; - try { - options = new Options(); - rateLimiterConfig = new GenericRateLimiterConfig(1000, 100 * 1000, 1); + try (final Options options = new Options(); + final Options anotherOptions = new Options()) { + final RateLimiterConfig rateLimiterConfig = + new GenericRateLimiterConfig(1000, 100 * 1000, 1); options.setRateLimiterConfig(rateLimiterConfig); // Test with parameter initialization - anotherOptions = new Options(); + anotherOptions.setRateLimiterConfig( new GenericRateLimiterConfig(1000)); - } finally { - if (options != null) { - options.dispose(); - } - if (anotherOptions != null) { - anotherOptions.dispose(); - } } } @Test public void shouldSetTestPrefixExtractor() { - Options options = null; - try { - options = new Options(); + try (final Options options = new Options()) { options.useFixedLengthPrefixExtractor(100); options.useFixedLengthPrefixExtractor(10); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void shouldSetTestCappedPrefixExtractor() { - Options options = null; - try { - options 
= new Options(); + try (final Options options = new Options()) { options.useCappedPrefixExtractor(100); options.useCappedPrefixExtractor(10); - } finally { - if (options != null) { - options.dispose(); - } } } @@ -1167,9 +735,7 @@ public class OptionsTest { @Test public void shouldTestMemTableFactoryName() throws RocksDBException { - Options options = null; - try { - options = new Options(); + try (final Options options = new Options()) { options.setMemTableConfig(new VectorMemTableConfig()); assertThat(options.memTableFactoryName()). isEqualTo("VectorRepFactory"); @@ -1177,31 +743,18 @@ public class OptionsTest { new HashLinkedListMemTableConfig()); assertThat(options.memTableFactoryName()). isEqualTo("HashLinkedListRepFactory"); - } finally { - if (options != null) { - options.dispose(); - } } } @Test public void statistics() { - Options options = null; - Options anotherOptions = null; - try { - options = new Options(); + try (final Options options = new Options()) { Statistics statistics = options.createStatistics(). 
statisticsPtr(); assertThat(statistics).isNotNull(); - anotherOptions = new Options(); - statistics = anotherOptions.statisticsPtr(); - assertThat(statistics).isNotNull(); - } finally { - if (options != null) { - options.dispose(); - } - if (anotherOptions != null) { - anotherOptions.dispose(); + try (final Options anotherOptions = new Options()) { + statistics = anotherOptions.statisticsPtr(); + assertThat(statistics).isNotNull(); } } } diff --git a/java/src/test/java/org/rocksdb/PlainTableConfigTest.java b/java/src/test/java/org/rocksdb/PlainTableConfigTest.java index b815cd058..05bd13863 100644 --- a/java/src/test/java/org/rocksdb/PlainTableConfigTest.java +++ b/java/src/test/java/org/rocksdb/PlainTableConfigTest.java @@ -80,16 +80,10 @@ public class PlainTableConfigTest { @Test public void plainTableConfig() { - Options opt = null; - try { - opt = new Options(); - PlainTableConfig plainTableConfig = new PlainTableConfig(); + try(final Options opt = new Options()) { + final PlainTableConfig plainTableConfig = new PlainTableConfig(); opt.setTableFormatConfig(plainTableConfig); assertThat(opt.tableFactoryName()).isEqualTo("PlainTable"); - } finally { - if (opt != null) { - opt.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/PlatformRandomHelper.java b/java/src/test/java/org/rocksdb/PlatformRandomHelper.java index e88a8951d..b437e7f97 100644 --- a/java/src/test/java/org/rocksdb/PlatformRandomHelper.java +++ b/java/src/test/java/org/rocksdb/PlatformRandomHelper.java @@ -18,7 +18,7 @@ public class PlatformRandomHelper { * @return boolean value indicating if operating system is 64 Bit. 
*/ public static boolean isOs64Bit(){ - boolean is64Bit; + final boolean is64Bit; if (System.getProperty("os.name").contains("Windows")) { is64Bit = (System.getenv("ProgramFiles(x86)") != null); } else { diff --git a/java/src/test/java/org/rocksdb/ReadOnlyTest.java b/java/src/test/java/org/rocksdb/ReadOnlyTest.java index 5cf2b32d4..d993c9148 100644 --- a/java/src/test/java/org/rocksdb/ReadOnlyTest.java +++ b/java/src/test/java/org/rocksdb/ReadOnlyTest.java @@ -10,6 +10,7 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import static org.assertj.core.api.Assertions.assertThat; @@ -25,340 +26,279 @@ public class ReadOnlyTest { @Test public void readOnlyOpen() throws RocksDBException { - RocksDB db = null; - RocksDB db2 = null; - RocksDB db3 = null; - Options options = null; - List columnFamilyHandleList = - new ArrayList<>(); - List readOnlyColumnFamilyHandleList = - new ArrayList<>(); - List readOnlyColumnFamilyHandleList2 = - new ArrayList<>(); - try { - options = new Options(); - options.setCreateIfMissing(true); - - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { db.put("key".getBytes(), "value".getBytes()); - db2 = RocksDB.openReadOnly( - dbFolder.getRoot().getAbsolutePath()); - assertThat("value"). - isEqualTo(new String(db2.get("key".getBytes()))); - db.close(); - db2.close(); + try (final RocksDB db2 = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath())) { + assertThat("value"). 
+ isEqualTo(new String(db2.get("key".getBytes()))); + } + } - List cfDescriptors = new ArrayList<>(); - cfDescriptors.add( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions())); + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List cfDescriptors = new ArrayList<>(); + cfDescriptors.add(new ColumnFamilyDescriptor( + RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); - db = RocksDB.open( - dbFolder.getRoot().getAbsolutePath(), cfDescriptors, columnFamilyHandleList); - columnFamilyHandleList.add(db.createColumnFamily( - new ColumnFamilyDescriptor("new_cf".getBytes(), new ColumnFamilyOptions()))); - columnFamilyHandleList.add(db.createColumnFamily( - new ColumnFamilyDescriptor("new_cf2".getBytes(), new ColumnFamilyOptions()))); - db.put(columnFamilyHandleList.get(2), "key2".getBytes(), - "value2".getBytes()); + final List columnFamilyHandleList = new ArrayList<>(); + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try (final ColumnFamilyOptions newCfOpts = new ColumnFamilyOptions(); + final ColumnFamilyOptions newCf2Opts = new ColumnFamilyOptions() + ) { + columnFamilyHandleList.add(db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf".getBytes(), newCfOpts))); + columnFamilyHandleList.add(db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf2".getBytes(), newCf2Opts))); + db.put(columnFamilyHandleList.get(2), "key2".getBytes(), + "value2".getBytes()); - db2 = RocksDB.openReadOnly( - dbFolder.getRoot().getAbsolutePath(), cfDescriptors, - readOnlyColumnFamilyHandleList); - assertThat(db2.get("key2".getBytes())).isNull(); - assertThat(db2.get(readOnlyColumnFamilyHandleList.get(0), "key2".getBytes())). 
- isNull(); - cfDescriptors.clear(); - cfDescriptors.add( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions())); - cfDescriptors.add( - new ColumnFamilyDescriptor("new_cf2".getBytes(), new ColumnFamilyOptions())); - db3 = RocksDB.openReadOnly( - dbFolder.getRoot().getAbsolutePath(), cfDescriptors, readOnlyColumnFamilyHandleList2); - assertThat(new String(db3.get(readOnlyColumnFamilyHandleList2.get(1), - "key2".getBytes()))).isEqualTo("value2"); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db2 != null) { - db2.close(); - } - for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList2) { - columnFamilyHandle.dispose(); - } - if (db3 != null) { - db3.close(); - } - if (options != null) { - options.dispose(); + final List readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB db2 = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList)) { + try (final ColumnFamilyOptions newCfOpts2 = + new ColumnFamilyOptions(); + final ColumnFamilyOptions newCf2Opts2 = + new ColumnFamilyOptions() + ) { + assertThat(db2.get("key2".getBytes())).isNull(); + assertThat(db2.get(readOnlyColumnFamilyHandleList.get(0), + "key2".getBytes())). 
+ isNull(); + cfDescriptors.clear(); + cfDescriptors.add( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, + newCfOpts2)); + cfDescriptors.add(new ColumnFamilyDescriptor("new_cf2".getBytes(), + newCf2Opts2)); + + final List readOnlyColumnFamilyHandleList2 + = new ArrayList<>(); + try (final RocksDB db3 = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList2)) { + try { + assertThat(new String(db3.get( + readOnlyColumnFamilyHandleList2.get(1), + "key2".getBytes()))).isEqualTo("value2"); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList2) { + columnFamilyHandle.close(); + } + } + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } } } } @Test(expected = RocksDBException.class) public void failToWriteInReadOnly() throws RocksDBException { - RocksDB db = null; - RocksDB rDb = null; - Options options = null; - List cfDescriptors = new ArrayList<>(); - List readOnlyColumnFamilyHandleList = - new ArrayList<>(); - try { + try (final Options options = new Options() + .setCreateIfMissing(true)) { - cfDescriptors.add( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions())); + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + } - options = new Options(); - options.setCreateIfMissing(true); + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); - db.close(); - rDb = RocksDB.openReadOnly( + final List readOnlyColumnFamilyHandleList = + new 
ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( dbFolder.getRoot().getAbsolutePath(), cfDescriptors, - readOnlyColumnFamilyHandleList); - - // test that put fails in readonly mode - rDb.put("key".getBytes(), "value".getBytes()); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (rDb != null) { - rDb.close(); - } - if (options != null) { - options.dispose(); + readOnlyColumnFamilyHandleList)) { + try { + // test that put fails in readonly mode + rDb.put("key".getBytes(), "value".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } } } } @Test(expected = RocksDBException.class) public void failToCFWriteInReadOnly() throws RocksDBException { - RocksDB db = null; - RocksDB rDb = null; - Options options = null; - List cfDescriptors = new ArrayList<>(); - List readOnlyColumnFamilyHandleList = - new ArrayList<>(); - try { - cfDescriptors.add( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions())); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } - options = new Options(); - options.setCreateIfMissing(true); - - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); - db.close(); - rDb = RocksDB.openReadOnly( + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); + final List readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( dbFolder.getRoot().getAbsolutePath(), cfDescriptors, - readOnlyColumnFamilyHandleList); - - rDb.put(readOnlyColumnFamilyHandleList.get(0), - "key".getBytes(), 
"value".getBytes()); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (rDb != null) { - rDb.close(); - } - if (options != null) { - options.dispose(); + readOnlyColumnFamilyHandleList)) { + try { + rDb.put(readOnlyColumnFamilyHandleList.get(0), + "key".getBytes(), "value".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } } } } @Test(expected = RocksDBException.class) public void failToRemoveInReadOnly() throws RocksDBException { - RocksDB db = null; - RocksDB rDb = null; - Options options = null; - List cfDescriptors = new ArrayList<>(); - List readOnlyColumnFamilyHandleList = - new ArrayList<>(); - try { - cfDescriptors.add( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions())); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } - options = new Options(); - options.setCreateIfMissing(true); + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); - db.close(); - rDb = RocksDB.openReadOnly( + final List readOnlyColumnFamilyHandleList = + new ArrayList<>(); + + try (final RocksDB rDb = RocksDB.openReadOnly( dbFolder.getRoot().getAbsolutePath(), cfDescriptors, - readOnlyColumnFamilyHandleList); - - rDb.remove("key".getBytes()); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (rDb != null) { - rDb.close(); - } - if (options != null) { - options.dispose(); + 
readOnlyColumnFamilyHandleList)) { + try { + rDb.remove("key".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } } } } @Test(expected = RocksDBException.class) public void failToCFRemoveInReadOnly() throws RocksDBException { - RocksDB db = null; - RocksDB rDb = null; - Options options = null; - List cfDescriptors = new ArrayList<>(); - List readOnlyColumnFamilyHandleList = - new ArrayList<>(); - try { - cfDescriptors.add( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions())); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } - options = new Options(); - options.setCreateIfMissing(true); + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); - db.close(); - - rDb = RocksDB.openReadOnly( + final List readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( dbFolder.getRoot().getAbsolutePath(), cfDescriptors, - readOnlyColumnFamilyHandleList); - - rDb.remove(readOnlyColumnFamilyHandleList.get(0), - "key".getBytes()); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (rDb != null) { - rDb.close(); - } - if (options != null) { - options.dispose(); + readOnlyColumnFamilyHandleList)) { + try { + rDb.remove(readOnlyColumnFamilyHandleList.get(0), + "key".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } } } } @Test(expected = RocksDBException.class) 
public void failToWriteBatchReadOnly() throws RocksDBException { - RocksDB db = null; - RocksDB rDb = null; - Options options = null; - List cfDescriptors = new ArrayList<>(); - List readOnlyColumnFamilyHandleList = - new ArrayList<>(); - try { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } - cfDescriptors.add( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions())); + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); - options = new Options(); - options.setCreateIfMissing(true); - - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); - db.close(); - - rDb = RocksDB.openReadOnly( + final List readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( dbFolder.getRoot().getAbsolutePath(), cfDescriptors, readOnlyColumnFamilyHandleList); - - WriteBatch wb = new WriteBatch(); - wb.put("key".getBytes(), "value".getBytes()); - rDb.write(new WriteOptions(), wb); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (rDb != null) { - rDb.close(); - } - if (options != null) { - options.dispose(); + final WriteBatch wb = new WriteBatch(); + final WriteOptions wOpts = new WriteOptions()) { + try { + wb.put("key".getBytes(), "value".getBytes()); + rDb.write(wOpts, wb); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } } } } @Test(expected = RocksDBException.class) public void failToCFWriteBatchReadOnly() throws RocksDBException { - RocksDB db = null; - RocksDB rDb = null; - Options options = null; - WriteBatch wb = null; - 
List cfDescriptors = new ArrayList<>(); - List readOnlyColumnFamilyHandleList = - new ArrayList<>(); - try { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } - cfDescriptors.add( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, - new ColumnFamilyOptions())); + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); - - options = new Options(); - options.setCreateIfMissing(true); - - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); - db.close(); - - rDb = RocksDB.openReadOnly( + final List readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( dbFolder.getRoot().getAbsolutePath(), cfDescriptors, readOnlyColumnFamilyHandleList); - - wb = new WriteBatch(); - wb.put(readOnlyColumnFamilyHandleList.get(0), - "key".getBytes(), "value".getBytes()); - rDb.write(new WriteOptions(), wb); - } finally { - for (ColumnFamilyHandle columnFamilyHandle : readOnlyColumnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (rDb != null) { - rDb.close(); - } - if (options != null) { - options.dispose(); - } - if (wb != null) { - wb.dispose(); + final WriteBatch wb = new WriteBatch(); + final WriteOptions wOpts = new WriteOptions()) { + try { + wb.put(readOnlyColumnFamilyHandleList.get(0), "key".getBytes(), + "value".getBytes()); + rDb.write(wOpts, wb); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } } } } diff --git a/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/java/src/test/java/org/rocksdb/ReadOptionsTest.java index df42cf0cd..a10f5ac7e 100644 --- a/java/src/test/java/org/rocksdb/ReadOptionsTest.java +++ 
b/java/src/test/java/org/rocksdb/ReadOptionsTest.java @@ -24,127 +24,111 @@ public class ReadOptionsTest { public ExpectedException exception = ExpectedException.none(); @Test - public void verifyChecksum(){ - ReadOptions opt = null; - try { - opt = new ReadOptions(); - Random rand = new Random(); - boolean boolValue = rand.nextBoolean(); + public void verifyChecksum() { + try (final ReadOptions opt = new ReadOptions()) { + final Random rand = new Random(); + final boolean boolValue = rand.nextBoolean(); opt.setVerifyChecksums(boolValue); assertThat(opt.verifyChecksums()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test - public void fillCache(){ - ReadOptions opt = null; - try { - opt = new ReadOptions(); - Random rand = new Random(); - boolean boolValue = rand.nextBoolean(); + public void fillCache() { + try (final ReadOptions opt = new ReadOptions()) { + final Random rand = new Random(); + final boolean boolValue = rand.nextBoolean(); opt.setFillCache(boolValue); assertThat(opt.fillCache()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test - public void tailing(){ - ReadOptions opt = null; - try { - opt = new ReadOptions(); - Random rand = new Random(); - boolean boolValue = rand.nextBoolean(); + public void tailing() { + try (final ReadOptions opt = new ReadOptions()) { + final Random rand = new Random(); + final boolean boolValue = rand.nextBoolean(); opt.setTailing(boolValue); assertThat(opt.tailing()).isEqualTo(boolValue); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test - public void snapshot(){ - ReadOptions opt = null; - try { - opt = new ReadOptions(); + public void snapshot() { + try (final ReadOptions opt = new ReadOptions()) { opt.setSnapshot(null); assertThat(opt.snapshot()).isNull(); - } finally { - if (opt != null) { - opt.dispose(); - } } } @Test - public void failSetVerifyChecksumUninitialized(){ - ReadOptions readOptions = setupUninitializedReadOptions( 
- exception); - readOptions.setVerifyChecksums(true); + public void failSetVerifyChecksumUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setVerifyChecksums(true); + } } @Test - public void failVerifyChecksumUninitialized(){ - ReadOptions readOptions = setupUninitializedReadOptions( - exception); - readOptions.verifyChecksums(); + public void failVerifyChecksumUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.verifyChecksums(); + } } @Test - public void failSetFillCacheUninitialized(){ - ReadOptions readOptions = setupUninitializedReadOptions( - exception); - readOptions.setFillCache(true); + public void failSetFillCacheUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setFillCache(true); + } } @Test - public void failFillCacheUninitialized(){ - ReadOptions readOptions = setupUninitializedReadOptions( - exception); - readOptions.fillCache(); + public void failFillCacheUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.fillCache(); + } } @Test - public void failSetTailingUninitialized(){ - ReadOptions readOptions = setupUninitializedReadOptions( - exception); - readOptions.setTailing(true); + public void failSetTailingUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setTailing(true); + } } @Test - public void failTailingUninitialized(){ - ReadOptions readOptions = setupUninitializedReadOptions( - exception); - readOptions.tailing(); + public void failTailingUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.tailing(); + } } @Test - public void failSetSnapshotUninitialized(){ - ReadOptions readOptions = setupUninitializedReadOptions( - exception); - 
readOptions.setSnapshot(null); + public void failSetSnapshotUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setSnapshot(null); + } } @Test - public void failSnapshotUninitialized(){ - ReadOptions readOptions = setupUninitializedReadOptions( - exception); - readOptions.snapshot(); + public void failSnapshotUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.snapshot(); + } } private ReadOptions setupUninitializedReadOptions( ExpectedException exception) { - ReadOptions readOptions = new ReadOptions(); - readOptions.dispose(); + final ReadOptions readOptions = new ReadOptions(); + readOptions.close(); exception.expect(AssertionError.class); return readOptions; } diff --git a/java/src/test/java/org/rocksdb/RocksDBTest.java b/java/src/test/java/org/rocksdb/RocksDBTest.java index cb1c1f77d..ceac69f5f 100644 --- a/java/src/test/java/org/rocksdb/RocksDBTest.java +++ b/java/src/test/java/org/rocksdb/RocksDBTest.java @@ -27,96 +27,65 @@ public class RocksDBTest { @Test public void open() throws RocksDBException { - RocksDB db = null; - Options opt = null; - try { - db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); - db.close(); - opt = new Options(); - opt.setCreateIfMissing(true); - db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath()); - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } + try (final RocksDB db = + RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { + assertThat(db).isNotNull(); + } + } + + @Test + public void open_opt() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(db).isNotNull(); } } @Test public void put() throws RocksDBException { - RocksDB db = null; - WriteOptions opt = null; - try { - db = 
RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final WriteOptions opt = new WriteOptions()) { db.put("key1".getBytes(), "value".getBytes()); - opt = new WriteOptions(); db.put(opt, "key2".getBytes(), "12345678".getBytes()); assertThat(db.get("key1".getBytes())).isEqualTo( "value".getBytes()); assertThat(db.get("key2".getBytes())).isEqualTo( "12345678".getBytes()); - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } @Test public void write() throws RocksDBException { - RocksDB db = null; - Options options = null; - WriteBatch wb1 = null; - WriteBatch wb2 = null; - WriteOptions opts = null; - try { - options = new Options(). - setMergeOperator(new StringAppendOperator()). - setCreateIfMissing(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - opts = new WriteOptions(); - wb1 = new WriteBatch(); - wb1.put("key1".getBytes(), "aa".getBytes()); - wb1.merge("key1".getBytes(), "bb".getBytes()); - wb2 = new WriteBatch(); - wb2.put("key2".getBytes(), "xx".getBytes()); - wb2.merge("key2".getBytes(), "yy".getBytes()); - db.write(opts, wb1); - db.write(opts, wb2); + try (final Options options = new Options().setMergeOperator( + new StringAppendOperator()).setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions opts = new WriteOptions()) { + + try (final WriteBatch wb1 = new WriteBatch()) { + wb1.put("key1".getBytes(), "aa".getBytes()); + wb1.merge("key1".getBytes(), "bb".getBytes()); + + try (final WriteBatch wb2 = new WriteBatch()) { + wb2.put("key2".getBytes(), "xx".getBytes()); + wb2.merge("key2".getBytes(), "yy".getBytes()); + db.write(opts, wb1); + db.write(opts, wb2); + } + } + assertThat(db.get("key1".getBytes())).isEqualTo( "aa,bb".getBytes()); assertThat(db.get("key2".getBytes())).isEqualTo( "xx,yy".getBytes()); - } finally { - if (db != 
null) { - db.close(); - } - if (wb1 != null) { - wb1.dispose(); - } - if (wb2 != null) { - wb2.dispose(); - } - if (options != null) { - options.dispose(); - } - if (opts != null) { - opts.dispose(); - } } } @Test public void getWithOutValue() throws RocksDBException { - RocksDB db = null; - try { - db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + try (final RocksDB db = + RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { db.put("key1".getBytes(), "value".getBytes()); db.put("key2".getBytes(), "12345678".getBytes()); byte[] outValue = new byte[5]; @@ -131,20 +100,13 @@ public class RocksDBTest { getResult = db.get("key2".getBytes(), outValue); assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); assertThat(outValue).isEqualTo("12345".getBytes()); - } finally { - if (db != null) { - db.close(); - } } } @Test public void getWithOutValueReadOptions() throws RocksDBException { - RocksDB db = null; - ReadOptions rOpt = null; - try { - db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); - rOpt = new ReadOptions(); + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final ReadOptions rOpt = new ReadOptions()) { db.put("key1".getBytes(), "value".getBytes()); db.put("key2".getBytes(), "12345678".getBytes()); byte[] outValue = new byte[5]; @@ -160,23 +122,13 @@ public class RocksDBTest { getResult = db.get(rOpt, "key2".getBytes(), outValue); assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); assertThat(outValue).isEqualTo("12345".getBytes()); - } finally { - if (db != null) { - db.close(); - } - if (rOpt != null) { - rOpt.dispose(); - } } } @Test public void multiGet() throws RocksDBException, InterruptedException { - RocksDB db = null; - ReadOptions rOpt = null; - try { - db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); - rOpt = new ReadOptions(); + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final ReadOptions rOpt = new ReadOptions()) { db.put("key1".getBytes(), 
"value".getBytes()); db.put("key2".getBytes(), "12345678".getBytes()); List lookupKeys = new ArrayList<>(); @@ -209,27 +161,18 @@ public class RocksDBTest { assertThat(results.values()).isNotNull(); assertThat(results.values()). contains("value".getBytes()); - } finally { - if (db != null) { - db.close(); - } - if (rOpt != null) { - rOpt.dispose(); - } } } @Test public void merge() throws RocksDBException { - RocksDB db = null; - Options opt = null; - WriteOptions wOpt; - try { - opt = new Options(). - setCreateIfMissing(true). - setMergeOperator(new StringAppendOperator()); - wOpt = new WriteOptions(); - db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperator(new StringAppendOperator()); + final WriteOptions wOpt = new WriteOptions(); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath()) + ) { db.put("key1".getBytes(), "value".getBytes()); assertThat(db.get("key1".getBytes())).isEqualTo( "value".getBytes()); @@ -245,23 +188,13 @@ public class RocksDBTest { db.merge(wOpt, "key2".getBytes(), "xxxx".getBytes()); assertThat(db.get("key2".getBytes())).isEqualTo( "xxxx".getBytes()); - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } @Test public void remove() throws RocksDBException { - RocksDB db = null; - WriteOptions wOpt; - try { - wOpt = new WriteOptions(); - db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final WriteOptions wOpt = new WriteOptions()) { db.put("key1".getBytes(), "value".getBytes()); db.put("key2".getBytes(), "12345678".getBytes()); assertThat(db.get("key1".getBytes())).isEqualTo( @@ -272,66 +205,47 @@ public class RocksDBTest { db.remove(wOpt, "key2".getBytes()); assertThat(db.get("key1".getBytes())).isNull(); assertThat(db.get("key2".getBytes())).isNull(); - } finally { - if (db != null) { - 
db.close(); - } } } @Test public void getIntProperty() throws RocksDBException { - RocksDB db = null; - Options options = null; - WriteOptions wOpt = null; - try { - options = new Options(); - wOpt = new WriteOptions(); - // Setup options - options.setCreateIfMissing(true); - options.setMaxWriteBufferNumber(10); - options.setMinWriteBufferNumberToMerge(10); - wOpt.setDisableWAL(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); + try ( + final Options options = new Options() + .setCreateIfMissing(true) + .setMaxWriteBufferNumber(10) + .setMinWriteBufferNumberToMerge(10); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions wOpt = new WriteOptions().setDisableWAL(true) + ) { db.put(wOpt, "key1".getBytes(), "value1".getBytes()); db.put(wOpt, "key2".getBytes(), "value2".getBytes()); db.put(wOpt, "key3".getBytes(), "value3".getBytes()); db.put(wOpt, "key4".getBytes(), "value4".getBytes()); - assertThat(db.getLongProperty("rocksdb.num-entries-active-mem-table")).isGreaterThan(0); - assertThat(db.getLongProperty("rocksdb.cur-size-active-mem-table")).isGreaterThan(0); - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } - if (wOpt != null) { - wOpt.dispose(); - } + assertThat(db.getLongProperty("rocksdb.num-entries-active-mem-table")) + .isGreaterThan(0); + assertThat(db.getLongProperty("rocksdb.cur-size-active-mem-table")) + .isGreaterThan(0); } } @Test public void fullCompactRange() throws RocksDBException { - RocksDB db = null; - Options opt = null; - try { - opt = new Options(). - setCreateIfMissing(true). - setDisableAutoCompactions(true). - setCompactionStyle(CompactionStyle.LEVEL). - setNumLevels(4). - setWriteBufferSize(100<<10). - setLevelZeroFileNumCompactionTrigger(3). - setTargetFileSizeBase(200 << 10). - setTargetFileSizeMultiplier(1). - setMaxBytesForLevelBase(500 << 10). - setMaxBytesForLevelMultiplier(1). 
- setDisableAutoCompactions(false); - // open database - db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options(). + setCreateIfMissing(true). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { // fill database with key/value pairs byte[] b = new byte[10000]; for (int i = 0; i < 200; i++) { @@ -339,66 +253,53 @@ public class RocksDBTest { db.put((String.valueOf(i)).getBytes(), b); } db.compactRange(); - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } @Test public void fullCompactRangeColumnFamily() throws RocksDBException { - RocksDB db = null; - DBOptions opt = null; - List columnFamilyHandles = - new ArrayList<>(); - try { - opt = new DBOptions(). - setCreateIfMissing(true). - setCreateMissingColumnFamilies(true); - List columnFamilyDescriptors = - new ArrayList<>(); - columnFamilyDescriptors.add(new ColumnFamilyDescriptor( - RocksDB.DEFAULT_COLUMN_FAMILY)); - columnFamilyDescriptors.add(new ColumnFamilyDescriptor( - "new_cf".getBytes(), - new ColumnFamilyOptions(). - setDisableAutoCompactions(true). - setCompactionStyle(CompactionStyle.LEVEL). - setNumLevels(4). - setWriteBufferSize(100 << 10). - setLevelZeroFileNumCompactionTrigger(3). - setTargetFileSizeBase(200 << 10). - setTargetFileSizeMultiplier(1). - setMaxBytesForLevelBase(500 << 10). - setMaxBytesForLevelMultiplier(1). - setDisableAutoCompactions(false))); + try ( + final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). 
+ setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false) + ) { + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)); + // open database - db = RocksDB.open(opt, + final List columnFamilyHandles = new ArrayList<>(); + try (final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, - columnFamilyHandles); - // fill database with key/value pairs - byte[] b = new byte[10000]; - for (int i = 0; i < 200; i++) { - rand.nextBytes(b); - db.put(columnFamilyHandles.get(1), - String.valueOf(i).getBytes(), b); - } - db.compactRange(columnFamilyHandles.get(1)); - } finally { - for (ColumnFamilyHandle handle : columnFamilyHandles) { - handle.dispose(); - } - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + columnFamilyHandles)) { + try { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put(columnFamilyHandles.get(1), + String.valueOf(i).getBytes(), b); + } + db.compactRange(columnFamilyHandles.get(1)); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } } } } @@ -406,24 +307,20 @@ public class RocksDBTest { @Test public void compactRangeWithKeys() throws RocksDBException { - RocksDB db = null; - Options opt = null; - try { - opt = new Options(). - setCreateIfMissing(true). - setDisableAutoCompactions(true). - setCompactionStyle(CompactionStyle.LEVEL). 
- setNumLevels(4). - setWriteBufferSize(100<<10). - setLevelZeroFileNumCompactionTrigger(3). - setTargetFileSizeBase(200 << 10). - setTargetFileSizeMultiplier(1). - setMaxBytesForLevelBase(500 << 10). - setMaxBytesForLevelMultiplier(1). - setDisableAutoCompactions(false); - // open database - db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); + try (final Options opt = new Options(). + setCreateIfMissing(true). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { // fill database with key/value pairs byte[] b = new byte[10000]; for (int i = 0; i < 200; i++) { @@ -431,37 +328,27 @@ public class RocksDBTest { db.put((String.valueOf(i)).getBytes(), b); } db.compactRange("0".getBytes(), "201".getBytes()); - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } @Test public void compactRangeWithKeysReduce() throws RocksDBException { - RocksDB db = null; - Options opt = null; - try { - opt = new Options(). - setCreateIfMissing(true). - setDisableAutoCompactions(true). - setCompactionStyle(CompactionStyle.LEVEL). - setNumLevels(4). - setWriteBufferSize(100<<10). - setLevelZeroFileNumCompactionTrigger(3). - setTargetFileSizeBase(200 << 10). - setTargetFileSizeMultiplier(1). - setMaxBytesForLevelBase(500 << 10). - setMaxBytesForLevelMultiplier(1). - setDisableAutoCompactions(false); - // open database - db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); + try ( + final Options opt = new Options(). + setCreateIfMissing(true). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). 
+ setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { // fill database with key/value pairs byte[] b = new byte[10000]; for (int i = 0; i < 200; i++) { @@ -471,67 +358,55 @@ public class RocksDBTest { db.flush(new FlushOptions().setWaitForFlush(true)); db.compactRange("0".getBytes(), "201".getBytes(), true, -1, 0); - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } @Test public void compactRangeWithKeysColumnFamily() throws RocksDBException { - RocksDB db = null; - DBOptions opt = null; - List columnFamilyHandles = - new ArrayList<>(); - try { - opt = new DBOptions(). - setCreateIfMissing(true). - setCreateMissingColumnFamilies(true); - List columnFamilyDescriptors = - new ArrayList<>(); - columnFamilyDescriptors.add(new ColumnFamilyDescriptor( - RocksDB.DEFAULT_COLUMN_FAMILY)); - columnFamilyDescriptors.add(new ColumnFamilyDescriptor( - "new_cf".getBytes(), - new ColumnFamilyOptions(). - setDisableAutoCompactions(true). - setCompactionStyle(CompactionStyle.LEVEL). - setNumLevels(4). - setWriteBufferSize(100<<10). - setLevelZeroFileNumCompactionTrigger(3). - setTargetFileSizeBase(200 << 10). - setTargetFileSizeMultiplier(1). - setMaxBytesForLevelBase(500 << 10). - setMaxBytesForLevelMultiplier(1). - setDisableAutoCompactions(false))); + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). 
+ setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false) + ) { + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts) + ); + // open database - db = RocksDB.open(opt, + final List columnFamilyHandles = + new ArrayList<>(); + try (final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, - columnFamilyHandles); - // fill database with key/value pairs - byte[] b = new byte[10000]; - for (int i = 0; i < 200; i++) { - rand.nextBytes(b); - db.put(columnFamilyHandles.get(1), - String.valueOf(i).getBytes(), b); - } - db.compactRange(columnFamilyHandles.get(1), - "0".getBytes(), "201".getBytes()); - } finally { - for (ColumnFamilyHandle handle : columnFamilyHandles) { - handle.dispose(); - } - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + columnFamilyHandles)) { + try { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put(columnFamilyHandles.get(1), + String.valueOf(i).getBytes(), b); + } + db.compactRange(columnFamilyHandles.get(1), + "0".getBytes(), "201".getBytes()); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } } } } @@ -539,54 +414,48 @@ public class RocksDBTest { @Test public void compactRangeWithKeysReduceColumnFamily() throws RocksDBException { - RocksDB db = null; - DBOptions opt = null; - List columnFamilyHandles = - new ArrayList<>(); - try { - opt = new DBOptions(). - setCreateIfMissing(true). 
- setCreateMissingColumnFamilies(true); - List columnFamilyDescriptors = - new ArrayList<>(); - columnFamilyDescriptors.add(new ColumnFamilyDescriptor( - RocksDB.DEFAULT_COLUMN_FAMILY)); - columnFamilyDescriptors.add(new ColumnFamilyDescriptor( - "new_cf".getBytes(), - new ColumnFamilyOptions(). - setDisableAutoCompactions(true). - setCompactionStyle(CompactionStyle.LEVEL). - setNumLevels(4). - setWriteBufferSize(100<<10). - setLevelZeroFileNumCompactionTrigger(3). - setTargetFileSizeBase(200 << 10). - setTargetFileSizeMultiplier(1). - setMaxBytesForLevelBase(500 << 10). - setMaxBytesForLevelMultiplier(1). - setDisableAutoCompactions(false))); + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). 
+ setDisableAutoCompactions(false) + ) { + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts) + ); + + final List columnFamilyHandles = new ArrayList<>(); // open database - db = RocksDB.open(opt, + try (final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, - columnFamilyHandles); - // fill database with key/value pairs - byte[] b = new byte[10000]; - for (int i = 0; i < 200; i++) { - rand.nextBytes(b); - db.put(columnFamilyHandles.get(1), - String.valueOf(i).getBytes(), b); - } - db.compactRange(columnFamilyHandles.get(1), "0".getBytes(), - "201".getBytes(), true, -1, 0); - } finally { - for (ColumnFamilyHandle handle : columnFamilyHandles) { - handle.dispose(); - } - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); + columnFamilyHandles)) { + try { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put(columnFamilyHandles.get(1), + String.valueOf(i).getBytes(), b); + } + db.compactRange(columnFamilyHandles.get(1), "0".getBytes(), + "201".getBytes(), true, -1, 0); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } } } } @@ -594,35 +463,33 @@ public class RocksDBTest { @Test public void compactRangeToLevel() throws RocksDBException, InterruptedException { - RocksDB db = null; - Options opt = null; - try { - final int NUM_KEYS_PER_L0_FILE = 100; - final int KEY_SIZE = 20; - final int VALUE_SIZE = 300; - final int L0_FILE_SIZE = - NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE); - final int NUM_L0_FILES = 10; - final int TEST_SCALE = 5; - final int KEY_INTERVAL = 100; - opt = new Options(). - setCreateIfMissing(true). - setCompactionStyle(CompactionStyle.LEVEL). - setNumLevels(5). 
- // a slightly bigger write buffer than L0 file - // so that we can ensure manual flush always - // go before background flush happens. - setWriteBufferSize(L0_FILE_SIZE * 2). - // Disable auto L0 -> L1 compaction - setLevelZeroFileNumCompactionTrigger(20). - setTargetFileSizeBase(L0_FILE_SIZE * 100). - setTargetFileSizeMultiplier(1). - // To disable auto compaction - setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100). - setMaxBytesForLevelMultiplier(2). - setDisableAutoCompactions(true); - db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); + final int NUM_KEYS_PER_L0_FILE = 100; + final int KEY_SIZE = 20; + final int VALUE_SIZE = 300; + final int L0_FILE_SIZE = + NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE); + final int NUM_L0_FILES = 10; + final int TEST_SCALE = 5; + final int KEY_INTERVAL = 100; + try (final Options opt = new Options(). + setCreateIfMissing(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(5). + // a slightly bigger write buffer than L0 file + // so that we can ensure manual flush always + // go before background flush happens. + setWriteBufferSize(L0_FILE_SIZE * 2). + // Disable auto L0 -> L1 compaction + setLevelZeroFileNumCompactionTrigger(20). + setTargetFileSizeBase(L0_FILE_SIZE * 100). + setTargetFileSizeMultiplier(1). + // To disable auto compaction + setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100). + setMaxBytesForLevelMultiplier(2). + setDisableAutoCompactions(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath()) + ) { // fill database with key/value pairs byte[] value = new byte[VALUE_SIZE]; int int_key = 0; @@ -634,7 +501,7 @@ public class RocksDBTest { rand.nextBytes(value); db.put(String.format("%020d", int_key).getBytes(), - value); + value); } db.flush(new FlushOptions().setWaitForFlush(true)); // Make sure we do create one more L0 files. @@ -665,141 +532,121 @@ public class RocksDBTest { db.getProperty("rocksdb.num-files-at-level2")). 
isEqualTo("0"); } - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } @Test public void compactRangeToLevelColumnFamily() throws RocksDBException { - RocksDB db = null; - DBOptions opt = null; - List columnFamilyHandles = - new ArrayList<>(); - try { - final int NUM_KEYS_PER_L0_FILE = 100; - final int KEY_SIZE = 20; - final int VALUE_SIZE = 300; - final int L0_FILE_SIZE = - NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE); - final int NUM_L0_FILES = 10; - final int TEST_SCALE = 5; - final int KEY_INTERVAL = 100; - opt = new DBOptions(). - setCreateIfMissing(true). - setCreateMissingColumnFamilies(true); - List columnFamilyDescriptors = - new ArrayList<>(); - columnFamilyDescriptors.add(new ColumnFamilyDescriptor( - RocksDB.DEFAULT_COLUMN_FAMILY)); - columnFamilyDescriptors.add(new ColumnFamilyDescriptor( - "new_cf".getBytes(), - new ColumnFamilyOptions(). - setCompactionStyle(CompactionStyle.LEVEL). - setNumLevels(5). - // a slightly bigger write buffer than L0 file - // so that we can ensure manual flush always - // go before background flush happens. - setWriteBufferSize(L0_FILE_SIZE * 2). - // Disable auto L0 -> L1 compaction - setLevelZeroFileNumCompactionTrigger(20). - setTargetFileSizeBase(L0_FILE_SIZE * 100). - setTargetFileSizeMultiplier(1). - // To disable auto compaction - setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100). - setMaxBytesForLevelMultiplier(2). - setDisableAutoCompactions(true))); + final int NUM_KEYS_PER_L0_FILE = 100; + final int KEY_SIZE = 20; + final int VALUE_SIZE = 300; + final int L0_FILE_SIZE = + NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE); + final int NUM_L0_FILES = 10; + final int TEST_SCALE = 5; + final int KEY_INTERVAL = 100; + + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(5). 
+ // a slightly bigger write buffer than L0 file + // so that we can ensure manual flush always + // go before background flush happens. + setWriteBufferSize(L0_FILE_SIZE * 2). + // Disable auto L0 -> L1 compaction + setLevelZeroFileNumCompactionTrigger(20). + setTargetFileSizeBase(L0_FILE_SIZE * 100). + setTargetFileSizeMultiplier(1). + // To disable auto compaction + setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100). + setMaxBytesForLevelMultiplier(2). + setDisableAutoCompactions(true) + ) { + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts) + ); + + final List columnFamilyHandles = new ArrayList<>(); // open database - db = RocksDB.open(opt, + try (final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, - columnFamilyHandles); - // fill database with key/value pairs - byte[] value = new byte[VALUE_SIZE]; - int int_key = 0; - for (int round = 0; round < 5; ++round) { - int initial_key = int_key; - for (int f = 1; f <= NUM_L0_FILES; ++f) { - for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) { - int_key += KEY_INTERVAL; - rand.nextBytes(value); + columnFamilyHandles)) { + try { + // fill database with key/value pairs + byte[] value = new byte[VALUE_SIZE]; + int int_key = 0; + for (int round = 0; round < 5; ++round) { + int initial_key = int_key; + for (int f = 1; f <= NUM_L0_FILES; ++f) { + for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) { + int_key += KEY_INTERVAL; + rand.nextBytes(value); - db.put(columnFamilyHandles.get(1), - String.format("%020d", int_key).getBytes(), - value); + db.put(columnFamilyHandles.get(1), + String.format("%020d", int_key).getBytes(), + value); + } + db.flush(new FlushOptions().setWaitForFlush(true), + columnFamilyHandles.get(1)); + // Make sure we do create one more L0 files. 
+ assertThat( + db.getProperty(columnFamilyHandles.get(1), + "rocksdb.num-files-at-level0")). + isEqualTo("" + f); + } + + // Compact all L0 files we just created + db.compactRange( + columnFamilyHandles.get(1), + String.format("%020d", initial_key).getBytes(), + String.format("%020d", int_key - 1).getBytes()); + // Making sure there isn't any L0 files. + assertThat( + db.getProperty(columnFamilyHandles.get(1), + "rocksdb.num-files-at-level0")). + isEqualTo("0"); + // Making sure there are some L1 files. + // Here we only use != 0 instead of a specific number + // as we don't want the test make any assumption on + // how compaction works. + assertThat( + db.getProperty(columnFamilyHandles.get(1), + "rocksdb.num-files-at-level1")). + isNotEqualTo("0"); + // Because we only compacted those keys we issued + // in this round, there shouldn't be any L1 -> L2 + // compaction. So we expect zero L2 files here. + assertThat( + db.getProperty(columnFamilyHandles.get(1), + "rocksdb.num-files-at-level2")). + isEqualTo("0"); + } + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); } - db.flush(new FlushOptions().setWaitForFlush(true), - columnFamilyHandles.get(1)); - // Make sure we do create one more L0 files. - assertThat( - db.getProperty(columnFamilyHandles.get(1), - "rocksdb.num-files-at-level0")). - isEqualTo("" + f); } - - // Compact all L0 files we just created - db.compactRange( - columnFamilyHandles.get(1), - String.format("%020d", initial_key).getBytes(), - String.format("%020d", int_key - 1).getBytes()); - // Making sure there isn't any L0 files. - assertThat( - db.getProperty(columnFamilyHandles.get(1), - "rocksdb.num-files-at-level0")). - isEqualTo("0"); - // Making sure there are some L1 files. - // Here we only use != 0 instead of a specific number - // as we don't want the test make any assumption on - // how compaction works. - assertThat( - db.getProperty(columnFamilyHandles.get(1), - "rocksdb.num-files-at-level1")). 
- isNotEqualTo("0"); - // Because we only compacted those keys we issued - // in this round, there shouldn't be any L1 -> L2 - // compaction. So we expect zero L2 files here. - assertThat( - db.getProperty(columnFamilyHandles.get(1), - "rocksdb.num-files-at-level2")). - isEqualTo("0"); - } - } finally { - for (ColumnFamilyHandle handle : columnFamilyHandles) { - handle.dispose(); - } - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); } } } @Test public void enableDisableFileDeletions() throws RocksDBException { - RocksDB db = null; - Options options = null; - try { - options = new Options().setCreateIfMissing(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { db.disableFileDeletions(); db.enableFileDeletions(false); db.disableFileDeletions(); db.enableFileDeletions(true); - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/RocksEnvTest.java b/java/src/test/java/org/rocksdb/RocksEnvTest.java index a051a3562..d89570aad 100644 --- a/java/src/test/java/org/rocksdb/RocksEnvTest.java +++ b/java/src/test/java/org/rocksdb/RocksEnvTest.java @@ -17,22 +17,23 @@ public class RocksEnvTest { new RocksMemoryResource(); @Test - public void rocksEnv(){ - Env rocksEnv = RocksEnv.getDefault(); - rocksEnv.setBackgroundThreads(5); - // default rocksenv will always return zero for flush pool - // no matter what was set via setBackgroundThreads - assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)). - isEqualTo(0); - rocksEnv.setBackgroundThreads(5, RocksEnv.FLUSH_POOL); - // default rocksenv will always return zero for flush pool - // no matter what was set via setBackgroundThreads - assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)). 
- isEqualTo(0); - rocksEnv.setBackgroundThreads(5, RocksEnv.COMPACTION_POOL); - // default rocksenv will always return zero for compaction pool - // no matter what was set via setBackgroundThreads - assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.COMPACTION_POOL)). - isEqualTo(0); + public void rocksEnv() { + try (final Env rocksEnv = RocksEnv.getDefault()) { + rocksEnv.setBackgroundThreads(5); + // default rocksenv will always return zero for flush pool + // no matter what was set via setBackgroundThreads + assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)). + isEqualTo(0); + rocksEnv.setBackgroundThreads(5, RocksEnv.FLUSH_POOL); + // default rocksenv will always return zero for flush pool + // no matter what was set via setBackgroundThreads + assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)). + isEqualTo(0); + rocksEnv.setBackgroundThreads(5, RocksEnv.COMPACTION_POOL); + // default rocksenv will always return zero for compaction pool + // no matter what was set via setBackgroundThreads + assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.COMPACTION_POOL)). 
+ isEqualTo(0); + } } } diff --git a/java/src/test/java/org/rocksdb/RocksIteratorTest.java b/java/src/test/java/org/rocksdb/RocksIteratorTest.java index eb841d3e6..4471df9cc 100644 --- a/java/src/test/java/org/rocksdb/RocksIteratorTest.java +++ b/java/src/test/java/org/rocksdb/RocksIteratorTest.java @@ -22,50 +22,36 @@ public class RocksIteratorTest { @Test public void rocksIterator() throws RocksDBException { - RocksDB db = null; - Options options = null; - RocksIterator iterator = null; - try { - options = new Options(); - options.setCreateIfMissing(true) - .setCreateMissingColumnFamilies(true); - db = RocksDB.open(options, - dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { db.put("key1".getBytes(), "value1".getBytes()); db.put("key2".getBytes(), "value2".getBytes()); - iterator = db.newIterator(); - - iterator.seekToFirst(); - assertThat(iterator.isValid()).isTrue(); - assertThat(iterator.key()).isEqualTo("key1".getBytes()); - assertThat(iterator.value()).isEqualTo("value1".getBytes()); - iterator.next(); - assertThat(iterator.isValid()).isTrue(); - assertThat(iterator.key()).isEqualTo("key2".getBytes()); - assertThat(iterator.value()).isEqualTo("value2".getBytes()); - iterator.next(); - assertThat(iterator.isValid()).isFalse(); - iterator.seekToLast(); - iterator.prev(); - assertThat(iterator.isValid()).isTrue(); - assertThat(iterator.key()).isEqualTo("key1".getBytes()); - assertThat(iterator.value()).isEqualTo("value1".getBytes()); - iterator.seekToFirst(); - iterator.seekToLast(); - assertThat(iterator.isValid()).isTrue(); - assertThat(iterator.key()).isEqualTo("key2".getBytes()); - assertThat(iterator.value()).isEqualTo("value2".getBytes()); - iterator.status(); - } finally { - if (iterator != null) { - iterator.dispose(); - } - if (db != null) { - db.close(); - } - if 
(options != null) { - options.dispose(); + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isFalse(); + iterator.seekToLast(); + iterator.prev(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + iterator.seekToFirst(); + iterator.seekToLast(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + iterator.status(); } } } diff --git a/java/src/test/java/org/rocksdb/RocksMemEnvTest.java b/java/src/test/java/org/rocksdb/RocksMemEnvTest.java index 7530e51b1..141f7f850 100644 --- a/java/src/test/java/org/rocksdb/RocksMemEnvTest.java +++ b/java/src/test/java/org/rocksdb/RocksMemEnvTest.java @@ -33,73 +33,55 @@ public class RocksMemEnvTest { "baz".getBytes() }; - Env env = null; - Options options = null; - RocksDB db = null; - FlushOptions flushOptions = null; - try { - env = new RocksMemEnv(); - options = new Options(). - setCreateIfMissing(true). - setEnv(env); - flushOptions = new FlushOptions(). 
- setWaitForFlush(true); - db = RocksDB.open(options, "dir/db"); + try (final Env env = new RocksMemEnv(); + final Options options = new Options() + .setCreateIfMissing(true) + .setEnv(env); + final FlushOptions flushOptions = new FlushOptions() + .setWaitForFlush(true); + ) { + try (final RocksDB db = RocksDB.open(options, "dir/db")) { + // write key/value pairs using MemEnv + for (int i = 0; i < keys.length; i++) { + db.put(keys[i], values[i]); + } - // write key/value pairs using MemEnv - for (int i=0; i < keys.length; i++) { - db.put(keys[i], values[i]); + // read key/value pairs using MemEnv + for (int i = 0; i < keys.length; i++) { + assertThat(db.get(keys[i])).isEqualTo(values[i]); + } + + // Check iterator access + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + for (int i = 0; i < keys.length; i++) { + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo(keys[i]); + assertThat(iterator.value()).isEqualTo(values[i]); + iterator.next(); + } + // reached end of database + assertThat(iterator.isValid()).isFalse(); + } + + // flush + db.flush(flushOptions); + + // read key/value pairs after flush using MemEnv + for (int i = 0; i < keys.length; i++) { + assertThat(db.get(keys[i])).isEqualTo(values[i]); + } } - // read key/value pairs using MemEnv - for (int i=0; i < keys.length; i++) { - assertThat(db.get(keys[i])).isEqualTo(values[i]); - } - - // Check iterator access - RocksIterator iterator = db.newIterator(); - iterator.seekToFirst(); - for (int i=0; i < keys.length; i++) { - assertThat(iterator.isValid()).isTrue(); - assertThat(iterator.key()).isEqualTo(keys[i]); - assertThat(iterator.value()).isEqualTo(values[i]); - iterator.next(); - } - // reached end of database - assertThat(iterator.isValid()).isFalse(); - iterator.dispose(); - - // flush - db.flush(flushOptions); - - // read key/value pairs after flush using MemEnv - for (int i=0; i < keys.length; i++) { - 
assertThat(db.get(keys[i])).isEqualTo(values[i]); - } - - db.close(); options.setCreateIfMissing(false); // After reopen the values shall still be in the mem env. // as long as the env is not freed. - db = RocksDB.open(options, "dir/db"); - // read key/value pairs using MemEnv - for (int i=0; i < keys.length; i++) { - assertThat(db.get(keys[i])).isEqualTo(values[i]); - } - - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } - if (flushOptions != null) { - flushOptions.dispose(); - } - if (env != null) { - env.dispose(); + try (final RocksDB db = RocksDB.open(options, "dir/db")) { + // read key/value pairs using MemEnv + for (int i = 0; i < keys.length; i++) { + assertThat(db.get(keys[i])).isEqualTo(values[i]); + } } } } @@ -125,27 +107,22 @@ public class RocksMemEnvTest { "baz".getBytes() }; - Env env = null; - Options options = null; - RocksDB db = null, otherDb = null; - - try { - env = new RocksMemEnv(); - options = new Options(). - setCreateIfMissing(true). - setEnv(env); - db = RocksDB.open(options, "dir/db"); - otherDb = RocksDB.open(options, "dir/otherDb"); - + try (final Env env = new RocksMemEnv(); + final Options options = new Options() + .setCreateIfMissing(true) + .setEnv(env); + final RocksDB db = RocksDB.open(options, "dir/db"); + final RocksDB otherDb = RocksDB.open(options, "dir/otherDb") + ) { // write key/value pairs using MemEnv // to db and to otherDb. 
- for (int i=0; i < keys.length; i++) { + for (int i = 0; i < keys.length; i++) { db.put(keys[i], values[i]); otherDb.put(otherKeys[i], values[i]); } // verify key/value pairs after flush using MemEnv - for (int i=0; i < keys.length; i++) { + for (int i = 0; i < keys.length; i++) { // verify db assertThat(db.get(otherKeys[i])).isNull(); assertThat(db.get(keys[i])).isEqualTo(values[i]); @@ -154,43 +131,18 @@ public class RocksMemEnvTest { assertThat(otherDb.get(keys[i])).isNull(); assertThat(otherDb.get(otherKeys[i])).isEqualTo(values[i]); } - } finally { - if (db != null) { - db.close(); - } - if (otherDb != null) { - otherDb.close(); - } - if (options != null) { - options.dispose(); - } - if (env != null) { - env.dispose(); - } } } @Test(expected = RocksDBException.class) public void createIfMissingFalse() throws RocksDBException { - Env env = null; - Options options = null; - RocksDB db = null; - - try { - env = new RocksMemEnv(); - options = new Options(). - setCreateIfMissing(false). - setEnv(env); + try (final Env env = new RocksMemEnv(); + final Options options = new Options() + .setCreateIfMissing(false) + .setEnv(env); + final RocksDB db = RocksDB.open(options, "db/dir")) { // shall throw an exception because db dir does not // exist. - db = RocksDB.open(options, "db/dir"); - } finally { - if (options != null) { - options.dispose(); - } - if (env != null) { - env.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/RocksMemoryResource.java b/java/src/test/java/org/rocksdb/RocksMemoryResource.java index de9ba0d6b..6fd1c7e66 100644 --- a/java/src/test/java/org/rocksdb/RocksMemoryResource.java +++ b/java/src/test/java/org/rocksdb/RocksMemoryResource.java @@ -5,7 +5,11 @@ import org.junit.rules.ExternalResource; /** * Resource to trigger garbage collection after each test * run. 
+ * + * @deprecated Will be removed with the implementation of + * {@link RocksObject#finalize()} */ +@Deprecated public class RocksMemoryResource extends ExternalResource { static { diff --git a/java/src/test/java/org/rocksdb/SliceTest.java b/java/src/test/java/org/rocksdb/SliceTest.java index 51f542fa5..952c9ab86 100644 --- a/java/src/test/java/org/rocksdb/SliceTest.java +++ b/java/src/test/java/org/rocksdb/SliceTest.java @@ -17,89 +17,45 @@ public class SliceTest { @Test public void slice() { - Slice slice = null; - Slice otherSlice = null; - Slice thirdSlice = null; - try { - slice = new Slice("testSlice"); + try (final Slice slice = new Slice("testSlice")) { assertThat(slice.empty()).isFalse(); assertThat(slice.size()).isEqualTo(9); assertThat(slice.data()).isEqualTo("testSlice".getBytes()); + } - otherSlice = new Slice("otherSlice".getBytes()); + try (final Slice otherSlice = new Slice("otherSlice".getBytes())) { assertThat(otherSlice.data()).isEqualTo("otherSlice".getBytes()); + } - thirdSlice = new Slice("otherSlice".getBytes(), 5); + try (final Slice thirdSlice = new Slice("otherSlice".getBytes(), 5)) { assertThat(thirdSlice.data()).isEqualTo("Slice".getBytes()); - } finally { - if (slice != null) { - slice.dispose(); - } - if (otherSlice != null) { - otherSlice.dispose(); - } - if (thirdSlice != null) { - thirdSlice.dispose(); - } } } @Test public void sliceEquals() { - Slice slice = null; - Slice slice2 = null; - try { - slice = new Slice("abc"); - slice2 = new Slice("abc"); + try (final Slice slice = new Slice("abc"); + final Slice slice2 = new Slice("abc")) { assertThat(slice.equals(slice2)).isTrue(); assertThat(slice.hashCode() == slice2.hashCode()).isTrue(); - } finally { - if (slice != null) { - slice.dispose(); - } - if (slice2 != null) { - slice2.dispose(); - } } } - @Test public void sliceStartWith() { - Slice slice = null; - Slice match = null; - Slice noMatch = null; - try { - slice = new Slice("matchpoint"); - match = new Slice("mat"); - 
noMatch = new Slice("nomatch"); - - //assertThat(slice.startsWith(match)).isTrue(); + try (final Slice slice = new Slice("matchpoint"); + final Slice match = new Slice("mat"); + final Slice noMatch = new Slice("nomatch")) { + assertThat(slice.startsWith(match)).isTrue(); assertThat(slice.startsWith(noMatch)).isFalse(); - } finally { - if (slice != null) { - slice.dispose(); - } - if (match != null) { - match.dispose(); - } - if (noMatch != null) { - noMatch.dispose(); - } } } @Test public void sliceToString() { - Slice slice = null; - try { - slice = new Slice("stringTest"); + try (final Slice slice = new Slice("stringTest")) { assertThat(slice.toString()).isEqualTo("stringTest"); assertThat(slice.toString(true)).isNotEqualTo(""); - } finally { - if (slice != null) { - slice.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/SnapshotTest.java b/java/src/test/java/org/rocksdb/SnapshotTest.java index 19e4c5021..581bae50b 100644 --- a/java/src/test/java/org/rocksdb/SnapshotTest.java +++ b/java/src/test/java/org/rocksdb/SnapshotTest.java @@ -22,195 +22,147 @@ public class SnapshotTest { @Test public void snapshots() throws RocksDBException { - RocksDB db = null; - Options options = null; - ReadOptions readOptions = null; - try { - - options = new Options(); - options.setCreateIfMissing(true); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { db.put("key".getBytes(), "value".getBytes()); // Get new Snapshot of database - Snapshot snapshot = db.getSnapshot(); - assertThat(snapshot.getSequenceNumber()).isGreaterThan(0); - assertThat(snapshot.getSequenceNumber()).isEqualTo(1); - readOptions = new ReadOptions(); - // set snapshot in ReadOptions - readOptions.setSnapshot(snapshot); - // retrieve key value pair - assertThat(new String(db.get("key".getBytes()))). 
- isEqualTo("value"); - // retrieve key value pair created before - // the snapshot was made - assertThat(new String(db.get(readOptions, - "key".getBytes()))).isEqualTo("value"); - // add new key/value pair - db.put("newkey".getBytes(), "newvalue".getBytes()); - // using no snapshot the latest db entries - // will be taken into account - assertThat(new String(db.get("newkey".getBytes()))). - isEqualTo("newvalue"); - // snapshopot was created before newkey - assertThat(db.get(readOptions, "newkey".getBytes())). - isNull(); - // Retrieve snapshot from read options - Snapshot sameSnapshot = readOptions.snapshot(); - readOptions.setSnapshot(sameSnapshot); - // results must be the same with new Snapshot - // instance using the same native pointer - assertThat(new String(db.get(readOptions, - "key".getBytes()))).isEqualTo("value"); - // update key value pair to newvalue - db.put("key".getBytes(), "newvalue".getBytes()); - // read with previously created snapshot will - // read previous version of key value pair - assertThat(new String(db.get(readOptions, - "key".getBytes()))).isEqualTo("value"); - // read for newkey using the snapshot must be - // null - assertThat(db.get(readOptions, "newkey".getBytes())). - isNull(); - // setting null to snapshot in ReadOptions leads - // to no Snapshot being used. 
- readOptions.setSnapshot(null); - assertThat(new String(db.get(readOptions, - "newkey".getBytes()))).isEqualTo("newvalue"); - // release Snapshot - db.releaseSnapshot(snapshot); - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } - if (readOptions != null) { - readOptions.dispose(); + try (final Snapshot snapshot = db.getSnapshot()) { + assertThat(snapshot.getSequenceNumber()).isGreaterThan(0); + assertThat(snapshot.getSequenceNumber()).isEqualTo(1); + try (final ReadOptions readOptions = new ReadOptions()) { + // set snapshot in ReadOptions + readOptions.setSnapshot(snapshot); + + // retrieve key value pair + assertThat(new String(db.get("key".getBytes()))). + isEqualTo("value"); + // retrieve key value pair created before + // the snapshot was made + assertThat(new String(db.get(readOptions, + "key".getBytes()))).isEqualTo("value"); + // add new key/value pair + db.put("newkey".getBytes(), "newvalue".getBytes()); + // using no snapshot the latest db entries + // will be taken into account + assertThat(new String(db.get("newkey".getBytes()))). + isEqualTo("newvalue"); + // snapshopot was created before newkey + assertThat(db.get(readOptions, "newkey".getBytes())). + isNull(); + // Retrieve snapshot from read options + try (final Snapshot sameSnapshot = readOptions.snapshot()) { + readOptions.setSnapshot(sameSnapshot); + // results must be the same with new Snapshot + // instance using the same native pointer + assertThat(new String(db.get(readOptions, + "key".getBytes()))).isEqualTo("value"); + // update key value pair to newvalue + db.put("key".getBytes(), "newvalue".getBytes()); + // read with previously created snapshot will + // read previous version of key value pair + assertThat(new String(db.get(readOptions, + "key".getBytes()))).isEqualTo("value"); + // read for newkey using the snapshot must be + // null + assertThat(db.get(readOptions, "newkey".getBytes())). 
+ isNull(); + // setting null to snapshot in ReadOptions leads + // to no Snapshot being used. + readOptions.setSnapshot(null); + assertThat(new String(db.get(readOptions, + "newkey".getBytes()))).isEqualTo("newvalue"); + // release Snapshot + db.releaseSnapshot(snapshot); + } + } } } } @Test public void iteratorWithSnapshot() throws RocksDBException { - RocksDB db = null; - Options options = null; - ReadOptions readOptions = null; - RocksIterator iterator = null; - RocksIterator snapshotIterator = null; - try { - - options = new Options(); - options.setCreateIfMissing(true); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { db.put("key".getBytes(), "value".getBytes()); + // Get new Snapshot of database - Snapshot snapshot = db.getSnapshot(); - readOptions = new ReadOptions(); // set snapshot in ReadOptions - readOptions.setSnapshot(snapshot); - db.put("key2".getBytes(), "value2".getBytes()); + try (final Snapshot snapshot = db.getSnapshot(); + final ReadOptions readOptions = + new ReadOptions().setSnapshot(snapshot)) { + db.put("key2".getBytes(), "value2".getBytes()); - // iterate over current state of db - iterator = db.newIterator(); - iterator.seekToFirst(); - assertThat(iterator.isValid()).isTrue(); - assertThat(iterator.key()).isEqualTo("key".getBytes()); - iterator.next(); - assertThat(iterator.isValid()).isTrue(); - assertThat(iterator.key()).isEqualTo("key2".getBytes()); - iterator.next(); - assertThat(iterator.isValid()).isFalse(); + // iterate over current state of db + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + 
iterator.next(); + assertThat(iterator.isValid()).isFalse(); + } - // iterate using a snapshot - snapshotIterator = db.newIterator(readOptions); - snapshotIterator.seekToFirst(); - assertThat(snapshotIterator.isValid()).isTrue(); - assertThat(snapshotIterator.key()).isEqualTo("key".getBytes()); - snapshotIterator.next(); - assertThat(snapshotIterator.isValid()).isFalse(); + // iterate using a snapshot + try (final RocksIterator snapshotIterator = + db.newIterator(readOptions)) { + snapshotIterator.seekToFirst(); + assertThat(snapshotIterator.isValid()).isTrue(); + assertThat(snapshotIterator.key()).isEqualTo("key".getBytes()); + snapshotIterator.next(); + assertThat(snapshotIterator.isValid()).isFalse(); + } - // release Snapshot - db.releaseSnapshot(snapshot); - } finally { - if (iterator != null) { - iterator.dispose(); - } - if (snapshotIterator != null) { - snapshotIterator.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } - if (readOptions != null) { - readOptions.dispose(); + // release Snapshot + db.releaseSnapshot(snapshot); } } } @Test public void iteratorWithSnapshotOnColumnFamily() throws RocksDBException { - RocksDB db = null; - Options options = null; - ReadOptions readOptions = null; - RocksIterator iterator = null; - RocksIterator snapshotIterator = null; - try { + try (final Options options = new Options() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { - options = new Options(); - options.setCreateIfMissing(true); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); db.put("key".getBytes(), "value".getBytes()); + // Get new Snapshot of database - Snapshot snapshot = db.getSnapshot(); - readOptions = new ReadOptions(); // set snapshot in ReadOptions - readOptions.setSnapshot(snapshot); - db.put("key2".getBytes(), "value2".getBytes()); + try (final Snapshot snapshot = db.getSnapshot(); + final ReadOptions 
readOptions = new ReadOptions() + .setSnapshot(snapshot)) { + db.put("key2".getBytes(), "value2".getBytes()); - // iterate over current state of column family - iterator = db.newIterator(db.getDefaultColumnFamily()); - iterator.seekToFirst(); - assertThat(iterator.isValid()).isTrue(); - assertThat(iterator.key()).isEqualTo("key".getBytes()); - iterator.next(); - assertThat(iterator.isValid()).isTrue(); - assertThat(iterator.key()).isEqualTo("key2".getBytes()); - iterator.next(); - assertThat(iterator.isValid()).isFalse(); + // iterate over current state of column family + try (final RocksIterator iterator = db.newIterator( + db.getDefaultColumnFamily())) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isFalse(); + } - // iterate using a snapshot on default column family - snapshotIterator = db.newIterator(db.getDefaultColumnFamily(), - readOptions); - snapshotIterator.seekToFirst(); - assertThat(snapshotIterator.isValid()).isTrue(); - assertThat(snapshotIterator.key()).isEqualTo("key".getBytes()); - snapshotIterator.next(); - assertThat(snapshotIterator.isValid()).isFalse(); + // iterate using a snapshot on default column family + try (final RocksIterator snapshotIterator = db.newIterator( + db.getDefaultColumnFamily(), readOptions)) { + snapshotIterator.seekToFirst(); + assertThat(snapshotIterator.isValid()).isTrue(); + assertThat(snapshotIterator.key()).isEqualTo("key".getBytes()); + snapshotIterator.next(); + assertThat(snapshotIterator.isValid()).isFalse(); - // release Snapshot - db.releaseSnapshot(snapshot); - } finally { - if (iterator != null) { - iterator.dispose(); - } - if (snapshotIterator != null) { - snapshotIterator.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - 
options.dispose(); - } - if (readOptions != null) { - readOptions.dispose(); + // release Snapshot + db.releaseSnapshot(snapshot); + } } } } diff --git a/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java b/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java index 0feaa4237..9f014d1d3 100644 --- a/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java +++ b/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java @@ -26,19 +26,18 @@ public class StatisticsCollectorTest { @Test public void statisticsCollector() throws InterruptedException, RocksDBException { - Options opt = null; - RocksDB db = null; - try { - opt = new Options().createStatistics().setCreateIfMissing(true); - Statistics stats = opt.statisticsPtr(); + try (final Options opt = new Options() + .createStatistics() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + final Statistics stats = opt.statisticsPtr(); - db = RocksDB.open(opt, - dbFolder.getRoot().getAbsolutePath()); + final StatsCallbackMock callback = new StatsCallbackMock(); + final StatsCollectorInput statsInput = + new StatsCollectorInput(stats, callback); - StatsCallbackMock callback = new StatsCallbackMock(); - StatsCollectorInput statsInput = new StatsCollectorInput(stats, callback); - - StatisticsCollector statsCollector = new StatisticsCollector( + final StatisticsCollector statsCollector = new StatisticsCollector( Collections.singletonList(statsInput), 100); statsCollector.start(); @@ -48,13 +47,6 @@ public class StatisticsCollectorTest { assertThat(callback.histCallbackCount).isGreaterThan(0); statsCollector.shutDown(1000); - } finally { - if (db != null) { - db.close(); - } - if (opt != null) { - opt.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java b/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java index 1de2efdea..b619258ec 100644 --- 
a/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java +++ b/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java @@ -17,43 +17,27 @@ public class TransactionLogIteratorTest { @Test public void transactionLogIterator() throws RocksDBException { - RocksDB db = null; - Options options = null; - TransactionLogIterator transactionLogIterator = null; - try { - options = new Options(). - setCreateIfMissing(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - transactionLogIterator = db.getUpdatesSince(0); - } finally { - if (transactionLogIterator != null) { - transactionLogIterator.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } + try (final Options options = new Options() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + //no-op } } @Test public void getBatch() throws RocksDBException { final int numberOfPuts = 5; - RocksDB db = null; - Options options = null; - ColumnFamilyHandle cfHandle = null; - TransactionLogIterator transactionLogIterator = null; - try { - options = new Options(). - setCreateIfMissing(true). - setWalTtlSeconds(1000). 
- setWalSizeLimitMB(10); + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000) + .setWalSizeLimitMB(10); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - - for (int i = 0; i < numberOfPuts; i++){ + for (int i = 0; i < numberOfPuts; i++) { db.put(String.valueOf(i).getBytes(), String.valueOf(i).getBytes()); } @@ -65,117 +49,89 @@ public class TransactionLogIteratorTest { isEqualTo(numberOfPuts); // insert 5 writes into a cf - cfHandle = db.createColumnFamily( - new ColumnFamilyDescriptor("new_cf".getBytes())); + try (final ColumnFamilyHandle cfHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf".getBytes()))) { + for (int i = 0; i < numberOfPuts; i++) { + db.put(cfHandle, String.valueOf(i).getBytes(), + String.valueOf(i).getBytes()); + } + // the latest sequence number is 10 because + // (5 + 5) puts were written beforehand + assertThat(db.getLatestSequenceNumber()). + isEqualTo(numberOfPuts + numberOfPuts); - for (int i = 0; i < numberOfPuts; i++){ - db.put(cfHandle, String.valueOf(i).getBytes(), - String.valueOf(i).getBytes()); - } - // the latest sequence number is 10 because - // (5 + 5) puts were written beforehand - assertThat(db.getLatestSequenceNumber()). 
- isEqualTo(numberOfPuts + numberOfPuts); + // Get updates since the beginning + try (final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + assertThat(transactionLogIterator.isValid()).isTrue(); + transactionLogIterator.status(); - // Get updates since the beginning - transactionLogIterator = db.getUpdatesSince(0); - assertThat(transactionLogIterator.isValid()).isTrue(); - transactionLogIterator.status(); - - // The first sequence number is 1 - TransactionLogIterator.BatchResult batchResult = - transactionLogIterator.getBatch(); - assertThat(batchResult.sequenceNumber()).isEqualTo(1); - } finally { - if (transactionLogIterator != null) { - transactionLogIterator.dispose(); - } - if (cfHandle != null) { - cfHandle.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + // The first sequence number is 1 + final TransactionLogIterator.BatchResult batchResult = + transactionLogIterator.getBatch(); + assertThat(batchResult.sequenceNumber()).isEqualTo(1); + } } } } @Test - public void transactionLogIteratorStallAtLastRecord() throws RocksDBException { - RocksDB db = null; - Options options = null; - TransactionLogIterator transactionLogIterator = null; - try { - options = new Options(). - setCreateIfMissing(true). - setWalTtlSeconds(1000). 
- setWalSizeLimitMB(10); + public void transactionLogIteratorStallAtLastRecord() + throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000) + .setWalSizeLimitMB(10); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); db.put("key1".getBytes(), "value1".getBytes()); // Get updates since the beginning - transactionLogIterator = db.getUpdatesSince(0); - transactionLogIterator.status(); - assertThat(transactionLogIterator.isValid()).isTrue(); - transactionLogIterator.next(); - assertThat(transactionLogIterator.isValid()).isFalse(); - transactionLogIterator.status(); - db.put("key2".getBytes(), "value2".getBytes()); - transactionLogIterator.next(); - transactionLogIterator.status(); - assertThat(transactionLogIterator.isValid()).isTrue(); - - } finally { - if (transactionLogIterator != null) { - transactionLogIterator.dispose(); - } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } - } - } - - @Test - public void transactionLogIteratorCheckAfterRestart() throws RocksDBException { - final int numberOfKeys = 2; - RocksDB db = null; - Options options = null; - TransactionLogIterator transactionLogIterator = null; - try { - options = new Options(). - setCreateIfMissing(true). - setWalTtlSeconds(1000). 
- setWalSizeLimitMB(10); - - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - db.put("key1".getBytes(), "value1".getBytes()); - db.put("key2".getBytes(), "value2".getBytes()); - db.flush(new FlushOptions().setWaitForFlush(true)); - // reopen - db.close(); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); - assertThat(db.getLatestSequenceNumber()).isEqualTo(numberOfKeys); - - transactionLogIterator = db.getUpdatesSince(0); - for (int i = 0; i < numberOfKeys; i++) { + try (final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { transactionLogIterator.status(); assertThat(transactionLogIterator.isValid()).isTrue(); transactionLogIterator.next(); + assertThat(transactionLogIterator.isValid()).isFalse(); + transactionLogIterator.status(); + db.put("key2".getBytes(), "value2".getBytes()); + transactionLogIterator.next(); + transactionLogIterator.status(); + assertThat(transactionLogIterator.isValid()).isTrue(); } - } finally { - if (transactionLogIterator != null) { - transactionLogIterator.dispose(); + } + } + + @Test + public void transactionLogIteratorCheckAfterRestart() + throws RocksDBException { + final int numberOfKeys = 2; + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000) + .setWalSizeLimitMB(10)) { + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1".getBytes()); + db.put("key2".getBytes(), "value2".getBytes()); + db.flush(new FlushOptions().setWaitForFlush(true)); + } - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); + + // reopen + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(db.getLatestSequenceNumber()).isEqualTo(numberOfKeys); + + try (final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + for (int i = 0; i < numberOfKeys; i++) { + 
transactionLogIterator.status(); + assertThat(transactionLogIterator.isValid()).isTrue(); + transactionLogIterator.next(); + } + } } } } diff --git a/java/src/test/java/org/rocksdb/TtlDBTest.java b/java/src/test/java/org/rocksdb/TtlDBTest.java index 934363a87..6539ea4e0 100644 --- a/java/src/test/java/org/rocksdb/TtlDBTest.java +++ b/java/src/test/java/org/rocksdb/TtlDBTest.java @@ -11,6 +11,7 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; @@ -26,108 +27,74 @@ public class TtlDBTest { public TemporaryFolder dbFolder = new TemporaryFolder(); @Test - public void ttlDBOpen() throws RocksDBException, - InterruptedException { - Options options = null; - TtlDB ttlDB = null; - try { - options = new Options(). - setCreateIfMissing(true). - setMaxGrandparentOverlapFactor(0); - ttlDB = TtlDB.open(options, - dbFolder.getRoot().getAbsolutePath()); + public void ttlDBOpen() throws RocksDBException, InterruptedException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setMaxGrandparentOverlapFactor(0); + final TtlDB ttlDB = TtlDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { ttlDB.put("key".getBytes(), "value".getBytes()); assertThat(ttlDB.get("key".getBytes())). isEqualTo("value".getBytes()); assertThat(ttlDB.get("key".getBytes())).isNotNull(); - } finally { - if (ttlDB != null) { - ttlDB.close(); - } - if (options != null) { - options.dispose(); - } } } @Test - public void ttlDBOpenWithTtl() throws RocksDBException, - InterruptedException { - Options options = null; - TtlDB ttlDB = null; - try { - options = new Options(). - setCreateIfMissing(true). 
- setMaxGrandparentOverlapFactor(0); - ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath(), - 1, false); + public void ttlDBOpenWithTtl() throws RocksDBException, InterruptedException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setMaxGrandparentOverlapFactor(0); + final TtlDB ttlDB = TtlDB.open(options, + dbFolder.getRoot().getAbsolutePath(), 1, false); + ) { ttlDB.put("key".getBytes(), "value".getBytes()); assertThat(ttlDB.get("key".getBytes())). isEqualTo("value".getBytes()); TimeUnit.SECONDS.sleep(2); - ttlDB.compactRange(); assertThat(ttlDB.get("key".getBytes())).isNull(); - } finally { - if (ttlDB != null) { - ttlDB.close(); - } - if (options != null) { - options.dispose(); - } } } @Test - public void ttlDbOpenWithColumnFamilies() throws RocksDBException, InterruptedException { - DBOptions dbOptions = null; - TtlDB ttlDB = null; - List cfNames = - new ArrayList<>(); - List columnFamilyHandleList = - new ArrayList<>(); - cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes())); - List ttlValues = new ArrayList<>(); - // Default column family with infinite lifetime - ttlValues.add(0); - // new column family with 1 second ttl - ttlValues.add(1); + public void ttlDbOpenWithColumnFamilies() throws RocksDBException, + InterruptedException { + final List cfNames = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes()) + ); + final List ttlValues = Arrays.asList(0, 1); - try { - dbOptions = new DBOptions(). - setCreateMissingColumnFamilies(true). 
- setCreateIfMissing(true); - ttlDB = TtlDB.open(dbOptions, dbFolder.getRoot().getAbsolutePath(), - cfNames, columnFamilyHandleList, ttlValues, false); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions dbOptions = new DBOptions() + .setCreateMissingColumnFamilies(true) + .setCreateIfMissing(true); + final TtlDB ttlDB = TtlDB.open(dbOptions, + dbFolder.getRoot().getAbsolutePath(), cfNames, + columnFamilyHandleList, ttlValues, false)) { + try { + ttlDB.put("key".getBytes(), "value".getBytes()); + assertThat(ttlDB.get("key".getBytes())). + isEqualTo("value".getBytes()); + ttlDB.put(columnFamilyHandleList.get(1), "key".getBytes(), + "value".getBytes()); + assertThat(ttlDB.get(columnFamilyHandleList.get(1), + "key".getBytes())).isEqualTo("value".getBytes()); + TimeUnit.SECONDS.sleep(2); - ttlDB.put("key".getBytes(), "value".getBytes()); - assertThat(ttlDB.get("key".getBytes())). - isEqualTo("value".getBytes()); - ttlDB.put(columnFamilyHandleList.get(1), "key".getBytes(), - "value".getBytes()); - assertThat(ttlDB.get(columnFamilyHandleList.get(1), - "key".getBytes())).isEqualTo("value".getBytes()); - TimeUnit.SECONDS.sleep(2); + ttlDB.compactRange(); + ttlDB.compactRange(columnFamilyHandleList.get(1)); - ttlDB.compactRange(); - ttlDB.compactRange(columnFamilyHandleList.get(1)); - - assertThat(ttlDB.get("key".getBytes())).isNotNull(); - assertThat(ttlDB.get(columnFamilyHandleList.get(1), - "key".getBytes())).isNull(); - - - } finally { - for (ColumnFamilyHandle columnFamilyHandle : - columnFamilyHandleList) { - columnFamilyHandle.dispose(); - } - if (ttlDB != null) { - ttlDB.close(); - } - if (dbOptions != null) { - dbOptions.dispose(); + assertThat(ttlDB.get("key".getBytes())).isNotNull(); + assertThat(ttlDB.get(columnFamilyHandleList.get(1), + "key".getBytes())).isNull(); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } } } } @@ -135,15 +102,12 @@ public class 
TtlDBTest { @Test public void createTtlColumnFamily() throws RocksDBException, InterruptedException { - Options options = null; - TtlDB ttlDB = null; - ColumnFamilyHandle columnFamilyHandle = null; - try { - options = new Options().setCreateIfMissing(true); - ttlDB = TtlDB.open(options, - dbFolder.getRoot().getAbsolutePath()); - columnFamilyHandle = ttlDB.createColumnFamilyWithTtl( - new ColumnFamilyDescriptor("new_cf".getBytes()), 1); + try (final Options options = new Options().setCreateIfMissing(true); + final TtlDB ttlDB = TtlDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final ColumnFamilyHandle columnFamilyHandle = + ttlDB.createColumnFamilyWithTtl( + new ColumnFamilyDescriptor("new_cf".getBytes()), 1)) { ttlDB.put(columnFamilyHandle, "key".getBytes(), "value".getBytes()); assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())). @@ -151,16 +115,6 @@ public class TtlDBTest { TimeUnit.SECONDS.sleep(2); ttlDB.compactRange(columnFamilyHandle); assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).isNull(); - } finally { - if (columnFamilyHandle != null) { - columnFamilyHandle.dispose(); - } - if (ttlDB != null) { - ttlDB.close(); - } - if (options != null) { - options.dispose(); - } } } } diff --git a/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java b/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java index 257ef6438..35c63f2af 100644 --- a/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java +++ b/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java @@ -23,28 +23,26 @@ public class WriteBatchHandlerTest { @Test public void writeBatchHandler() throws IOException, RocksDBException { - WriteBatch batch = null; - CapturingWriteBatchHandler handler = null; - try { - // setup test data - final List>> testEvents = new ArrayList<>(); - testEvents.add(new Tuple<>(Action.DELETE, - new Tuple("k0".getBytes(), null))); - testEvents.add(new Tuple<>(Action.PUT, - new Tuple<>("k1".getBytes(), "v1".getBytes()))); - 
testEvents.add(new Tuple<>(Action.PUT, - new Tuple<>("k2".getBytes(), "v2".getBytes()))); - testEvents.add(new Tuple<>(Action.PUT, - new Tuple<>("k3".getBytes(), "v3".getBytes()))); - testEvents.add(new Tuple<>(Action.LOG, - new Tuple(null, "log1".getBytes()))); - testEvents.add(new Tuple<>(Action.MERGE, - new Tuple<>("k2".getBytes(), "v22".getBytes()))); - testEvents.add(new Tuple<>(Action.DELETE, - new Tuple("k3".getBytes(), null))); + // setup test data + final List>> testEvents = Arrays.asList( + new Tuple<>(Action.DELETE, + new Tuple("k0".getBytes(), null)), + new Tuple<>(Action.PUT, + new Tuple<>("k1".getBytes(), "v1".getBytes())), + new Tuple<>(Action.PUT, + new Tuple<>("k2".getBytes(), "v2".getBytes())), + new Tuple<>(Action.PUT, + new Tuple<>("k3".getBytes(), "v3".getBytes())), + new Tuple<>(Action.LOG, + new Tuple(null, "log1".getBytes())), + new Tuple<>(Action.MERGE, + new Tuple<>("k2".getBytes(), "v22".getBytes())), + new Tuple<>(Action.DELETE, + new Tuple("k3".getBytes(), null)) + ); - // load test data to the write batch - batch = new WriteBatch(); + // load test data to the write batch + try (final WriteBatch batch = new WriteBatch()) { for (final Tuple> testEvent : testEvents) { final Tuple data = testEvent.value; switch (testEvent.key) { @@ -67,29 +65,27 @@ public class WriteBatchHandlerTest { } } - // attempt to read test data back from the WriteBatch by iterating with a handler - handler = new CapturingWriteBatchHandler(); - batch.iterate(handler); + // attempt to read test data back from the WriteBatch by iterating + // with a handler + try (final CapturingWriteBatchHandler handler = + new CapturingWriteBatchHandler()) { + batch.iterate(handler); - // compare the results to the test data - final List>> actualEvents = handler.getEvents(); - assertThat(testEvents.size()).isSameAs(actualEvents.size()); + // compare the results to the test data + final List>> actualEvents = + handler.getEvents(); + 
assertThat(testEvents.size()).isSameAs(actualEvents.size()); - for (int i = 0; i < testEvents.size(); i++) { - assertThat(equals(testEvents.get(i), actualEvents.get(i))).isTrue(); - } - } finally { - if (handler != null) { - handler.dispose(); - } - if (batch != null) { - batch.dispose(); + for (int i = 0; i < testEvents.size(); i++) { + assertThat(equals(testEvents.get(i), actualEvents.get(i))).isTrue(); + } } } } - private static boolean equals(final Tuple> expected, - final Tuple> actual) { + private static boolean equals( + final Tuple> expected, + final Tuple> actual) { if (!expected.key.equals(actual.key)) { return false; } @@ -136,7 +132,8 @@ public class WriteBatchHandlerTest { */ private static class CapturingWriteBatchHandler extends WriteBatch.Handler { - private final List>> events = new ArrayList<>(); + private final List>> events + = new ArrayList<>(); /** * Returns a copy of the current events list @@ -159,12 +156,14 @@ public class WriteBatchHandlerTest { @Override public void delete(final byte[] key) { - events.add(new Tuple<>(Action.DELETE, new Tuple(key, null))); + events.add(new Tuple<>(Action.DELETE, + new Tuple(key, null))); } @Override public void logData(final byte[] blob) { - events.add(new Tuple<>(Action.LOG, new Tuple(null, blob))); + events.add(new Tuple<>(Action.LOG, + new Tuple(null, blob))); } } } diff --git a/java/src/test/java/org/rocksdb/WriteBatchTest.java b/java/src/test/java/org/rocksdb/WriteBatchTest.java index b7fb50533..fdfb02444 100644 --- a/java/src/test/java/org/rocksdb/WriteBatchTest.java +++ b/java/src/test/java/org/rocksdb/WriteBatchTest.java @@ -20,9 +20,9 @@ import static org.assertj.core.api.Assertions.assertThat; /** * This class mimics the db/write_batch_test.cc * in the c++ rocksdb library. - * + *

      * Not ported yet: - * + *

      * Continue(); * PutGatherSlices(); */ @@ -36,77 +36,83 @@ public class WriteBatchTest { @Test public void emptyWriteBatch() { - WriteBatch batch = new WriteBatch(); - assertThat(batch.count()).isEqualTo(0); + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.count()).isEqualTo(0); + } } @Test public void multipleBatchOperations() throws UnsupportedEncodingException { - WriteBatch batch = new WriteBatch(); - batch.put("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII")); - batch.remove("box".getBytes("US-ASCII")); - batch.put("baz".getBytes("US-ASCII"), "boo".getBytes("US-ASCII")); - WriteBatchTestInternalHelper.setSequence(batch, 100); - assertThat(WriteBatchTestInternalHelper.sequence(batch)). - isNotNull(). - isEqualTo(100); - assertThat(batch.count()).isEqualTo(3); - assertThat(new String(getContents(batch), "US-ASCII")). - isEqualTo("Put(baz, boo)@102" + - "Delete(box)@101" + - "Put(foo, bar)@100"); + try (WriteBatch batch = new WriteBatch()) { + batch.put("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII")); + batch.remove("box".getBytes("US-ASCII")); + batch.put("baz".getBytes("US-ASCII"), "boo".getBytes("US-ASCII")); + + WriteBatchTestInternalHelper.setSequence(batch, 100); + assertThat(WriteBatchTestInternalHelper.sequence(batch)). + isNotNull(). + isEqualTo(100); + assertThat(batch.count()).isEqualTo(3); + assertThat(new String(getContents(batch), "US-ASCII")). 
+ isEqualTo("Put(baz, boo)@102" + + "Delete(box)@101" + + "Put(foo, bar)@100"); + } } @Test public void testAppendOperation() throws UnsupportedEncodingException { - WriteBatch b1 = new WriteBatch(); - WriteBatch b2 = new WriteBatch(); - WriteBatchTestInternalHelper.setSequence(b1, 200); - WriteBatchTestInternalHelper.setSequence(b2, 300); - WriteBatchTestInternalHelper.append(b1, b2); - assertThat(getContents(b1).length).isEqualTo(0); - assertThat(b1.count()).isEqualTo(0); - b2.put("a".getBytes("US-ASCII"), "va".getBytes("US-ASCII")); - WriteBatchTestInternalHelper.append(b1, b2); - assertThat("Put(a, va)@200".equals(new String(getContents(b1), "US-ASCII"))); - assertThat(b1.count()).isEqualTo(1); - b2.clear(); - b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII")); - WriteBatchTestInternalHelper.append(b1, b2); - assertThat(("Put(a, va)@200" + - "Put(b, vb)@201") - .equals(new String(getContents(b1), "US-ASCII"))); - assertThat(b1.count()).isEqualTo(2); - b2.remove("foo".getBytes("US-ASCII")); - WriteBatchTestInternalHelper.append(b1, b2); - assertThat(("Put(a, va)@200" + - "Put(b, vb)@202" + - "Put(b, vb)@201" + - "Delete(foo)@203") - .equals(new String(getContents(b1), "US-ASCII"))); - assertThat(b1.count()).isEqualTo(4); + try (final WriteBatch b1 = new WriteBatch(); + final WriteBatch b2 = new WriteBatch()) { + WriteBatchTestInternalHelper.setSequence(b1, 200); + WriteBatchTestInternalHelper.setSequence(b2, 300); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat(getContents(b1).length).isEqualTo(0); + assertThat(b1.count()).isEqualTo(0); + b2.put("a".getBytes("US-ASCII"), "va".getBytes("US-ASCII")); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat("Put(a, va)@200".equals(new String(getContents(b1), + "US-ASCII"))); + assertThat(b1.count()).isEqualTo(1); + b2.clear(); + b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII")); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat(("Put(a, va)@200" + + "Put(b, vb)@201") + 
.equals(new String(getContents(b1), "US-ASCII"))); + assertThat(b1.count()).isEqualTo(2); + b2.remove("foo".getBytes("US-ASCII")); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat(("Put(a, va)@200" + + "Put(b, vb)@202" + + "Put(b, vb)@201" + + "Delete(foo)@203") + .equals(new String(getContents(b1), "US-ASCII"))); + assertThat(b1.count()).isEqualTo(4); + } } @Test public void blobOperation() throws UnsupportedEncodingException { - WriteBatch batch = new WriteBatch(); - batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII")); - batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII")); - batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII")); - batch.putLogData("blob1".getBytes("US-ASCII")); - batch.remove("k2".getBytes("US-ASCII")); - batch.putLogData("blob2".getBytes("US-ASCII")); - batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII")); - assertThat(batch.count()).isEqualTo(5); - assertThat(("Merge(foo, bar)@4" + - "Put(k1, v1)@0" + - "Delete(k2)@3" + - "Put(k2, v2)@1" + - "Put(k3, v3)@2") - .equals(new String(getContents(batch), "US-ASCII"))); + try (final WriteBatch batch = new WriteBatch()) { + batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII")); + batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII")); + batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII")); + batch.putLogData("blob1".getBytes("US-ASCII")); + batch.remove("k2".getBytes("US-ASCII")); + batch.putLogData("blob2".getBytes("US-ASCII")); + batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII")); + assertThat(batch.count()).isEqualTo(5); + assertThat(("Merge(foo, bar)@4" + + "Put(k1, v1)@0" + + "Delete(k2)@3" + + "Put(k2, v2)@1" + + "Put(k3, v3)@2") + .equals(new String(getContents(batch), "US-ASCII"))); + } } static byte[] getContents(final WriteBatch wb) { @@ -133,7 +139,11 @@ class WriteBatchTestInternalHelper { append(wb1.nativeHandle_, wb2.nativeHandle_); } - private static native void setSequence(final 
long writeBatchHandle, final long sn); + private static native void setSequence(final long writeBatchHandle, + final long sn); + private static native long sequence(final long writeBatchHandle); - private static native void append(final long writeBatchHandle1, final long writeBatchHandle2); + + private static native void append(final long writeBatchHandle1, + final long writeBatchHandle2); } diff --git a/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java b/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java index 08cac9bce..74558fc2e 100644 --- a/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java +++ b/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java @@ -32,13 +32,9 @@ public class WriteBatchWithIndexTest { @Test public void readYourOwnWrites() throws RocksDBException { - RocksDB db = null; - Options options = null; - try { - options = new Options(); - // Setup options - options.setCreateIfMissing(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { final byte[] k1 = "key1".getBytes(); final byte[] v1 = "value1".getBytes(); @@ -48,13 +44,9 @@ public class WriteBatchWithIndexTest { db.put(k1, v1); db.put(k2, v2); - final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); - - RocksIterator base = null; - RocksIterator it = null; - try { - base = db.newIterator(); - it = wbwi.newIteratorWithBase(base); + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + final RocksIterator base = db.newIterator(); + final RocksIterator it = wbwi.newIteratorWithBase(base)) { it.seek(k1); assertThat(it.isValid()).isTrue(); @@ -95,169 +87,121 @@ public class WriteBatchWithIndexTest { assertThat(it.isValid()).isTrue(); assertThat(it.key()).isEqualTo(k1); assertThat(it.value()).isEqualTo(v1Other); - } finally { - if (it != null) { - it.dispose(); - } 
- if (base != null) { - base.dispose(); - } - } - - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); } } } @Test public void write_writeBatchWithIndex() throws RocksDBException { - RocksDB db = null; - Options options = null; - try { - options = new Options(); - // Setup options - options.setCreateIfMissing(true); - db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { final byte[] k1 = "key1".getBytes(); final byte[] v1 = "value1".getBytes(); final byte[] k2 = "key2".getBytes(); final byte[] v2 = "value2".getBytes(); - WriteBatchWithIndex wbwi = null; - - try { - wbwi = new WriteBatchWithIndex(); - - + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { wbwi.put(k1, v1); wbwi.put(k2, v2); db.write(new WriteOptions(), wbwi); - } finally { - if(wbwi != null) { - wbwi.dispose(); - } } assertThat(db.get(k1)).isEqualTo(v1); assertThat(db.get(k2)).isEqualTo(v2); - - } finally { - if (db != null) { - db.close(); - } - if (options != null) { - options.dispose(); - } } } @Test public void iterator() throws RocksDBException { - final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) { - final String k1 = "key1"; - final String v1 = "value1"; - final String k2 = "key2"; - final String v2 = "value2"; - final String k3 = "key3"; - final String v3 = "value3"; - final byte[] k1b = k1.getBytes(); - final byte[] v1b = v1.getBytes(); - final byte[] k2b = k2.getBytes(); - final byte[] v2b = v2.getBytes(); - final byte[] k3b = k3.getBytes(); - final byte[] v3b = v3.getBytes(); + final String k1 = "key1"; + final String v1 = "value1"; + final String k2 = "key2"; + final String v2 = "value2"; + final String k3 = "key3"; + final String v3 = "value3"; + final byte[] k1b = 
k1.getBytes(); + final byte[] v1b = v1.getBytes(); + final byte[] k2b = k2.getBytes(); + final byte[] v2b = v2.getBytes(); + final byte[] k3b = k3.getBytes(); + final byte[] v3b = v3.getBytes(); - //add put records - wbwi.put(k1b, v1b); - wbwi.put(k2b, v2b); - wbwi.put(k3b, v3b); + //add put records + wbwi.put(k1b, v1b); + wbwi.put(k2b, v2b); + wbwi.put(k3b, v3b); - //add a deletion record - final String k4 = "key4"; - final byte[] k4b = k4.getBytes(); - wbwi.remove(k4b); + //add a deletion record + final String k4 = "key4"; + final byte[] k4b = k4.getBytes(); + wbwi.remove(k4b); - WBWIRocksIterator.WriteEntry[] expected = { - new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, - new DirectSlice(k1), new DirectSlice(v1)), - new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, - new DirectSlice(k2), new DirectSlice(v2)), - new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, - new DirectSlice(k3), new DirectSlice(v3)), - new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.DELETE, - new DirectSlice(k4), DirectSlice.NONE) - }; + final WBWIRocksIterator.WriteEntry[] expected = { + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(k1), new DirectSlice(v1)), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(k2), new DirectSlice(v2)), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(k3), new DirectSlice(v3)), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.DELETE, + new DirectSlice(k4), DirectSlice.NONE) + }; - WBWIRocksIterator it = null; - try { - it = wbwi.newIterator(); + try (final WBWIRocksIterator it = wbwi.newIterator()) { + //direct access - seek to key offsets + final int[] testOffsets = {2, 0, 1, 3}; - //direct access - seek to key offsets - final int[] testOffsets = {2, 0, 1, 3}; + for (int i = 0; i < testOffsets.length; i++) { + final int testOffset = testOffsets[i]; + final byte[] key = 
toArray(expected[testOffset].getKey().data()); - for(int i = 0; i < testOffsets.length; i++) { - final int testOffset = testOffsets[i]; - final byte[] key = toArray(expected[testOffset].getKey().data()); + it.seek(key); + assertThat(it.isValid()).isTrue(); - it.seek(key); - assertThat(it.isValid()).isTrue(); + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry.equals(expected[testOffset])).isTrue(); + } - final WBWIRocksIterator.WriteEntry entry = it.entry(); - assertThat(entry.equals(expected[testOffset])).isTrue(); - } + //forward iterative access + int i = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + assertThat(it.entry().equals(expected[i++])).isTrue(); + } - //forward iterative access - int i = 0; - for(it.seekToFirst(); it.isValid(); it.next()) { - assertThat(it.entry().equals(expected[i++])).isTrue(); - } - - //reverse iterative access - i = expected.length - 1; - for(it.seekToLast(); it.isValid(); it.prev()) { - assertThat(it.entry().equals(expected[i--])).isTrue(); - } - - } finally { - if(it != null) { - it.dispose(); + //reverse iterative access + i = expected.length - 1; + for (it.seekToLast(); it.isValid(); it.prev()) { + assertThat(it.entry().equals(expected[i--])).isTrue(); + } } } } @Test public void zeroByteTests() { - final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); - byte[] zeroByteValue = new byte[] { 0, 0 }; + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) { + final byte[] zeroByteValue = new byte[]{0, 0}; + //add zero byte value + wbwi.put(zeroByteValue, zeroByteValue); - //add zero byte value - wbwi.put(zeroByteValue, zeroByteValue); + final ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length); + buffer.put(zeroByteValue); - ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length); - buffer.put(zeroByteValue); + WBWIRocksIterator.WriteEntry[] expected = { + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new 
DirectSlice(buffer, zeroByteValue.length), + new DirectSlice(buffer, zeroByteValue.length)) + }; - WBWIRocksIterator.WriteEntry[] expected = { - new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, - new DirectSlice(buffer, zeroByteValue.length), - new DirectSlice(buffer, zeroByteValue.length)) - }; - WBWIRocksIterator it = null; - try { - it = wbwi.newIterator(); - it.seekToFirst(); - assertThat(it.entry().equals(expected[0])).isTrue(); - assertThat(it.entry().hashCode() == expected[0].hashCode()).isTrue(); - } finally { - if(it != null) { - it.dispose(); + try (final WBWIRocksIterator it = wbwi.newIterator()) { + it.seekToFirst(); + assertThat(it.entry().equals(expected[0])).isTrue(); + assertThat(it.entry().hashCode() == expected[0].hashCode()).isTrue(); } } } diff --git a/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/java/src/test/java/org/rocksdb/WriteOptionsTest.java index 333a76194..c6af5c818 100644 --- a/java/src/test/java/org/rocksdb/WriteOptionsTest.java +++ b/java/src/test/java/org/rocksdb/WriteOptionsTest.java @@ -17,15 +17,16 @@ public class WriteOptionsTest { new RocksMemoryResource(); @Test - public void writeOptions(){ - WriteOptions writeOptions = new WriteOptions(); - writeOptions.setDisableWAL(true); - assertThat(writeOptions.disableWAL()).isTrue(); - writeOptions.setDisableWAL(false); - assertThat(writeOptions.disableWAL()).isFalse(); - writeOptions.setSync(true); - assertThat(writeOptions.sync()).isTrue(); - writeOptions.setSync(false); - assertThat(writeOptions.sync()).isFalse(); + public void writeOptions() { + try (final WriteOptions writeOptions = new WriteOptions()) { + writeOptions.setDisableWAL(true); + assertThat(writeOptions.disableWAL()).isTrue(); + writeOptions.setDisableWAL(false); + assertThat(writeOptions.disableWAL()).isFalse(); + writeOptions.setSync(true); + assertThat(writeOptions.sync()).isTrue(); + writeOptions.setSync(false); + assertThat(writeOptions.sync()).isFalse(); + } } } From 
188bb2e7ad2c8eaf6e203fc53071001a9697f087 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Wed, 10 Feb 2016 14:21:23 +0000 Subject: [PATCH 9/9] Fix formatting identified by `arc lint` --- java/rocksjni/options.cc | 5 +- java/rocksjni/portal.h | 9 ++- java/rocksjni/rocksjni.cc | 6 +- .../java/org/rocksdb/AbstractWriteBatch.java | 15 +++-- .../main/java/org/rocksdb/BackupableDB.java | 10 +-- .../java/org/rocksdb/BackupableDBOptions.java | 66 ++++++++++--------- .../java/org/rocksdb/ColumnFamilyHandle.java | 6 +- .../java/org/rocksdb/ColumnFamilyOptions.java | 31 +++++---- java/src/main/java/org/rocksdb/DBOptions.java | 4 +- .../java/org/rocksdb/DirectComparator.java | 3 +- java/src/main/java/org/rocksdb/Options.java | 16 +++-- .../RemoveEmptyValueCompactionFilter.java | 3 +- .../java/org/rocksdb/RestoreBackupableDB.java | 3 +- .../main/java/org/rocksdb/RestoreOptions.java | 11 ++-- java/src/main/java/org/rocksdb/TtlDB.java | 7 +- .../src/main/java/org/rocksdb/WriteBatch.java | 6 +- .../java/org/rocksdb/WriteBatchWithIndex.java | 49 ++++++++------ 17 files changed, 147 insertions(+), 103 deletions(-) diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc index 018d8bbe4..aac2564b2 100644 --- a/java/rocksjni/options.cc +++ b/java/rocksjni/options.cc @@ -1083,9 +1083,10 @@ jbyte Java_org_rocksdb_Options_compressionType( std::vector rocksdb_compression_vector_helper( JNIEnv* env, jbyteArray jcompressionLevels) { std::vector compressionLevels; - + jsize len = env->GetArrayLength(jcompressionLevels); - jbyte* jcompressionLevel = env->GetByteArrayElements(jcompressionLevels, NULL); + jbyte* jcompressionLevel = env->GetByteArrayElements(jcompressionLevels, + NULL); for(int i = 0; i < len; i++) { jbyte jcl; jcl = jcompressionLevel[i]; diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h index f4ad29af5..cc47367a7 100644 --- a/java/rocksjni/portal.h +++ b/java/rocksjni/portal.h @@ -52,7 +52,8 @@ template class RocksDBNativeClass { }; // Native class template 
for sub-classes of RocksMutableObject -template class NativeRocksMutableObject : public RocksDBNativeClass { +template class NativeRocksMutableObject + : public RocksDBNativeClass { public: static jmethodID getSetNativeHandleMethod(JNIEnv* env) { @@ -63,8 +64,10 @@ template class NativeRocksMutableObject : public Rocks } // Pass the pointer to the java side. - static void setHandle(JNIEnv* env, jobject jobj, PTR ptr, jboolean java_owns_handle) { - env->CallVoidMethod(jobj, getSetNativeHandleMethod(env), reinterpret_cast(ptr), java_owns_handle); + static void setHandle(JNIEnv* env, jobject jobj, PTR ptr, + jboolean java_owns_handle) { + env->CallVoidMethod(jobj, getSetNativeHandleMethod(env), + reinterpret_cast(ptr), java_owns_handle); } }; diff --git a/java/rocksjni/rocksjni.cc b/java/rocksjni/rocksjni.cc index 7ef416618..c0c73ae2d 100644 --- a/java/rocksjni/rocksjni.cc +++ b/java/rocksjni/rocksjni.cc @@ -156,8 +156,8 @@ jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J( jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J( JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path, jobjectArray jcolumn_names, jlongArray jcolumn_options) { - return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options, - (rocksdb::Status(*) + return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names, + jcolumn_options, (rocksdb::Status(*) (const rocksdb::DBOptions&, const std::string&, const std::vector&, std::vector*, rocksdb::DB**) @@ -458,7 +458,7 @@ jboolean Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIJLjava_lang_StringBuffer_2( rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); return true; - } + } } ////////////////////////////////////////////////////////////////////////////// diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java index 73d9876fb..c4eb01b1e 100644 --- 
a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java +++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -5,7 +5,8 @@ package org.rocksdb; -public abstract class AbstractWriteBatch extends RocksObject implements WriteBatchInterface { +public abstract class AbstractWriteBatch extends RocksObject + implements WriteBatchInterface { protected AbstractWriteBatch(final long nativeHandle) { super(nativeHandle); @@ -24,9 +25,11 @@ public abstract class AbstractWriteBatch extends RocksObject implements WriteBat } @Override - public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) { + public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, + byte[] value) { assert (isOwningHandle()); - put(nativeHandle_, key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); + put(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); } @Override @@ -36,9 +39,11 @@ public abstract class AbstractWriteBatch extends RocksObject implements WriteBat } @Override - public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) { + public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, + byte[] value) { assert (isOwningHandle()); - merge(nativeHandle_, key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); + merge(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); } @Override diff --git a/java/src/main/java/org/rocksdb/BackupableDB.java b/java/src/main/java/org/rocksdb/BackupableDB.java index 9bc29af81..cebd69f67 100644 --- a/java/src/main/java/org/rocksdb/BackupableDB.java +++ b/java/src/main/java/org/rocksdb/BackupableDB.java @@ -21,8 +21,8 @@ public class BackupableDB extends RocksDB { * * @param opt {@link org.rocksdb.Options} to set for the database. * @param bopt {@link org.rocksdb.BackupableDBOptions} to use. - * @param db_path Path to store data to. 
The path for storing the backup should be - * specified in the {@link org.rocksdb.BackupableDBOptions}. + * @param db_path Path to store data to. The path for storing the backup + * should be specified in the {@link org.rocksdb.BackupableDBOptions}. * * @return {@link BackupableDB} reference to the opened database. * @@ -34,7 +34,8 @@ public class BackupableDB extends RocksDB { throws RocksDBException { final RocksDB db = RocksDB.open(opt, db_path); - final BackupableDB bdb = new BackupableDB(open(db.nativeHandle_, bopt.nativeHandle_)); + final BackupableDB bdb = new BackupableDB(open(db.nativeHandle_, + bopt.nativeHandle_)); // Prevent the RocksDB object from attempting to delete // the underly C++ DB object. @@ -151,7 +152,8 @@ public class BackupableDB extends RocksDB { super.finalize(); } - protected native static long open(final long rocksDBHandle, final long backupDBOptionsHandle); + protected native static long open(final long rocksDBHandle, + final long backupDBOptionsHandle); protected native void createNewBackup(long handle, boolean flag) throws RocksDBException; protected native void purgeOldBackups(long handle, int numBackupsToKeep) diff --git a/java/src/main/java/org/rocksdb/BackupableDBOptions.java b/java/src/main/java/org/rocksdb/BackupableDBOptions.java index ea5e51a7a..89591de82 100644 --- a/java/src/main/java/org/rocksdb/BackupableDBOptions.java +++ b/java/src/main/java/org/rocksdb/BackupableDBOptions.java @@ -21,8 +21,8 @@ public class BackupableDBOptions extends RocksObject { /** *

      BackupableDBOptions constructor.

      * - * @param path Where to keep the backup files. Has to be different than db name. - * Best to set this to {@code db name_ + "/backups"} + * @param path Where to keep the backup files. Has to be different than db + * name. Best to set this to {@code db name_ + "/backups"} * @throws java.lang.IllegalArgumentException if illegal path is used. */ public BackupableDBOptions(final String path) { @@ -31,7 +31,8 @@ public class BackupableDBOptions extends RocksObject { private static String ensureWritableFile(final String path) { final File backupPath = path == null ? null : new File(path); - if (backupPath == null || !backupPath.isDirectory() || !backupPath.canWrite()) { + if (backupPath == null || !backupPath.isDirectory() || + !backupPath.canWrite()) { throw new IllegalArgumentException("Illegal path provided."); } else { return path; @@ -51,10 +52,11 @@ public class BackupableDBOptions extends RocksObject { /** *

      Share table files between backups.

      * - * @param shareTableFiles If {@code share_table_files == true}, backup will assume - * that table files with same name have the same contents. This enables incremental - * backups and avoids unnecessary data copies. If {@code share_table_files == false}, - * each backup will be on its own and will not share any data with other backups. + * @param shareTableFiles If {@code share_table_files == true}, backup will + * assume that table files with same name have the same contents. This + * enables incremental backups and avoids unnecessary data copies. If + * {@code share_table_files == false}, each backup will be on its own and + * will not share any data with other backups. * *

      Default: true

      * @@ -80,10 +82,10 @@ public class BackupableDBOptions extends RocksObject { /** *

      Set synchronous backups.

      * - * @param sync If {@code sync == true}, we can guarantee you'll get consistent backup - * even on a machine crash/reboot. Backup process is slower with sync enabled. - * If {@code sync == false}, we don't guarantee anything on machine reboot. - * However,chances are some of the backups are consistent. + * @param sync If {@code sync == true}, we can guarantee you'll get consistent + * backup even on a machine crash/reboot. Backup process is slower with sync + * enabled. If {@code sync == false}, we don't guarantee anything on machine + * reboot. However, chances are some of the backups are consistent. * *

      Default: true

      * @@ -108,7 +110,8 @@ public class BackupableDBOptions extends RocksObject { /** *

      Set if old data will be destroyed.

      * - * @param destroyOldData If true, it will delete whatever backups there are already. + * @param destroyOldData If true, it will delete whatever backups there are + * already. * *

      Default: false

      * @@ -133,9 +136,9 @@ public class BackupableDBOptions extends RocksObject { /** *

      Set if log files shall be persisted.

      * - * @param backupLogFiles If false, we won't backup log files. This option can be - * useful for backing up in-memory databases where log file are persisted,but table - * files are in memory. + * @param backupLogFiles If false, we won't backup log files. This option can + * be useful for backing up in-memory databases where log file are + * persisted, but table files are in memory. * *

      Default: true

      * @@ -160,8 +163,8 @@ public class BackupableDBOptions extends RocksObject { /** *

      Set backup rate limit.

      * - * @param backupRateLimit Max bytes that can be transferred in a second during backup. - * If 0 or negative, then go as fast as you can. + * @param backupRateLimit Max bytes that can be transferred in a second during + * backup. If 0 or negative, then go as fast as you can. * *

      Default: 0

      * @@ -175,10 +178,11 @@ public class BackupableDBOptions extends RocksObject { } /** - *

      Return backup rate limit which described the max bytes that can be transferred in a - * second during backup.

      + *

      Return backup rate limit which described the max bytes that can be + * transferred in a second during backup.

      * - * @return numerical value describing the backup transfer limit in bytes per second. + * @return numerical value describing the backup transfer limit in bytes per + * second. */ public long backupRateLimit() { assert(isOwningHandle()); @@ -188,8 +192,8 @@ public class BackupableDBOptions extends RocksObject { /** *

      Set restore rate limit.

      * - * @param restoreRateLimit Max bytes that can be transferred in a second during restore. - * If 0 or negative, then go as fast as you can. + * @param restoreRateLimit Max bytes that can be transferred in a second + * during restore. If 0 or negative, then go as fast as you can. * *

      Default: 0

      * @@ -203,10 +207,11 @@ public class BackupableDBOptions extends RocksObject { } /** - *

      Return restore rate limit which described the max bytes that can be transferred in a - * second during restore.

      + *

      Return restore rate limit which described the max bytes that can be + * transferred in a second during restore.

      * - * @return numerical value describing the restore transfer limit in bytes per second. + * @return numerical value describing the restore transfer limit in bytes per + * second. */ public long restoreRateLimit() { assert(isOwningHandle()); @@ -214,12 +219,13 @@ public class BackupableDBOptions extends RocksObject { } /** - *

      Only used if share_table_files is set to true. If true, will consider that - * backups can come from different databases, hence a sst is not uniquely - * identified by its name, but by the triple (file name, crc32, file length)

      + *

      Only used if share_table_files is set to true. If true, will consider + * that backups can come from different databases, hence a sst is not uniquely + * identified by its name, but by the triple (file name, crc32, file length) + *

      * - * @param shareFilesWithChecksum boolean value indicating if SST files are stored - * using the triple (file name, crc32, file length) and not its name. + * @param shareFilesWithChecksum boolean value indicating if SST files are + * stored using the triple (file name, crc32, file length) and not its name. * *

      Note: this is an experimental option, and you'll need to set it manually * turn it on only if you know what you're doing*

      diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java index b9f6bd97e..6aa22d3fe 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -25,9 +25,9 @@ public class ColumnFamilyHandle extends RocksObject { *

      Deletes underlying C++ iterator pointer.

      * *

      Note: the underlying handle can only be safely deleted if the RocksDB - * instance related to a certain ColumnFamilyHandle is still valid and initialized. - * Therefore {@code disposeInternal()} checks if the RocksDB is initialized - * before freeing the native handle.

      + * instance related to a certain ColumnFamilyHandle is still valid and + * initialized. Therefore {@code disposeInternal()} checks if the RocksDB is + * initialized before freeing the native handle.

      */ @Override protected void disposeInternal() { diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index 500935cf4..6a7b580bf 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -13,8 +13,8 @@ import java.util.Properties; * ColumnFamilyOptions to control the behavior of a database. It will be used * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). * - * If {@link #dispose()} function is not called, then it will be GC'd automatically - * and native resources will be released as part of the process. + * If {@link #dispose()} function is not called, then it will be GC'd + * automatically and native resources will be released as part of the process. */ public class ColumnFamilyOptions extends RocksObject implements ColumnFamilyOptionsInterface { @@ -112,7 +112,8 @@ public class ColumnFamilyOptions extends RocksObject } @Override - public ColumnFamilyOptions setComparator(final BuiltinComparator builtinComparator) { + public ColumnFamilyOptions setComparator( + final BuiltinComparator builtinComparator) { assert(isOwningHandle()); setComparatorHandle(nativeHandle_, builtinComparator.ordinal()); return this; @@ -139,13 +140,15 @@ public class ColumnFamilyOptions extends RocksObject } @Override - public ColumnFamilyOptions setMergeOperator(final MergeOperator mergeOperator) { + public ColumnFamilyOptions setMergeOperator( + final MergeOperator mergeOperator) { setMergeOperator(nativeHandle_, mergeOperator.newMergeOperatorHandle()); return this; } public ColumnFamilyOptions setCompactionFilter( - final AbstractCompactionFilter> compactionFilter) { + final AbstractCompactionFilter> + compactionFilter) { setCompactionFilterHandle(nativeHandle_, compactionFilter.nativeHandle_); compactionFilter_ = compactionFilter; return this; @@ -205,7 +208,8 @@ public class ColumnFamilyOptions extends 
RocksObject } @Override - public ColumnFamilyOptions setCompressionType(final CompressionType compressionType) { + public ColumnFamilyOptions setCompressionType( + final CompressionType compressionType) { setCompressionType(nativeHandle_, compressionType.getValue()); return this; } @@ -522,7 +526,8 @@ public class ColumnFamilyOptions extends RocksObject @Override public ColumnFamilyOptions setMaxSequentialSkipInIterations( final long maxSequentialSkipInIterations) { - setMaxSequentialSkipInIterations(nativeHandle_, maxSequentialSkipInIterations); + setMaxSequentialSkipInIterations(nativeHandle_, + maxSequentialSkipInIterations); return this; } @@ -677,12 +682,12 @@ public class ColumnFamilyOptions extends RocksObject private native void optimizeUniversalStyleCompaction(long handle, long memtableMemoryBudget); private native void setComparatorHandle(long handle, int builtinComparator); - private native void setComparatorHandle(long optHandle, long comparatorHandle); - private native void setMergeOperatorName( - long handle, String name); - private native void setMergeOperator( - long handle, long mergeOperatorHandle); - private native void setCompactionFilterHandle(long handle, long compactionFilterHandle); + private native void setComparatorHandle(long optHandle, + long comparatorHandle); + private native void setMergeOperatorName(long handle, String name); + private native void setMergeOperator(long handle, long mergeOperatorHandle); + private native void setCompactionFilterHandle(long handle, + long compactionFilterHandle); private native void setWriteBufferSize(long handle, long writeBufferSize) throws IllegalArgumentException; private native long writeBufferSize(long handle); diff --git a/java/src/main/java/org/rocksdb/DBOptions.java b/java/src/main/java/org/rocksdb/DBOptions.java index a9ed2527a..878dd4d70 100644 --- a/java/src/main/java/org/rocksdb/DBOptions.java +++ b/java/src/main/java/org/rocksdb/DBOptions.java @@ -11,8 +11,8 @@ import 
java.util.Properties; * DBOptions to control the behavior of a database. It will be used * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). * - * If {@link #dispose()} function is not called, then it will be GC'd automatically - * and native resources will be released as part of the process. + * If {@link #dispose()} function is not called, then it will be GC'd + * automatically and native resources will be released as part of the process. */ public class DBOptions extends RocksObject implements DBOptionsInterface { static { diff --git a/java/src/main/java/org/rocksdb/DirectComparator.java b/java/src/main/java/org/rocksdb/DirectComparator.java index ba3fce798..d28804756 100644 --- a/java/src/main/java/org/rocksdb/DirectComparator.java +++ b/java/src/main/java/org/rocksdb/DirectComparator.java @@ -28,5 +28,6 @@ public abstract class DirectComparator extends AbstractComparator { return nativeHandle_; } - private native long createNewDirectComparator0(final long comparatorOptionsHandle); + private native long createNewDirectComparator0( + final long comparatorOptionsHandle); } diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java index 1bc45ed1a..56fd5ab18 100644 --- a/java/src/main/java/org/rocksdb/Options.java +++ b/java/src/main/java/org/rocksdb/Options.java @@ -12,8 +12,8 @@ import java.util.List; * Options to control the behavior of a database. It will be used * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). * - * If {@link #dispose()} function is not called, then it will be GC'd automatically - * and native resources will be released as part of the process. + * If {@link #dispose()} function is not called, then it will be GC'd + * automaticallyand native resources will be released as part of the process. 
*/ public class Options extends RocksObject implements DBOptionsInterface, ColumnFamilyOptionsInterface { @@ -41,7 +41,8 @@ public class Options extends RocksObject */ public Options(final DBOptions dbOptions, final ColumnFamilyOptions columnFamilyOptions) { - super(newOptions(dbOptions.nativeHandle_, columnFamilyOptions.nativeHandle_)); + super(newOptions(dbOptions.nativeHandle_, + columnFamilyOptions.nativeHandle_)); env_ = Env.getDefault(); } @@ -678,7 +679,8 @@ public class Options extends RocksObject } @Override - public Options setCompressionPerLevel(final List compressionLevels) { + public Options setCompressionPerLevel( + final List compressionLevels) { final byte[] byteCompressionTypes = new byte[ compressionLevels.size()]; for (int i = 0; i < compressionLevels.size(); i++) { @@ -973,7 +975,8 @@ public class Options extends RocksObject @Override public Options setMaxSequentialSkipInIterations( final long maxSequentialSkipInIterations) { - setMaxSequentialSkipInIterations(nativeHandle_, maxSequentialSkipInIterations); + setMaxSequentialSkipInIterations(nativeHandle_, + maxSequentialSkipInIterations); return this; } @@ -1189,7 +1192,8 @@ public class Options extends RocksObject private native void optimizeUniversalStyleCompaction(long handle, long memtableMemoryBudget); private native void setComparatorHandle(long handle, int builtinComparator); - private native void setComparatorHandle(long optHandle, long comparatorHandle); + private native void setComparatorHandle(long optHandle, + long comparatorHandle); private native void setMergeOperatorName( long handle, String name); private native void setMergeOperator( diff --git a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java index 5bc5dbe72..1beb45c46 100644 --- a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java @@ -8,7 +8,8 
@@ package org.rocksdb; /** * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++ */ -public class RemoveEmptyValueCompactionFilter extends AbstractCompactionFilter { +public class RemoveEmptyValueCompactionFilter + extends AbstractCompactionFilter { public RemoveEmptyValueCompactionFilter() { super(createNewRemoveEmptyValueCompactionFilter0()); } diff --git a/java/src/main/java/org/rocksdb/RestoreBackupableDB.java b/java/src/main/java/org/rocksdb/RestoreBackupableDB.java index 86610cc31..f303b1507 100644 --- a/java/src/main/java/org/rocksdb/RestoreBackupableDB.java +++ b/java/src/main/java/org/rocksdb/RestoreBackupableDB.java @@ -153,5 +153,6 @@ public class RestoreBackupableDB extends RocksObject { private native int[] getCorruptedBackups(long handle); private native void garbageCollect(long handle) throws RocksDBException; - @Override protected final native void disposeInternal(final long nativeHandle); + @Override protected final native void disposeInternal( + final long nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/RestoreOptions.java b/java/src/main/java/org/rocksdb/RestoreOptions.java index 9eecbc8e1..54b0eff28 100644 --- a/java/src/main/java/org/rocksdb/RestoreOptions.java +++ b/java/src/main/java/org/rocksdb/RestoreOptions.java @@ -16,11 +16,12 @@ public class RestoreOptions extends RocksObject { /** * Constructor * - * @param keepLogFiles If true, restore won't overwrite the existing log files in wal_dir. It - * will also move all log files from archive directory to wal_dir. Use this - * option in combination with BackupableDBOptions::backup_log_files = false - * for persisting in-memory databases. - * Default: false + * @param keepLogFiles If true, restore won't overwrite the existing log files + * in wal_dir. It will also move all log files from archive directory to + * wal_dir. Use this option in combination with + * BackupableDBOptions::backup_log_files = false for persisting in-memory + * databases. 
+ * Default: false */ public RestoreOptions(final boolean keepLogFiles) { super(newRestoreOptions(keepLogFiles)); diff --git a/java/src/main/java/org/rocksdb/TtlDB.java b/java/src/main/java/org/rocksdb/TtlDB.java index 8589478fe..72704893c 100644 --- a/java/src/main/java/org/rocksdb/TtlDB.java +++ b/java/src/main/java/org/rocksdb/TtlDB.java @@ -112,14 +112,15 @@ public class TtlDB extends RocksDB { final List ttlValues, final boolean readOnly) throws RocksDBException { if (columnFamilyDescriptors.size() != ttlValues.size()) { - throw new IllegalArgumentException("There must be a ttl value per column" + - "family handle."); + throw new IllegalArgumentException("There must be a ttl value per column" + + "family handle."); } final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; for (int i = 0; i < columnFamilyDescriptors.size(); i++) { - final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors.get(i); + final ColumnFamilyDescriptor cfDescriptor = + columnFamilyDescriptors.get(i); cfNames[i] = cfDescriptor.columnFamilyName(); cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_; } diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java index de614b1b5..d9e1098cb 100644 --- a/java/src/main/java/org/rocksdb/WriteBatch.java +++ b/java/src/main/java/org/rocksdb/WriteBatch.java @@ -84,13 +84,15 @@ public class WriteBatch extends AbstractWriteBatch { @Override final native void clear0(final long handle); private native static long newWriteBatch(final int reserved_bytes); - private native void iterate(final long handle, final long handlerHandle) throws RocksDBException; + private native void iterate(final long handle, final long handlerHandle) + throws RocksDBException; /** * Handler callback for iterating over the contents of a batch. 
*/ - public static abstract class Handler extends AbstractImmutableNativeReference { + public static abstract class Handler + extends AbstractImmutableNativeReference { private final long nativeHandle_; public Handler() { super(true); diff --git a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java index 469945f06..254bf7e6a 100644 --- a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java +++ b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java @@ -12,10 +12,10 @@ package org.rocksdb; * Calling put, merge, remove or putLogData calls the same function * as with {@link org.rocksdb.WriteBatch} whilst also building an index. * - * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator() }to create an iterator - * over the write batch or - * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)} to - * get an iterator for the database with Read-Your-Own-Writes like capability + * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to + * create an iterator over the write batch or + * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)} + * to get an iterator for the database with Read-Your-Own-Writes like capability */ public class WriteBatchWithIndex extends AbstractWriteBatch { /** @@ -56,9 +56,12 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { * inserting a duplicate key, in this way an iterator will never * show two entries with the same key. 
*/ - public WriteBatchWithIndex(final AbstractComparator> - fallbackIndexComparator, final int reservedBytes, final boolean overwriteKey) { - super(newWriteBatchWithIndex(fallbackIndexComparator.getNativeHandle(), reservedBytes, overwriteKey)); + public WriteBatchWithIndex( + final AbstractComparator> + fallbackIndexComparator, final int reservedBytes, + final boolean overwriteKey) { + super(newWriteBatchWithIndex(fallbackIndexComparator.getNativeHandle(), + reservedBytes, overwriteKey)); } /** @@ -70,9 +73,11 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { * time. * * @param columnFamilyHandle The column family to iterate over - * @return An iterator for the Write Batch contents, restricted to the column family + * @return An iterator for the Write Batch contents, restricted to the column + * family */ - public WBWIRocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle) { + public WBWIRocksIterator newIterator( + final ColumnFamilyHandle columnFamilyHandle) { return new WBWIRocksIterator(this, iterator1(nativeHandle_, columnFamilyHandle.nativeHandle_)); } @@ -97,11 +102,13 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { * as a delta and baseIterator as a base * * @param columnFamilyHandle The column family to iterate over - * @param baseIterator The base iterator, e.g. {@link org.rocksdb.RocksDB#newIterator()} - * @return An iterator which shows a view comprised of both the database point-in-time - * from baseIterator and modifications made in this write batch. + * @param baseIterator The base iterator, + * e.g. {@link org.rocksdb.RocksDB#newIterator()} + * @return An iterator which shows a view comprised of both the database + * point-in-time from baseIterator and modifications made in this write batch. 
*/ - public RocksIterator newIteratorWithBase(final ColumnFamilyHandle columnFamilyHandle, + public RocksIterator newIteratorWithBase( + final ColumnFamilyHandle columnFamilyHandle, final RocksIterator baseIterator) { RocksIterator iterator = new RocksIterator( baseIterator.parent_, @@ -116,14 +123,17 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { /** * Provides Read-Your-Own-Writes like functionality by * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator} - * as a delta and baseIterator as a base. Operates on the default column family. + * as a delta and baseIterator as a base. Operates on the default column + * family. * - * @param baseIterator The base iterator, e.g. {@link org.rocksdb.RocksDB#newIterator()} - * @return An iterator which shows a view comprised of both the database point-in-time - * from baseIterator and modifications made in this write batch. + * @param baseIterator The base iterator, + * e.g. {@link org.rocksdb.RocksDB#newIterator()} + * @return An iterator which shows a view comprised of both the database + * point-in-timefrom baseIterator and modifications made in this write batch. */ public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) { - return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator); + return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), + baseIterator); } @Override protected final native void disposeInternal(final long handle); @@ -153,5 +163,6 @@ public class WriteBatchWithIndex extends AbstractWriteBatch { final boolean overwriteKey); private native long iterator0(final long handle); private native long iterator1(final long handle, final long cfHandle); - private native long iteratorWithBase(final long handle, final long baseIteratorHandle, final long cfHandle); + private native long iteratorWithBase(final long handle, + final long baseIteratorHandle, final long cfHandle); }