From 5cef458a2c88245fa49b481004c91b170f2daa48 Mon Sep 17 00:00:00 2001 From: sdong Date: Wed, 16 Apr 2014 19:30:33 -0700 Subject: [PATCH 01/10] RocksDB 2.8 to be able to read files generated by 2.6 Summary: From 2.6 to 2.7, property block name is renamed from rocksdb.stats to rocksdb.properties. Older properties were not able to be loaded. In 2.8, we seem to have added some logic that uses property block without checking null pointers, which create segment faults. In this patch, we fix it by: (1) try rocksdb.stats if rocksdb.properties is not found (2) add some null checking before consuming rep->table_properties Test Plan: make sure a file generated in 2.7 couldn't be opened now can be opened. Reviewers: haobo, igor, yhchiang Reviewed By: igor CC: ljin, xjin, dhruba, kailiu, leveldb Differential Revision: https://reviews.facebook.net/D17961 --- table/block_based_table_reader.cc | 31 ++++++++++++++++++++++++------- table/block_based_table_reader.h | 5 +++++ table/meta_blocks.cc | 2 ++ table/table_properties.cc | 2 ++ 4 files changed, 33 insertions(+), 7 deletions(-) diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index f686239cb..4e827bcd0 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -365,8 +365,20 @@ Status BlockBasedTable::Open(const Options& options, const EnvOptions& soptions, s = ReadMetaBlock(rep, &meta, &meta_iter); // Read the properties + bool found_properties_block = true; meta_iter->Seek(kPropertiesBlock); - if (meta_iter->Valid() && meta_iter->key() == kPropertiesBlock) { + if (meta_iter->status().ok() && + (!meta_iter->Valid() || meta_iter->key() != kPropertiesBlock)) { + meta_iter->Seek(kPropertiesBlockOldName); + if (meta_iter->status().ok() && + (!meta_iter->Valid() || meta_iter->key() != kPropertiesBlockOldName)) { + found_properties_block = false; + Log(WARN_LEVEL, rep->options.info_log, + "Cannot find Properties block from file."); + } + } + + if 
(found_properties_block) { s = meta_iter->status(); TableProperties* table_properties = nullptr; if (s.ok()) { @@ -1019,11 +1031,13 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader) { // Some old version of block-based tables don't have index type present in // table properties. If that's the case we can safely use the kBinarySearch. auto index_type = BlockBasedTableOptions::kBinarySearch; - auto& props = rep_->table_properties->user_collected_properties; - auto pos = props.find(BlockBasedTablePropertyNames::kIndexType); - if (pos != props.end()) { - index_type = static_cast( - DecodeFixed32(pos->second.c_str())); + if (rep_->table_properties) { + auto& props = rep_->table_properties->user_collected_properties; + auto pos = props.find(BlockBasedTablePropertyNames::kIndexType); + if (pos != props.end()) { + index_type = static_cast( + DecodeFixed32(pos->second.c_str())); + } } auto file = rep_->file.get(); @@ -1082,7 +1096,10 @@ uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key) { // key is past the last key in the file. If table_properties is not // available, approximate the offset by returning the offset of the // metaindex block (which is right near the end of the file). - result = rep_->table_properties->data_size; + result = 0; + if (rep_->table_properties) { + result = rep_->table_properties->data_size; + } // table_properties is not present in the table. if (result == 0) { result = rep_->metaindex_handle.offset(); diff --git a/table/block_based_table_reader.h b/table/block_based_table_reader.h index 613460634..5cff9ce6a 100644 --- a/table/block_based_table_reader.h +++ b/table/block_based_table_reader.h @@ -12,6 +12,7 @@ #include #include #include +#include #include "rocksdb/statistics.h" #include "rocksdb/status.h" @@ -198,4 +199,8 @@ class BlockBasedTable : public TableReader { void operator=(const TableReader&) = delete; }; +// Backward compatible properties block name. Limited in block based +// table. 
+extern const std::string kPropertiesBlockOldName; + } // namespace rocksdb diff --git a/table/meta_blocks.cc b/table/meta_blocks.cc index 4465899fb..4e940b97e 100644 --- a/table/meta_blocks.cc +++ b/table/meta_blocks.cc @@ -244,6 +244,8 @@ Status ReadTableProperties(RandomAccessFile* file, uint64_t file_size, metaindex_block.NewIterator(BytewiseComparator())); // -- Read property block + // This function is not used by BlockBasedTable, so we don't have to + // worry about old properties block name. meta_iter->Seek(kPropertiesBlock); TableProperties table_properties; if (meta_iter->Valid() && diff --git a/table/table_properties.cc b/table/table_properties.cc index 2da1a975a..de9eb9477 100644 --- a/table/table_properties.cc +++ b/table/table_properties.cc @@ -91,5 +91,7 @@ const std::string TablePropertiesNames::kFixedKeyLen = "rocksdb.fixed.key.length"; extern const std::string kPropertiesBlock = "rocksdb.properties"; +// Old property block name for backward compatibility +extern const std::string kPropertiesBlockOldName = "rocksdb.stats"; } // namespace rocksdb From 62551b1c4ed617d9ceaac845832b1e51026f86d1 Mon Sep 17 00:00:00 2001 From: Igor Canadi Date: Thu, 17 Apr 2014 10:49:58 -0700 Subject: [PATCH 02/10] Don't compile sync_point if NDEBUG Summary: We don't really need sync_point.o if we're compiling with NDEBUG. 
This diff depends on D17823 Test Plan: compiles Reviewers: haobo, ljin, sdong Reviewed By: haobo CC: leveldb Differential Revision: https://reviews.facebook.net/D17829 --- Makefile | 2 ++ util/sync_point.cc | 2 ++ util/sync_point.h | 9 +++++---- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index f0f94026a..4307773dc 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,9 @@ ifeq ($(MAKECMDGOALS),shared_lib) PLATFORM_SHARED_LDFLAGS=-fPIC OPT += -DNDEBUG endif + ifeq ($(MAKECMDGOALS),static_lib) +PLATFORM_SHARED_LDFLAGS=-fPIC OPT += -DNDEBUG endif diff --git a/util/sync_point.cc b/util/sync_point.cc index 5d0ac2dd6..4e4c46a1f 100644 --- a/util/sync_point.cc +++ b/util/sync_point.cc @@ -5,6 +5,7 @@ #include "util/sync_point.h" +#ifndef NDEBUG namespace rocksdb { SyncPoint* SyncPoint::GetInstance() { @@ -60,3 +61,4 @@ void SyncPoint::Process(const std::string& point) { } } // namespace rocksdb +#endif // NDEBUG diff --git a/util/sync_point.h b/util/sync_point.h index 3cc892370..3e880213e 100644 --- a/util/sync_point.h +++ b/util/sync_point.h @@ -13,6 +13,10 @@ namespace rocksdb { +#ifdef NDEBUG +#define TEST_SYNC_POINT(x) +#else + // This class provides facility to reproduce race conditions deterministically // in unit tests. // Developer could specify sync points in the codebase via TEST_SYNC_POINT. @@ -72,8 +76,5 @@ class SyncPoint { // utilized to re-produce race conditions between threads. // See TransactionLogIteratorRace in db_test.cc for an example use case. // TEST_SYNC_POINT is no op in release build. 
-#ifdef NDEBUG -#define TEST_SYNC_POINT(x) -#else #define TEST_SYNC_POINT(x) rocksdb::SyncPoint::GetInstance()->Process(x) -#endif +#endif // NDEBUG From e37dd216f9384bfdabc6760fa296e8ee28c79d30 Mon Sep 17 00:00:00 2001 From: Kai Liu Date: Fri, 11 Apr 2014 13:55:26 -0700 Subject: [PATCH 03/10] Index type doesn't have to be persisted Summary: With the recent changes, there is no need to check the property block about the index block type. If user want to use it, they don't really need any disk format change; everything happens in the fly. Also another team encountered an error while reading the index type from properties. Test Plan: ran all the tests Reviewers: sdong CC: Task ID: # Blame Rev: --- table/block_based_table_reader.cc | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index 4e827bcd0..656a3c79e 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -1030,15 +1030,7 @@ bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options, Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader) { // Some old version of block-based tables don't have index type present in // table properties. If that's the case we can safely use the kBinarySearch. 
- auto index_type = BlockBasedTableOptions::kBinarySearch; - if (rep_->table_properties) { - auto& props = rep_->table_properties->user_collected_properties; - auto pos = props.find(BlockBasedTablePropertyNames::kIndexType); - if (pos != props.end()) { - index_type = static_cast( - DecodeFixed32(pos->second.c_str())); - } - } + auto index_type = rep_->index_type; auto file = rep_->file.get(); const auto& index_handle = rep_->index_handle; From fa430bfd04bf8e17d0cde2055cd223d13c2625d1 Mon Sep 17 00:00:00 2001 From: sdong Date: Thu, 17 Apr 2014 14:07:05 -0700 Subject: [PATCH 04/10] Minimize accessing multiple objects in Version::Get() Summary: One of our profilings shows that Version::Get() sometimes is slow when getting pointer of user comparators or other global objects. In this patch: (1) we keep pointers of immutable objects in Version to avoid accesses them though option objects or cfd objects (2) table_reader is directly cached in FileMetaData so that table cache don't have to go through handle first to fetch it (3) If level 0 has less than 3 files, skip the filtering logic based on SST tables' key range. 
Smallest and largest key are stored in separated memory locations, which has potential cache misses Test Plan: make all check Reviewers: haobo, ljin Reviewed By: haobo CC: igor, yhchiang, nkg-, leveldb Differential Revision: https://reviews.facebook.net/D17739 --- db/db_impl.cc | 4 +-- db/db_impl_readonly.cc | 2 +- db/table_cache.cc | 38 ++++++++++++++++------------- db/version_edit.h | 5 +++- db/version_set.cc | 55 ++++++++++++++++++++++++++---------------- db/version_set.h | 9 +++++-- 6 files changed, 69 insertions(+), 44 deletions(-) diff --git a/db/db_impl.cc b/db/db_impl.cc index 1dc8eb1dc..44f18fb48 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -3219,7 +3219,7 @@ Status DBImpl::GetImpl(const ReadOptions& options, PERF_TIMER_START(get_from_output_files_time); sv->current->Get(options, lkey, value, &s, &merge_context, &stats, - *cfd->options(), value_found); + value_found); have_stat_update = true; PERF_TIMER_STOP(get_from_output_files_time); RecordTick(options_.statistics.get(), MEMTABLE_MISS); @@ -3334,7 +3334,7 @@ std::vector DBImpl::MultiGet( // Done } else { super_version->current->Get(options, lkey, value, &s, &merge_context, - &mgd->stats, *cfd->options()); + &mgd->stats); mgd->have_stat_update = true; } diff --git a/db/db_impl_readonly.cc b/db/db_impl_readonly.cc index 435384bd3..43083746d 100644 --- a/db/db_impl_readonly.cc +++ b/db/db_impl_readonly.cc @@ -67,7 +67,7 @@ Status DBImplReadOnly::Get(const ReadOptions& options, } else { Version::GetStats stats; super_version->current->Get(options, lkey, value, &s, &merge_context, - &stats, *cfd->options()); + &stats); } return s; } diff --git a/db/table_cache.cc b/db/table_cache.cc index 36168d109..c73168185 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -106,19 +106,20 @@ Iterator* TableCache::NewIterator(const ReadOptions& options, if (table_reader_ptr != nullptr) { *table_reader_ptr = nullptr; } - Cache::Handle* handle = file_meta.table_reader_handle; + TableReader* table_reader = 
file_meta.table_reader; + Cache::Handle* handle = nullptr; Status s; - if (!handle) { + if (table_reader == nullptr) { s = FindTable(toptions, icomparator, file_meta.number, file_meta.file_size, &handle, nullptr, options.read_tier == kBlockCacheTier); + table_reader = GetTableReaderFromHandle(handle); } if (!s.ok()) { return NewErrorIterator(s); } - TableReader* table_reader = GetTableReaderFromHandle(handle); Iterator* result = table_reader->NewIterator(options); - if (!file_meta.table_reader_handle) { + if (handle != nullptr) { result->RegisterCleanup(&UnrefEntry, cache_, handle); } if (table_reader_ptr != nullptr) { @@ -138,17 +139,18 @@ Status TableCache::Get(const ReadOptions& options, bool (*saver)(void*, const ParsedInternalKey&, const Slice&, bool), bool* table_io, void (*mark_key_may_exist)(void*)) { - Cache::Handle* handle = file_meta.table_reader_handle; + TableReader* t = file_meta.table_reader; Status s; - if (!handle) { + Cache::Handle* handle = nullptr; + if (!t) { s = FindTable(storage_options_, internal_comparator, file_meta.number, file_meta.file_size, &handle, table_io, options.read_tier == kBlockCacheTier); + t = GetTableReaderFromHandle(handle); } if (s.ok()) { - TableReader* t = GetTableReaderFromHandle(handle); s = t->Get(options, k, arg, saver, mark_key_may_exist); - if (!file_meta.table_reader_handle) { + if (handle != nullptr) { ReleaseHandle(handle); } } else if (options.read_tier && s.IsIncomplete()) { @@ -164,15 +166,16 @@ Status TableCache::GetTableProperties( const FileMetaData& file_meta, std::shared_ptr* properties, bool no_io) { Status s; - auto table_handle = file_meta.table_reader_handle; + auto table_reader = file_meta.table_reader; // table already been pre-loaded? 
- if (table_handle) { - auto table = GetTableReaderFromHandle(table_handle); - *properties = table->GetTableProperties(); + if (table_reader) { + *properties = table_reader->GetTableProperties(); + return s; } bool table_io; + Cache::Handle* table_handle = nullptr; s = FindTable(toptions, internal_comparator, file_meta.number, file_meta.file_size, &table_handle, &table_io, no_io); if (!s.ok()) { @@ -190,20 +193,21 @@ bool TableCache::PrefixMayMatch(const ReadOptions& options, const FileMetaData& file_meta, const Slice& internal_prefix, bool* table_io) { bool may_match = true; - auto table_handle = file_meta.table_reader_handle; - if (table_handle == nullptr) { + auto table_reader = file_meta.table_reader; + Cache::Handle* table_handle = nullptr; + if (table_reader == nullptr) { // Need to get table handle from file number Status s = FindTable(storage_options_, icomparator, file_meta.number, file_meta.file_size, &table_handle, table_io); if (!s.ok()) { return may_match; } + table_reader = GetTableReaderFromHandle(table_handle); } - auto table = GetTableReaderFromHandle(table_handle); - may_match = table->PrefixMayMatch(internal_prefix); + may_match = table_reader->PrefixMayMatch(internal_prefix); - if (file_meta.table_reader_handle == nullptr) { + if (table_handle != nullptr) { // Need to release handle if it is generated from here. ReleaseHandle(table_handle); } diff --git a/db/version_edit.h b/db/version_edit.h index 98731cfb2..acaec8a4f 100644 --- a/db/version_edit.h +++ b/db/version_edit.h @@ -32,6 +32,8 @@ struct FileMetaData { // Needs to be disposed when refs becomes 0. 
Cache::Handle* table_reader_handle; + // Table reader in table_reader_handle + TableReader* table_reader; FileMetaData(uint64_t number, uint64_t file_size) : refs(0), @@ -39,7 +41,8 @@ struct FileMetaData { number(number), file_size(file_size), being_compacted(false), - table_reader_handle(nullptr) {} + table_reader_handle(nullptr), + table_reader(nullptr) {} FileMetaData() : FileMetaData(0, 0) {} }; diff --git a/db/version_set.cc b/db/version_set.cc index 12a8f670b..e6ece7c3e 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -483,6 +483,17 @@ bool BySmallestKey(FileMetaData* a, FileMetaData* b, Version::Version(ColumnFamilyData* cfd, VersionSet* vset, uint64_t version_number) : cfd_(cfd), + internal_comparator_((cfd == nullptr) ? nullptr + : &cfd->internal_comparator()), + user_comparator_((cfd == nullptr) + ? nullptr + : internal_comparator_->user_comparator()), + table_cache_((cfd == nullptr) ? nullptr : cfd->table_cache()), + merge_operator_((cfd == nullptr) ? nullptr + : cfd->options()->merge_operator.get()), + info_log_((cfd == nullptr) ? nullptr : cfd->options()->info_log.get()), + db_statistics_((cfd == nullptr) ? nullptr + : cfd->options()->statistics.get()), vset_(vset), next_(this), prev_(this), @@ -504,27 +515,22 @@ void Version::Get(const ReadOptions& options, Status* status, MergeContext* merge_context, GetStats* stats, - const Options& db_options, bool* value_found) { Slice ikey = k.internal_key(); Slice user_key = k.user_key(); - const Comparator* ucmp = cfd_->internal_comparator().user_comparator(); - - auto merge_operator = db_options.merge_operator.get(); - auto logger = db_options.info_log.get(); assert(status->ok() || status->IsMergeInProgress()); Saver saver; saver.state = status->ok()? 
kNotFound : kMerge; - saver.ucmp = ucmp; + saver.ucmp = user_comparator_; saver.user_key = user_key; saver.value_found = value_found; saver.value = value; - saver.merge_operator = merge_operator; + saver.merge_operator = merge_operator_; saver.merge_context = merge_context; - saver.logger = logger; + saver.logger = info_log_; saver.didIO = false; - saver.statistics = db_options.statistics.get(); + saver.statistics = db_statistics_; stats->seek_file = nullptr; stats->seek_file_level = -1; @@ -555,7 +561,7 @@ void Version::Get(const ReadOptions& options, // On Level-n (n>=1), files are sorted. // Binary search to find earliest index whose largest key >= ikey. // We will also stop when the file no longer overlaps ikey - start_index = FindFile(cfd_->internal_comparator(), files_[level], ikey); + start_index = FindFile(*internal_comparator_, files_[level], ikey); } // Traverse each relevant file to find the desired key @@ -564,8 +570,10 @@ void Version::Get(const ReadOptions& options, #endif for (uint32_t i = start_index; i < num_files; ++i) { FileMetaData* f = files[i]; - if (ucmp->Compare(user_key, f->smallest.user_key()) < 0 || - ucmp->Compare(user_key, f->largest.user_key()) > 0) { + // Skip key range filtering for level 0 if there are few level 0 files. + if ((level > 0 || num_files > 2) && + (user_comparator_->Compare(user_key, f->smallest.user_key()) < 0 || + user_comparator_->Compare(user_key, f->largest.user_key()) > 0)) { // Only process overlapping files. if (level > 0) { // If on Level-n (n>=1) then the files are sorted. @@ -581,8 +589,8 @@ void Version::Get(const ReadOptions& options, // Sanity check to make sure that the files are correctly sorted if (prev_file) { if (level != 0) { - int comp_sign = cfd_->internal_comparator().Compare( - prev_file->largest, f->smallest); + int comp_sign = + internal_comparator_->Compare(prev_file->largest, f->smallest); assert(comp_sign < 0); } else { // level == 0, the current file cannot be newer than the previous one. 
@@ -596,9 +604,8 @@ void Version::Get(const ReadOptions& options, prev_file = f; #endif bool tableIO = false; - *status = cfd_->table_cache()->Get(options, cfd_->internal_comparator(), - *f, ikey, &saver, SaveValue, &tableIO, - MarkKeyMayExist); + *status = table_cache_->Get(options, *internal_comparator_, *f, ikey, + &saver, SaveValue, &tableIO, MarkKeyMayExist); // TODO: examine the behavior for corrupted key if (!status->ok()) { return; @@ -643,12 +650,12 @@ void Version::Get(const ReadOptions& options, if (kMerge == saver.state) { // merge_operands are in saver and we hit the beginning of the key history // do a final merge of nullptr and operands; - if (merge_operator->FullMerge(user_key, nullptr, - saver.merge_context->GetOperands(), - value, logger)) { + if (merge_operator_->FullMerge(user_key, nullptr, + saver.merge_context->GetOperands(), value, + info_log_)) { *status = Status::OK(); } else { - RecordTick(db_options.statistics.get(), NUMBER_MERGE_FAILURES); + RecordTick(db_statistics_, NUMBER_MERGE_FAILURES); *status = Status::Corruption("could not perform end-of-key merge for ", user_key); } @@ -1458,6 +1465,12 @@ class VersionSet::Builder { base_->vset_->storage_options_, cfd_->internal_comparator(), file_meta->number, file_meta->file_size, &file_meta->table_reader_handle, &table_io, false); + if (file_meta->table_reader_handle != nullptr) { + // Load table_reader + file_meta->table_reader = + cfd_->table_cache()->GetTableReaderFromHandle( + file_meta->table_reader_handle); + } } } } diff --git a/db/version_set.h b/db/version_set.h index fd3e5c893..ef616f34b 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -88,8 +88,7 @@ class Version { int seek_file_level; }; void Get(const ReadOptions&, const LookupKey& key, std::string* val, - Status* status, MergeContext* merge_context, - GetStats* stats, const Options& db_option, + Status* status, MergeContext* merge_context, GetStats* stats, bool* value_found = nullptr); // Adds "stats" into the current 
state. Returns true if a new @@ -230,6 +229,12 @@ class Version { void UpdateFilesBySize(); ColumnFamilyData* cfd_; // ColumnFamilyData to which this Version belongs + const InternalKeyComparator* internal_comparator_; + const Comparator* user_comparator_; + TableCache* table_cache_; + const MergeOperator* merge_operator_; + Logger* info_log_; + Statistics* db_statistics_; VersionSet* vset_; // VersionSet to which this Version belongs Version* next_; // Next version in linked list Version* prev_; // Previous version in linked list From 86ae8203e6f62f60eeeeb2e75b4aa4d68abe2b8d Mon Sep 17 00:00:00 2001 From: Igor Canadi Date: Thu, 17 Apr 2014 14:29:06 -0700 Subject: [PATCH 05/10] Fix ifdef NDEBUG --- util/sync_point.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/sync_point.h b/util/sync_point.h index 3e880213e..b4b61a9fc 100644 --- a/util/sync_point.h +++ b/util/sync_point.h @@ -11,12 +11,12 @@ #include #include -namespace rocksdb { - #ifdef NDEBUG #define TEST_SYNC_POINT(x) #else +namespace rocksdb { + // This class provides facility to reproduce race conditions deterministically // in unit tests. // Developer could specify sync points in the codebase via TEST_SYNC_POINT. 
From ce353c2474c22a5a05fd29d13f079cfa2c3a20d1 Mon Sep 17 00:00:00 2001 From: Igor Canadi Date: Thu, 17 Apr 2014 14:43:42 -0700 Subject: [PATCH 06/10] Nuke tools/shell Summary: We don't use or build this code Test Plan: builds Reviewers: dhruba Reviewed By: dhruba CC: leveldb Differential Revision: https://reviews.facebook.net/D17979 --- tools/shell/DBClientProxy.cpp | 271 ------------------------- tools/shell/DBClientProxy.h | 64 ------ tools/shell/LeveldbShell.cpp | 8 - tools/shell/ShellContext.cpp | 104 ---------- tools/shell/ShellContext.h | 51 ----- tools/shell/ShellState.cpp | 139 ------------- tools/shell/ShellState.h | 87 -------- tools/shell/test/DBClientProxyTest.cpp | 182 ----------------- 8 files changed, 906 deletions(-) delete mode 100644 tools/shell/DBClientProxy.cpp delete mode 100644 tools/shell/DBClientProxy.h delete mode 100644 tools/shell/LeveldbShell.cpp delete mode 100644 tools/shell/ShellContext.cpp delete mode 100644 tools/shell/ShellContext.h delete mode 100644 tools/shell/ShellState.cpp delete mode 100644 tools/shell/ShellState.h delete mode 100644 tools/shell/test/DBClientProxyTest.cpp diff --git a/tools/shell/DBClientProxy.cpp b/tools/shell/DBClientProxy.cpp deleted file mode 100644 index 93277ac18..000000000 --- a/tools/shell/DBClientProxy.cpp +++ /dev/null @@ -1,271 +0,0 @@ - -#include - -#include "DBClientProxy.h" - - -#include "thrift/lib/cpp/protocol/TBinaryProtocol.h" -#include "thrift/lib/cpp/transport/TSocket.h" -#include "thrift/lib/cpp/transport/TTransportUtils.h" - - - -using namespace std; -using namespace boost; -using namespace Tleveldb; -using namespace apache::thrift::protocol; -using namespace apache::thrift::transport; - -namespace rocksdb { - -DBClientProxy::DBClientProxy(const string & host, int port) : - host_(host), - port_(port), - dbToHandle_(), - dbClient_() { -} - -DBClientProxy::~DBClientProxy() { - cleanUp(); -} - - -void DBClientProxy::connect(void) { - cleanUp(); - printf("Connecting to %s:%d\n", 
host_.c_str(), port_); - try { - boost::shared_ptr socket(new TSocket(host_, port_)); - boost::shared_ptr transport(new TBufferedTransport(socket)); - boost::shared_ptr protocol(new TBinaryProtocol(transport)); - dbClient_.reset(new DBClient(protocol)); - - transport->open(); - } catch (const std::exception & e) { - dbClient_.reset(); - throw; - } -} - -void DBClientProxy::cleanUp(void) { - if(dbClient_.get()) { - for(map::iterator itor = dbToHandle_.begin(); - itor != dbToHandle_.end(); - ++itor) { - dbClient_->Close(itor->second, itor->first); - } - dbClient_.reset(); - } - dbToHandle_.clear(); -} - -void DBClientProxy::open(const string & db) { - if(!dbClient_.get()) { - printf("please connect() first\n"); - return; - } - - // printf("opening database : %s\n", db.c_str()); - // we use default DBOptions here - DBOptions opt; - DBHandle handle; - try { - dbClient_->Open(handle, db, opt); - } catch (const LeveldbException & e) { - printf("%s\n", e.message.c_str()); - if(kIOError == e.errorCode) { - printf("no such database : %s\n", db.c_str()); - return; - }else { - printf("Unknown error : %d\n", e.errorCode); - return; - } - } - - dbToHandle_[db] = handle; -} - - -bool DBClientProxy::create(const string & db) { - if(!dbClient_.get()) { - printf("please connect() first\n"); - return false; - } - - printf("creating database : %s\n", db.c_str()); - DBOptions opt; - opt.create_if_missing = true; - opt.error_if_exists = true; - DBHandle handle; - try { - dbClient_->Open(handle, db, opt); - }catch (const LeveldbException & e) { - printf("%s\n", e.message.c_str()); - printf("error code : %d\n", e.errorCode); - if(kNotFound == e.errorCode) { - printf("no such database : %s\n", db.c_str()); - return false;; - } else { - printf("Unknown error : %d\n", e.errorCode); - return false; - } - } - - dbToHandle_[db] = handle; - return true; -} - - -map::iterator -DBClientProxy::getHandle(const string & db) { - map::iterator itor = dbToHandle_.find(db); - if(dbToHandle_.end() == 
itor) { - open(db); - itor = dbToHandle_.find(db); - } - - return itor; -} - - -bool DBClientProxy::get(const string & db, - const string & key, - string & value) { - if(!dbClient_.get()) { - printf("please connect() first\n"); - return false; - } - - map::iterator itor = getHandle(db); - if(dbToHandle_.end() == itor) { - return false; - } - - ResultItem ret; - Slice k; - k.data = key; - k.size = key.size(); - // we use default values of options here - ReadOptions opt; - dbClient_->Get(ret, - itor->second, - k, - opt); - if(kOk == ret.status) { - value = ret.value.data; - return true; - } else if(kNotFound == ret.status) { - printf("no such key : %s\n", key.c_str()); - return false; - } else { - printf("get data error : %d\n", ret.status); - return false; - } -} - - - -bool DBClientProxy::put(const string & db, - const string & key, - const string & value) { - if(!dbClient_.get()) { - printf("please connect() first\n"); - return false; - } - - map::iterator itor = getHandle(db); - if(dbToHandle_.end() == itor) { - return false; - } - - kv temp; - temp.key.data = key; - temp.key.size = key.size(); - temp.value.data = value; - temp.value.size = value.size(); - WriteOptions opt; - opt.sync = true; - Code code; - code = dbClient_->Put(itor->second, - temp, - opt); - - - if(kOk == code) { - // printf("set value finished\n"); - return true; - } else { - printf("put data error : %d\n", code); - return false; - } -} - -bool DBClientProxy::scan(const string & db, - const string & start_key, - const string & end_key, - const string & limit, - vector > & kvs) { - if(!dbClient_.get()) { - printf("please connect() first\n"); - return false; - } - - int limitInt = -1; - limitInt = atoi(limit.c_str()); - if(limitInt <= 0) { - printf("Error while parse limit : %s\n", limit.c_str()); - return false; - } - - if(start_key > end_key) { - printf("empty range.\n"); - return false; - } - - map::iterator itor = getHandle(db); - if(dbToHandle_.end() == itor) { - return false; - } - - 
ResultIterator ret; - // we use the default values of options here - ReadOptions opt; - Slice k; - k.data = start_key; - k.size = start_key.size(); - dbClient_->NewIterator(ret, - itor->second, - opt, - seekToKey, - k); - Iterator it; - if(kOk == ret.status) { - it = ret.iterator; - } else { - printf("get iterator error : %d\n", ret.status); - return false; - } - - int idx = 0; - string ck = start_key; - while(idx < limitInt && ck < end_key) { - ResultPair retPair; - dbClient_->GetNext(retPair, itor->second, it); - if(kOk == retPair.status) { - ++idx; - ck = retPair.keyvalue.key.data; - if (ck < end_key) { - kvs.push_back(make_pair(retPair.keyvalue.key.data, - retPair.keyvalue.value.data)); - } - } else if(kEnd == retPair.status) { - printf("not enough values\n"); - return true; - } else { - printf("GetNext() error : %d\n", retPair.status); - return false; - } - } - return true; -} - -} // namespace diff --git a/tools/shell/DBClientProxy.h b/tools/shell/DBClientProxy.h deleted file mode 100644 index fba228b99..000000000 --- a/tools/shell/DBClientProxy.h +++ /dev/null @@ -1,64 +0,0 @@ - -#ifndef TOOLS_SHELL_DBCLIENTPROXY -#define TOOLS_SHELL_DBCLIENTPROXY - -#include -#include -#include -#include -#include - -#include "DB.h" - -/* - * class DBClientProxy maintains: - * 1. a connection to rocksdb service - * 2. a map from db names to opened db handles - * - * it's client codes' responsibility to catch all possible exceptions. 
- */ - -namespace rocksdb { - -class DBClientProxy : private boost::noncopyable { - public: - // connect to host_:port_ - void connect(void); - - // return true on success, false otherwise - bool get(const std::string & db, - const std::string & key, - std::string & value); - - // return true on success, false otherwise - bool put(const std::string & db, - const std::string & key, - const std::string & value); - - // return true on success, false otherwise - bool scan(const std::string & db, - const std::string & start_key, - const std::string & end_key, - const std::string & limit, - std::vector > & kvs); - - // return true on success, false otherwise - bool create(const std::string & db); - - DBClientProxy(const std::string & host, int port); - ~DBClientProxy(); - - private: - // some internal help functions - void cleanUp(void); - void open(const std::string & db); - std::map::iterator getHandle(const std::string & db); - - const std::string host_; - const int port_; - std::map dbToHandle_; - boost::shared_ptr dbClient_; -}; - -} // namespace -#endif diff --git a/tools/shell/LeveldbShell.cpp b/tools/shell/LeveldbShell.cpp deleted file mode 100644 index e6274d3bf..000000000 --- a/tools/shell/LeveldbShell.cpp +++ /dev/null @@ -1,8 +0,0 @@ - - -#include "ShellContext.h" - -int main(int argc, char ** argv) { - ShellContext c(argc, argv); - c.run(); -} diff --git a/tools/shell/ShellContext.cpp b/tools/shell/ShellContext.cpp deleted file mode 100644 index 05a9bb81c..000000000 --- a/tools/shell/ShellContext.cpp +++ /dev/null @@ -1,104 +0,0 @@ - -#include -#include - -#include "ShellContext.h" -#include "ShellState.h" - - - -#include "thrift/lib/cpp/protocol/TBinaryProtocol.h" -#include "thrift/lib/cpp/transport/TSocket.h" -#include "thrift/lib/cpp/transport/TTransportUtils.h" - - - -using namespace std; -using namespace boost; -using namespace Tleveldb; -using namespace rocksdb; -using namespace apache::thrift::protocol; -using namespace apache::thrift::transport; - 
-void ShellContext::changeState(ShellState * pState) { - pShellState_ = pState; -} - -void ShellContext::stop(void) { - exit_ = true; -} - -bool ShellContext::ParseInput(void) { - if(argc_ != 3) { - printf("leveldb_shell host port\n"); - return false; - } - - port_ = atoi(argv_[2]); - if(port_ <= 0) { - printf("Error while parse port : %s\n", argv_[2]); - return false; - } - - clientProxy_.reset(new DBClientProxy(argv_[1], port_)); - if(!clientProxy_.get()) { - return false; - } else { - return true; - } -} - -void ShellContext::connect(void) { - clientProxy_->connect(); -} - -void ShellContext::create(const string & db) { - if (clientProxy_->create(db)) { - printf("%s created\n", db.c_str()); - } -} - -void ShellContext::get(const string & db, - const string & key) { - string v; - if (clientProxy_->get(db, key, v)) { - printf("%s\n", v.c_str()); - } -} - -void ShellContext::put(const string & db, - const string & key, - const string & value) { - if (clientProxy_->put(db, key, value)) { - printf("(%s, %s) has been set\n", key.c_str(), value.c_str()); - } -} - -void ShellContext::scan(const string & db, - const string & start_key, - const string & end_key, - const string & limit) { - vector > kvs; - if (clientProxy_->scan(db, start_key, end_key, limit, kvs)) { - for(unsigned int i = 0; i < kvs.size(); ++i) { - printf("%d (%s, %s)\n", i, kvs[i].first.c_str(), kvs[i].second.c_str()); - } - } -} - -void ShellContext::run(void) { - while(!exit_) { - pShellState_->run(this); - } -} - -ShellContext::ShellContext(int argc, char ** argv) : - pShellState_(ShellStateStart::getInstance()), - exit_(false), - argc_(argc), - argv_(argv), - port_(-1), - clientProxy_() { -} - - diff --git a/tools/shell/ShellContext.h b/tools/shell/ShellContext.h deleted file mode 100644 index 5c2b94482..000000000 --- a/tools/shell/ShellContext.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef TOOLS_SHELL_SHELLCONTEXT -#define TOOLS_SHELL_SHELLCONTEXT - -#include -#include -#include -#include - -#include 
"DB.h" -#include "DBClientProxy.h" - -class ShellState; - -class ShellContext : private boost::noncopyable { - public: - void changeState(ShellState * pState); - - void stop(void); - - bool ParseInput(void); - - void connect(void); - - void get(const std::string & db, - const std::string & key); - - void put(const std::string & db, - const std::string & key, - const std::string & value); - - void scan(const std::string & db, - const std::string & start_key, - const std::string & end_key, - const std::string & limit); - - void create(const std::string & db); - - void run(void); - - ShellContext(int argc, char ** argv); - - private: - ShellState * pShellState_; - bool exit_; - int argc_; - char ** argv_; - int port_; - boost::shared_ptr clientProxy_; -}; - -#endif diff --git a/tools/shell/ShellState.cpp b/tools/shell/ShellState.cpp deleted file mode 100644 index 057a337aa..000000000 --- a/tools/shell/ShellState.cpp +++ /dev/null @@ -1,139 +0,0 @@ -#include -#include -#include -#include - -#include "ShellState.h" -#include "ShellContext.h" -#include "transport/TTransportException.h" - -using namespace std; - -using namespace apache::thrift::transport; - -const char * PMT = ">> "; - - -void ShellStateStart::run(ShellContext * c) { - if(!c->ParseInput()) { - c->changeState(ShellStateStop::getInstance()); - } else { - c->changeState(ShellStateConnecting::getInstance()); - } -} - - -void ShellStateStop::run(ShellContext * c) { - c->stop(); -} - -void ShellStateConnecting::run(ShellContext * c) { - try { - c->connect(); - } catch (const TTransportException & e) { - cout << e.what() << endl; - c->changeState(ShellStateStop::getInstance()); - return; - } - - c->changeState(ShellStateConnected::getInstance()); -} - -void ShellStateConnected::unknownCmd(void) { - cout << "Unknown command!" 
<< endl; - cout << "Use help to list all available commands" << endl; -} - -void ShellStateConnected::helpMsg(void) { - cout << "Currently supported commands:" << endl; - cout << "create db" << endl; - cout << "get db key" << endl; - cout << "scan db start_key end_key limit" << endl; - cout << "put db key value" << endl; - cout << "exit/quit" << endl; -} - -void ShellStateConnected::handleConError(ShellContext * c) { - cout << "Connection down" << endl; - cout << "Reconnect ? (y/n) :" << endl; - string s; - while(getline(cin, s)) { - if("y" == s) { - c->changeState(ShellStateConnecting::getInstance()); - break; - } else if("n" == s) { - c->changeState(ShellStateStop::getInstance()); - break; - } else { - cout << "Reconnect ? (y/n) :" << endl; - } - } -} - -void ShellStateConnected::run(ShellContext * c) { - string line; - cout << PMT; - getline(cin, line); - istringstream is(line); - vector params; - string param; - while(is >> param) { - params.push_back(param); - } - - // empty input line - if(params.empty()) - return; - - if("quit" == params[0] || "exit" == params[0]) { - c->changeState(ShellStateStop::getInstance()); - } else if("get" == params[0]) { - if(params.size() == 3) { - try { - c->get(params[1], params[2]); - } catch (const TTransportException & e) { - cout << e.what() << endl; - handleConError(c); - } - } else { - unknownCmd(); - } - } else if("create" == params[0]) { - if(params.size() == 2) { - try { - c->create(params[1]); - } catch (const TTransportException & e) { - cout << e.what() << endl; - handleConError(c); - } - } else { - unknownCmd(); - } - }else if("put" == params[0]) { - if(params.size() == 4) { - try { - c->put(params[1], params[2], params[3]); - } catch (const TTransportException & e) { - cout << e.what() << endl; - handleConError(c); - } - } else { - unknownCmd(); - } - } else if("scan" == params[0]) { - if(params.size() == 5) { - try { - c->scan(params[1], params[2], params[3], params[4]); - } catch (const TTransportException & e) { 
- cout << e.what() << endl; - handleConError(c); - } - } else { - unknownCmd(); - } - } else if("help" == params[0]) { - helpMsg(); - } else { - unknownCmd(); - } -} diff --git a/tools/shell/ShellState.h b/tools/shell/ShellState.h deleted file mode 100644 index 4027af202..000000000 --- a/tools/shell/ShellState.h +++ /dev/null @@ -1,87 +0,0 @@ - -#ifndef TOOLS_SHELL_SHELLSTATE -#define TOOLS_SHELL_SHELLSTATE - -class ShellContext; - -/* - * Currently, there are four types of state in total - * 1. start state: the first state the program enters - * 2. connecting state: the program try to connect to a rocksdb server, whose - * previous states could be "start" or "connected" states - * 3. connected states: the program has already connected to a server, and is - * processing user commands - * 4. stop state: the last state the program enters, do some cleaning up things - */ - -class ShellState { - public: - virtual void run(ShellContext *) = 0; - virtual ~ShellState() {} -}; - - -class ShellStateStart : public ShellState { - public: - static ShellStateStart * getInstance(void) { - static ShellStateStart instance; - return &instance; - } - - virtual void run(ShellContext *); - - private: - ShellStateStart() {} - virtual ~ShellStateStart() {} -}; - -class ShellStateStop : public ShellState { - public: - static ShellStateStop * getInstance(void) { - static ShellStateStop instance; - return &instance; - } - - virtual void run(ShellContext *); - - private: - ShellStateStop() {} - virtual ~ShellStateStop() {} - -}; - -class ShellStateConnecting : public ShellState { - public: - static ShellStateConnecting * getInstance(void) { - static ShellStateConnecting instance; - return &instance; - } - - virtual void run(ShellContext *); - - private: - ShellStateConnecting() {} - virtual ~ShellStateConnecting() {} - -}; - -class ShellStateConnected : public ShellState { - public: - static ShellStateConnected * getInstance(void) { - static ShellStateConnected instance; - return &instance; 
- } - - virtual void run(ShellContext *); - - private: - ShellStateConnected() {} - virtual ~ShellStateConnected() {} - - void unknownCmd(); - void handleConError(ShellContext *); - void helpMsg(); -}; - -#endif - diff --git a/tools/shell/test/DBClientProxyTest.cpp b/tools/shell/test/DBClientProxyTest.cpp deleted file mode 100644 index 3b64ffc5f..000000000 --- a/tools/shell/test/DBClientProxyTest.cpp +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Tests for DBClientProxy class for leveldb - * @author Bo Liu (newpoo.liu@gmail.com) - * Copyright 2012 Facebook - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "server_options.h" - - -#include "../DBClientProxy.h" -using namespace rocksdb; - - -using namespace apache::thrift; -using namespace apache::thrift::protocol; -using namespace apache::thrift::transport; -using boost::shared_ptr; -using namespace Tleveldb; -using namespace std; - - - -extern "C" void startServer(int argc, char**argv); -extern "C" void stopServer(int port); -extern ServerOptions server_options; - -static const string db1("db1"); - - -static void testDBClientProxy(DBClientProxy & dbcp) { - bool flag; - const int NOK = 100; - const int BUFSIZE = 16; - int testcase = 0; - - vector keys, values; - vector > kvs, correctKvs; - string k, v; - - for(int i = 0; i < NOK; ++i) { - char bufKey[BUFSIZE]; - char bufValue[BUFSIZE]; - snprintf(bufKey, BUFSIZE, "key%d", i); - snprintf(bufValue, BUFSIZE, "value%d", i); - keys.push_back(bufKey); - values.push_back(bufValue); - correctKvs.push_back((make_pair(string(bufKey), string(bufValue)))); - } - - sort(correctKvs.begin(), correctKvs.end()); - - - // can not do get(), put(), scan() or create() before connected. 
- flag = dbcp.get(db1, keys[0], v); - ASSERT_TRUE(false == flag); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - flag = dbcp.put(db1, keys[0], keys[1]); - ASSERT_TRUE(false == flag); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - flag = dbcp.scan(db1, "a", "w", "100", kvs); - ASSERT_TRUE(false == flag); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - flag = dbcp.create(db1); - ASSERT_TRUE(false == flag); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - - dbcp.connect(); - - // create a database - flag = dbcp.create(db1); - ASSERT_TRUE(true == flag); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - - // no such key - flag = dbcp.get(db1, keys[0], v); - ASSERT_TRUE(false == flag); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - - - // scan() success with empty returned key-value pairs - kvs.clear(); - flag = dbcp.scan(db1, "a", "w", "100", kvs); - ASSERT_TRUE(true == flag); - ASSERT_TRUE(kvs.empty()); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - - - // put() - for(int i = 0; i < NOK; ++i) { - flag = dbcp.put(db1, keys[i], values[i]); - ASSERT_TRUE(true == flag); - } - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - - - // scan all of key-value pairs - kvs.clear(); - flag = dbcp.scan(db1, "a", "w", "100", kvs); - ASSERT_TRUE(true == flag); - ASSERT_TRUE(kvs == correctKvs); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - - - // scan the first 20 key-value pairs - { - kvs.clear(); - flag = dbcp.scan(db1, "a", "w", "20", kvs); - ASSERT_TRUE(true == flag); - vector > tkvs(correctKvs.begin(), correctKvs.begin() + 20); - ASSERT_TRUE(kvs == tkvs); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - } - - // scan key[10] to key[50] - { - kvs.clear(); - flag = dbcp.scan(db1, 
correctKvs[10].first, correctKvs[50].first, "100", kvs); - ASSERT_TRUE(true == flag); - - vector > tkvs(correctKvs.begin() + 10, correctKvs.begin() + 50); - ASSERT_TRUE(kvs == tkvs); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - } - - // scan "key10" to "key40" by limit constraint - { - kvs.clear(); - flag = dbcp.scan(db1, correctKvs[10].first.c_str(), "w", "30", kvs); - ASSERT_TRUE(true == flag); - vector > tkvs(correctKvs.begin() + 10, correctKvs.begin() + 40); - ASSERT_TRUE(kvs == tkvs); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - } - - - // get() - flag = dbcp.get(db1, "unknownKey", v); - ASSERT_TRUE(false == flag); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); - - flag = dbcp.get(db1, keys[0], v); - ASSERT_TRUE(true == flag); - ASSERT_TRUE(v == values[0]); - printf("\033[01;40;32mTEST CASE %d passed\033[01;40;37m\n", ++testcase); -} - - - -static void cleanupDir(std::string dir) { - // remove old data, if any - char* cleanup = new char[100]; - snprintf(cleanup, 100, "rm -rf %s", dir.c_str()); - system(cleanup); -} - -int main(int argc, char **argv) { - // create a server - startServer(argc, argv); - printf("Server thread created.\n"); - - // give some time to the server to initialize itself - while (server_options.getPort() == 0) { - sleep(1); - } - - cleanupDir(server_options.getDataDirectory(db1)); - - DBClientProxy dbcp("localhost", server_options.getPort()); - testDBClientProxy(dbcp); -} - From 651792251a6d0593e5b6326a7b49ee548158f9b5 Mon Sep 17 00:00:00 2001 From: sdong Date: Thu, 17 Apr 2014 15:14:04 -0700 Subject: [PATCH 07/10] Fix bugs introduced by D17961 Summary: D17961 has two bugs: (1) two level iterator fails to populate FileMetaData.table_reader, causing performance regression. (2) table cache handle the !status.ok() case in the wrong place, causing seg fault which shouldn't happen. 
Test Plan: make all check Reviewers: ljin, igor, haobo Reviewed By: ljin CC: yhchiang, dhruba, leveldb Differential Revision: https://reviews.facebook.net/D17991 --- db/table_cache.cc | 10 ++++++---- db/version_set.cc | 8 ++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/db/table_cache.cc b/db/table_cache.cc index c73168185..395951324 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -112,11 +112,11 @@ Iterator* TableCache::NewIterator(const ReadOptions& options, if (table_reader == nullptr) { s = FindTable(toptions, icomparator, file_meta.number, file_meta.file_size, &handle, nullptr, options.read_tier == kBlockCacheTier); + if (!s.ok()) { + return NewErrorIterator(s); + } table_reader = GetTableReaderFromHandle(handle); } - if (!s.ok()) { - return NewErrorIterator(s); - } Iterator* result = table_reader->NewIterator(options); if (handle != nullptr) { @@ -146,7 +146,9 @@ Status TableCache::Get(const ReadOptions& options, s = FindTable(storage_options_, internal_comparator, file_meta.number, file_meta.file_size, &handle, table_io, options.read_tier == kBlockCacheTier); - t = GetTableReaderFromHandle(handle); + if (s.ok()) { + t = GetTableReaderFromHandle(handle); + } } if (s.ok()) { s = t->Get(options, k, arg, saver, mark_key_may_exist); diff --git a/db/version_set.cc b/db/version_set.cc index e6ece7c3e..704f2a929 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -151,7 +151,7 @@ namespace { struct EncodedFileMetaData { uint64_t number; // file number uint64_t file_size; // file size - Cache::Handle* table_reader_handle; // cached table reader's handler + TableReader* table_reader; // cached table reader }; } // namespace @@ -199,7 +199,7 @@ class Version::LevelFileNumIterator : public Iterator { auto* file_meta = (*flist_)[index_]; current_value_.number = file_meta->number; current_value_.file_size = file_meta->file_size; - current_value_.table_reader_handle = file_meta->table_reader_handle; + current_value_.table_reader = 
file_meta->table_reader; return Slice(reinterpret_cast(¤t_value_), sizeof(EncodedFileMetaData)); } @@ -231,7 +231,7 @@ static Iterator* GetFileIterator(void* arg, const ReadOptions& options, const EncodedFileMetaData* encoded_meta = reinterpret_cast(file_value.data()); FileMetaData meta(encoded_meta->number, encoded_meta->file_size); - meta.table_reader_handle = encoded_meta->table_reader_handle; + meta.table_reader = encoded_meta->table_reader; return cache->NewIterator( options.prefix ? options_copy : options, soptions, icomparator, meta, nullptr /* don't need reference to table*/, for_compaction); @@ -257,7 +257,7 @@ bool Version::PrefixMayMatch(const ReadOptions& options, reinterpret_cast( level_iter->value().data()); FileMetaData meta(encoded_meta->number, encoded_meta->file_size); - meta.table_reader_handle = encoded_meta->table_reader_handle; + meta.table_reader = encoded_meta->table_reader; may_match = cfd_->table_cache()->PrefixMayMatch( options, cfd_->internal_comparator(), meta, internal_prefix, nullptr); } From bb6fd15a6ee2572fc7e1cb0f54c2647f3ba797f1 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang Date: Thu, 17 Apr 2014 17:28:51 -0700 Subject: [PATCH 08/10] [Java] Add a basic binding and test for BackupableDB and StackableDB. Summary: Add a skeleton binding and test for BackupableDB which shows that BackupableDB and RocksDB can share the same JNI calls. 
Test Plan: make rocksdbjava make jtest Reviewers: haobo, ankgup87, sdong, dhruba Reviewed By: haobo CC: leveldb Differential Revision: https://reviews.facebook.net/D17793 --- Makefile | 2 +- java/Makefile | 4 +- java/org/rocksdb/BackupableDB.java | 84 ++++++++++++++++++++ java/org/rocksdb/BackupableDBOptions.java | 52 +++++++++++++ java/org/rocksdb/Options.java | 4 + java/org/rocksdb/RocksDB.java | 22 +++--- java/org/rocksdb/test/BackupableDBTest.java | 41 ++++++++++ java/rocksjni/backupablejni.cc | 85 +++++++++++++++++++++ java/rocksjni/portal.h | 34 +++++++++ 9 files changed, 315 insertions(+), 13 deletions(-) create mode 100644 java/org/rocksdb/BackupableDB.java create mode 100644 java/org/rocksdb/BackupableDBOptions.java create mode 100644 java/org/rocksdb/test/BackupableDBTest.java create mode 100644 java/rocksjni/backupablejni.cc diff --git a/Makefile b/Makefile index 4307773dc..8712ac3b5 100644 --- a/Makefile +++ b/Makefile @@ -422,7 +422,7 @@ ldb: tools/ldb.o $(LIBOBJECTS) # --------------------------------------------------------------------------- # Jni stuff # --------------------------------------------------------------------------- -JNI_NATIVE_SOURCES = ./java/rocksjni/rocksjni.cc ./java/rocksjni/options.cc ./java/rocksjni/write_batch.cc +JNI_NATIVE_SOURCES = ./java/rocksjni/*.cc JAVA_INCLUDE = -I/usr/lib/jvm/java-openjdk/include/ -I/usr/lib/jvm/java-openjdk/include/linux ROCKSDBJNILIB = ./java/librocksdbjni.so diff --git a/java/Makefile b/java/Makefile index 0f971433f..c4b80b3e9 100644 --- a/java/Makefile +++ b/java/Makefile @@ -1,4 +1,4 @@ -NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions +NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions NATIVE_INCLUDE = 
./include ROCKSDB_JAR = rocksdbjni.jar @@ -21,7 +21,9 @@ sample: java @rm -rf /tmp/rocksdbjni_not_found test: java + javac org/rocksdb/test/*.java java -ea -Djava.library.path=.:../ -cp "$(ROCKSDB_JAR):.:./*" org.rocksdb.WriteBatchTest + java -ea -Djava.library.path=.:../ -cp "$(ROCKSDB_JAR):.:./*" org.rocksdb.test.BackupableDBTest db_bench: java javac org/rocksdb/benchmark/*.java diff --git a/java/org/rocksdb/BackupableDB.java b/java/org/rocksdb/BackupableDB.java new file mode 100644 index 000000000..100680ebd --- /dev/null +++ b/java/org/rocksdb/BackupableDB.java @@ -0,0 +1,84 @@ +// Copyright (c) 2014, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * A subclass of RocksDB which supports backup-related operations. + * + * @see BackupableDBOptions + */ +public class BackupableDB extends RocksDB { + /** + * Open a BackupableDB under the specified path. + * Note that the backup path should be set properly in the + * input BackupableDBOptions. + * + * @param opt options for db. + * @param bopt backup related options. + * @param the db path for storing data. The path for storing + * backup should be specified in the BackupableDBOptions. + * @return reference to the opened BackupableDB. + */ + public static BackupableDB open( + Options opt, BackupableDBOptions bopt, String db_path) + throws RocksDBException { + // since BackupableDB c++ will handle the life cycle of + // the returned RocksDB of RocksDB.open(), here we store + // it as a BackupableDB member variable to avoid GC. + BackupableDB bdb = new BackupableDB(RocksDB.open(opt, db_path)); + bdb.open(bdb.db_.nativeHandle_, bopt.nativeHandle_); + + return bdb; + } + + /** + * Captures the state of the database in the latest backup. 
+ * Note that this function is not thread-safe. + * + * @param flushBeforeBackup if true, then all data will be flushed + * before creating backup. + */ + public void createNewBackup(boolean flushBeforeBackup) { + createNewBackup(nativeHandle_, flushBeforeBackup); + } + + + /** + * Close the BackupableDB instance and release resource. + * + * Internally, BackupableDB owns the rocksdb::DB pointer to its + * associated RocksDB. The release of that RocksDB pointer is + * handled in the destructor of the c++ rocksdb::BackupableDB and + * should be transparent to Java developers. + */ + @Override public synchronized void close() { + if (isOpened()) { + super.close0(); + } + } + + /** + * A protected constructor that will be used in the static factory + * method BackupableDB.open(). + */ + protected BackupableDB(RocksDB db) { + super(); + db_ = db; + } + + @Override protected void finalize() { + close(); + } + + private boolean isOpened() { + return nativeHandle_ != 0; + } + + protected native void open(long rocksDBHandle, long backupDBOptionsHandle); + protected native void createNewBackup(long handle, boolean flag); + + private final RocksDB db_; +} diff --git a/java/org/rocksdb/BackupableDBOptions.java b/java/org/rocksdb/BackupableDBOptions.java new file mode 100644 index 000000000..209c2bc47 --- /dev/null +++ b/java/org/rocksdb/BackupableDBOptions.java @@ -0,0 +1,52 @@ +// Copyright (c) 2014, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * BackupableDBOptions to control the behavior of a backupable database. + * It will be used during the creation of a BackupableDB. + * + * Note that dispose() must be called before an Options instance + * becomes out-of-scope to release the allocated memory in c++. 
+ */ +public class BackupableDBOptions { + public BackupableDBOptions(String path) { + newBackupableDBOptions(path); + } + + /** + * Returns the path to the BackupableDB directory. + * + * @return the path to the BackupableDB directory. + */ + public String backupDir() { + assert(isInitialized()); + return backupDir(nativeHandle_); + } + + /** + * Release the memory allocated for the current instance + * in the c++ side. + */ + public synchronized void dispose() { + if (isInitialized()) { + dispose(nativeHandle_); + } + } + + @Override protected void finalize() { + dispose(); + } + + boolean isInitialized() { + return nativeHandle_ != 0; + } + + private native void newBackupableDBOptions(String path); + private native String backupDir(long handle); + private native void dispose(long handle); + long nativeHandle_; +} diff --git a/java/org/rocksdb/Options.java b/java/org/rocksdb/Options.java index 13c05bc3f..256318eae 100644 --- a/java/org/rocksdb/Options.java +++ b/java/org/rocksdb/Options.java @@ -228,6 +228,10 @@ public class Options { } } + @Override protected void finalize() { + dispose(); + } + private boolean isInitialized() { return (nativeHandle_ != 0); } diff --git a/java/org/rocksdb/RocksDB.java b/java/org/rocksdb/RocksDB.java index 896c37012..1d6f0fb4e 100644 --- a/java/org/rocksdb/RocksDB.java +++ b/java/org/rocksdb/RocksDB.java @@ -145,33 +145,33 @@ public class RocksDB { /** * Private constructor. 
*/ - private RocksDB() { + protected RocksDB() { nativeHandle_ = 0; } // native methods - private native void open( + protected native void open( long optionsHandle, long cacheSize, String path) throws RocksDBException; - private native void put( + protected native void put( long handle, byte[] key, int keyLen, byte[] value, int valueLen) throws RocksDBException; - private native void put( + protected native void put( long handle, long writeOptHandle, byte[] key, int keyLen, byte[] value, int valueLen) throws RocksDBException; - private native void write( + protected native void write( long writeOptHandle, long batchHandle) throws RocksDBException; - private native int get( + protected native int get( long handle, byte[] key, int keyLen, byte[] value, int valueLen) throws RocksDBException; - private native byte[] get( + protected native byte[] get( long handle, byte[] key, int keyLen) throws RocksDBException; - private native void remove( + protected native void remove( long handle, byte[] key, int keyLen) throws RocksDBException; - private native void remove( + protected native void remove( long handle, long writeOptHandle, byte[] key, int keyLen) throws RocksDBException; - private native void close0(); + protected native void close0(); - private long nativeHandle_; + protected long nativeHandle_; } diff --git a/java/org/rocksdb/test/BackupableDBTest.java b/java/org/rocksdb/test/BackupableDBTest.java new file mode 100644 index 000000000..8af51dd16 --- /dev/null +++ b/java/org/rocksdb/test/BackupableDBTest.java @@ -0,0 +1,41 @@ +// Copyright (c) 2014, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +package org.rocksdb.test; + +import org.rocksdb.*; + +public class BackupableDBTest { + static final String db_path = "/tmp/backupablejni_db"; + static final String backup_path = "/tmp/backupablejni_db_backup"; + static { + System.loadLibrary("rocksdbjni"); + } + public static void main(String[] args) { + + Options opt = new Options(); + opt.setCreateIfMissing(true); + + BackupableDBOptions bopt = new BackupableDBOptions(backup_path); + BackupableDB bdb = null; + + try { + bdb = BackupableDB.open(opt, bopt, db_path); + bdb.put("hello".getBytes(), "BackupableDB".getBytes()); + bdb.createNewBackup(true); + byte[] value = bdb.get("hello".getBytes()); + assert(new String(value).equals("BackupableDB")); + } catch (RocksDBException e) { + System.err.format("[ERROR]: %s%n", e); + e.printStackTrace(); + } finally { + opt.dispose(); + bopt.dispose(); + if (bdb != null) { + bdb.close(); + } + } + } +} diff --git a/java/rocksjni/backupablejni.cc b/java/rocksjni/backupablejni.cc new file mode 100644 index 000000000..8b57a0c62 --- /dev/null +++ b/java/rocksjni/backupablejni.cc @@ -0,0 +1,85 @@ +// Copyright (c) 2014, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::DB methods from Java side. 
+ +#include +#include +#include +#include + +#include "include/org_rocksdb_BackupableDB.h" +#include "include/org_rocksdb_BackupableDBOptions.h" +#include "rocksjni/portal.h" +#include "utilities/backupable_db.h" + +/* + * Class: org_rocksdb_BackupableDB + * Method: open + * Signature: (JJ)V + */ +void Java_org_rocksdb_BackupableDB_open( + JNIEnv* env, jobject jbdb, jlong jdb_handle, jlong jopt_handle) { + auto db = reinterpret_cast(jdb_handle); + auto opt = reinterpret_cast(jopt_handle); + auto bdb = new rocksdb::BackupableDB(db, *opt); + + // as BackupableDB extends RocksDB on the java side, we can reuse + // the RocksDB portal here. + rocksdb::RocksDBJni::setHandle(env, jbdb, bdb); +} + +/* + * Class: org_rocksdb_BackupableDB + * Method: createNewBackup + * Signature: (JZ)V + */ +void Java_org_rocksdb_BackupableDB_createNewBackup( + JNIEnv* env, jobject jbdb, jlong jhandle, jboolean jflag) { + reinterpret_cast(jhandle)->CreateNewBackup(jflag); +} + +/////////////////////////////////////////////////////////////////////////// +// BackupDBOptions + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: newBackupableDBOptions + * Signature: (Ljava/lang/String;)V + */ +void Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions( + JNIEnv* env, jobject jobj, jstring jpath) { + const char* cpath = env->GetStringUTFChars(jpath, 0); + auto bopt = new rocksdb::BackupableDBOptions(cpath); + env->ReleaseStringUTFChars(jpath, cpath); + + rocksdb::BackupableDBOptionsJni::setHandle(env, jobj, bopt); +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: backupDir + * Signature: (J)Ljava/lang/String; + */ +jstring Java_org_rocksdb_BackupableDBOptions_backupDir( + JNIEnv* env, jobject jopt, jlong jhandle, jstring jpath) { + auto bopt = reinterpret_cast(jhandle); + return env->NewStringUTF(bopt->backup_dir.c_str()); +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: dispose + * Signature: (J)V + */ +void Java_org_rocksdb_BackupableDBOptions_dispose( 
+ JNIEnv* env, jobject jopt, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + assert(bopt); + delete bopt; + + rocksdb::BackupableDBOptionsJni::setHandle(env, jopt, nullptr); +} diff --git a/java/rocksjni/portal.h b/java/rocksjni/portal.h index 5b0524aec..847700c92 100644 --- a/java/rocksjni/portal.h +++ b/java/rocksjni/portal.h @@ -12,6 +12,7 @@ #include #include "rocksdb/db.h" +#include "utilities/backupable_db.h" namespace rocksdb { @@ -170,5 +171,38 @@ class WriteBatchJni { reinterpret_cast(wb)); } }; + +class BackupableDBOptionsJni { + public: + // Get the java class id of org.rocksdb.BackupableDBOptions. + static jclass getJClass(JNIEnv* env) { + static jclass jclazz = env->FindClass("org/rocksdb/BackupableDBOptions"); + assert(jclazz != nullptr); + return jclazz; + } + + // Get the field id of the member variable of org.rocksdb.BackupableDBOptions + // that stores the pointer to rocksdb::BackupableDBOptions + static jfieldID getHandleFieldID(JNIEnv* env) { + static jfieldID fid = env->GetFieldID( + getJClass(env), "nativeHandle_", "J"); + assert(fid != nullptr); + return fid; + } + + // Get the pointer to rocksdb::BackupableDBOptions + static rocksdb::BackupableDBOptions* getHandle(JNIEnv* env, jobject jobj) { + return reinterpret_cast( + env->GetLongField(jobj, getHandleFieldID(env))); + } + + // Pass the rocksdb::BackupableDBOptions pointer to the java side. + static void setHandle( + JNIEnv* env, jobject jobj, rocksdb::BackupableDBOptions* op) { + env->SetLongField( + jobj, getHandleFieldID(env), + reinterpret_cast(op)); + } +}; } // namespace rocksdb #endif // JAVA_ROCKSJNI_PORTAL_H_ From 9b2a0939cf45afea9c83bdbf9d6ce6f4bf18c59d Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang Date: Fri, 18 Apr 2014 01:14:29 -0700 Subject: [PATCH 09/10] [Java] Add Java bindings for 30 options for rocksdb::DBOptions. Summary: 1. Add Java bindings for 30 options for rocksdb::DBOptions. 2. Add org.rocksdb.test.OptionsTest 3. 
Codes are semi-auto generated, JavaDocs are manually polished. Test Plan: make rocksdbjava make jtest Reviewers: haobo, ankgup87, sdong, dhruba Reviewed By: dhruba CC: leveldb Differential Revision: https://reviews.facebook.net/D18015 --- java/Makefile | 1 + java/org/rocksdb/Options.java | 938 ++++++++++++++++++++++++- java/org/rocksdb/test/OptionsTest.java | 201 ++++++ java/rocksjni/options.cc | 611 +++++++++++++++- 4 files changed, 1713 insertions(+), 38 deletions(-) create mode 100644 java/org/rocksdb/test/OptionsTest.java diff --git a/java/Makefile b/java/Makefile index c4b80b3e9..af848c4dc 100644 --- a/java/Makefile +++ b/java/Makefile @@ -24,6 +24,7 @@ test: java javac org/rocksdb/test/*.java java -ea -Djava.library.path=.:../ -cp "$(ROCKSDB_JAR):.:./*" org.rocksdb.WriteBatchTest java -ea -Djava.library.path=.:../ -cp "$(ROCKSDB_JAR):.:./*" org.rocksdb.test.BackupableDBTest + java -ea -Djava.library.path=.:../ -cp "$(ROCKSDB_JAR):.:./*" org.rocksdb.test.OptionsTest db_bench: java javac org/rocksdb/benchmark/*.java diff --git a/java/org/rocksdb/Options.java b/java/org/rocksdb/Options.java index 256318eae..a98d7bee4 100644 --- a/java/org/rocksdb/Options.java +++ b/java/org/rocksdb/Options.java @@ -173,33 +173,6 @@ public class Options { return disableSeekCompaction(nativeHandle_); } - /* - * Maximum number of concurrent background jobs, submitted to - * the default LOW priority thread pool. - * Default: 1 - * - * @param maxBackgroundCompactions the maximum number of concurrent - * background jobs. - * @return the instance of the current Options. - * @see RocksDB.open() - */ - public Options setMaxBackgroundCompactions(int maxBackgroundCompactions) { - assert(isInitialized()); - setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions); - return this; - } - - /* - * Returns maximum number of background concurrent jobs. - * - * @return maximum number of background concurrent jobs. 
- * @see setMaxBackgroundCompactions - */ - public int maxBackgroundCompactions() { - assert(isInitialized()); - return maxBackgroundCompactions(nativeHandle_); - } - /** * Set the amount of cache in bytes that will be used by RocksDB. * If cacheSize is non-positive, then cache will not be used. @@ -218,6 +191,914 @@ public class Options { return cacheSize_; } + /** + * If true, an error will be thrown during RocksDB.open() if the + * database already exists. + * + * @return if true, an error is raised when the specified database + * already exists before open. + */ + public boolean errorIfExists() { + assert(isInitialized()); + return errorIfExists(nativeHandle_); + } + private native boolean errorIfExists(long handle); + + /** + * If true, an error will be thrown during RocksDB.open() if the + * database already exists. + * Default: false + * + * @param errorIfExists if true, an exception will be thrown + * during RocksDB.open() if the database already exists. + * @return the reference to the current option. + * @see RocksDB.open() + */ + public Options setErrorIfExists(boolean errorIfExists) { + assert(isInitialized()); + setErrorIfExists(nativeHandle_, errorIfExists); + return this; + } + private native void setErrorIfExists(long handle, boolean errorIfExists); + + /** + * If true, the implementation will do aggressive checking of the + * data it is processing and will stop early if it detects any + * errors. This may have unforeseen ramifications: for example, a + * corruption of one DB entry may cause a large number of entries to + * become unreadable or for the entire DB to become unopenable. + * If any of the writes to the database fails (Put, Delete, Merge, Write), + * the database will switch to read-only mode and fail all other + * Write operations. + * + * @return a boolean indicating whether paranoid-check is on. 
+ */ + public boolean paranoidChecks() { + assert(isInitialized()); + return paranoidChecks(nativeHandle_); + } + private native boolean paranoidChecks(long handle); + + /** + * If true, the implementation will do aggressive checking of the + * data it is processing and will stop early if it detects any + * errors. This may have unforeseen ramifications: for example, a + * corruption of one DB entry may cause a large number of entries to + * become unreadable or for the entire DB to become unopenable. + * If any of the writes to the database fails (Put, Delete, Merge, Write), + * the database will switch to read-only mode and fail all other + * Write operations. + * Default: true + * + * @param paranoidChecks a flag to indicate whether paranoid-check + * is on. + * @return the reference to the current option. + */ + public Options setParanoidChecks(boolean paranoidChecks) { + assert(isInitialized()); + setParanoidChecks(nativeHandle_, paranoidChecks); + return this; + } + private native void setParanoidChecks( + long handle, boolean paranoidChecks); + + /** + * Number of open files that can be used by the DB. You may need to + * increase this if your database has a large working set. Value -1 means + * files opened are always kept open. You can estimate number of files based + * on target_file_size_base and target_file_size_multiplier for level-based + * compaction. For universal-style compaction, you can usually set it to -1. + * + * @return the maximum number of open files. + */ + public int maxOpenFiles() { + assert(isInitialized()); + return maxOpenFiles(nativeHandle_); + } + private native int maxOpenFiles(long handle); + + /** + * Number of open files that can be used by the DB. You may need to + * increase this if your database has a large working set. Value -1 means + * files opened are always kept open. You can estimate number of files based + * on target_file_size_base and target_file_size_multiplier for level-based + * compaction. 
For universal-style compaction, you can usually set it to -1. + * Default: 5000 + * + * @param maxOpenFiles the maximum number of open files. + * @return the reference to the current option. + */ + public Options setMaxOpenFiles(int maxOpenFiles) { + assert(isInitialized()); + setMaxOpenFiles(nativeHandle_, maxOpenFiles); + return this; + } + private native void setMaxOpenFiles(long handle, int maxOpenFiles); + + /** + * If true, then the contents of data files are not synced + * to stable storage. Their contents remain in the OS buffers till the + * OS decides to flush them. This option is good for bulk-loading + * of data. Once the bulk-loading is complete, please issue a + * sync to the OS to flush all dirty buffers to stable storage. + * + * @return if true, then data-sync is disabled. + */ + public boolean disableDataSync() { + assert(isInitialized()); + return disableDataSync(nativeHandle_); + } + private native boolean disableDataSync(long handle); + + /** + * If true, then the contents of data files are not synced + * to stable storage. Their contents remain in the OS buffers till the + * OS decides to flush them. This option is good for bulk-loading + * of data. Once the bulk-loading is complete, please issue a + * sync to the OS to flush all dirty buffers to stable storage. + * Default: false + * + * @param disableDataSync a boolean flag to specify whether to + * disable data sync. + * @return the reference to the current option. + */ + public Options setDisableDataSync(boolean disableDataSync) { + assert(isInitialized()); + setDisableDataSync(nativeHandle_, disableDataSync); + return this; + } + private native void setDisableDataSync(long handle, boolean disableDataSync); + + /** + * If true, then every store to stable storage will issue a fsync. + * If false, then every store to stable storage will issue a fdatasync. + * This parameter should be set to true while storing data to + * filesystem like ext3 that can lose files after a reboot. 
+ * + * @return true if fsync is used. + */ + public boolean useFsync() { + assert(isInitialized()); + return useFsync(nativeHandle_); + } + private native boolean useFsync(long handle); + + /** + * If true, then every store to stable storage will issue a fsync. + * If false, then every store to stable storage will issue a fdatasync. + * This parameter should be set to true while storing data to + * filesystem like ext3 that can lose files after a reboot. + * Default: false + * + * @param useFsync a boolean flag to specify whether to use fsync + * @return the reference to the current option. + */ + public Options setUseFsync(boolean useFsync) { + assert(isInitialized()); + setUseFsync(nativeHandle_, useFsync); + return this; + } + private native void setUseFsync(long handle, boolean useFsync); + + /** + * The time interval in seconds between each two consecutive stats logs. + * This number controls how often a new scribe log about + * db deploy stats is written out. + * -1 indicates no logging at all. + * + * @return the time interval in seconds between each two consecutive + * stats logs. + */ + public int dbStatsLogInterval() { + assert(isInitialized()); + return dbStatsLogInterval(nativeHandle_); + } + private native int dbStatsLogInterval(long handle); + + /** + * The time interval in seconds between each two consecutive stats logs. + * This number controls how often a new scribe log about + * db deploy stats is written out. + * -1 indicates no logging at all. + * Default value is 1800 (half an hour). + * + * @param dbStatsLogInterval the time interval in seconds between each + * two consecutive stats logs. + * @return the reference to the current option. + */ + public Options setDbStatsLogInterval(int dbStatsLogInterval) { + assert(isInitialized()); + setDbStatsLogInterval(nativeHandle_, dbStatsLogInterval); + return this; + } + private native void setDbStatsLogInterval( + long handle, int dbStatsLogInterval); + + /** + * Returns the directory of info log. 
+ * + * If it is empty, the log files will be in the same dir as data. + * If it is non empty, the log files will be in the specified dir, + * and the db data dir's absolute path will be used as the log file + * name's prefix. + * + * @return the path to the info log directory + */ + public String dbLogDir() { + assert(isInitialized()); + return dbLogDir(nativeHandle_); + } + private native String dbLogDir(long handle); + + /** + * This specifies the info LOG dir. + * If it is empty, the log files will be in the same dir as data. + * If it is non empty, the log files will be in the specified dir, + * and the db data dir's absolute path will be used as the log file + * name's prefix. + * + * @param dbLogDir the path to the info log directory + * @return the reference to the current option. + */ + public Options setDbLogDir(String dbLogDir) { + assert(isInitialized()); + setDbLogDir(nativeHandle_, dbLogDir); + return this; + } + private native void setDbLogDir(long handle, String dbLogDir); + + /** + * Returns the path to the write-ahead-logs (WAL) directory. + * + * If it is empty, the log files will be in the same dir as data, + * dbname is used as the data dir by default + * If it is non empty, the log files will be in kept the specified dir. + * When destroying the db, + * all log files in wal_dir and the dir itself is deleted + * + * @return the path to the write-ahead-logs (WAL) directory. + */ + public String walDir() { + assert(isInitialized()); + return walDir(nativeHandle_); + } + private native String walDir(long handle); + + /** + * This specifies the absolute dir path for write-ahead logs (WAL). + * If it is empty, the log files will be in the same dir as data, + * dbname is used as the data dir by default + * If it is non empty, the log files will be in kept the specified dir. + * When destroying the db, + * all log files in wal_dir and the dir itself is deleted + * + * @param walDir the path to the write-ahead-log directory. 
+ * @return the reference to the current option. + */ + public Options setWalDir(String walDir) { + assert(isInitialized()); + setWalDir(nativeHandle_, walDir); + return this; + } + private native void setWalDir(long handle, String walDir); + + /** + * The periodicity when obsolete files get deleted. The default + * value is 6 hours. The files that get out of scope by compaction + * process will still get automatically delete on every compaction, + * regardless of this setting + * + * @return the time interval in micros when obsolete files will be deleted. + */ + public long deleteObsoleteFilesPeriodMicros() { + assert(isInitialized()); + return deleteObsoleteFilesPeriodMicros(nativeHandle_); + } + private native long deleteObsoleteFilesPeriodMicros(long handle); + + /** + * The periodicity when obsolete files get deleted. The default + * value is 6 hours. The files that get out of scope by compaction + * process will still get automatically delete on every compaction, + * regardless of this setting + * + * @param micros the time interval in micros + * @return the reference to the current option. + */ + public Options setDeleteObsoleteFilesPeriodMicros(long micros) { + assert(isInitialized()); + setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros); + return this; + } + private native void setDeleteObsoleteFilesPeriodMicros( + long handle, long micros); + + /** + * Returns the maximum number of concurrent background compaction jobs, + * submitted to the default LOW priority thread pool. + * When increasing this number, we may also want to consider increasing + * number of threads in LOW priority thread pool. + * Default: 1 + * + * @return the maximum number of concurrent background compaction jobs. 
+ * @see Env.setBackgroundThreads() + */ + public int maxBackgroundCompactions() { + assert(isInitialized()); + return maxBackgroundCompactions(nativeHandle_); + } + private native int maxBackgroundCompactions(long handle); + + /** + * Specifies the maximum number of concurrent background compaction jobs, + * submitted to the default LOW priority thread pool. + * If you're increasing this, also consider increasing number of threads in + * LOW priority thread pool. For more information, see + * Default: 1 + * + * @param maxBackgroundCompactions the maximum number of background + * compaction jobs. + * @return the reference to the current option. + * + * @see Env.setBackgroundThreads() + * @see maxBackgroundFlushes() + */ + public Options setMaxBackgroundCompactions(int maxBackgroundCompactions) { + assert(isInitialized()); + setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions); + return this; + } + private native void setMaxBackgroundCompactions( + long handle, int maxBackgroundCompactions); + + /** + * Returns the maximum number of concurrent background flush jobs. + * If you're increasing this, also consider increasing number of threads in + * HIGH priority thread pool. For more information, see + * Default: 1 + * + * @return the maximum number of concurrent background flush jobs. + * @see Env.setBackgroundThreads() + */ + public int maxBackgroundFlushes() { + assert(isInitialized()); + return maxBackgroundFlushes(nativeHandle_); + } + private native int maxBackgroundFlushes(long handle); + + /** + * Specifies the maximum number of concurrent background flush jobs. + * If you're increasing this, also consider increasing number of threads in + * HIGH priority thread pool. For more information, see + * Default: 1 + * + * @param maxBackgroundFlushes + * @return the reference to the current option. 
+ * + * @see Env.setBackgroundThreads() + * @see maxBackgroundCompactions() + */ + public Options setMaxBackgroundFlushes(int maxBackgroundFlushes) { + assert(isInitialized()); + setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes); + return this; + } + private native void setMaxBackgroundFlushes( + long handle, int maxBackgroundFlushes); + + /** + * Returns the maximum size of a info log file. If the current log file + * is larger than this size, a new info log file will be created. + * If 0, all logs will be written to one log file. + * + * @return the maximum size of the info log file. + */ + public long maxLogFileSize() { + assert(isInitialized()); + return maxLogFileSize(nativeHandle_); + } + private native long maxLogFileSize(long handle); + + /** + * Specifies the maximum size of a info log file. If the current log file + * is larger than `max_log_file_size`, a new info log file will + * be created. + * If 0, all logs will be written to one log file. + * + * @param maxLogFileSize the maximum size of a info log file. + * @return the reference to the current option. + */ + public Options setMaxLogFileSize(long maxLogFileSize) { + assert(isInitialized()); + setMaxLogFileSize(nativeHandle_, maxLogFileSize); + return this; + } + private native void setMaxLogFileSize(long handle, long maxLogFileSize); + + /** + * Returns the time interval for the info log file to roll (in seconds). + * If specified with non-zero value, log file will be rolled + * if it has been active longer than `log_file_time_to_roll`. + * Default: 0 (disabled) + * + * @return the time interval in seconds. + */ + public long logFileTimeToRoll() { + assert(isInitialized()); + return logFileTimeToRoll(nativeHandle_); + } + private native long logFileTimeToRoll(long handle); + + /** + * Specifies the time interval for the info log file to roll (in seconds). + * If specified with non-zero value, log file will be rolled + * if it has been active longer than `log_file_time_to_roll`. 
+ * Default: 0 (disabled) + * + * @param logFileTimeToRoll the time interval in seconds. + * @return the reference to the current option. + */ + public Options setLogFileTimeToRoll(long logFileTimeToRoll) { + assert(isInitialized()); + setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll); + return this; + } + private native void setLogFileTimeToRoll( + long handle, long logFileTimeToRoll); + + /** + * Returns the maximum number of info log files to be kept. + * Default: 1000 + * + * @return the maximum number of info log files to be kept. + */ + public long keepLogFileNum() { + assert(isInitialized()); + return keepLogFileNum(nativeHandle_); + } + private native long keepLogFileNum(long handle); + + /** + * Specifies the maximum number of info log files to be kept. + * Default: 1000 + * + * @param keepLogFileNum the maximum number of info log files to be kept. + * @return the reference to the current option. + */ + public Options setKeepLogFileNum(long keepLogFileNum) { + assert(isInitialized()); + setKeepLogFileNum(nativeHandle_, keepLogFileNum); + return this; + } + private native void setKeepLogFileNum(long handle, long keepLogFileNum); + + /** + * Manifest file is rolled over on reaching this limit. + * The older manifest file will be deleted. + * The default value is MAX_INT so that roll-over does not take place. + * + * @return the size limit of a manifest file. + */ + public long maxManifestFileSize() { + assert(isInitialized()); + return maxManifestFileSize(nativeHandle_); + } + private native long maxManifestFileSize(long handle); + + /** + * Manifest file is rolled over on reaching this limit. + * The older manifest file will be deleted. + * The default value is MAX_INT so that roll-over does not take place. + * + * @param maxManifestFileSize the size limit of a manifest file. + * @return the reference to the current option. 
+ */ + public Options setMaxManifestFileSize(long maxManifestFileSize) { + assert(isInitialized()); + setMaxManifestFileSize(nativeHandle_, maxManifestFileSize); + return this; + } + private native void setMaxManifestFileSize( + long handle, long maxManifestFileSize); + + /** + * Number of shards used for table cache. + * + * @return the number of shards used for table cache. + */ + public int tableCacheNumshardbits() { + assert(isInitialized()); + return tableCacheNumshardbits(nativeHandle_); + } + private native int tableCacheNumshardbits(long handle); + + /** + * Number of shards used for table cache. + * + * @param tableCacheNumshardbits the number of shards + * @return the reference to the current option. + */ + public Options setTableCacheNumshardbits(int tableCacheNumshardbits) { + assert(isInitialized()); + setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits); + return this; + } + private native void setTableCacheNumshardbits( + long handle, int tableCacheNumshardbits); + + /** + * During data eviction of table's LRU cache, it would be inefficient + * to strictly follow LRU because this piece of memory will not really + * be released unless its refcount falls to zero. Instead, make two + * passes: the first pass will release items with refcount = 1, + * and if not enough space releases after scanning the number of + * elements specified by this parameter, we will remove items in LRU + * order. + * + * @return scan count limit + */ + public int tableCacheRemoveScanCountLimit() { + assert(isInitialized()); + return tableCacheRemoveScanCountLimit(nativeHandle_); + } + private native int tableCacheRemoveScanCountLimit(long handle); + + /** + * During data eviction of table's LRU cache, it would be inefficient + * to strictly follow LRU because this piece of memory will not really + * be released unless its refcount falls to zero. 
Instead, make two + * passes: the first pass will release items with refcount = 1, + * and if not enough space releases after scanning the number of + * elements specified by this parameter, we will remove items in LRU + * order. + * + * @param limit scan count limit + * @return the reference to the current option. + */ + public Options setTableCacheRemoveScanCountLimit(int limit) { + assert(isInitialized()); + setTableCacheRemoveScanCountLimit(nativeHandle_, limit); + return this; + } + private native void setTableCacheRemoveScanCountLimit( + long handle, int limit); + + /** + * The following two fields affect how archived logs will be deleted. + * 1. If both set to 0, logs will be deleted asap and will not get into + * the archive. + * 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, + * WAL files will be checked every 10 min and if total size is greater + * than WAL_size_limit_MB, they will be deleted starting with the + * earliest until size_limit is met. All empty files will be deleted. + * 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then + * WAL files will be checked every WAL_ttl_seconds / 2 and those that + * are older than WAL_ttl_seconds will be deleted. + * 4. If both are not 0, WAL files will be checked every 10 min and both + * checks will be performed with ttl being first. + * + * @return the wal-ttl seconds + */ + public long walTtlSeconds() { + assert(isInitialized()); + return walTtlSeconds(nativeHandle_); + } + private native long walTtlSeconds(long handle); + + /** + * The following two fields affect how archived logs will be deleted. + * 1. If both set to 0, logs will be deleted asap and will not get into + * the archive. + * 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, + * WAL files will be checked every 10 min and if total size is greater + * than WAL_size_limit_MB, they will be deleted starting with the + * earliest until size_limit is met. All empty files will be deleted. + * 3. 
If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then + * WAL files will be checked every WAL_ttl_seconds / 2 and those that + * are older than WAL_ttl_seconds will be deleted. + * 4. If both are not 0, WAL files will be checked every 10 min and both + * checks will be performed with ttl being first. + * + * @param walTtlSeconds the ttl seconds + * @return the reference to the current option. + */ + public Options setWALTtlSeconds(long walTtlSeconds) { + assert(isInitialized()); + setWALTtlSeconds(nativeHandle_, walTtlSeconds); + return this; + } + private native void setWALTtlSeconds(long handle, long walTtlSeconds); + + /** + * Number of bytes to preallocate (via fallocate) the manifest + * files. Default is 4mb, which is reasonable to reduce random IO + * as well as prevent overallocation for mounts that preallocate + * large amounts of data (such as xfs's allocsize option). + * + * @return size in bytes. + */ + public long manifestPreallocationSize() { + assert(isInitialized()); + return manifestPreallocationSize(nativeHandle_); + } + private native long manifestPreallocationSize(long handle); + + /** + * Number of bytes to preallocate (via fallocate) the manifest + * files. Default is 4mb, which is reasonable to reduce random IO + * as well as prevent overallocation for mounts that preallocate + * large amounts of data (such as xfs's allocsize option). + * + * @param size the size in bytes + * @return the reference to the current option. + */ + public Options setManifestPreallocationSize(long size) { + assert(isInitialized()); + setManifestPreallocationSize(nativeHandle_, size); + return this; + } + private native void setManifestPreallocationSize( + long handle, long size); + + /** + * Data being read from file storage may be buffered in the OS + * Default: true + * + * @return if true, then OS buffering is allowed. 
+ */ + public boolean allowOsBuffer() { + assert(isInitialized()); + return allowOsBuffer(nativeHandle_); + } + private native boolean allowOsBuffer(long handle); + + /** + * Data being read from file storage may be buffered in the OS + * Default: true + * + * @param allowOsBuffer if true, then OS buffering is allowed. + * @return the reference to the current option. + */ + public Options setAllowOsBuffer(boolean allowOsBuffer) { + assert(isInitialized()); + setAllowOsBuffer(nativeHandle_, allowOsBuffer); + return this; + } + private native void setAllowOsBuffer( + long handle, boolean allowOsBuffer); + + /** + * Allow the OS to mmap file for reading sst tables. + * Default: false + * + * @return true if mmap reads are allowed. + */ + public boolean allowMmapReads() { + assert(isInitialized()); + return allowMmapReads(nativeHandle_); + } + private native boolean allowMmapReads(long handle); + + /** + * Allow the OS to mmap file for reading sst tables. + * Default: false + * + * @param allowMmapReads true if mmap reads are allowed. + * @return the reference to the current option. + */ + public Options setAllowMmapReads(boolean allowMmapReads) { + assert(isInitialized()); + setAllowMmapReads(nativeHandle_, allowMmapReads); + return this; + } + private native void setAllowMmapReads( + long handle, boolean allowMmapReads); + + /** + * Allow the OS to mmap file for writing. Default: false + * + * @return true if mmap writes are allowed. + */ + public boolean allowMmapWrites() { + assert(isInitialized()); + return allowMmapWrites(nativeHandle_); + } + private native boolean allowMmapWrites(long handle); + + /** + * Allow the OS to mmap file for writing. Default: false + * + * @param allowMmapWrites true if mmap writes are allowed. + * @return the reference to the current option. 
+ */ + public Options setAllowMmapWrites(boolean allowMmapWrites) { + assert(isInitialized()); + setAllowMmapWrites(nativeHandle_, allowMmapWrites); + return this; + } + private native void setAllowMmapWrites( + long handle, boolean allowMmapWrites); + + /** + * Disable child process inherit open files. Default: true + * + * @return true if child process inheriting open files is disabled. + */ + public boolean isFdCloseOnExec() { + assert(isInitialized()); + return isFdCloseOnExec(nativeHandle_); + } + private native boolean isFdCloseOnExec(long handle); + + /** + * Disable child process inherit open files. Default: true + * + * @param isFdCloseOnExec true if child process inheriting open + * files is disabled. + * @return the reference to the current option. + */ + public Options setIsFdCloseOnExec(boolean isFdCloseOnExec) { + assert(isInitialized()); + setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec); + return this; + } + private native void setIsFdCloseOnExec( + long handle, boolean isFdCloseOnExec); + + /** + * Skip log corruption error on recovery (If client is ok with + * losing most recent changes) + * Default: false + * + * @return true if log corruption errors are skipped during recovery. + */ + public boolean skipLogErrorOnRecovery() { + assert(isInitialized()); + return skipLogErrorOnRecovery(nativeHandle_); + } + private native boolean skipLogErrorOnRecovery(long handle); + + /** + * Skip log corruption error on recovery (If client is ok with + * losing most recent changes) + * Default: false + * + * @param skip true if log corruption errors are skipped during recovery. + * @return the reference to the current option. 
+ */ + public Options setSkipLogErrorOnRecovery(boolean skip) { + assert(isInitialized()); + setSkipLogErrorOnRecovery(nativeHandle_, skip); + return this; + } + private native void setSkipLogErrorOnRecovery( + long handle, boolean skip); + + /** + * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + * Default: 3600 (1 hour) + * + * @return time interval in seconds. + */ + public int statsDumpPeriodSec() { + assert(isInitialized()); + return statsDumpPeriodSec(nativeHandle_); + } + private native int statsDumpPeriodSec(long handle); + + /** + * if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + * Default: 3600 (1 hour) + * + * @param statsDumpPeriodSec time interval in seconds. + * @return the reference to the current option. + */ + public Options setStatsDumpPeriodSec(int statsDumpPeriodSec) { + assert(isInitialized()); + setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec); + return this; + } + private native void setStatsDumpPeriodSec( + long handle, int statsDumpPeriodSec); + + /** + * If set true, will hint the underlying file system that the file + * access pattern is random, when a sst file is opened. + * Default: true + * + * @return true if hinting random access is on. + */ + public boolean adviseRandomOnOpen() { + return adviseRandomOnOpen(nativeHandle_); + } + private native boolean adviseRandomOnOpen(long handle); + + /** + * If set true, will hint the underlying file system that the file + * access pattern is random, when a sst file is opened. + * Default: true + * + * @param adviseRandomOnOpen true if hinting random access is on. + * @return the reference to the current option. 
+ */ + public Options setAdviseRandomOnOpen(boolean adviseRandomOnOpen) { + assert(isInitialized()); + setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen); + return this; + } + private native void setAdviseRandomOnOpen( + long handle, boolean adviseRandomOnOpen); + + /** + * Use adaptive mutex, which spins in the user space before resorting + * to kernel. This could reduce context switch when the mutex is not + * heavily contended. However, if the mutex is hot, we could end up + * wasting spin time. + * Default: false + * + * @return true if adaptive mutex is used. + */ + public boolean useAdaptiveMutex() { + assert(isInitialized()); + return useAdaptiveMutex(nativeHandle_); + } + private native boolean useAdaptiveMutex(long handle); + + /** + * Use adaptive mutex, which spins in the user space before resorting + * to kernel. This could reduce context switch when the mutex is not + * heavily contended. However, if the mutex is hot, we could end up + * wasting spin time. + * Default: false + * + * @param useAdaptiveMutex true if adaptive mutex is used. + * @return the reference to the current option. + */ + public Options setUseAdaptiveMutex(boolean useAdaptiveMutex) { + assert(isInitialized()); + setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex); + return this; + } + private native void setUseAdaptiveMutex( + long handle, boolean useAdaptiveMutex); + + /** + * Allows OS to incrementally sync files to disk while they are being + * written, asynchronously, in the background. + * Issue one request for every bytes_per_sync written. 0 turns it off. + * Default: 0 + * + * @return size in bytes + */ + public long bytesPerSync() { + return bytesPerSync(nativeHandle_); + } + private native long bytesPerSync(long handle); + + /** + * Allows OS to incrementally sync files to disk while they are being + * written, asynchronously, in the background. + * Issue one request for every bytes_per_sync written. 0 turns it off. 
+ * Default: 0 + * + * @param bytesPerSync size in bytes + * @return the reference to the current option. + */ + public Options setBytesPerSync(long bytesPerSync) { + assert(isInitialized()); + setBytesPerSync(nativeHandle_, bytesPerSync); + return this; + } + private native void setBytesPerSync( + long handle, long bytesPerSync); + + /** + * Allow RocksDB to use thread local storage to optimize performance. + * Default: true + * + * @return true if thread-local storage is allowed + */ + public boolean allowThreadLocal() { + assert(isInitialized()); + return allowThreadLocal(nativeHandle_); + } + private native boolean allowThreadLocal(long handle); + + /** + * Allow RocksDB to use thread local storage to optimize performance. + * Default: true + * + * @param allowThreadLocal true if thread-local storage is allowed. + * @return the reference to the current option. + */ + public Options setAllowThreadLocal(boolean allowThreadLocal) { + assert(isInitialized()); + setAllowThreadLocal(nativeHandle_, allowThreadLocal); + return this; + } + private native void setAllowThreadLocal( + long handle, boolean allowThreadLocal); + /** * Release the memory allocated for the current instance * in the c++ side. @@ -250,9 +1131,6 @@ public class Options { private native void setDisableSeekCompaction( long handle, boolean disableSeekCompaction); private native boolean disableSeekCompaction(long handle); - private native void setMaxBackgroundCompactions( - long handle, int maxBackgroundCompactions); - private native int maxBackgroundCompactions(long handle); long nativeHandle_; long cacheSize_; diff --git a/java/org/rocksdb/test/OptionsTest.java b/java/org/rocksdb/test/OptionsTest.java new file mode 100644 index 000000000..7c13db6d1 --- /dev/null +++ b/java/org/rocksdb/test/OptionsTest.java @@ -0,0 +1,201 @@ +// Copyright (c) 2014, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb.test; + +import java.util.Random; +import org.rocksdb.Options; + +public class OptionsTest { + static { + System.loadLibrary("rocksdbjni"); + } + public static void main(String[] args) { + Options opt = new Options(); + Random rand = new Random(); + { // CreateIfMissing test + boolean boolValue = rand.nextBoolean(); + opt.setCreateIfMissing(boolValue); + assert(opt.createIfMissing() == boolValue); + } + + { // ErrorIfExists test + boolean boolValue = rand.nextBoolean(); + opt.setErrorIfExists(boolValue); + assert(opt.errorIfExists() == boolValue); + } + + { // ParanoidChecks test + boolean boolValue = rand.nextBoolean(); + opt.setParanoidChecks(boolValue); + assert(opt.paranoidChecks() == boolValue); + } + + { // MaxOpenFiles test + int intValue = rand.nextInt(); + opt.setMaxOpenFiles(intValue); + assert(opt.maxOpenFiles() == intValue); + } + + { // DisableDataSync test + boolean boolValue = rand.nextBoolean(); + opt.setDisableDataSync(boolValue); + assert(opt.disableDataSync() == boolValue); + } + + { // UseFsync test + boolean boolValue = rand.nextBoolean(); + opt.setUseFsync(boolValue); + assert(opt.useFsync() == boolValue); + } + + { // DbStatsLogInterval test + int intValue = rand.nextInt(); + opt.setDbStatsLogInterval(intValue); + assert(opt.dbStatsLogInterval() == intValue); + } + + { // DbLogDir test + String str = "path/to/DbLogDir"; + opt.setDbLogDir(str); + assert(opt.dbLogDir().equals(str)); + } + + { // WalDir test + String str = "path/to/WalDir"; + opt.setWalDir(str); + assert(opt.walDir().equals(str)); + } + + { // DeleteObsoleteFilesPeriodMicros test + long longValue = rand.nextLong(); + opt.setDeleteObsoleteFilesPeriodMicros(longValue); + assert(opt.deleteObsoleteFilesPeriodMicros() == longValue); + 
} + + { // MaxBackgroundCompactions test + int intValue = rand.nextInt(); + opt.setMaxBackgroundCompactions(intValue); + assert(opt.maxBackgroundCompactions() == intValue); + } + + { // MaxBackgroundFlushes test + int intValue = rand.nextInt(); + opt.setMaxBackgroundFlushes(intValue); + assert(opt.maxBackgroundFlushes() == intValue); + } + + { // MaxLogFileSize test + long longValue = rand.nextLong(); + opt.setMaxLogFileSize(longValue); + assert(opt.maxLogFileSize() == longValue); + } + + { // LogFileTimeToRoll test + long longValue = rand.nextLong(); + opt.setLogFileTimeToRoll(longValue); + assert(opt.logFileTimeToRoll() == longValue); + } + + { // KeepLogFileNum test + long longValue = rand.nextLong(); + opt.setKeepLogFileNum(longValue); + assert(opt.keepLogFileNum() == longValue); + } + + { // MaxManifestFileSize test + long longValue = rand.nextLong(); + opt.setMaxManifestFileSize(longValue); + assert(opt.maxManifestFileSize() == longValue); + } + + { // TableCacheNumshardbits test + int intValue = rand.nextInt(); + opt.setTableCacheNumshardbits(intValue); + assert(opt.tableCacheNumshardbits() == intValue); + } + + { // TableCacheRemoveScanCountLimit test + int intValue = rand.nextInt(); + opt.setTableCacheRemoveScanCountLimit(intValue); + assert(opt.tableCacheRemoveScanCountLimit() == intValue); + } + + { // WALTtlSeconds test + long longValue = rand.nextLong(); + opt.setWALTtlSeconds(longValue); + assert(opt.walTtlSeconds() == longValue); + } + + { // ManifestPreallocationSize test + long longValue = rand.nextLong(); + opt.setManifestPreallocationSize(longValue); + assert(opt.manifestPreallocationSize() == longValue); + } + + { // AllowOsBuffer test + boolean boolValue = rand.nextBoolean(); + opt.setAllowOsBuffer(boolValue); + assert(opt.allowOsBuffer() == boolValue); + } + + { // AllowMmapReads test + boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapReads(boolValue); + assert(opt.allowMmapReads() == boolValue); + } + + { // AllowMmapWrites test + 
boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapWrites(boolValue); + assert(opt.allowMmapWrites() == boolValue); + } + + { // IsFdCloseOnExec test + boolean boolValue = rand.nextBoolean(); + opt.setIsFdCloseOnExec(boolValue); + assert(opt.isFdCloseOnExec() == boolValue); + } + + { // SkipLogErrorOnRecovery test + boolean boolValue = rand.nextBoolean(); + opt.setSkipLogErrorOnRecovery(boolValue); + assert(opt.skipLogErrorOnRecovery() == boolValue); + } + + { // StatsDumpPeriodSec test + int intValue = rand.nextInt(); + opt.setStatsDumpPeriodSec(intValue); + assert(opt.statsDumpPeriodSec() == intValue); + } + + { // AdviseRandomOnOpen test + boolean boolValue = rand.nextBoolean(); + opt.setAdviseRandomOnOpen(boolValue); + assert(opt.adviseRandomOnOpen() == boolValue); + } + + { // UseAdaptiveMutex test + boolean boolValue = rand.nextBoolean(); + opt.setUseAdaptiveMutex(boolValue); + assert(opt.useAdaptiveMutex() == boolValue); + } + + { // BytesPerSync test + long longValue = rand.nextLong(); + opt.setBytesPerSync(longValue); + assert(opt.bytesPerSync() == longValue); + } + + { // AllowThreadLocal test + boolean boolValue = rand.nextBoolean(); + opt.setAllowThreadLocal(boolValue); + assert(opt.allowThreadLocal() == boolValue); + } + + opt.dispose(); + System.out.println("Passed OptionsTest"); + } +} diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc index e9d3e48ed..bafd81530 100644 --- a/java/rocksjni/options.cc +++ b/java/rocksjni/options.cc @@ -147,14 +147,197 @@ jboolean Java_org_rocksdb_Options_disableSeekCompaction( /* * Class: org_rocksdb_Options - * Method: setMaxBackgroundCompactions + * Method: errorIfExists + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_errorIfExists( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->error_if_exists; +} + +/* + * Class: org_rocksdb_Options + * Method: setErrorIfExists + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setErrorIfExists( + JNIEnv* 
env, jobject jobj, jlong jhandle, jboolean error_if_exists) { + reinterpret_cast(jhandle)->error_if_exists = + static_cast(error_if_exists); +} + +/* + * Class: org_rocksdb_Options + * Method: paranoidChecks + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_paranoidChecks( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->paranoid_checks; +} + +/* + * Class: org_rocksdb_Options + * Method: setParanoidChecks + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setParanoidChecks( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean paranoid_checks) { + reinterpret_cast(jhandle)->paranoid_checks = + static_cast(paranoid_checks); +} + +/* + * Class: org_rocksdb_Options + * Method: maxOpenFiles + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxOpenFiles( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_open_files; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxOpenFiles * Signature: (JI)V */ -void Java_org_rocksdb_Options_setMaxBackgroundCompactions( - JNIEnv* env, jobject jobj, jlong jhandle, - jint jmax_background_compactions) { - reinterpret_cast(jhandle)->max_background_compactions = - jmax_background_compactions; +void Java_org_rocksdb_Options_setMaxOpenFiles( + JNIEnv* env, jobject jobj, jlong jhandle, jint max_open_files) { + reinterpret_cast(jhandle)->max_open_files = + static_cast(max_open_files); +} + +/* + * Class: org_rocksdb_Options + * Method: disableDataSync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_disableDataSync( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->disableDataSync; +} + +/* + * Class: org_rocksdb_Options + * Method: setDisableDataSync + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setDisableDataSync( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean disableDataSync) { + reinterpret_cast(jhandle)->disableDataSync = + static_cast(disableDataSync); +} + +/* + * Class: 
org_rocksdb_Options + * Method: useFsync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_useFsync( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->use_fsync; +} + +/* + * Class: org_rocksdb_Options + * Method: setUseFsync + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setUseFsync( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_fsync) { + reinterpret_cast(jhandle)->use_fsync = + static_cast(use_fsync); +} + +/* + * Class: org_rocksdb_Options + * Method: dbStatsLogInterval + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_dbStatsLogInterval( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->db_stats_log_interval; +} + +/* + * Class: org_rocksdb_Options + * Method: setDbStatsLogInterval + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setDbStatsLogInterval( + JNIEnv* env, jobject jobj, jlong jhandle, jint db_stats_log_interval) { + reinterpret_cast(jhandle)->db_stats_log_interval = + static_cast(db_stats_log_interval); +} + +/* + * Class: org_rocksdb_Options + * Method: dbLogDir + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_Options_dbLogDir( + JNIEnv* env, jobject jobj, jlong jhandle) { + return env->NewStringUTF( + reinterpret_cast(jhandle)->db_log_dir.c_str()); +} + +/* + * Class: org_rocksdb_Options + * Method: setDbLogDir + * Signature: (JLjava/lang/String)V + */ +void Java_org_rocksdb_Options_setDbLogDir( + JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) { + const char* log_dir = env->GetStringUTFChars(jdb_log_dir, 0); + reinterpret_cast(jhandle)->db_log_dir.assign(log_dir); + env->ReleaseStringUTFChars(jdb_log_dir, log_dir); +} + +/* + * Class: org_rocksdb_Options + * Method: walDir + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_Options_walDir( + JNIEnv* env, jobject jobj, jlong jhandle) { + return env->NewStringUTF( + reinterpret_cast(jhandle)->wal_dir.c_str()); +} + +/* + * Class: 
org_rocksdb_Options + * Method: setWalDir + * Signature: (JLjava/lang/String)V + */ +void Java_org_rocksdb_Options_setWalDir( + JNIEnv* env, jobject jobj, jlong jhandle, jstring jwal_dir) { + const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0); + reinterpret_cast(jhandle)->wal_dir.assign(wal_dir); + env->ReleaseStringUTFChars(jwal_dir, wal_dir); +} + +/* + * Class: org_rocksdb_Options + * Method: deleteObsoleteFilesPeriodMicros + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle) + ->delete_obsolete_files_period_micros; +} + +/* + * Class: org_rocksdb_Options + * Method: setDeleteObsoleteFilesPeriodMicros + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setDeleteObsoleteFilesPeriodMicros( + JNIEnv* env, jobject jobj, jlong jhandle, jlong micros) { + reinterpret_cast(jhandle) + ->delete_obsolete_files_period_micros = + static_cast(micros); } /* @@ -164,10 +347,422 @@ void Java_org_rocksdb_Options_setMaxBackgroundCompactions( */ jint Java_org_rocksdb_Options_maxBackgroundCompactions( JNIEnv* env, jobject jobj, jlong jhandle) { - return - reinterpret_cast(jhandle)->max_background_compactions; + return reinterpret_cast( + jhandle)->max_background_compactions; } +/* + * Class: org_rocksdb_Options + * Method: setMaxBackgroundCompactions + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxBackgroundCompactions( + JNIEnv* env, jobject jobj, jlong jhandle, jint max) { + reinterpret_cast(jhandle) + ->max_background_compactions = static_cast(max); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBackgroundFlushes + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxBackgroundFlushes( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_background_flushes; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBackgroundFlushes + * Signature: (JI)V + */ +void 
Java_org_rocksdb_Options_setMaxBackgroundFlushes( + JNIEnv* env, jobject jobj, jlong jhandle, jint max_background_flushes) { + reinterpret_cast(jhandle)->max_background_flushes = + static_cast(max_background_flushes); +} + +/* + * Class: org_rocksdb_Options + * Method: maxLogFileSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxLogFileSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_log_file_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxLogFileSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxLogFileSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong max_log_file_size) { + reinterpret_cast(jhandle)->max_log_file_size = + static_cast(max_log_file_size); +} + +/* + * Class: org_rocksdb_Options + * Method: logFileTimeToRoll + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_logFileTimeToRoll( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->log_file_time_to_roll; +} + +/* + * Class: org_rocksdb_Options + * Method: setLogFileTimeToRoll + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setLogFileTimeToRoll( + JNIEnv* env, jobject jobj, jlong jhandle, jlong log_file_time_to_roll) { + reinterpret_cast(jhandle)->log_file_time_to_roll = + static_cast(log_file_time_to_roll); +} + +/* + * Class: org_rocksdb_Options + * Method: keepLogFileNum + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_keepLogFileNum( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->keep_log_file_num; +} + +/* + * Class: org_rocksdb_Options + * Method: setKeepLogFileNum + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setKeepLogFileNum( + JNIEnv* env, jobject jobj, jlong jhandle, jlong keep_log_file_num) { + reinterpret_cast(jhandle)->keep_log_file_num = + static_cast(keep_log_file_num); +} + +/* + * Class: org_rocksdb_Options + * Method: maxManifestFileSize + * Signature: (J)J + */ +jlong 
Java_org_rocksdb_Options_maxManifestFileSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_manifest_file_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxManifestFileSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxManifestFileSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong max_manifest_file_size) { + reinterpret_cast(jhandle)->max_manifest_file_size = + static_cast(max_manifest_file_size); +} + +/* + * Class: org_rocksdb_Options + * Method: tableCacheNumshardbits + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_tableCacheNumshardbits( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->table_cache_numshardbits; +} + +/* + * Class: org_rocksdb_Options + * Method: setTableCacheNumshardbits + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setTableCacheNumshardbits( + JNIEnv* env, jobject jobj, jlong jhandle, jint table_cache_numshardbits) { + reinterpret_cast(jhandle)->table_cache_numshardbits = + static_cast(table_cache_numshardbits); +} + +/* + * Class: org_rocksdb_Options + * Method: tableCacheRemoveScanCountLimit + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_tableCacheRemoveScanCountLimit( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->table_cache_remove_scan_count_limit; +} + +/* + * Class: org_rocksdb_Options + * Method: setTableCacheRemoveScanCountLimit + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setTableCacheRemoveScanCountLimit( + JNIEnv* env, jobject jobj, jlong jhandle, jint limit) { + reinterpret_cast( + jhandle)->table_cache_remove_scan_count_limit = static_cast(limit); +} + +/* + * Class: org_rocksdb_Options + * Method: walTtlSeconds + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_walTtlSeconds( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->WAL_ttl_seconds; +} + +/* + * Class: org_rocksdb_Options + * Method: 
setWALTtlSeconds + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setWALTtlSeconds( + JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_ttl_seconds) { + reinterpret_cast(jhandle)->WAL_ttl_seconds = + static_cast(WAL_ttl_seconds); +} + +/* + * Class: org_rocksdb_Options + * Method: manifestPreallocationSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_manifestPreallocationSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle) + ->manifest_preallocation_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setManifestPreallocationSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setManifestPreallocationSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong preallocation_size) { + reinterpret_cast(jhandle)->manifest_preallocation_size = + static_cast(preallocation_size); +} + +/* + * Class: org_rocksdb_Options + * Method: allowOsBuffer + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowOsBuffer( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->allow_os_buffer; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowOsBuffer + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowOsBuffer( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_os_buffer) { + reinterpret_cast(jhandle)->allow_os_buffer = + static_cast(allow_os_buffer); +} + +/* + * Class: org_rocksdb_Options + * Method: allowMmapReads + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowMmapReads( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->allow_mmap_reads; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowMmapReads + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowMmapReads( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_reads) { + reinterpret_cast(jhandle)->allow_mmap_reads = + static_cast(allow_mmap_reads); +} + +/* + * Class: org_rocksdb_Options + * Method: allowMmapWrites + * 
Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowMmapWrites( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->allow_mmap_writes; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowMmapWrites + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowMmapWrites( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_writes) { + reinterpret_cast(jhandle)->allow_mmap_writes = + static_cast(allow_mmap_writes); +} + +/* + * Class: org_rocksdb_Options + * Method: isFdCloseOnExec + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_isFdCloseOnExec( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->is_fd_close_on_exec; +} + +/* + * Class: org_rocksdb_Options + * Method: setIsFdCloseOnExec + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setIsFdCloseOnExec( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean is_fd_close_on_exec) { + reinterpret_cast(jhandle)->is_fd_close_on_exec = + static_cast(is_fd_close_on_exec); +} + +/* + * Class: org_rocksdb_Options + * Method: skipLogErrorOnRecovery + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_skipLogErrorOnRecovery( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle) + ->skip_log_error_on_recovery; +} + +/* + * Class: org_rocksdb_Options + * Method: setSkipLogErrorOnRecovery + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setSkipLogErrorOnRecovery( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean skip) { + reinterpret_cast(jhandle)->skip_log_error_on_recovery = + static_cast(skip); +} + +/* + * Class: org_rocksdb_Options + * Method: statsDumpPeriodSec + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_statsDumpPeriodSec( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->stats_dump_period_sec; +} + +/* + * Class: org_rocksdb_Options + * Method: setStatsDumpPeriodSec + * Signature: (JI)V + */ +void 
Java_org_rocksdb_Options_setStatsDumpPeriodSec( + JNIEnv* env, jobject jobj, jlong jhandle, jint stats_dump_period_sec) { + reinterpret_cast(jhandle)->stats_dump_period_sec = + static_cast(stats_dump_period_sec); +} + +/* + * Class: org_rocksdb_Options + * Method: adviseRandomOnOpen + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_adviseRandomOnOpen( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->advise_random_on_open; +} + +/* + * Class: org_rocksdb_Options + * Method: setAdviseRandomOnOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAdviseRandomOnOpen( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean advise_random_on_open) { + reinterpret_cast(jhandle)->advise_random_on_open = + static_cast(advise_random_on_open); +} + +/* + * Class: org_rocksdb_Options + * Method: useAdaptiveMutex + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_useAdaptiveMutex( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->use_adaptive_mutex; +} + +/* + * Class: org_rocksdb_Options + * Method: setUseAdaptiveMutex + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setUseAdaptiveMutex( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_adaptive_mutex) { + reinterpret_cast(jhandle)->use_adaptive_mutex = + static_cast(use_adaptive_mutex); +} + +/* + * Class: org_rocksdb_Options + * Method: bytesPerSync + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_bytesPerSync( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->bytes_per_sync; +} + +/* + * Class: org_rocksdb_Options + * Method: setBytesPerSync + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setBytesPerSync( + JNIEnv* env, jobject jobj, jlong jhandle, jlong bytes_per_sync) { + reinterpret_cast(jhandle)->bytes_per_sync = + static_cast(bytes_per_sync); +} + +/* + * Class: org_rocksdb_Options + * Method: allowThreadLocal + * Signature: (J)Z + */ +jboolean 
Java_org_rocksdb_Options_allowThreadLocal( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->allow_thread_local; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowThreadLocal + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowThreadLocal( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_thread_local) { + reinterpret_cast(jhandle)->allow_thread_local = + static_cast(allow_thread_local); +} ////////////////////////////////////////////////////////////////////////////// // WriteOptions From a74508955412fa65d9ed3ec4739b7a22d3c9e396 Mon Sep 17 00:00:00 2001 From: James Pearce Date: Fri, 18 Apr 2014 09:33:27 -0700 Subject: [PATCH 10/10] Added period This is a PR to test some tooling; please do not merge without talking to @jamesgpearce :) --- HISTORY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HISTORY.md b/HISTORY.md index 53e720351..831d3ccb1 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -24,7 +24,7 @@ * Added Env::GetThreadPoolQueueLen(), which returns the waiting queue length of thread pools * Added a command "checkconsistency" in ldb tool, which checks if file system state matches DB state (file existence and file sizes) -* Separate options related to block based table to a new struct BlockBasedTableOptions +* Separate options related to block based table to a new struct BlockBasedTableOptions. * WriteBatch has a new function Count() to return total size in the batch, and Data() now returns a reference instead of a copy * Add more counters to perf context. * Supports several more DB properties: compaction-pending, background-errors and cur-size-active-mem-table.