merge master to resolve merge conflicts
commit 2a4d6e7960

Makefile
@@ -154,7 +154,7 @@ TOOLS = \
 	options_test \
 	blob_store_bench

-PROGRAMS = db_bench signal_test table_reader_bench log_and_apply_bench cache_bench $(TOOLS)
+PROGRAMS = db_bench signal_test table_reader_bench log_and_apply_bench cache_bench perf_context_test $(TOOLS)

 # The library name is configurable since we are maintaining libraries of both
 # debug/release mode.
@@ -1,14 +1,14 @@
 #!/bin/sh
-if [ $# == 0 ]; then
+if [ "$#" = "0" ]; then
   echo "Usage: $0 major|minor|patch"
   exit 1
 fi
-if [ $1 = "major" ]; then
+if [ "$1" = "major" ]; then
   cat include/rocksdb/version.h | grep MAJOR | head -n1 | awk '{print $3}'
 fi
-if [ $1 = "minor" ]; then
+if [ "$1" = "minor" ]; then
   cat include/rocksdb/version.h | grep MINOR | head -n1 | awk '{print $3}'
 fi
-if [ $1 = "patch" ]; then
+if [ "$1" = "patch" ]; then
   cat include/rocksdb/version.h | grep PATCH | head -n1 | awk '{print $3}'
 fi
@@ -1614,7 +1614,7 @@ Status DBImpl::FlushMemTableToOutputFile(
   Status s = WriteLevel0Table(cfd, mutable_cf_options, mems, edit,
                               &file_number, log_buffer);

-  if (s.ok() && shutting_down_.Acquire_Load() && cfd->IsDropped()) {
+  if (s.ok() && (shutting_down_.Acquire_Load() || cfd->IsDropped())) {
     s = Status::ShutdownInProgress(
         "Database shutdown or Column family drop during flush");
   }
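Aside (not part of the commit): the hunk above fixes a logic bug. With `&&`, the flush result was only cancelled when the database was shutting down AND the column family had been dropped; either condition alone should cancel it, hence the `||` inside parentheses. A minimal self-contained sketch of the before/after predicate, with illustrative names:

#include <cassert>

// Old predicate: required BOTH conditions before cancelling the flush.
static bool ShouldAbortOld(bool ok, bool shutting_down, bool dropped) {
  return ok && shutting_down && dropped;
}

// New predicate: either condition alone cancels it.
static bool ShouldAbortNew(bool ok, bool shutting_down, bool dropped) {
  return ok && (shutting_down || dropped);
}

int main() {
  // A dropped column family alone now cancels the flush result...
  assert(!ShouldAbortOld(true, /*shutting_down=*/false, /*dropped=*/true));
  assert(ShouldAbortNew(true, /*shutting_down=*/false, /*dropped=*/true));
  // ...and so does shutdown alone.
  assert(ShouldAbortNew(true, /*shutting_down=*/true, /*dropped=*/false));
  return 0;
}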
@@ -2694,7 +2694,10 @@ Status DBImpl::ProcessKeyValueCompaction(
     Iterator* input,
     CompactionState* compact,
     bool is_compaction_v2,
+    int* num_output_records,
     LogBuffer* log_buffer) {
+  assert(num_output_records != nullptr);
+
   size_t combined_idx = 0;
   Status status;
   std::string compaction_filter_value;
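Aside (not part of the commit): `num_output_records` follows the caller-owned-accumulator pattern — the caller allocates one counter and passes its address into every call, so the repeated invocations on the compaction-filter-v2 path below all accumulate into a single total. A hedged sketch of the pattern, with illustrative names:

#include <cassert>

// Each call adds to the same counter the caller allocated.
static void EmitRecords(int to_emit, int* num_output_records) {
  assert(num_output_records != nullptr);
  for (int i = 0; i < to_emit; ++i) {
    (*num_output_records)++;  // one increment per record written
  }
}

int main() {
  int num_output_records = 0;
  EmitRecords(3, &num_output_records);  // first pass
  EmitRecords(2, &num_output_records);  // a later pass accumulates
  assert(num_output_records == 5);
  return 0;
}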
@@ -2965,6 +2968,7 @@ Status DBImpl::ProcessKeyValueCompaction(
       }
       compact->current_output()->largest.DecodeFrom(newkey);
       compact->builder->Add(newkey, value);
+      (*num_output_records)++,
       compact->current_output()->largest_seqno =
         std::max(compact->current_output()->largest_seqno, seqno);

@@ -3140,6 +3144,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
   mutex_.Unlock();
   log_buffer->FlushBufferToLog();

+  int num_output_records = 0;
   const uint64_t start_micros = env_->NowMicros();
   unique_ptr<Iterator> input(versions_->MakeInputIterator(compact->compaction));
   input->SeekToFirst();
@@ -3168,6 +3173,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
         input.get(),
         compact,
         false,
+        &num_output_records,
         log_buffer);
   } else {
     // temp_backup_input always point to the start of the current buffer
@@ -3249,6 +3255,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
           input.get(),
           compact,
           true,
+          &num_output_records,
           log_buffer);

       if (!status.ok()) {
@@ -3286,6 +3293,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
           input.get(),
           compact,
           true,
+          &num_output_records,
           log_buffer);

       compact->CleanupBatchBuffer();
@@ -3309,6 +3317,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
         input.get(),
         compact,
         true,
+        &num_output_records,
         log_buffer);
   }  // checking for compaction filter v2

@@ -3342,17 +3351,24 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
   }
   stats.files_out_levelnp1 = num_output_files;

+  uint64_t num_input_records = 0;
+
   for (int i = 0; i < compact->compaction->num_input_files(0); i++) {
     stats.bytes_readn += compact->compaction->input(0, i)->fd.GetFileSize();
+    stats.num_input_records += compact->compaction->input(0, i)->num_entries;
+    num_input_records += compact->compaction->input(0, i)->num_entries;
   }

   for (int i = 0; i < compact->compaction->num_input_files(1); i++) {
     stats.bytes_readnp1 += compact->compaction->input(1, i)->fd.GetFileSize();
+    num_input_records += compact->compaction->input(1, i)->num_entries;
   }

   for (int i = 0; i < num_output_files; i++) {
     stats.bytes_written += compact->outputs[i].file_size;
   }
+  stats.num_dropped_records =
+      static_cast<int>(num_input_records) - num_output_records;

   RecordCompactionIOStats();

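Aside (not part of the commit): the dropped-record count above is plain arithmetic — total entries read from the level-N and level-N+1 input files, minus the entries written out. A small sketch of the same computation, with illustrative names:

#include <cassert>
#include <cstdint>

// Entries read in minus entries written out; records removed by deletions
// and overwrites during compaction make this positive.
static int DroppedRecords(uint64_t num_input_records, int num_output_records) {
  return static_cast<int>(num_input_records) - num_output_records;
}

int main() {
  // 1000 entries read from input files, 940 written out: 60 dropped.
  assert(DroppedRecords(1000, 940) == 60);
  return 0;
}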
@@ -3375,7 +3391,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
       "[%s] compacted to: %s, MB/sec: %.1f rd, %.1f wr, level %d, "
       "files in(%d, %d) out(%d) "
       "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) "
-      "write-amplify(%.1f) %s\n",
+      "write-amplify(%.1f) %s, records in: %d, records dropped: %d\n",
       cfd->GetName().c_str(), cfd->current()->LevelSummary(&tmp),
       (stats.bytes_readn + stats.bytes_readnp1) /
           static_cast<double>(stats.micros),
@@ -3387,7 +3403,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
       (stats.bytes_written + stats.bytes_readnp1 + stats.bytes_readn) /
           (double)stats.bytes_readn,
       stats.bytes_written / (double)stats.bytes_readn,
-      status.ToString().c_str());
+      status.ToString().c_str(), stats.num_input_records,
+      stats.num_dropped_records);

   return status;
 }
@@ -3602,6 +3619,7 @@ std::vector<Status> DBImpl::MultiGet(
     } else if (super_version->imm->Get(lkey, value, &s, &merge_context)) {
       // Done
     } else {
+      PERF_TIMER_GUARD(get_from_output_files_time);
       super_version->current->Get(options, lkey, value, &s, &merge_context);
     }

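Aside (not part of the commit): `PERF_TIMER_GUARD` is a scope-based (RAII) timer — it starts when declared and charges the elapsed time to the named `perf_context` counter when the scope exits, so the whole SST-file lookup branch above is attributed to `get_from_output_files_time`. A simplified sketch of the idea; this is not RocksDB's actual macro, which also respects the configured perf level:

#include <chrono>
#include <cstdint>

// Charges the lifetime of the guard to a caller-owned counter.
class ScopedPerfTimer {
 public:
  explicit ScopedPerfTimer(uint64_t* metric)
      : metric_(metric), start_(std::chrono::steady_clock::now()) {}
  ~ScopedPerfTimer() {
    auto elapsed = std::chrono::steady_clock::now() - start_;
    *metric_ += std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed)
                    .count();  // whole scope billed to the counter
  }

 private:
  uint64_t* metric_;
  std::chrono::steady_clock::time_point start_;
};

uint64_t get_from_output_files_time = 0;

void LookupInFiles() {
  ScopedPerfTimer guard(&get_from_output_files_time);
  // ... file lookup would run here; time is added on scope exit ...
}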
@@ -419,6 +419,7 @@ class DBImpl : public DB {
       Iterator* input,
       CompactionState* compact,
       bool is_compaction_v2,
+      int* num_output_records,
       LogBuffer* log_buffer);

   // Call compaction_filter_v2->Filter() on kv-pairs in compact
@@ -8,6 +8,7 @@
 #include "db/db_impl.h"
 #include "db/merge_context.h"
 #include "db/db_iter.h"
+#include "util/perf_context_imp.h"

 namespace rocksdb {

@@ -34,6 +35,7 @@ Status DBImplReadOnly::Get(const ReadOptions& read_options,
   LookupKey lkey(key, snapshot);
   if (super_version->mem->Get(lkey, value, &s, &merge_context)) {
   } else {
+    PERF_TIMER_GUARD(get_from_output_files_time);
     super_version->current->Get(read_options, lkey, value, &s, &merge_context);
   }
   return s;
@@ -28,6 +28,7 @@ ColumnFamilyData* FlushScheduler::GetNextColumnFamily() {
     if (cfd->IsDropped()) {
       if (cfd->Unref()) {
         delete cfd;
+        cfd = nullptr;
       }
     } else {
       break;
@@ -30,7 +30,7 @@ void PrintLevelStatsHeader(char* buf, size_t len, const std::string& cf_name) {
       "Level Files Size(MB) Score Read(GB) Rn(GB) Rnp1(GB) "
       "Write(GB) Wnew(GB) RW-Amp W-Amp Rd(MB/s) Wr(MB/s) Rn(cnt) "
       "Rnp1(cnt) Wnp1(cnt) Wnew(cnt) Comp(sec) Comp(cnt) Avg(sec) "
-      "Stall(sec) Stall(cnt) Avg(ms)\n"
+      "Stall(sec) Stall(cnt) Avg(ms) RecordIn RecordDrop\n"
       "--------------------------------------------------------------------"
       "--------------------------------------------------------------------"
       "--------------------------------------------------------------------\n",
@@ -65,7 +65,9 @@ void PrintLevelStats(char* buf, size_t len, const std::string& name,
       "%8.3f " /* Avg(sec) */
       "%10.2f " /* Stall(sec) */
       "%10" PRIu64 " " /* Stall(cnt) */
-      "%7.2f\n" /* Avg(ms) */,
+      "%7.2f" /* Avg(ms) */
+      "%8d " /* input entries */
+      "%10d\n" /* number of records reduced */,
       name.c_str(), num_files, being_compacted, total_file_size / kMB, score,
       bytes_read / kGB,
       stats.bytes_readn / kGB,
@@ -85,7 +87,9 @@ void PrintLevelStats(char* buf, size_t len, const std::string& name,
       stats.count == 0 ? 0 : stats.micros / 1000000.0 / stats.count,
       stall_us / 1000000.0,
       stalls,
-      stalls == 0 ? 0 : stall_us / 1000.0 / stalls);
+      stalls == 0 ? 0 : stall_us / 1000.0 / stalls,
+      stats.num_input_records,
+      stats.num_dropped_records);
 }

@@ -123,6 +123,13 @@ class InternalStats {
     // Files written during compaction between levels N and N+1
     int files_out_levelnp1;

+    // Total incoming entries during compaction between levels N and N+1
+    int num_input_records;
+
+    // Accumulated diff number of entries
+    // (num input entries - num output entires) for compaction levels N and N+1
+    int num_dropped_records;
+
     // Number of compactions done
     int count;

@@ -134,6 +141,8 @@ class InternalStats {
           files_in_leveln(0),
           files_in_levelnp1(0),
           files_out_levelnp1(0),
+          num_input_records(0),
+          num_dropped_records(0),
           count(count) {}

     explicit CompactionStats(const CompactionStats& c)
@@ -144,6 +153,8 @@ class InternalStats {
           files_in_leveln(c.files_in_leveln),
           files_in_levelnp1(c.files_in_levelnp1),
           files_out_levelnp1(c.files_out_levelnp1),
+          num_input_records(c.num_input_records),
+          num_dropped_records(c.num_dropped_records),
           count(c.count) {}

     void Add(const CompactionStats& c) {
@@ -154,6 +165,8 @@ class InternalStats {
       this->files_in_leveln += c.files_in_leveln;
       this->files_in_levelnp1 += c.files_in_levelnp1;
       this->files_out_levelnp1 += c.files_out_levelnp1;
+      this->num_input_records += c.num_input_records;
+      this->num_dropped_records += c.num_dropped_records;
       this->count += c.count;
     }

@@ -165,6 +178,8 @@ class InternalStats {
       this->files_in_leveln -= c.files_in_leveln;
       this->files_in_levelnp1 -= c.files_in_levelnp1;
       this->files_out_levelnp1 -= c.files_out_levelnp1;
+      this->num_input_records -= c.num_input_records;
+      this->num_dropped_records -= c.num_dropped_records;
       this->count -= c.count;
     }
   };
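Aside (not part of the commit): the reason each new counter is threaded through the constructor, copy constructor, `Add()`, and `Subtract()` is that interval statistics are derived by subtracting a saved snapshot of the cumulative stats; a field missed in either method would silently report lifetime totals instead of deltas. A hedged sketch of that pattern, with illustrative names:

// Interval stats = (cumulative now) - (cumulative at last dump).
struct Stats {
  int num_input_records = 0;
  int num_dropped_records = 0;
  void Add(const Stats& c) {
    num_input_records += c.num_input_records;
    num_dropped_records += c.num_dropped_records;
  }
  void Subtract(const Stats& c) {
    num_input_records -= c.num_input_records;
    num_dropped_records -= c.num_dropped_records;
  }
};

Stats IntervalStats(Stats cumulative, const Stats& last_snapshot) {
  cumulative.Subtract(last_snapshot);  // a field missed here would leak
  return cumulative;                   // lifetime totals into the report
}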
@@ -6,7 +6,6 @@
 #include <algorithm>
 #include <iostream>
 #include <vector>
-#include "/usr/include/valgrind/callgrind.h"

 #include "rocksdb/db.h"
 #include "rocksdb/perf_context.h"
@@ -29,7 +28,7 @@ const std::string kDbName = rocksdb::test::TmpDir() + "/perf_context_test";

 namespace rocksdb {

-std::shared_ptr<DB> OpenDb() {
+std::shared_ptr<DB> OpenDb(bool read_only = false) {
     DB* db;
     Options options;
     options.create_if_missing = true;
@@ -39,12 +38,16 @@ std::shared_ptr<DB> OpenDb() {
         FLAGS_min_write_buffer_number_to_merge;

     if (FLAGS_use_set_based_memetable) {
-      auto prefix_extractor = rocksdb::NewFixedPrefixTransform(0);
-      options.memtable_factory.reset(
-          NewHashSkipListRepFactory(prefix_extractor));
+      options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(0));
+      options.memtable_factory.reset(NewHashSkipListRepFactory());
     }

-    Status s = DB::Open(options, kDbName, &db);
+    Status s;
+    if (!read_only) {
+      s = DB::Open(options, kDbName, &db);
+    } else {
+      s = DB::OpenForReadOnly(options, kDbName, &db);
+    }
     ASSERT_OK(s);
     return std::shared_ptr<DB>(db);
 }
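Aside (not part of the commit): `DB::OpenForReadOnly()` is the real RocksDB entry point used above; it opens the database without the write path, which is what lets the test later assert that read-only `Get()` skips snapshot and super-version post-processing. A minimal usage sketch; the path is illustrative:

#include "rocksdb/db.h"

// Opens a database read-only; writes against this handle will fail.
rocksdb::DB* OpenReadOnlyOrNull() {
  rocksdb::Options options;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::OpenForReadOnly(options, "/tmp/perf_context_test", &db);
  return s.ok() ? db : nullptr;
}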
@@ -76,7 +79,8 @@ TEST(PerfContextTest, SeekIntoDeletion) {
     std::string value;

     perf_context.Reset();
-    StopWatchNano timer(Env::Default(), true);
+    StopWatchNano timer(Env::Default());
+    timer.Start();
     auto status = db->Get(read_options, key, &value);
     auto elapsed_nanos = timer.ElapsedNanos();
     ASSERT_TRUE(status.IsNotFound());
@@ -149,11 +153,12 @@ TEST(PerfContextTest, StopWatchNanoOverhead) {
 TEST(PerfContextTest, StopWatchOverhead) {
   // profile the timer cost by itself!
   const int kTotalIterations = 1000000;
+  uint64_t elapsed = 0;
   std::vector<uint64_t> timings(kTotalIterations);

-  StopWatch timer(Env::Default());
+  StopWatch timer(Env::Default(), nullptr, 0, &elapsed);
   for (auto& timing : timings) {
-    timing = timer.ElapsedMicros();
+    timing = elapsed;
   }

   HistogramImpl histogram;
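Aside (not part of the commit): the rewritten test uses the out-pointer form of `StopWatch`, which publishes its measurement into a caller-owned variable instead of being polled through a method call, so the loop body `timing = elapsed;` costs only a plain variable copy — exactly what an overhead benchmark wants to isolate. A hedged sketch of the pattern; this is not RocksDB's actual class:

#include <chrono>
#include <cstdint>

// Publishes elapsed time into a caller-owned variable at end of scope.
class SimpleStopWatch {
 public:
  explicit SimpleStopWatch(uint64_t* elapsed)
      : elapsed_(elapsed), start_(std::chrono::steady_clock::now()) {}
  ~SimpleStopWatch() {
    *elapsed_ = std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::steady_clock::now() - start_)
                    .count();  // written once, when the watch is destroyed
  }

 private:
  uint64_t* elapsed_;
  std::chrono::steady_clock::time_point start_;
};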
@@ -166,7 +171,7 @@ TEST(PerfContextTest, StopWatchOverhead) {
   std::cout << histogram.ToString();
 }

-void ProfileKeyComparison() {
+void ProfileQueries(bool enabled_time = false) {
   DestroyDB(kDbName, Options());    // Start this test with a fresh DB

   auto db = OpenDb();
@@ -175,11 +180,21 @@ void ProfileKeyComparison() {
   ReadOptions read_options;

   HistogramImpl hist_put;
+
   HistogramImpl hist_get;
   HistogramImpl hist_get_snapshot;
   HistogramImpl hist_get_memtable;
+  HistogramImpl hist_get_files;
   HistogramImpl hist_get_post_process;
   HistogramImpl hist_num_memtable_checked;
+
+  HistogramImpl hist_mget;
+  HistogramImpl hist_mget_snapshot;
+  HistogramImpl hist_mget_memtable;
+  HistogramImpl hist_mget_files;
+  HistogramImpl hist_mget_post_process;
+  HistogramImpl hist_mget_num_memtable_checked;
+
   HistogramImpl hist_write_pre_post;
   HistogramImpl hist_write_wal_time;
   HistogramImpl hist_write_memtable_time;
@@ -187,8 +202,13 @@ void ProfileKeyComparison() {
   std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";

   std::vector<int> keys;
+  const int kFlushFlag = -1;
   for (int i = 0; i < FLAGS_total_keys; ++i) {
     keys.push_back(i);
+    if (i == FLAGS_total_keys / 2) {
+      // Issuing a flush in the middle.
+      keys.push_back(kFlushFlag);
+    }
   }

   if (FLAGS_random_key) {
@@ -196,27 +216,54 @@ void ProfileKeyComparison() {
   }

   for (const int i : keys) {
+    if (i == kFlushFlag) {
+      FlushOptions fo;
+      db->Flush(fo);
+      continue;
+    }
     std::string key = "k" + std::to_string(i);
     std::string value = "v" + std::to_string(i);

+    std::vector<Slice> keys = {Slice(key)};
+    std::vector<std::string> values;
+
     perf_context.Reset();
     db->Put(write_options, key, value);
     hist_write_pre_post.Add(perf_context.write_pre_and_post_process_time);
     hist_write_wal_time.Add(perf_context.write_wal_time);
     hist_write_memtable_time.Add(perf_context.write_memtable_time);
     hist_put.Add(perf_context.user_key_comparison_count);
+  }
+
+  for (const int i : keys) {
+    std::string key = "k" + std::to_string(i);
+    std::string value = "v" + std::to_string(i);
+
+    std::vector<Slice> keys = {Slice(key)};
+    std::vector<std::string> values;
+
     perf_context.Reset();
     db->Get(read_options, key, &value);
     hist_get_snapshot.Add(perf_context.get_snapshot_time);
     hist_get_memtable.Add(perf_context.get_from_memtable_time);
+    hist_get_files.Add(perf_context.get_from_output_files_time);
     hist_num_memtable_checked.Add(perf_context.get_from_memtable_count);
     hist_get_post_process.Add(perf_context.get_post_process_time);
     hist_get.Add(perf_context.user_key_comparison_count);
+
+    perf_context.Reset();
+    db->MultiGet(read_options, keys, &values);
+    hist_mget_snapshot.Add(perf_context.get_snapshot_time);
+    hist_mget_memtable.Add(perf_context.get_from_memtable_time);
+    hist_mget_files.Add(perf_context.get_from_output_files_time);
+    hist_mget_num_memtable_checked.Add(perf_context.get_from_memtable_count);
+    hist_mget_post_process.Add(perf_context.get_post_process_time);
+    hist_mget.Add(perf_context.user_key_comparison_count);
   }

   std::cout << "Put uesr key comparison: \n" << hist_put.ToString()
-            << "Get uesr key comparison: \n" << hist_get.ToString();
+            << "Get uesr key comparison: \n" << hist_get.ToString()
+            << "MultiGet uesr key comparison: \n" << hist_get.ToString();
   std::cout << "Put(): Pre and Post Process Time: \n"
             << hist_write_pre_post.ToString()
             << " Writing WAL time: \n"
@@ -224,25 +271,139 @@ void ProfileKeyComparison() {
             << " Writing Mem Table time: \n"
             << hist_write_memtable_time.ToString() << "\n";

-  std::cout << "Get(): Time to get snapshot: \n"
+  std::cout << "Get(): Time to get snapshot: \n" << hist_get_snapshot.ToString()
+            << " Time to get value from memtables: \n"
+            << hist_get_memtable.ToString() << "\n"
+            << " Time to get value from output files: \n"
+            << hist_get_files.ToString() << "\n"
+            << " Number of memtables checked: \n"
+            << hist_num_memtable_checked.ToString() << "\n"
+            << " Time to post process: \n" << hist_get_post_process.ToString()
+            << "\n";
+
+  std::cout << "MultiGet(): Time to get snapshot: \n"
+            << hist_mget_snapshot.ToString()
+            << " Time to get value from memtables: \n"
+            << hist_mget_memtable.ToString() << "\n"
+            << " Time to get value from output files: \n"
+            << hist_mget_files.ToString() << "\n"
+            << " Number of memtables checked: \n"
+            << hist_mget_num_memtable_checked.ToString() << "\n"
+            << " Time to post process: \n" << hist_mget_post_process.ToString()
+            << "\n";
+
+  if (enabled_time) {
+    ASSERT_GT(hist_get.Average(), 0);
+    ASSERT_GT(hist_get_snapshot.Average(), 0);
+    ASSERT_GT(hist_get_memtable.Average(), 0);
+    ASSERT_GT(hist_get_files.Average(), 0);
+    ASSERT_GT(hist_get_post_process.Average(), 0);
+    ASSERT_GT(hist_num_memtable_checked.Average(), 0);
+
+    ASSERT_GT(hist_mget.Average(), 0);
+    ASSERT_GT(hist_mget_snapshot.Average(), 0);
+    ASSERT_GT(hist_mget_memtable.Average(), 0);
+    ASSERT_GT(hist_mget_files.Average(), 0);
+    ASSERT_GT(hist_mget_post_process.Average(), 0);
+    ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);
+  }
+
+  db.reset();
+  db = OpenDb(true);
+
+  hist_get.Clear();
+  hist_get_snapshot.Clear();
+  hist_get_memtable.Clear();
+  hist_get_files.Clear();
+  hist_get_post_process.Clear();
+  hist_num_memtable_checked.Clear();
+
+  hist_mget.Clear();
+  hist_mget_snapshot.Clear();
+  hist_mget_memtable.Clear();
+  hist_mget_files.Clear();
+  hist_mget_post_process.Clear();
+  hist_mget_num_memtable_checked.Clear();
+
+  for (const int i : keys) {
+    std::string key = "k" + std::to_string(i);
+    std::string value = "v" + std::to_string(i);
+
+    std::vector<Slice> keys = {Slice(key)};
+    std::vector<std::string> values;
+
+    perf_context.Reset();
+    db->Get(read_options, key, &value);
+    hist_get_snapshot.Add(perf_context.get_snapshot_time);
+    hist_get_memtable.Add(perf_context.get_from_memtable_time);
+    hist_get_files.Add(perf_context.get_from_output_files_time);
+    hist_num_memtable_checked.Add(perf_context.get_from_memtable_count);
+    hist_get_post_process.Add(perf_context.get_post_process_time);
+    hist_get.Add(perf_context.user_key_comparison_count);
+
+    perf_context.Reset();
+    db->MultiGet(read_options, keys, &values);
+    hist_mget_snapshot.Add(perf_context.get_snapshot_time);
+    hist_mget_memtable.Add(perf_context.get_from_memtable_time);
+    hist_mget_files.Add(perf_context.get_from_output_files_time);
+    hist_mget_num_memtable_checked.Add(perf_context.get_from_memtable_count);
+    hist_mget_post_process.Add(perf_context.get_post_process_time);
+    hist_mget.Add(perf_context.user_key_comparison_count);
+  }
+
+  std::cout << "ReadOnly Get uesr key comparison: \n" << hist_get.ToString()
+            << "ReadOnly MultiGet uesr key comparison: \n"
+            << hist_mget.ToString();
+
+  std::cout << "ReadOnly Get(): Time to get snapshot: \n"
             << hist_get_snapshot.ToString()
             << " Time to get value from memtables: \n"
             << hist_get_memtable.ToString() << "\n"
+            << " Time to get value from output files: \n"
+            << hist_get_files.ToString() << "\n"
             << " Number of memtables checked: \n"
             << hist_num_memtable_checked.ToString() << "\n"
-            << " Time to post process: \n"
-            << hist_get_post_process.ToString() << "\n";
+            << " Time to post process: \n" << hist_get_post_process.ToString()
+            << "\n";
+
+  std::cout << "ReadOnly MultiGet(): Time to get snapshot: \n"
+            << hist_mget_snapshot.ToString()
+            << " Time to get value from memtables: \n"
+            << hist_mget_memtable.ToString() << "\n"
+            << " Time to get value from output files: \n"
+            << hist_mget_files.ToString() << "\n"
+            << " Number of memtables checked: \n"
+            << hist_mget_num_memtable_checked.ToString() << "\n"
+            << " Time to post process: \n" << hist_mget_post_process.ToString()
+            << "\n";
+
+  if (enabled_time) {
+    ASSERT_GT(hist_get.Average(), 0);
+    ASSERT_GT(hist_get_memtable.Average(), 0);
+    ASSERT_GT(hist_get_files.Average(), 0);
+    ASSERT_GT(hist_num_memtable_checked.Average(), 0);
+    // In read-only mode Get(), no super version operation is needed
+    ASSERT_EQ(hist_get_post_process.Average(), 0);
+    ASSERT_EQ(hist_get_snapshot.Average(), 0);
+
+    ASSERT_GT(hist_mget.Average(), 0);
+    ASSERT_GT(hist_mget_snapshot.Average(), 0);
+    ASSERT_GT(hist_mget_memtable.Average(), 0);
+    ASSERT_GT(hist_mget_files.Average(), 0);
+    ASSERT_GT(hist_mget_post_process.Average(), 0);
+    ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0);
+  }
 }

 TEST(PerfContextTest, KeyComparisonCount) {
   SetPerfLevel(kEnableCount);
-  ProfileKeyComparison();
+  ProfileQueries();

   SetPerfLevel(kDisable);
-  ProfileKeyComparison();
+  ProfileQueries();

   SetPerfLevel(kEnableTime);
-  ProfileKeyComparison();
+  ProfileQueries(true);
 }

 // make perf_context_test
@@ -8,19 +8,19 @@ package org.rocksdb;
 /**
  * A subclass of RocksDB which supports backup-related operations.
  *
- * @see BackupableDBOptions
+ * @see org.rocksdb.BackupableDBOptions
  */
 public class BackupableDB extends RocksDB {
   /**
-   * Open a BackupableDB under the specified path.
+   * Open a {@code BackupableDB} under the specified path.
    * Note that the backup path should be set properly in the
    * input BackupableDBOptions.
    *
-   * @param opt options for db.
-   * @param bopt backup related options.
-   * @param the db path for storing data. The path for storing
-   *     backup should be specified in the BackupableDBOptions.
-   * @return reference to the opened BackupableDB.
+   * @param opt {@link org.rocksdb.Options} to set for the database.
+   * @param bopt {@link org.rocksdb.BackupableDBOptions} to use.
+   * @param db_path Path to store data to. The path for storing the backup should be
+   *     specified in the {@link org.rocksdb.BackupableDBOptions}.
+   * @return BackupableDB reference to the opened database.
    */
   public static BackupableDB open(
       Options opt, BackupableDBOptions bopt, String db_path)
@@ -61,10 +61,9 @@ public class BackupableDB extends RocksDB {
   /**
    * Close the BackupableDB instance and release resource.
    *
-   * Internally, BackupableDB owns the rocksdb::DB pointer to its
-   * associated RocksDB. The release of that RocksDB pointer is
-   * handled in the destructor of the c++ rocksdb::BackupableDB and
-   * should be transparent to Java developers.
+   * Internally, BackupableDB owns the {@code rocksdb::DB} pointer to its associated
+   * {@link org.rocksdb.RocksDB}. The release of that RocksDB pointer is handled in the destructor
+   * of the c++ {@code rocksdb::BackupableDB} and should be transparent to Java developers.
    */
   @Override public synchronized void close() {
     if (isInitialized()) {
@@ -74,7 +73,7 @@ public class BackupableDB extends RocksDB {

   /**
    * A protected construction that will be used in the static factory
-   * method BackupableDB.open().
+   * method {@link #open(Options, BackupableDBOptions, String)}.
    */
   protected BackupableDB() {
     super();
@@ -82,6 +81,7 @@ public class BackupableDB extends RocksDB {

   @Override protected void finalize() {
     close();
+    super.finalize();
   }

   protected native void open(long rocksDBHandle, long backupDBOptionsHandle);
@@ -7,33 +7,41 @@ package org.rocksdb;

 /**
  * BackupableDBOptions to control the behavior of a backupable database.
- * It will be used during the creation of a BackupableDB.
+ * It will be used during the creation of a {@link org.rocksdb.BackupableDB}.
  *
  * Note that dispose() must be called before an Options instance
  * become out-of-scope to release the allocated memory in c++.
  *
- * @param path Where to keep the backup files. Has to be different than dbname.
- *     Best to set this to dbname_ + "/backups"
- * @param shareTableFiles If share_table_files == true, backup will assume that
- *     table files with same name have the same contents. This enables
- *     incremental backups and avoids unnecessary data copies. If
- *     share_table_files == false, each backup will be on its own and will not
- *     share any data with other backups. default: true
- * @param sync If sync == true, we can guarantee you'll get consistent backup
- *     even on a machine crash/reboot. Backup process is slower with sync
- *     enabled. If sync == false, we don't guarantee anything on machine reboot.
- *     However, chances are some of the backups are consistent. Default: true
- * @param destroyOldData If true, it will delete whatever backups there are
- *     already. Default: false
- * @param backupLogFiles If false, we won't backup log files. This option can be
- *     useful for backing up in-memory databases where log file are persisted,
- *     but table files are in memory. Default: true
- * @param backupRateLimit Max bytes that can be transferred in a second during
- *     backup. If 0 or negative, then go as fast as you can. Default: 0
- * @param restoreRateLimit Max bytes that can be transferred in a second during
- *     restore. If 0 or negative, then go as fast as you can. Default: 0
+ * @see org.rocksdb.BackupableDB
  */
 public class BackupableDBOptions extends RocksObject {

+  /**
+   * BackupableDBOptions constructor
+   *
+   * @param path Where to keep the backup files. Has to be different than db name.
+   *     Best to set this to {@code db name_ + "/backups"}
+   * @param shareTableFiles If {@code share_table_files == true}, backup will assume
+   *     that table files with same name have the same contents. This enables incremental
+   *     backups and avoids unnecessary data copies. If {@code share_table_files == false},
+   *     each backup will be on its own and will not share any data with other backups.
+   *     Default: true
+   * @param sync If {@code sync == true}, we can guarantee you'll get consistent backup
+   *     even on a machine crash/reboot. Backup process is slower with sync enabled.
+   *     If {@code sync == false}, we don't guarantee anything on machine reboot.
+   *     However, chances are some of the backups are consistent.
+   *     Default: true
+   * @param destroyOldData If true, it will delete whatever backups there are already.
+   *     Default: false
+   * @param backupLogFiles If false, we won't backup log files. This option can be
+   *     useful for backing up in-memory databases where log file are persisted, but table
+   *     files are in memory.
+   *     Default: true
+   * @param backupRateLimit Max bytes that can be transferred in a second during backup.
+   *     If 0 or negative, then go as fast as you can. Default: 0
+   * @param restoreRateLimit Max bytes that can be transferred in a second during restore.
+   *     If 0 or negative, then go as fast as you can. Default: 0
+   */
   public BackupableDBOptions(String path, boolean shareTableFiles, boolean sync,
       boolean destroyOldData, boolean backupLogFiles, long backupRateLimit,
       long restoreRateLimit) {
@@ -27,7 +27,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
   /**
    * Disable block cache. If this is set to true,
    * then no block cache should be used, and the block_cache should
-   * point to a nullptr object.
+   * point to a {@code nullptr} object.
    * Default: false
    *
    * @param noBlockCache if use block cache
@@ -69,7 +69,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
    * Controls the number of shards for the block cache.
    * This is applied only if cacheSize is set to non-negative.
    *
-   * @param numShardBits the number of shard bits. The resulting
+   * @param blockCacheNumShardBits the number of shard bits. The resulting
    *     number of shards would be 2 ^ numShardBits. Any negative
    *     number means use default settings."
    * @return the reference to the current option.
@@ -176,13 +176,14 @@ public class BlockBasedTableConfig extends TableFormatConfig {
   /**
    * Use the specified filter policy to reduce disk reads.
    *
-   * Filter should not be disposed before options instances using this filter is
-   * disposed. If dispose() function is not called, then filter object will be
-   * GC'd automatically.
+   * {@link org.rocksdb.Filter} should not be disposed before options instances
+   * using this filter is disposed. If {@link Filter#dispose()} function is not
+   * called, then filter object will be GC'd automatically.
    *
-   * Filter instance can be re-used in multiple options instances.
+   * {@link org.rocksdb.Filter} instance can be re-used in multiple options
+   * instances.
    *
-   * @param Filter Filter Policy java instance.
+   * @param filter {@link org.rocksdb.Filter} Filter Policy java instance.
    * @return the reference to the current config.
    */
   public BlockBasedTableConfig setFilter(Filter filter) {
@@ -206,7 +207,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
    *   If not specified, each "table reader" object will pre-load index/filter
    *   block during table initialization.
    *
-   * @param index and filter blocks should be put in block cache.
+   * @param cacheIndexAndFilterBlocks and filter blocks should be put in block cache.
    * @return the reference to the current config.
    */
   public BlockBasedTableConfig setCacheIndexAndFilterBlocks(
@@ -233,7 +234,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
    *   if true, does not store prefix and allows prefix hash collision
    *   (less memory consumption)
    *
-   * @param if hash collisions should be allowed.
+   * @param hashIndexAllowCollision points out if hash collisions should be allowed.
    * @return the reference to the current config.
    */
   public BlockBasedTableConfig setHashIndexAllowCollision(
@@ -256,7 +257,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
    * Size of compressed block cache. If 0, then block_cache_compressed is set
    * to null.
    *
-   * @param size of compressed block cache.
+   * @param blockCacheCompressedSize of compressed block cache.
    * @return the reference to the current config.
    */
   public BlockBasedTableConfig setBlockCacheCompressedSize(
@@ -281,7 +282,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
    * Controls the number of shards for the block compressed cache.
    * This is applied only if blockCompressedCacheSize is set to non-negative.
    *
-   * @param numShardBits the number of shard bits. The resulting
+   * @param blockCacheCompressedNumShardBits the number of shard bits. The resulting
    *     number of shards would be 2 ^ numShardBits. Any negative
    *     number means use default settings."
    * @return the reference to the current option.
@@ -7,11 +7,33 @@ package org.rocksdb;
 /**
  * Config for rate limiter, which is used to control write rate of flush and
  * compaction.
+ *
+ * @see RateLimiterConfig
  */
 public class GenericRateLimiterConfig extends RateLimiterConfig {
   private static final long DEFAULT_REFILL_PERIOD_MICROS = (100 * 1000);
   private static final int DEFAULT_FAIRNESS = 10;

+  /**
+   * GenericRateLimiterConfig constructor
+   *
+   * @param rateBytesPerSecond this is the only parameter you want to set
+   *     most of the time. It controls the total write rate of compaction
+   *     and flush in bytes per second. Currently, RocksDB does not enforce
+   *     rate limit for anything other than flush and compaction, e.g. write to WAL.
+   * @param refillPeriodMicros this controls how often tokens are refilled. For example,
+   *     when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
+   *     100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
+   *     burstier writes while smaller value introduces more CPU overhead.
+   *     The default should work for most cases.
+   * @param fairness RateLimiter accepts high-pri requests and low-pri requests.
+   *     A low-pri request is usually blocked in favor of hi-pri request. Currently,
+   *     RocksDB assigns low-pri to request from compaction and high-pri to request
+   *     from flush. Low-pri requests can get blocked if flush requests come in
+   *     continuously. This fairness parameter grants low-pri requests permission by
+   *     fairness chance even though high-pri requests exist to avoid starvation.
+   *     You should be good by leaving it at default 10.
+   */
   public GenericRateLimiterConfig(long rateBytesPerSecond,
       long refillPeriodMicros, int fairness) {
     rateBytesPerSecond_ = rateBytesPerSecond;
@@ -19,6 +41,14 @@ public class GenericRateLimiterConfig extends RateLimiterConfig {
     fairness_ = fairness;
   }

+  /**
+   * GenericRateLimiterConfig constructor
+   *
+   * @param rateBytesPerSecond this is the only parameter you want to set
+   *     most of the time. It controls the total write rate of compaction
+   *     and flush in bytes per second. Currently, RocksDB does not enforce
+   *     rate limit for anything other than flush and compaction, e.g. write to WAL.
+   */
   public GenericRateLimiterConfig(long rateBytesPerSecond) {
     this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS);
   }
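Aside (not part of the commit): the constructor documented above mirrors RocksDB's C++ `NewGenericRateLimiter(rate_bytes_per_sec, refill_period_us, fairness)` factory. A hedged C++ usage sketch using the same token-bucket parameters the javadoc describes (100ms refill period, fairness 10 are the documented defaults):

#include "rocksdb/options.h"
#include "rocksdb/rate_limiter.h"

// Caps flush + compaction write throughput for the database.
void ConfigureRateLimit(rocksdb::Options* options) {
  options->rate_limiter.reset(rocksdb::NewGenericRateLimiter(
      10 * 1024 * 1024,  // 10MB/s total for flush and compaction
      100 * 1000,        // refill every 100ms, i.e. 1MB per refill
      10));              // fairness: low-pri gets a 1-in-10 chance
}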
@@ -21,7 +21,7 @@ public abstract class MemTableConfig {
    * which will create a c++ shared-pointer to the c++ MemTableRepFactory
    * that associated with the Java MemTableConfig.
    *
-   * @see Options.setMemTableFactory()
+   * @see Options#setMemTableConfig(MemTableConfig)
    */
   abstract protected long newMemTableFactoryHandle();
 }
@@ -7,10 +7,10 @@ package org.rocksdb;

 /**
  * Options to control the behavior of a database. It will be used
- * during the creation of a RocksDB (i.e., RocksDB.open()).
+ * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
  *
- * If dispose() function is not called, then it will be GC'd automatically and
- * native resources will be released as part of the process.
+ * If {@link #dispose()} function is not called, then it will be GC'd automatically
+ * and native resources will be released as part of the process.
  */
 public class Options extends RocksObject {
   static {
@@ -30,7 +30,7 @@ public class Options extends RocksObject {
    * Construct options for opening a RocksDB.
    *
    * This constructor will create (by allocating a block of memory)
-   * an rocksdb::Options in the c++ side.
+   * an {@code rocksdb::Options} in the c++ side.
    */
   public Options() {
     super();
@@ -42,13 +42,14 @@ public class Options extends RocksObject {

   /**
    * If this value is set to true, then the database will be created
-   * if it is missing during RocksDB.open().
+   * if it is missing during {@code RocksDB.open()}.
    * Default: false
    *
    * @param flag a flag indicating whether to create a database the
-   *     specified database in RocksDB.open() operation is missing.
-   * @return the instance of the current Options.
-   * @see RocksDB.open()
+   *     specified database in {@link org.rocksdb.RocksDB#open(Options, String)} operation
+   *     is missing.
+   * @return the instance of the current Options
+   * @see org.rocksdb.RocksDB#open(Options, String)
    */
   public Options setCreateIfMissing(boolean flag) {
     assert(isInitialized());
@@ -59,7 +60,7 @@ public class Options extends RocksObject {
   /**
    * Use the specified object to interact with the environment,
    * e.g. to read/write files, schedule background work, etc.
-   * Default: RocksEnv.getDefault()
+   * Default: {@link RocksEnv#getDefault()}
    */
   public Options setEnv(RocksEnv env) {
     assert(isInitialized());
@@ -79,7 +80,7 @@ public class Options extends RocksObject {
    * If true, the database will be created if it is missing.
    *
    * @return true if the createIfMissing option is set to true.
-   * @see setCreateIfMissing()
+   * @see #setCreateIfMissing(boolean)
    */
   public boolean createIfMissing() {
     assert(isInitialized());
@@ -87,12 +88,12 @@ public class Options extends RocksObject {
   }

   /**
-   * Set BuiltinComparator to be used with RocksDB.
+   * Set {@link org.rocksdb.Options.BuiltinComparator} to be used with RocksDB.
    *
    * Note: Comparator can be set once upon database creation.
    *
    * Default: BytewiseComparator.
-   * @param builtinComparator a BuiltinComparator type.
+   * @param builtinComparator a {@link org.rocksdb.Options.BuiltinComparator} type.
    */
   public void setBuiltinComparator(BuiltinComparator builtinComparator) {
     assert(isInitialized());
@@ -106,7 +107,7 @@ public class Options extends RocksObject {
    * on disk) before converting to a sorted on-disk file.
    *
    * Larger values increase performance, especially during bulk loads.
-   * Up to max_write_buffer_number write buffers may be held in memory
+   * Up to {@code max_write_buffer_number} write buffers may be held in memory
    * at the same time, so you may wish to adjust this parameter
    * to control memory usage.
    *
@@ -116,7 +117,7 @@ public class Options extends RocksObject {
    * Default: 4MB
    * @param writeBufferSize the size of write buffer.
    * @return the instance of the current Options.
-   * @see RocksDB.open()
+   * @see org.rocksdb.RocksDB#open(Options, String)
    */
   public Options setWriteBufferSize(long writeBufferSize) {
     assert(isInitialized());
@@ -128,7 +129,7 @@ public class Options extends RocksObject {
    * Return size of write buffer size.
    *
    * @return size of write buffer.
-   * @see setWriteBufferSize()
+   * @see #setWriteBufferSize(long)
    */
   public long writeBufferSize() {
     assert(isInitialized());
@@ -143,7 +144,7 @@ public class Options extends RocksObject {
    *
    * @param maxWriteBufferNumber maximum number of write buffers.
    * @return the instance of the current Options.
-   * @see RocksDB.open()
+   * @see org.rocksdb.RocksDB#open(Options, String)
    */
   public Options setMaxWriteBufferNumber(int maxWriteBufferNumber) {
     assert(isInitialized());
@ -155,7 +156,7 @@ public class Options extends RocksObject {
|
|||||||
* Returns maximum number of write buffers.
|
* Returns maximum number of write buffers.
|
||||||
*
|
*
|
||||||
* @return maximum number of write buffers.
|
* @return maximum number of write buffers.
|
||||||
* @see setMaxWriteBufferNumber()
|
* @see #setMaxWriteBufferNumber(int)
|
||||||
*/
|
*/
|
||||||
public int maxWriteBufferNumber() {
|
public int maxWriteBufferNumber() {
|
||||||
assert(isInitialized());
|
assert(isInitialized());
|
||||||
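As a quick illustration of how the two write-buffer options above interact, a minimal sketch (the 8 MB and 4-buffer values are illustrative, not recommendations):

    Options opts = new Options();
    opts.setWriteBufferSize(8 * 1024 * 1024); // flush a memtable once it reaches 8 MB
    opts.setMaxWriteBufferNumber(4);          // hold at most 4 memtables in memory

Worst-case memtable memory use is roughly writeBufferSize * maxWriteBufferNumber.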
@ -181,9 +182,9 @@ public class Options extends RocksObject {
  * Default: false
  *
  * @param errorIfExists if true, an exception will be thrown
- * during RocksDB.open() if the database already exists.
+ * during {@code RocksDB.open()} if the database already exists.
  * @return the reference to the current option.
- * @see RocksDB.open()
+ * @see org.rocksdb.RocksDB#open(Options, String)
  */
 public Options setErrorIfExists(boolean errorIfExists) {
   assert(isInitialized());
@ -237,8 +238,9 @@ public class Options extends RocksObject {
  * Number of open files that can be used by the DB. You may need to
  * increase this if your database has a large working set. Value -1 means
  * files opened are always kept open. You can estimate number of files based
- * on target_file_size_base and target_file_size_multiplier for level-based
- * compaction. For universal-style compaction, you can usually set it to -1.
+ * on {@code target_file_size_base} and {@code target_file_size_multiplier}
+ * for level-based compaction. For universal-style compaction, you can usually
+ * set it to -1.
  *
  * @return the maximum number of open files.
  */
@ -252,8 +254,9 @@ public class Options extends RocksObject {
  * Number of open files that can be used by the DB. You may need to
  * increase this if your database has a large working set. Value -1 means
  * files opened are always kept open. You can estimate number of files based
- * on target_file_size_base and target_file_size_multiplier for level-based
- * compaction. For universal-style compaction, you can usually set it to -1.
+ * on {@code target_file_size_base} and {@code target_file_size_multiplier}
+ * for level-based compaction. For universal-style compaction, you can usually
+ * set it to -1.
  * Default: 5000
  *
  * @param maxOpenFiles the maximum number of open files.
@ -271,7 +274,7 @@ public class Options extends RocksObject {
  * to stable storage. Their contents remain in the OS buffers till the
  * OS decides to flush them. This option is good for bulk-loading
  * of data. Once the bulk-loading is complete, please issue a
- * sync to the OS to flush all dirty buffesrs to stable storage.
+ * sync to the OS to flush all dirty buffers to stable storage.
  *
  * @return if true, then data-sync is disabled.
  */
@ -286,7 +289,7 @@ public class Options extends RocksObject {
  * to stable storage. Their contents remain in the OS buffers till the
  * OS decides to flush them. This option is good for bulk-loading
  * of data. Once the bulk-loading is complete, please issue a
- * sync to the OS to flush all dirty buffesrs to stable storage.
+ * sync to the OS to flush all dirty buffers to stable storage.
  * Default: false
  *
  * @param disableDataSync a boolean flag to specify whether to
@ -306,7 +309,7 @@ public class Options extends RocksObject {
  * This parameter should be set to true while storing data to
  * filesystem like ext3 that can lose files after a reboot.
  *
- * @return true if fsync is used.
+ * @return boolean value indicating if fsync is used.
  */
 public boolean useFsync() {
   assert(isInitialized());
@ -438,7 +441,8 @@ public class Options extends RocksObject {
  * Default: 1
  *
  * @return the maximum number of concurrent background compaction jobs.
- * @see Env.setBackgroundThreads()
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
  */
 public int maxBackgroundCompactions() {
   assert(isInitialized());
@ -451,7 +455,7 @@ public class Options extends RocksObject {
   it does not use any locks to prevent concurrent updates.
  *
  * @return the instance of the current Options.
- * @see RocksDB.open()
+ * @see org.rocksdb.RocksDB#open(Options, String)
  */
 public Options createStatistics() {
   assert(isInitialized());
@ -460,11 +464,11 @@ public class Options extends RocksObject {
 }

 /**
- * Returns statistics object. Calls createStatistics() if
- * C++ returns NULL pointer for statistics.
+ * Returns statistics object. Calls {@link #createStatistics()} if
+ * C++ returns {@code nullptr} for statistics.
  *
  * @return the instance of the statistics object.
- * @see createStatistics()
+ * @see #createStatistics()
  */
 public Statistics statisticsPtr() {
   assert(isInitialized());
@ -489,8 +493,9 @@ public class Options extends RocksObject {
  * compaction jobs.
  * @return the reference to the current option.
  *
- * @see Env.setBackgroundThreads()
- * @see maxBackgroundFlushes()
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
+ * @see #maxBackgroundFlushes()
  */
 public Options setMaxBackgroundCompactions(int maxBackgroundCompactions) {
   assert(isInitialized());
@ -505,7 +510,8 @@ public class Options extends RocksObject {
  * Default: 1
  *
  * @return the maximum number of concurrent background flush jobs.
- * @see Env.setBackgroundThreads()
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
  */
 public int maxBackgroundFlushes() {
   assert(isInitialized());
@ -519,11 +525,12 @@ public class Options extends RocksObject {
  * HIGH priority thread pool. For more information, see
  * Default: 1
  *
- * @param maxBackgroundFlushes
+ * @param maxBackgroundFlushes number of max concurrent flush jobs
  * @return the reference to the current option.
  *
- * @see Env.setBackgroundThreads()
- * @see maxBackgroundCompactions()
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int)
+ * @see org.rocksdb.RocksEnv#setBackgroundThreads(int, int)
+ * @see #maxBackgroundCompactions()
  */
 public Options setMaxBackgroundFlushes(int maxBackgroundFlushes) {
   assert(isInitialized());
|
|||||||
/**
|
/**
|
||||||
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
|
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
|
||||||
* will be deleted.
|
* will be deleted.
|
||||||
* 1. If both set to 0, logs will be deleted asap and will not get into
|
* <ol>
|
||||||
* the archive.
|
* <li>If both set to 0, logs will be deleted asap and will not get into
|
||||||
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
* the archive.</li>
|
||||||
* WAL files will be checked every 10 min and if total size is greater
|
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
||||||
* then WAL_size_limit_MB, they will be deleted starting with the
|
* WAL files will be checked every 10 min and if total size is greater
|
||||||
* earliest until size_limit is met. All empty files will be deleted.
|
* then WAL_size_limit_MB, they will be deleted starting with the
|
||||||
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
|
* earliest until size_limit is met. All empty files will be deleted.</li>
|
||||||
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that
|
* <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
|
||||||
* are older than WAL_ttl_seconds will be deleted.
|
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that
|
||||||
* 4. If both are not 0, WAL files will be checked every 10 min and both
|
* are older than WAL_ttl_seconds will be deleted.</li>
|
||||||
* checks will be performed with ttl being first.
|
* <li>If both are not 0, WAL files will be checked every 10 min and both
|
||||||
|
* checks will be performed with ttl being first.</li>
|
||||||
|
* </ol>
|
||||||
*
|
*
|
||||||
* @return the wal-ttl seconds
|
* @return the wal-ttl seconds
|
||||||
* @see walSizeLimitMB()
|
* @see #walSizeLimitMB()
|
||||||
*/
|
*/
|
||||||
public long walTtlSeconds() {
|
public long walTtlSeconds() {
|
||||||
assert(isInitialized());
|
assert(isInitialized());
|
||||||
@ -735,23 +744,24 @@ public class Options extends RocksObject {
|
|||||||
private native long walTtlSeconds(long handle);
|
private native long walTtlSeconds(long handle);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
|
* {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
|
||||||
* will be deleted.
|
* will be deleted.
|
||||||
* 1. If both set to 0, logs will be deleted asap and will not get into
|
* <ol>
|
||||||
* the archive.
|
* <li>If both set to 0, logs will be deleted asap and will not get into
|
||||||
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
* the archive.</li>
|
||||||
|
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
||||||
* WAL files will be checked every 10 min and if total size is greater
|
* WAL files will be checked every 10 min and if total size is greater
|
||||||
* then WAL_size_limit_MB, they will be deleted starting with the
|
* then WAL_size_limit_MB, they will be deleted starting with the
|
||||||
* earliest until size_limit is met. All empty files will be deleted.
|
* earliest until size_limit is met. All empty files will be deleted.</li>
|
||||||
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
|
* <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
|
||||||
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that
|
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that
|
||||||
* are older than WAL_ttl_seconds will be deleted.
|
* are older than WAL_ttl_seconds will be deleted.</li>
|
||||||
* 4. If both are not 0, WAL files will be checked every 10 min and both
|
* <li>If both are not 0, WAL files will be checked every 10 min and both
|
||||||
* checks will be performed with ttl being first.
|
* checks will be performed with ttl being first.</li>
|
||||||
*
|
*
|
||||||
* @param walTtlSeconds the ttl seconds
|
* @param walTtlSeconds the ttl seconds
|
||||||
* @return the reference to the current option.
|
* @return the reference to the current option.
|
||||||
* @see setWalSizeLimitMB()
|
* @see #setWalSizeLimitMB(long)
|
||||||
*/
|
*/
|
||||||
public Options setWalTtlSeconds(long walTtlSeconds) {
|
public Options setWalTtlSeconds(long walTtlSeconds) {
|
||||||
assert(isInitialized());
|
assert(isInitialized());
|
||||||
@ -761,22 +771,23 @@ public class Options extends RocksObject {
|
|||||||
private native void setWalTtlSeconds(long handle, long walTtlSeconds);
|
private native void setWalTtlSeconds(long handle, long walTtlSeconds);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
|
* {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs
|
||||||
* will be deleted.
|
* will be deleted.
|
||||||
* 1. If both set to 0, logs will be deleted asap and will not get into
|
* <ol>
|
||||||
* the archive.
|
* <li>If both set to 0, logs will be deleted asap and will not get into
|
||||||
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
* the archive.</li>
|
||||||
|
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
||||||
* WAL files will be checked every 10 min and if total size is greater
|
* WAL files will be checked every 10 min and if total size is greater
|
||||||
* then WAL_size_limit_MB, they will be deleted starting with the
|
* then WAL_size_limit_MB, they will be deleted starting with the
|
||||||
* earliest until size_limit is met. All empty files will be deleted.
|
* earliest until size_limit is met. All empty files will be deleted.</li>
|
||||||
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
|
* <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
|
||||||
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that
|
* WAL files will be checked every WAL_ttl_seconds i / 2 and those that
|
||||||
* are older than WAL_ttl_seconds will be deleted.
|
* are older than WAL_ttl_seconds will be deleted.</li>
|
||||||
* 4. If both are not 0, WAL files will be checked every 10 min and both
|
* <li>If both are not 0, WAL files will be checked every 10 min and both
|
||||||
* checks will be performed with ttl being first.
|
* checks will be performed with ttl being first.</li>
|
||||||
*
|
* </ol>
|
||||||
* @return size limit in mega-bytes.
|
* @return size limit in mega-bytes.
|
||||||
* @see walSizeLimitMB()
|
* @see #walSizeLimitMB()
|
||||||
*/
|
*/
|
||||||
public long walSizeLimitMB() {
|
public long walSizeLimitMB() {
|
||||||
assert(isInitialized());
|
assert(isInitialized());
|
||||||
@ -787,21 +798,22 @@ public class Options extends RocksObject {
|
|||||||
/**
|
/**
|
||||||
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
|
* WalTtlSeconds() and walSizeLimitMB() affect how archived logs
|
||||||
* will be deleted.
|
* will be deleted.
|
||||||
* 1. If both set to 0, logs will be deleted asap and will not get into
|
* <ol>
|
||||||
* the archive.
|
* <li>If both set to 0, logs will be deleted asap and will not get into
|
||||||
* 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
* the archive.</li>
|
||||||
|
* <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
||||||
* WAL files will be checked every 10 min and if total size is greater
|
* WAL files will be checked every 10 min and if total size is greater
|
||||||
* then WAL_size_limit_MB, they will be deleted starting with the
|
* then WAL_size_limit_MB, they will be deleted starting with the
|
||||||
* earliest until size_limit is met. All empty files will be deleted.
|
* earliest until size_limit is met. All empty files will be deleted.</li>
|
||||||
* 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
|
* <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
|
||||||
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that
|
* WAL files will be checked every WAL_ttl_secondsi / 2 and those that
|
||||||
* are older than WAL_ttl_seconds will be deleted.
|
* are older than WAL_ttl_seconds will be deleted.</li>
|
||||||
* 4. If both are not 0, WAL files will be checked every 10 min and both
|
* <li>If both are not 0, WAL files will be checked every 10 min and both
|
||||||
* checks will be performed with ttl being first.
|
* checks will be performed with ttl being first.</li>
|
||||||
*
|
*
|
||||||
* @param sizeLimitMB size limit in mega-bytes.
|
* @param sizeLimitMB size limit in mega-bytes.
|
||||||
* @return the reference to the current option.
|
* @return the reference to the current option.
|
||||||
* @see setWalSizeLimitMB()
|
* @see #setWalSizeLimitMB(long)
|
||||||
*/
|
*/
|
||||||
public Options setWalSizeLimitMB(long sizeLimitMB) {
|
public Options setWalSizeLimitMB(long sizeLimitMB) {
|
||||||
assert(isInitialized());
|
assert(isInitialized());
|
||||||
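Putting the four WAL-archival cases above together, a minimal sketch that lands in case 4 (both limits non-zero; the values are illustrative):

    Options opts = new Options();
    opts.setWalTtlSeconds(60 * 60); // archived WAL files older than 1 hour become deletable
    opts.setWalSizeLimitMB(64);     // and the archive is also capped at 64 MB
    // both checks run every 10 minutes, the ttl check first, then the size check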
@ -857,7 +869,7 @@ public class Options extends RocksObject {
  * Data being read from file storage may be buffered in the OS
  * Default: true
  *
- * @param allowOsBufferif true, then OS buffering is allowed.
+ * @param allowOsBuffer if true, then OS buffering is allowed.
  * @return the reference to the current option.
  */
 public Options setAllowOsBuffer(boolean allowOsBuffer) {
@ -1122,7 +1134,7 @@ public class Options extends RocksObject {
  * Memtable format can be set using setTableFormatConfig.
  *
  * @return the name of the currently-used memtable factory.
- * @see setTableFormatConfig()
+ * @see #setTableFormatConfig(TableFormatConfig)
  */
 public String memTableFactoryName() {
   assert(isInitialized());
@ -1273,7 +1285,7 @@ public class Options extends RocksObject {
     long handle, int numLevels);

 /**
- * The number of files in leve 0 to trigger compaction from level-0 to
+ * The number of files in level 0 to trigger compaction from level-0 to
  * level-1. A value < 0 means that level-0 compaction will not be
  * triggered by number of files at all.
  * Default: 4
@ -1400,7 +1412,7 @@ public class Options extends RocksObject {
  *
  * @return the target size of a level-0 file.
  *
- * @see targetFileSizeMultiplier()
+ * @see #targetFileSizeMultiplier()
  */
 public int targetFileSizeBase() {
   return targetFileSizeBase(nativeHandle_);
@ -1421,7 +1433,7 @@ public class Options extends RocksObject {
  * @param targetFileSizeBase the target size of a level-0 file.
  * @return the reference to the current option.
  *
- * @see setTargetFileSizeMultiplier()
+ * @see #setTargetFileSizeMultiplier(int)
  */
 public Options setTargetFileSizeBase(int targetFileSizeBase) {
   setTargetFileSizeBase(nativeHandle_, targetFileSizeBase);
@ -1471,7 +1483,7 @@ public class Options extends RocksObject {
  * by default 'maxBytesForLevelBase' is 10MB.
  *
  * @return the upper-bound of the total size of leve-1 files in bytes.
- * @see maxBytesForLevelMultiplier()
+ * @see #maxBytesForLevelMultiplier()
  */
 public long maxBytesForLevelBase() {
   return maxBytesForLevelBase(nativeHandle_);
@ -1491,7 +1503,7 @@ public class Options extends RocksObject {
  * @return maxBytesForLevelBase the upper-bound of the total size of
  * leve-1 files in bytes.
  * @return the reference to the current option.
- * @see setMaxBytesForLevelMultiplier()
+ * @see #setMaxBytesForLevelMultiplier(int)
  */
 public Options setMaxBytesForLevelBase(long maxBytesForLevelBase) {
   setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase);
@ -1507,7 +1519,7 @@ public class Options extends RocksObject {
  *
  * @return the ratio between the total size of level-(L+1) files and
  * the total size of level-L files for all L.
- * @see maxBytesForLevelBase()
+ * @see #maxBytesForLevelBase()
  */
 public int maxBytesForLevelMultiplier() {
   return maxBytesForLevelMultiplier(nativeHandle_);
@ -1522,7 +1534,7 @@ public class Options extends RocksObject {
  * @param multiplier the ratio between the total size of level-(L+1)
  * files and the total size of level-L files for all L.
  * @return the reference to the current option.
- * @see setMaxBytesForLevelBase()
+ * @see #setMaxBytesForLevelBase(long)
  */
 public Options setMaxBytesForLevelMultiplier(int multiplier) {
   setMaxBytesForLevelMultiplier(nativeHandle_, multiplier);
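A minimal sketch of how the level-sizing options above combine (values illustrative):

    Options opts = new Options();
    opts.setMaxBytesForLevelBase(10 * 1024 * 1024); // level-1 holds up to ~10 MB in total
    opts.setMaxBytesForLevelMultiplier(10);         // so level-2 ~100 MB, level-3 ~1 GB, ...
    opts.setTargetFileSizeBase(2 * 1024 * 1024);    // individual level-1 SST files of ~2 MB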
@ -1538,7 +1550,7 @@ public class Options extends RocksObject {
  * (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
  *
  * @return the maximum number of bytes in all compacted files.
- * @see sourceCompactionFactor()
+ * @see #sourceCompactionFactor()
  */
 public int expandedCompactionFactor() {
   return expandedCompactionFactor(nativeHandle_);
@ -1554,7 +1566,7 @@ public class Options extends RocksObject {
  * @param expandedCompactionFactor the maximum number of bytes in all
  * compacted files.
  * @return the reference to the current option.
- * @see setSourceCompactionFactor()
+ * @see #setSourceCompactionFactor(int)
  */
 public Options setExpandedCompactionFactor(int expandedCompactionFactor) {
   setExpandedCompactionFactor(nativeHandle_, expandedCompactionFactor);
@ -1573,7 +1585,7 @@ public class Options extends RocksObject {
  * a compaction.
  *
  * @return the maximum number of bytes in all source files to be compactedo.
- * @see expendedCompactionFactor()
+ * @see #expandedCompactionFactor()
  */
 public int sourceCompactionFactor() {
   return sourceCompactionFactor(nativeHandle_);
@ -1592,7 +1604,7 @@ public class Options extends RocksObject {
  * @param sourceCompactionFactor the maximum number of bytes in all
  * source files to be compacted in a single compaction run.
  * @return the reference to the current option.
- * @see setExpendedCompactionFactor()
+ * @see #setExpandedCompactionFactor(int)
  */
 public Options setSourceCompactionFactor(int sourceCompactionFactor) {
   setSourceCompactionFactor(nativeHandle_, sourceCompactionFactor);
@ -1979,7 +1991,7 @@ public class Options extends RocksObject {
  * This value will be used only when a prefix-extractor is specified.
  *
  * @return the number of bloom-bits.
- * @see useFixedLengthPrefixExtractor()
+ * @see #useFixedLengthPrefixExtractor(int)
  */
 public int memtablePrefixBloomBits() {
   return memtablePrefixBloomBits(nativeHandle_);
@ -2037,7 +2049,7 @@ public class Options extends RocksObject {
  * Default: 0
  *
  * @return the level of locality of bloom-filter probes.
- * @see setMemTablePrefixBloomProbes
+ * @see #setMemtablePrefixBloomProbes(int)
  */
 public int bloomLocality() {
   return bloomLocality(nativeHandle_);
@ -2149,7 +2161,7 @@ public class Options extends RocksObject {
  *
  * Default: 2
  *
- * @return
+ * @return min partial merge operands
  */
 public int minPartialMergeOperands() {
   return minPartialMergeOperands(nativeHandle_);
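The memtable prefix-bloom options above only take effect once a prefix extractor is configured. A minimal sketch (the 8-byte prefix and bit count are illustrative, and the setter name setMemtablePrefixBloomBits is assumed to mirror the getter shown in the diff):

    Options opts = new Options();
    opts.useFixedLengthPrefixExtractor(8);        // treat the first 8 key bytes as the prefix
    opts.setMemtablePrefixBloomBits(1024 * 1024); // bloom filter over prefixes in the memtable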
@ -10,11 +10,12 @@ package org.rocksdb;
  */
 public abstract class RateLimiterConfig {
   /**
-   * This function should only be called by Options.setRateLimiter(),
-   * which will create a c++ shared-pointer to the c++ RateLimiter
-   * that is associated with the Java RateLimtierConifg.
+   * This function should only be called by
+   * {@link org.rocksdb.Options#setRateLimiter(long, long)}, which will
+   * create a c++ shared-pointer to the c++ {@code RateLimiter} that is associated
+   * with a Java RateLimiterConfig.
    *
-   * @see Options.setRateLimiter()
+   * @see org.rocksdb.Options#setRateLimiter(long, long)
    */
   abstract protected long newRateLimiterHandle();
 }
@ -64,7 +64,7 @@ public class ReadOptions extends RocksObject {
 private native boolean fillCache(long handle);

 /**
- * Fill the cache when loading the block-based sst formated db.
+ * Fill the cache when loading the block-based sst formatted db.
  * Callers may wish to set this field to false for bulk scans.
  * Default: true
  *
@ -86,7 +86,8 @@ public class ReadOptions extends RocksObject {
  * added data) and is optimized for sequential reads. It will return records
  * that were inserted into the database after the creation of the iterator.
  * Default: false
- * Not supported in ROCKSDB_LITE mode!
+ *
+ * Not supported in {@code ROCKSDB_LITE} mode!
  *
  * @return true if tailing iterator is enabled.
  */
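A minimal sketch of the two ReadOptions fields above (assuming the matching setters setFillCache and setTailing; dispose() of the native object is elided):

    ReadOptions readOpts = new ReadOptions();
    readOpts.setFillCache(false); // bulk scan: don't evict hot blocks from the block cache
    readOpts.setTailing(true);    // keep seeing records written after the iterator was created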
@ -11,9 +11,13 @@ package org.rocksdb;
  * Note that dispose() must be called before this instance become out-of-scope
  * to release the allocated memory in c++.
  *
- * @param options Instance of BackupableDBOptions.
  */
 public class RestoreBackupableDB extends RocksObject {
+  /**
+   * Constructor
+   *
+   * @param options {@link org.rocksdb.BackupableDBOptions} instance
+   */
   public RestoreBackupableDB(BackupableDBOptions options) {
     super();
     nativeHandle_ = newRestoreBackupableDB(options.nativeHandle_);
@ -30,6 +34,12 @@ public class RestoreBackupableDB extends RocksObject {
  * database will diverge from backups 4 and 5 and the new backup will fail.
  * If you want to create new backup, you will first have to delete backups 4
  * and 5.
+ *
+ * @param backupId id pointing to backup
+ * @param dbDir database directory to restore to
+ * @param walDir directory where wal files are located
+ * @param restoreOptions {@link org.rocksdb.RestoreOptions} instance
+ * @throws RocksDBException
  */
 public void restoreDBFromBackup(long backupId, String dbDir, String walDir,
     RestoreOptions restoreOptions) throws RocksDBException {
@ -39,6 +49,11 @@ public class RestoreBackupableDB extends RocksObject {

 /**
  * Restore from the latest backup.
+ *
+ * @param dbDir database directory to restore to
+ * @param walDir directory where wal files are located
+ * @param restoreOptions {@link org.rocksdb.RestoreOptions} instance
+ * @throws RocksDBException
  */
 public void restoreDBFromLatestBackup(String dbDir, String walDir,
     RestoreOptions restoreOptions) throws RocksDBException {
@ -49,7 +64,7 @@ public class RestoreBackupableDB extends RocksObject {
 /**
  * Deletes old backups, keeping latest numBackupsToKeep alive.
  *
- * @param Number of latest backups to keep
+ * @param numBackupsToKeep of latest backups to keep
  */
 public void purgeOldBackups(int numBackupsToKeep) throws RocksDBException {
   purgeOldBackups0(nativeHandle_, numBackupsToKeep);
@ -58,7 +73,7 @@ public class RestoreBackupableDB extends RocksObject {
 /**
  * Deletes a specific backup.
  *
- * @param ID of backup to delete.
+ * @param backupId of backup to delete.
  */
 public void deleteBackup(long backupId) throws RocksDBException {
   deleteBackup0(nativeHandle_, backupId);
@ -11,13 +11,17 @@ package org.rocksdb;
  * Note that dispose() must be called before this instance become out-of-scope
  * to release the allocated memory in c++.
  *
- * @param If true, restore won't overwrite the existing log files in wal_dir. It
- * will also move all log files from archive directory to wal_dir. Use this
- * option in combination with BackupableDBOptions::backup_log_files = false
- * for persisting in-memory databases.
- * Default: false
  */
 public class RestoreOptions extends RocksObject {
+  /**
+   * Constructor
+   *
+   * @param keepLogFiles If true, restore won't overwrite the existing log files in wal_dir. It
+   * will also move all log files from archive directory to wal_dir. Use this
+   * option in combination with BackupableDBOptions::backup_log_files = false
+   * for persisting in-memory databases.
+   * Default: false
+   */
   public RestoreOptions(boolean keepLogFiles) {
     super();
     nativeHandle_ = newRestoreOptions(keepLogFiles);
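A minimal sketch tying RestoreOptions and RestoreBackupableDB together (the paths are placeholders, and the single-argument BackupableDBOptions constructor is an assumption about that class, which is not part of this diff):

    RestoreOptions restoreOpts = new RestoreOptions(false); // keepLogFiles = false
    RestoreBackupableDB restore = new RestoreBackupableDB(
        new BackupableDBOptions("/path/to/backups"));
    restore.restoreDBFromLatestBackup("/path/to/db", "/path/to/db", restoreOpts);
    restore.dispose(); // release the native handle when done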
@ -17,7 +17,7 @@ import org.rocksdb.NativeLibraryLoader;
  * A RocksDB is a persistent ordered map from keys to values. It is safe for
  * concurrent access from multiple threads without any external synchronization.
  * All methods of this class could potentially throw RocksDBException, which
- * indicates sth wrong at the rocksdb library side and the call failed.
+ * indicates sth wrong at the RocksDB library side and the call failed.
  */
 public class RocksDB extends RocksObject {
   public static final int NOT_FOUND = -1;
@ -95,16 +95,13 @@ public class RocksDB extends RocksObject {
  * set to true.
  *
  * @param path the path to the rocksdb.
- * @param status an out value indicating the status of the Open().
  * @return a rocksdb instance on success, null if the specified rocksdb can
  * not be opened.
  *
- * @see Options.setCreateIfMissing()
- * @see Options.createIfMissing()
+ * @see Options#setCreateIfMissing(boolean)
+ * @see org.rocksdb.Options#createIfMissing()
  */
 public static RocksDB open(String path) throws RocksDBException {
-  RocksDB db = new RocksDB();

   // This allows to use the rocksjni default Options instead of
   // the c++ one.
   Options options = new Options();
@ -280,8 +277,8 @@ public class RocksDB extends RocksObject {
 /**
  * Returns a map of keys for which values were found in DB.
  *
- * @param List of keys for which values need to be retrieved.
  * @param opt Read options.
+ * @param keys of keys for which values need to be retrieved.
  * @return Map where key of map is the key passed by user and value for map
  * entry is the corresponding value in DB.
  *
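A minimal sketch of open() and multiGet() as documented above (the path is a placeholder, and the List<byte[]> overload of multiGet is assumed):

    Options opts = new Options().setCreateIfMissing(true);
    RocksDB db = RocksDB.open(opts, "/path/to/db");
    List<byte[]> keys = Arrays.asList("k1".getBytes(), "k2".getBytes());
    Map<byte[], byte[]> found = db.multiGet(keys); // only keys with values appear in the map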
@ -7,16 +7,22 @@ package org.rocksdb;

 /**
  * RocksObject is the base-class of all RocksDB classes that has a pointer to
- * some c++ rocksdb object.
+ * some c++ {@code rocksdb} object.
  *
- * RocksObject has dispose() function, which releases its associated c++ resource.
+ * <p>
+ * RocksObject has {@code dispose()} function, which releases its associated c++
+ * resource.
+ * </p>
+ * <p>
  * This function can be either called manually, or being called automatically
  * during the regular Java GC process. However, since Java may wrongly assume a
  * RocksObject only contains a long member variable and think it is small in size,
- * Java may give RocksObject low priority in the GC process. For this, it is
- * suggested to call dispose() manually. However, it is safe to let RocksObject go
- * out-of-scope without manually calling dispose() as dispose() will be called
- * in the finalizer during the regular GC process.
+ * </p>
+ * <p>Java may give {@code RocksObject} low priority in the GC process. For this, it is
+ * suggested to call {@code dispose()} manually. However, it is safe to let
+ * {@code RocksObject} go out-of-scope without manually calling {@code dispose()}
+ * as {@code dispose()} will be called in the finalizer during the
+ * regular GC process.</p>
  */
 public abstract class RocksObject {
   protected RocksObject() {
@ -26,16 +32,18 @@ public abstract class RocksObject {

 /**
  * Release the c++ object manually pointed by the native handle.
- *
- * Note that dispose() will also be called during the GC process
- * if it was not called before its RocksObject went out-of-scope.
+ * <p>
+ * Note that {@code dispose()} will also be called during the GC process
+ * if it was not called before its {@code RocksObject} went out-of-scope.
  * However, since Java may wrongly wrongly assume those objects are
  * small in that they seems to only hold a long variable. As a result,
  * they might have low priority in the GC process. To prevent this,
- * it is suggested to call dispose() manually.
- *
- * Note that once an instance of RocksObject has been disposed,
+ * it is suggested to call {@code dispose()} manually.
+ * </p>
+ * <p>
+ * Note that once an instance of {@code RocksObject} has been disposed,
  * calling its function will lead undefined behavior.
+ * </p>
  */
 public final synchronized void dispose() {
   if (isOwningNativeHandle() && isInitialized()) {
@ -46,40 +54,41 @@ public abstract class RocksObject {
 }

 /**
- * The helper function of dispose() which all subclasses of RocksObject
- * must implement to release their associated C++ resource.
+ * The helper function of {@code dispose()} which all subclasses of
+ * {@code RocksObject} must implement to release their associated
+ * C++ resource.
  */
 protected abstract void disposeInternal();

 /**
  * Revoke ownership of the native object.
- *
+ * <p>
  * This will prevent the object from attempting to delete the underlying
  * native object in its finalizer. This must be used when another object
  * takes over ownership of the native object or both will attempt to delete
  * the underlying object when garbage collected.
- *
- * When disOwnNativeHandle() is called, dispose() will simply set nativeHandle_
- * to 0 without releasing its associated C++ resource. As a result,
- * incorrectly use this function may cause memory leak, and this function call
- * will not affect the return value of isInitialized().
- *
- * @see dispose()
- * @see isInitialized()
+ * <p>
+ * When {@code disOwnNativeHandle()} is called, {@code dispose()} will simply set
+ * {@code nativeHandle_} to 0 without releasing its associated C++ resource.
+ * As a result, incorrectly use this function may cause memory leak, and this
+ * function call will not affect the return value of {@code isInitialized()}.
+ * </p>
+ * @see #dispose()
+ * @see #isInitialized()
  */
 protected void disOwnNativeHandle() {
   owningHandle_ = false;
 }

 /**
- * Returns true if the current RocksObject is responsable to release its
- * native handle.
+ * Returns true if the current {@code RocksObject} is responsible to release
+ * its native handle.
  *
- * @return true if the current RocksObject is responsible to release its
- * native handle.
+ * @return true if the current {@code RocksObject} is responsible to release
+ * its native handle.
  *
- * @see disOwnNativeHandle()
- * @see dispose()
+ * @see #disOwnNativeHandle()
+ * @see #dispose()
  */
 protected boolean isOwningNativeHandle() {
   return owningHandle_;
@ -90,14 +99,14 @@ public abstract class RocksObject {
  *
  * @return true if the associated native handle has been initialized.
  *
- * @see dispose()
+ * @see #dispose()
  */
 protected boolean isInitialized() {
   return (nativeHandle_ != 0);
 }

 /**
- * Simply calls dispose() and release its c++ resource if it has not
+ * Simply calls {@code dispose()} and release its c++ resource if it has not
  * yet released.
  */
 @Override protected void finalize() {
@ -110,8 +119,8 @@ public abstract class RocksObject {
 protected long nativeHandle_;

 /**
- * A flag indicating whether the current RocksObject is responsible to
- * release the c++ object stored in its nativeHandle_.
+ * A flag indicating whether the current {@code RocksObject} is responsible to
+ * release the c++ object stored in its {@code nativeHandle_}.
  */
 private boolean owningHandle_;
 }
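Given the GC caveats described above, deterministic cleanup is the safer pattern; a minimal sketch (the path is a placeholder):

    RocksDB db = RocksDB.open("/path/to/db");
    try {
      // ... reads and writes ...
    } finally {
      db.dispose(); // release the c++ resource now instead of waiting for the finalizer
    }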
@ -53,9 +53,9 @@ public class WriteBatchTest {
   WriteBatchInternal.setSequence(batch, 100);
   assert(100 == WriteBatchInternal.sequence(batch));
   assert(3 == batch.count());
-  assert(new String("Put(baz, boo)@102" +
+  assert(("Put(baz, boo)@102" +
          "Delete(box)@101" +
          "Put(foo, bar)@100")
          .equals(new String(getContents(batch), "US-ASCII")));
 } catch (UnsupportedEncodingException e) {
   System.err.println(e);
@ -79,16 +79,16 @@ public class WriteBatchTest {
   b2.clear();
   b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII"));
   WriteBatchInternal.append(b1, b2);
-  assert(new String("Put(a, va)@200" +
+  assert(("Put(a, va)@200" +
          "Put(b, vb)@201")
          .equals(new String(getContents(b1), "US-ASCII")));
   assert(2 == b1.count());
   b2.remove("foo".getBytes("US-ASCII"));
   WriteBatchInternal.append(b1, b2);
-  assert(new String("Put(a, va)@200" +
+  assert(("Put(a, va)@200" +
          "Put(b, vb)@202" +
          "Put(b, vb)@201" +
          "Delete(foo)@203")
          .equals(new String(getContents(b1), "US-ASCII")));
   assert(4 == b1.count());
 } catch (UnsupportedEncodingException e) {
@ -108,11 +108,11 @@ public class WriteBatchTest {
   batch.putLogData("blob2".getBytes("US-ASCII"));
   batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
   assert(5 == batch.count());
-  assert(new String("Merge(foo, bar)@4" +
+  assert(("Merge(foo, bar)@4" +
          "Put(k1, v1)@0" +
          "Delete(k2)@3" +
          "Put(k2, v2)@1" +
          "Put(k3, v3)@2")
          .equals(new String(getContents(batch), "US-ASCII")));
 } catch (UnsupportedEncodingException e) {
   System.err.println(e);
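For context on what the test above exercises: a WriteBatch applies its operations atomically, and each operation receives a consecutive sequence number (the @N suffixes in the expected strings). A minimal usage sketch (assuming an open db handle):

    WriteBatch batch = new WriteBatch();
    batch.put("k1".getBytes(), "v1".getBytes());
    batch.remove("k2".getBytes());
    db.write(new WriteOptions(), batch); // both operations commit atomically
    batch.dispose();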
@ -523,8 +523,8 @@ public class DbBenchmark {
   BlockBasedTableConfig table_options = new BlockBasedTableConfig();
   table_options.setBlockSize((Long)flags_.get(Flag.block_size))
       .setBlockCacheSize((Long)flags_.get(Flag.cache_size))
-      .setFilterBitsPerKey((Integer)flags_.get(Flag.bloom_bits))
-      .setCacheNumShardBits((Integer)flags_.get(Flag.cache_numshardbits));
+      .setCacheNumShardBits(
+          (Integer)flags_.get(Flag.cache_numshardbits));
   options.setTableFormatConfig(table_options);
 }
 options.setWriteBufferSize(
@ -73,11 +73,13 @@ void Java_org_rocksdb_Options_setBuiltinComparator(
     JNIEnv* env, jobject jobj, jlong jhandle, jint builtinComparator) {
   switch (builtinComparator){
     case 1:
-      reinterpret_cast<rocksdb::Options*>(jhandle)->comparator = rocksdb::ReverseBytewiseComparator();
+      reinterpret_cast<rocksdb::Options*>(jhandle)->comparator =
+          rocksdb::ReverseBytewiseComparator();
       break;
     default:
-      reinterpret_cast<rocksdb::Options*>(jhandle)->comparator = rocksdb::BytewiseComparator();
+      reinterpret_cast<rocksdb::Options*>(jhandle)->comparator =
+          rocksdb::BytewiseComparator();
       break;
   }
 }
|
Loading…
Reference in New Issue
Block a user