[Java] Fixed a compile error caused by the removal of ReadOptions.prefix_seek and made minor improvements to DbBenchmark.java.
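With prefix_seek removed from the C++ ReadOptions, the Java wrapper drops its prefixSeek()/setPrefixSeek() pair as well (see the ReadOptions and JNI hunks below). A minimal sketch of ReadOptions usage after this change, using only setters that still appear in this diff (setFillCache, setTailing); the class name is hypothetical and the native-library loading mirrors the static block in DbBenchmark:

import org.rocksdb.ReadOptions;

public class ReadOptionsAfterPrefixSeekRemoval {
  static {
    // Same native library that DbBenchmark loads in its static block.
    System.loadLibrary("rocksdbjni");
  }

  public static ReadOptions buildReadOptions() {
    ReadOptions opts = new ReadOptions();
    opts.setFillCache(true);   // still available
    opts.setTailing(false);    // still available
    // No setPrefixSeek() any more: prefix-based iteration is driven by the
    // prefix configuration on Options (e.g. the prefix-aware memtable and
    // plain-table configs used in DbBenchmark), not by a per-read flag.
    return opts;
  }
}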

Yueh-Hsuan Chiang 2014-04-27 21:46:30 -07:00
parent 7ca06a3974
commit 9895465c6a
6 changed files with 56 additions and 69 deletions

View File

@@ -1 +1 @@
java -Djava.library.path=.:../ -cp "rocksdbjni.jar:.:./*" org.rocksdb.benchmark.DbBenchmark $@
java -server -d64 -XX:NewSize=4m -XX:+AggressiveOpts -Djava.library.path=.:../ -cp "rocksdbjni.jar:.:./*" org.rocksdb.benchmark.DbBenchmark $@
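The launcher now starts the benchmark with server-class JVM settings (-server, -d64, a 4 MB young generation, and -XX:+AggressiveOpts). The class it runs, org.rocksdb.benchmark.DbBenchmark, is configured through command-line flags; a hedged sketch of driving it directly from Java, using only flag names that appear in this diff (the --name=value argument syntax and the database path are assumptions):

public class RunDbBenchmark {
  public static void main(String[] args) throws Exception {
    // Hypothetical direct invocation of the entry point that the launcher
    // script runs; flag names come from the Flag enum in DbBenchmark.
    org.rocksdb.benchmark.DbBenchmark.main(new String[] {
        "--db=/tmp/rocksdbjni-bench",                 // assumed path
        "--compression_type=snappy",
        "--write_buffer_size=" + (4L * 1024 * 1024)   // 4 MB, the flag default
    });
  }
}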

View File

@@ -93,34 +93,6 @@ public class ReadOptions {
private native void setFillCache(
long handle, boolean fillCache);
/**
* If this option is set and memtable implementation allows, Seek
* might only return keys with the same prefix as the seek-key
* Default: false
*
* @return true if prefix-seek is enabled.
*/
public boolean prefixSeek() {
assert(isInitialized());
return prefixSeek(nativeHandle_);
}
private native boolean prefixSeek(long handle);
/**
* If this option is set and memtable implementation allows, Seek
* might only return keys with the same prefix as the seek-key
*
* @param prefixSeek if true, then prefix-seek will be enabled.
* @return the reference to the current ReadOptions.
*/
public ReadOptions setPrefixSeek(boolean prefixSeek) {
assert(isInitialized());
setPrefixSeek(nativeHandle_, prefixSeek);
return this;
}
private native void setPrefixSeek(
long handle, boolean prefixSeek);
/**
* Specify to create a tailing iterator -- a special iterator that has a
* view of the complete database (i.e. it can also be used to read newly

View File

@@ -162,6 +162,15 @@ public class DbBenchmark {
EXISTING
}
enum CompressionType {
NONE,
SNAPPY,
ZLIB,
BZIP2,
LZ4,
LZ4HC
}
static {
System.loadLibrary("rocksdbjni");
}
@@ -435,7 +444,6 @@ public class DbBenchmark {
databaseDir_ = (String) flags.get(Flag.db);
writesPerSeconds_ = (Integer) flags.get(Flag.writes_per_second);
cacheSize_ = (Long) flags.get(Flag.cache_size);
gen_ = new RandomGenerator(randSeed_, compressionRatio_);
memtable_ = (String) flags.get(Flag.memtablerep);
maxWriteBufferNumber_ = (Integer) flags.get(Flag.max_write_buffer_number);
prefixSize_ = (Integer) flags.get(Flag.prefix_size);
@@ -446,6 +454,28 @@ public class DbBenchmark {
finishLock_ = new Object();
// options.setPrefixSize((Integer)flags_.get(Flag.prefix_size));
// options.setKeysPerPrefix((Long)flags_.get(Flag.keys_per_prefix));
compressionType_ = (String) flags.get(Flag.compression_type);
compression_ = CompressionType.NONE;
try {
if (compressionType_.equals("snappy")) {
System.loadLibrary("snappy");
} else if (compressionType_.equals("zlib")) {
System.loadLibrary("zlib");
} else if (compressionType_.equals("bzip2")) {
System.loadLibrary("bzip2");
} else if (compressionType_.equals("lz4")) {
System.loadLibrary("lz4");
} else if (compressionType_.equals("lz4hc")) {
System.loadLibrary("lz4hc");
}
} catch (UnsatisfiedLinkError e) {
System.err.format("Unable to load %s library:%s%n" +
"No compression is used.%n",
compressionType_, e.toString());
compressionType_ = "none";
compressionRatio_ = 1.0;
}
gen_ = new RandomGenerator(randSeed_, compressionRatio_);
}
private void prepareReadOptions(ReadOptions options) {
@@ -462,6 +492,8 @@ public class DbBenchmark {
options.setCacheSize(cacheSize_);
if (!useExisting_) {
options.setCreateIfMissing(true);
} else {
options.setCreateIfMissing(false);
}
if (memtable_.equals("skip_list")) {
options.setMemTableConfig(new SkipListMemTableConfig());
@@ -488,6 +520,8 @@ public class DbBenchmark {
options.setTableFormatConfig(
new PlainTableConfig().setKeySize(keySize_));
}
options.setWriteBufferSize(
(Long)flags_.get(Flag.write_buffer_size));
options.setMaxWriteBufferNumber(
(Integer)flags_.get(Flag.max_write_buffer_number));
options.setMaxBackgroundCompactions(
@@ -513,7 +547,7 @@ public class DbBenchmark {
options.setDisableSeekCompaction(
(Boolean)flags_.get(Flag.disable_seek_compaction));
options.setDeleteObsoleteFilesPeriodMicros(
(Long)flags_.get(Flag.delete_obsolete_files_period_micros));
(Integer)flags_.get(Flag.delete_obsolete_files_period_micros));
options.setTableCacheNumshardbits(
(Integer)flags_.get(Flag.table_cache_numshardbits));
options.setAllowMmapReads(
@@ -640,12 +674,12 @@ public class DbBenchmark {
} else if (benchmark.equals("readseq")) {
for (int t = 0; t < threadNum_; ++t) {
tasks.add(new ReadSequentialTask(
currentTaskId++, randSeed_, reads_, num_));
currentTaskId++, randSeed_, reads_ / threadNum_, num_));
}
} else if (benchmark.equals("readrandom")) {
for (int t = 0; t < threadNum_; ++t) {
tasks.add(new ReadRandomTask(
currentTaskId++, randSeed_, reads_, num_));
currentTaskId++, randSeed_, reads_ / threadNum_, num_));
}
} else if (benchmark.equals("readwhilewriting")) {
WriteTask writeTask = new WriteRandomTask(
@@ -717,12 +751,12 @@ public class DbBenchmark {
(int) (valueSize_ * compressionRatio_ + 0.5));
System.out.printf("Entries: %d\n", num_);
System.out.printf("RawSize: %.1f MB (estimated)\n",
((kKeySize + valueSize_) * num_) / 1048576.0);
((double)(kKeySize + valueSize_) * num_) / SizeUnit.MB);
System.out.printf("FileSize: %.1f MB (estimated)\n",
(((kKeySize + valueSize_ * compressionRatio_) * num_)
/ 1048576.0));
(((kKeySize + valueSize_ * compressionRatio_) * num_) / SizeUnit.MB));
System.out.format("Memtable Factory: %s%n", options.memTableFactoryName());
System.out.format("Prefix: %d bytes%n", prefixSize_);
System.out.format("Compression: %s%n", compressionType_);
printWarnings();
System.out.printf("------------------------------------------------\n");
}
@@ -769,7 +803,7 @@ public class DbBenchmark {
System.out.printf(
"%-16s : %11.5f micros/op; %6.1f MB/s; %d / %d task(s) finished.\n",
benchmark, elapsedSeconds * 1e6 / stats.done_,
benchmark, (double) elapsedSeconds / stats.done_ * 1e6,
(stats.bytes_ / 1048576.0) / elapsedSeconds,
taskFinishedCount, concurrentThreads);
}
@@ -932,7 +966,7 @@ public class DbBenchmark {
return Integer.parseInt(value);
}
},
write_buffer_size(4 << 20,
write_buffer_size(4 * SizeUnit.MB,
"Number of bytes to buffer in memtable before compacting\n" +
"\t(initialized to default value by 'main'.)") {
@Override public Object parseValue(String value) {
@@ -1275,11 +1309,17 @@ public class DbBenchmark {
return Boolean.parseBoolean(value);
}
},
delete_obsolete_files_period_micros(0L,"Option to delete\n" +
delete_obsolete_files_period_micros(0,"Option to delete\n" +
"\tobsolete files periodically. 0 means that obsolete files are\n" +
"\tdeleted after every compaction run.") {
@Override public Object parseValue(String value) {
return Long.parseLong(value);
return Integer.parseInt(value);
}
},
compression_type("snappy",
"Algorithm used to compress the database.") {
@Override public Object parseValue(String value) {
return value;
}
},
compression_level(-1,
@@ -1512,7 +1552,7 @@ public class DbBenchmark {
final long cacheSize_;
final boolean useExisting_;
final String databaseDir_;
final double compressionRatio_;
double compressionRatio_;
RandomGenerator gen_;
long startTime_;
@@ -1532,4 +1572,6 @@ public class DbBenchmark {
// as the scope of a static member equals to the scope of the problem,
// we let its c++ pointer to be disposed in its finalizer.
static Options defaultOptions_ = new Options();
String compressionType_;
CompressionType compression_;
}
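The constructor hunk above keeps the requested algorithm as a string (compressionType_) and loads the matching native compression library, while the new CompressionType enum is stored separately in compression_. A minimal sketch of how the flag value could be mapped onto that enum; the helper is hypothetical and not part of this commit:

// Could live inside DbBenchmark next to the constructor; CompressionType is
// the enum added in the hunk above.
static CompressionType parseCompressionType(String flagValue) {
  if (flagValue == null || flagValue.isEmpty()) {
    return CompressionType.NONE;
  }
  try {
    // "snappy" -> SNAPPY, "lz4hc" -> LZ4HC, and so on.
    return CompressionType.valueOf(flagValue.toUpperCase());
  } catch (IllegalArgumentException e) {
    // Unknown value: fall back to no compression, mirroring the
    // UnsatisfiedLinkError handling in the constructor.
    return CompressionType.NONE;
  }
}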

View File

@@ -27,12 +27,6 @@ public class ReadOptionsTest {
assert(opt.fillCache() == boolValue);
}
{ // PrefixSeek test
boolean boolValue = rand.nextBoolean();
opt.setPrefixSeek(boolValue);
assert(opt.prefixSeek() == boolValue);
}
{ // Tailing test
boolean boolValue = rand.nextBoolean();
opt.setTailing(boolValue);

View File

@@ -1785,27 +1785,6 @@ void Java_org_rocksdb_ReadOptions_setFillCache(
static_cast<bool>(jfill_cache);
}
/*
* Class: org_rocksdb_ReadOptions
* Method: prefixSeek
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ReadOptions_prefixSeek(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->prefix_seek;
}
/*
* Class: org_rocksdb_ReadOptions
* Method: setPrefixSeek
* Signature: (JZ)V
*/
void Java_org_rocksdb_ReadOptions_setPrefixSeek(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean jprefix_seek) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->prefix_seek =
static_cast<bool>(jprefix_seek);
}
/*
* Class: org_rocksdb_ReadOptions
* Method: tailing

View File

@@ -212,7 +212,7 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
rocksdb::Status s =
rocksdb::WriteBatchInternal::InsertInto(b, &cf_mems_default);
int count = 0;
rocksdb::Iterator* iter = mem->NewIterator();
rocksdb::Iterator* iter = mem->NewIterator(rocksdb::ReadOptions());
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
rocksdb::ParsedInternalKey ikey;
memset(reinterpret_cast<void*>(&ikey), 0, sizeof(ikey));