Minor fix to current codes

Summary:
Minor fixes to the current code, including coding style, output format,
and comments. No major logic changes. There are only 2 real changes; please see my inline comments.

Test Plan: make all check

Reviewers: haobo, dhruba, emayanke

Differential Revision: https://reviews.facebook.net/D12297
Xing Jin 2013-08-13 13:58:02 -07:00
parent 7612d496ff
commit 0a5afd1afc
3 changed files with 84 additions and 76 deletions

View File

@@ -126,9 +126,9 @@ static int FLAGS_max_write_buffer_number = 0;
 // The minimum number of write buffers that will be merged together
 // before writing to storage. This is cheap because it is an
 // in-memory merge. If this feature is not enabled, then all these
-// write buffers are fushed to L0 as separate files and this increases
+// write buffers are flushed to L0 as separate files and this increases
 // read amplification because a get request has to check in all of these
-// files. Also, an in-memory merge may result in writing lesser
+// files. Also, an in-memory merge may result in writing less
 // data to storage if there are duplicate records in each of these
 // individual write buffers.
 static int FLAGS_min_write_buffer_number_to_merge = 0;
@@ -141,10 +141,12 @@ static int FLAGS_max_background_compactions = 0;
 // style of compaction: level-based vs universal
 static leveldb::CompactionStyle FLAGS_compaction_style = leveldb::kCompactionStyleLevel;

-// Percentage flexibilty while comparing file size.
+// Percentage flexibility while comparing file size
+// (for universal compaction only).
 static int FLAGS_universal_size_ratio = 1;

-// The minimum number of files in a single compaction run.
+// The minimum number of files in a single compaction run
+// (for universal compaction only).
 static int FLAGS_compaction_universal_min_merge_width = 2;

 // Number of bytes to use as a cache of uncompressed data.
@@ -212,16 +214,16 @@ static bool FLAGS_get_approx = false;
 // The total number of levels
 static int FLAGS_num_levels = 7;

-// Target level-0 file size for compaction
+// Target file size at level-1
 static int FLAGS_target_file_size_base = 2 * 1048576;

-// A multiplier to compute targe level-N file size
+// A multiplier to compute target level-N file size (N >= 2)
 static int FLAGS_target_file_size_multiplier = 1;

 // Max bytes for level-1
 static uint64_t FLAGS_max_bytes_for_level_base = 10 * 1048576;

-// A multiplier to compute max bytes for level-N
+// A multiplier to compute max bytes for level-N (N >= 2)
 static int FLAGS_max_bytes_for_level_multiplier = 10;

 // A vector that specifies additional fanout per level
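
The revised comments pin down which level each of these flags applies to: the base values describe level-1, and the multipliers apply from level 2 upward. A minimal standalone sketch of the arithmetic those comments imply (not code from this patch; the four-level loop bound is just for illustration):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Flag values copied from the defaults above.
      const uint64_t target_file_size_base = 2 * 1048576;      // per-file target at level-1
      const int target_file_size_multiplier = 1;
      const uint64_t max_bytes_for_level_base = 10 * 1048576;  // total-size target at level-1
      const int max_bytes_for_level_multiplier = 10;

      uint64_t file_size = target_file_size_base;
      uint64_t level_bytes = max_bytes_for_level_base;
      for (int level = 1; level <= 4; ++level) {
        printf("L%d: target file size %llu, max bytes %llu\n", level,
               (unsigned long long)file_size, (unsigned long long)level_bytes);
        file_size *= target_file_size_multiplier;        // applied for levels N >= 2
        level_bytes *= max_bytes_for_level_multiplier;   // applied for levels N >= 2
      }
      return 0;
    }
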
@@ -236,16 +238,19 @@ static int FLAGS_level0_slowdown_writes_trigger = 8;
 // Number of files in level-0 when compactions start
 static int FLAGS_level0_file_num_compaction_trigger = 4;

-// Ratio of reads to writes (expressed as a percentage)
-// for the ReadRandomWriteRandom workload. The default
-// setting is 9 gets for every 1 put.
+// Ratio of reads to reads/writes (expressed as percentage) for the
+// ReadRandomWriteRandom workload. The default value 90 means 90% operations
+// out of all reads and writes operations are reads. In other words, 9 gets
+// for every 1 put.
 static int FLAGS_readwritepercent = 90;

-// This percent of deletes are done (used in RandomWithVerify only)
-// Must be smaller than total writepercent (i.e 100 - FLAGS_readwritepercent)
+// Percentage of deletes out of reads/writes/deletes (used in RandomWithVerify
+// only). RandomWithVerify calculates writepercent as
+// (100 - FLAGS_readwritepercent - FLAGS_deletepercent), so FLAGS_deletepercent
+// must be smaller than (100 - FLAGS_readwritepercent)
 static int FLAGS_deletepercent = 2;

-// Option to disable compation triggered by read.
+// Option to disable compaction triggered by read.
 static int FLAGS_disable_seek_compaction = false;

 // Option to delete obsolete files periodically
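
To make the percentage bookkeeping described in the new comment concrete: with the defaults above, RandomWithVerify does 90% reads, 2% deletes, and the remaining 8% writes. A tiny standalone sketch of that derivation (not part of the patch):

    #include <cassert>
    #include <cstdio>

    int main() {
      const int readwritepercent = 90;  // default FLAGS_readwritepercent
      const int deletepercent = 2;      // default FLAGS_deletepercent
      // Constraint stated in the comment: deletes must leave room for writes.
      assert(deletepercent < 100 - readwritepercent);
      const int writepercent = 100 - readwritepercent - deletepercent;
      printf("reads %d%%, deletes %d%%, writes %d%%\n",
             readwritepercent, deletepercent, writepercent);
      return 0;
    }
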
@@ -253,12 +258,13 @@ static int FLAGS_disable_seek_compaction = false;
 // deleted after every compaction run.
 static uint64_t FLAGS_delete_obsolete_files_period_micros = 0;

-// Algorithm to use to compress the database
+// Algorithm used to compress the database
 static enum leveldb::CompressionType FLAGS_compression_type =
     leveldb::kSnappyCompression;

-// Allows compression for levels 0 and 1 to be disabled when
-// other levels are compressed
+// If non-negative, compression starts from this level. Levels with number
+// < FLAGS_min_level_to_compress are not compressed.
+// Otherwise, apply FLAGS_compression_type to all levels.
 static int FLAGS_min_level_to_compress = -1;

 static int FLAGS_table_cache_numshardbits = 4;
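
The rewritten comment turns FLAGS_min_level_to_compress from a "levels 0 and 1 are uncompressed" special case into a general cutoff. A minimal sketch of that rule, assuming a per-level compression list of the compression_per_level kind (the helper name and the enum here are illustrative, not from this diff):

    #include <vector>

    enum IllustrativeCompression { kNone, kSnappy };

    // Levels below min_level_to_compress stay uncompressed; the rest use the
    // configured algorithm. A negative cutoff compresses every level.
    std::vector<IllustrativeCompression> PickPerLevelCompression(
        int num_levels, int min_level_to_compress, IllustrativeCompression type) {
      std::vector<IllustrativeCompression> per_level(num_levels, type);
      if (min_level_to_compress >= 0) {
        for (int level = 0; level < min_level_to_compress && level < num_levels;
             ++level) {
          per_level[level] = kNone;
        }
      }
      return per_level;
    }
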
@@ -295,8 +301,8 @@ static bool FLAGS_read_only = false;
 // Do not auto trigger compactions
 static bool FLAGS_disable_auto_compactions = false;

-// Cap the size of data in levelK for a compaction run
-// that compacts Levelk with LevelK+1
+// Cap the size of data in level-K for a compaction run
+// that compacts Level-K with Level-(K+1) (for K >= 1)
 static int FLAGS_source_compaction_factor = 1;

 // Set the TTL for the WAL Files.
@@ -376,6 +382,7 @@ class RandomGenerator {
     return Slice(data_.data() + pos_ - len, len);
   }
 };
+
 static Slice TrimSpace(Slice s) {
   unsigned int start = 0;
   while (start < s.size() && isspace(s[start])) {
@@ -784,20 +791,20 @@ class Benchmark {
     delete db_;
     delete filter_policy_;
   }

-//this function will construct string format for key. e.g "%016d"
-void ConstructStrFormatForKey(char* str, int keySize)
-{
+  //this function will construct string format for key. e.g "%016d"
+  void ConstructStrFormatForKey(char* str, int keySize) {
     str[0] = '%';
     str[1] = '0';
     sprintf(str+2, "%dd%s", keySize, "%s");
   }

-unique_ptr<char []> GenerateKeyFromInt(int v, const char* suffix = "")
-{
+  unique_ptr<char []> GenerateKeyFromInt(int v, const char* suffix = "") {
     unique_ptr<char []> keyInStr(new char[MAX_KEY_SIZE]);
     snprintf(keyInStr.get(), MAX_KEY_SIZE, keyFormat_, v, suffix);
     return keyInStr;
   }

   void Run() {
     PrintHeader();
     Open();
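
Only the brace and indentation style of these two helpers changes, but the hunk is a convenient place to spell out what they do: ConstructStrFormatForKey builds a printf format string such as "%016d%s", and GenerateKeyFromInt uses it to render an integer as a zero-padded key plus an optional suffix. A standalone sketch mirroring the helpers (the 128-byte buffer and the 16-digit key width are illustrative stand-ins for MAX_KEY_SIZE and the benchmark's key size, which this diff does not show):

    #include <cstdio>
    #include <memory>

    static const int kMaxKeySize = 128;  // stand-in for MAX_KEY_SIZE
    static char key_format[16];          // stand-in for keyFormat_

    void ConstructStrFormatForKey(char* str, int key_size) {
      str[0] = '%';
      str[1] = '0';
      sprintf(str + 2, "%dd%s", key_size, "%s");  // key_size = 16 -> "%016d%s"
    }

    std::unique_ptr<char[]> GenerateKeyFromInt(int v, const char* suffix = "") {
      std::unique_ptr<char[]> key(new char[kMaxKeySize]);
      snprintf(key.get(), kMaxKeySize, key_format, v, suffix);
      return key;
    }

    int main() {
      ConstructStrFormatForKey(key_format, 16);
      printf("%s\n", GenerateKeyFromInt(123).get());       // 0000000000000123
      printf("%s\n", GenerateKeyFromInt(123, ".").get());  // 0000000000000123.
      return 0;
    }

The "." suffix is also what lets ReadMissing, later in this file's hunks, assert that none of its MultiGetRandom lookups succeed: keys are stored with an empty suffix, so every suffixed lookup misses.
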
@@ -814,7 +821,7 @@ unique_ptr<char []> GenerateKeyFromInt(int v, const char* suffix = "")
       benchmarks = sep + 1;
     }

-    // Reset parameters that may be overriddden bwlow
+    // Sanitize parameters
     num_ = FLAGS_num;
     reads_ = (FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads);
     writes_ = (FLAGS_writes < 0 ? FLAGS_num : FLAGS_writes);
@@ -1378,7 +1385,7 @@ unique_ptr<char []> GenerateKeyFromInt(int v, const char* suffix = "")
     // Recalculate number of keys per group, and call MultiGet until done
     long num_keys;
     while(num_keys = std::min(keys_left, kpg), !duration.Done(num_keys)) {
-      found += MultiGetRandom(options, num_keys, thread->rand, FLAGS_num,"");
+      found += MultiGetRandom(options, num_keys, thread->rand, FLAGS_num, "");
       thread->stats.FinishedSingleOp(db_);
       keys_left -= num_keys;
     }
@@ -1432,7 +1439,7 @@ unique_ptr<char []> GenerateKeyFromInt(int v, const char* suffix = "")
     thread->stats.AddMessage(msg);
   }

   void ReadMissing(ThreadState* thread) {
     FLAGS_warn_missing_keys = false;  // Never warn about missing keys

     Duration duration(FLAGS_duration, reads_);
@@ -1446,10 +1453,12 @@ void ReadMissing(ThreadState* thread) {
     long num_keys;
     long found;
     while(num_keys = std::min(keys_left, kpg), !duration.Done(num_keys)) {
-      found = MultiGetRandom(options, num_keys, thread->rand, FLAGS_num,".");
-      if (!found) {
-        assert(false);
-      }
+      found = MultiGetRandom(options, num_keys, thread->rand, FLAGS_num, ".");
+
+      // We should not find any key since the key we try to get has a
+      // different suffix
+      assert(!found);
+
       thread->stats.FinishedSingleOp(db_);
       keys_left -= num_keys;
     }
@@ -1749,10 +1758,8 @@ void ReadMissing(ThreadState* thread) {
     thread->stats.AddMessage(msg);
   }

-  //
-  // This is diffferent from ReadWhileWriting because it does not use
+  // This is different from ReadWhileWriting because it does not use
   // an extra thread.
-  //
   void ReadRandomWriteRandom(ThreadState* thread) {
     if (FLAGS_use_multiget){
       // Separate function for multiget (for ease of reading)
@@ -1775,7 +1782,7 @@ void ReadMissing(ThreadState* thread) {
       const int k = thread->rand.Next() % FLAGS_num;
       unique_ptr<char []> key = GenerateKeyFromInt(k);
       if (get_weight == 0 && put_weight == 0) {
-        // one batch complated, reinitialize for next batch
+        // one batch completed, reinitialize for next batch
        get_weight = FLAGS_readwritepercent;
        put_weight = 100 - get_weight;
      }
@@ -1876,7 +1883,7 @@ void ReadMissing(ThreadState* thread) {
       assert(num_keys + num_put_keys <= keys_left);

       // Apply the MultiGet operations
-      found += MultiGetRandom(options, num_keys, thread->rand, FLAGS_num,"");
+      found += MultiGetRandom(options, num_keys, thread->rand, FLAGS_num, "");
       ++multigets_done;
       reads_done+=num_keys;
       thread->stats.FinishedSingleOp(db_);

View File

@@ -520,11 +520,11 @@ void DBImpl::DeleteObsoleteFiles() {
 void DBImpl::PurgeObsoleteWALFiles() {
   int64_t current_time;
   Status s = env_->GetCurrentTime(&current_time);
-  uint64_t now_micros = static_cast<uint64_t>(current_time);
+  uint64_t now_seconds = static_cast<uint64_t>(current_time);
   assert(s.ok());
   if (options_.WAL_ttl_seconds != ULONG_MAX && options_.WAL_ttl_seconds > 0) {
-    if (purge_wal_files_last_run_ + options_.WAL_ttl_seconds > now_micros) {
+    if (purge_wal_files_last_run_ + options_.WAL_ttl_seconds > now_seconds) {
       return;
     }
     std::vector<std::string> wal_files;
@@ -534,7 +534,7 @@ void DBImpl::PurgeObsoleteWALFiles() {
       uint64_t file_m_time;
       const std::string file_path = archival_dir + "/" + f;
       const Status s = env_->GetFileModificationTime(file_path, &file_m_time);
-      if (s.ok() && (now_micros - file_m_time > options_.WAL_ttl_seconds)) {
+      if (s.ok() && (now_seconds - file_m_time > options_.WAL_ttl_seconds)) {
         Status status = env_->DeleteFile(file_path);
         if (!status.ok()) {
           Log(options_.info_log,
@@ -544,7 +544,7 @@ void DBImpl::PurgeObsoleteWALFiles() {
       } // Ignore errors.
     }
   }
-  purge_wal_files_last_run_ = now_micros;
+  purge_wal_files_last_run_ = now_seconds;
 }

 // If externalTable is set, then apply recovered transactions
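
The only change in this file is the rename of now_micros to now_seconds, which matches what the value actually holds: GetCurrentTime returns a time in seconds, and it is compared directly against WAL_ttl_seconds. A reduced sketch of the two checks this function performs (hypothetical helper names; all quantities in seconds):

    #include <cstdint>

    // Skip the purge pass if it already ran within the last TTL window.
    bool SkipPurgeRun(uint64_t last_run_seconds, uint64_t wal_ttl_seconds,
                      uint64_t now_seconds) {
      return last_run_seconds + wal_ttl_seconds > now_seconds;
    }

    // An archived WAL file is deleted once it is older than the TTL.
    bool WalFileExpired(uint64_t mtime_seconds, uint64_t wal_ttl_seconds,
                        uint64_t now_seconds) {
      return now_seconds - mtime_seconds > wal_ttl_seconds;
    }
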

View File

@@ -2,17 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 //
-//The test uses an array to compare against values written to the database.
-//Keys written to the array are in 1:1 correspondence to the actual values in
-//the database according to the formula in the function GenerateValue
-//Space is reserved in the array from 0 to FLAGS_max_key and values are randomly
-//written/deleted/read from those positions. During verification we compare all
-//the positions in the array. Therefore to shorten/elongate the amount of time
-//that this test runs for, you should change the settings:
-//FLAGS_max_key, FLAGS_ops_per_thread, (sometimes also FLAGS_threads)
-//NOTE that if FLAGS_test_batches_snapshots is set, the test behaves a little
-//differently. See comment header for the flag.
+// The test uses an array to compare against values written to the database.
+// Keys written to the array are in 1:1 correspondence to the actual values in
+// the database according to the formula in the function GenerateValue.
+// Space is reserved in the array from 0 to FLAGS_max_key and values are
+// randomly written/deleted/read from those positions. During verification we
+// compare all the positions in the array. To shorten/elongate the running
+// time, you could change the settings: FLAGS_max_key, FLAGS_ops_per_thread,
+// (sometimes also FLAGS_threads).
+//
+// NOTE that if FLAGS_test_batches_snapshots is set, the test will have
+// different behavior. See comment of the flag for details.

 #include <sys/types.h>
 #include <stdio.h>
@@ -42,17 +43,17 @@ static const long KB = 1024;
 static uint32_t FLAGS_seed = 2341234;

 // Max number of key/values to place in database
-static long FLAGS_max_key = 2 * KB * KB * KB;
+static long FLAGS_max_key = 1 * KB * KB * KB;

-// If set, the test uses MultiGet, MultiPrefixScan, MultiPut and
-// MultiDelete that do a different kind of validation during the test
-// itself, rather than at the end. This is meant to solve the
-// following problems at the expense of doing less degree of
-// validation.
-// (a) No need to acquire mutexes during writes (less cache flushes in
-//     multi-core leading to speed up)
+// If set, the test uses MultiGet(), MultiPut() and MultiDelete() which
+// read/write/delete multiple keys in a batch. In this mode, we do not verify
+// db content by comparing the content with the pre-allocated array. Instead,
+// we do partial verification inside MultiGet() by checking various values in
+// a batch. Benefit of this mode:
+// (a) No need to acquire mutexes during writes (less cache flushes
+//     in multi-core leading to speed up)
 // (b) No long validation at the end (more speed up)
-// (c) Also test snapshot and atomicity of batch writes
+// (c) Test snapshot and atomicity of batch writes
 static bool FLAGS_test_batches_snapshots = false;

 // Number of concurrent threads to run.
@@ -137,16 +138,16 @@ extern int leveldb_kill_odds;
 // If true, do not write WAL for write.
 static bool FLAGS_disable_wal = false;

-// Target level-0 file size for compaction
+// Target level-1 file size for compaction
 static int FLAGS_target_file_size_base = 64 * KB;

-// A multiplier to compute targe level-N file size
+// A multiplier to compute targe level-N file size (N >= 2)
 static int FLAGS_target_file_size_multiplier = 1;

-// Max bytes for level-0
+// Max bytes for level-1
 static uint64_t FLAGS_max_bytes_for_level_base = 256 * KB;

-// A multiplier to compute max bytes for level-N
+// A multiplier to compute max bytes for level-N (N >= 2)
 static int FLAGS_max_bytes_for_level_multiplier = 2;

 // Number of files in level-0 that will trigger put stop.
@@ -341,7 +342,8 @@ class Stats {
             "", bytes_mb, rate, (100*writes_)/done_, done_);
     fprintf(stdout, "%-12s: Wrote %ld times\n", "", writes_);
     fprintf(stdout, "%-12s: Deleted %ld times\n", "", deletes_);
-    fprintf(stdout, "%-12s: %ld/%ld gets found the key\n", "", founds_, gets_);
+    fprintf(stdout, "%-12s: %ld read and %ld found the key\n", "",
+            gets_, founds_);
     fprintf(stdout, "%-12s: Prefix scanned %ld times\n", "", prefixes_);
     fprintf(stdout, "%-12s: Iterator size sum is %ld\n", "",
             iterator_size_sums_);
@@ -690,7 +692,6 @@ class StressTest {
     return s;
   }

   // Given a key K, this deletes ("0"+K), ("1"+K),... ("9"+K)
   // in DB atomically i.e in a single batch. Also refer MultiGet.
   Status MultiDelete(ThreadState* thread,
@@ -1052,8 +1053,8 @@ class StressTest {
     fprintf(stdout, "Write-buffer-size : %d\n", FLAGS_write_buffer_size);
     fprintf(stdout, "Delete percentage : %d\n", FLAGS_delpercent);
     fprintf(stdout, "Max key : %ld\n", FLAGS_max_key);
-    fprintf(stdout, "Ratio #ops/#keys : %ld\n",
-            (FLAGS_ops_per_thread * FLAGS_threads)/FLAGS_max_key);
+    fprintf(stdout, "Ratio #ops/#keys : %f\n",
+            (1.0 * FLAGS_ops_per_thread * FLAGS_threads)/FLAGS_max_key);
     fprintf(stdout, "Num times DB reopens: %d\n", FLAGS_reopen);
     fprintf(stdout, "Batches/snapshots : %d\n",
             FLAGS_test_batches_snapshots);
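
The %ld to %f switch only works together with the new 1.0 * factor: the old expression was pure integer arithmetic, so the ops/keys ratio silently truncated to 0 whenever the total number of operations was smaller than FLAGS_max_key. A tiny standalone sketch of the difference (the operation and thread counts are made-up values; the key count is the new 1 * KB * KB * KB default):

    #include <cstdio>

    int main() {
      const long ops_per_thread = 100000;  // hypothetical value
      const long threads = 32;             // hypothetical value
      const long max_key = 1073741824;     // 1 * KB * KB * KB with KB = 1024

      const long truncated = (ops_per_thread * threads) / max_key;      // old: 0
      const double exact = (1.0 * ops_per_thread * threads) / max_key;  // new: ~0.003
      printf("integer ratio: %ld, floating-point ratio: %f\n", truncated, exact);
      return 0;
    }
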