Deprecate removeScanCountLimit in NewLRUCache
Summary: It is no longer used by the implementation, so we should also remove it
from the public API.

Test Plan: make check

Reviewers: sdong

Reviewed By: sdong

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D34971

Commit: c88ff4ca76 (parent: b4b69e4f77)
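
For callers, the change boils down to dropping the third argument from NewLRUCache. Below is a minimal before/after sketch; the header path, the std::shared_ptr spelling, and the 50000/16/8 values are taken from the public header and test changes in this diff, not something new introduced here:

    #include <memory>
    #include "rocksdb/cache.h"

    int main() {
      // Before this commit the factory took a third removeScanCountLimit argument:
      //   std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(50000, 16, 8);
      // After this commit only capacity and numShardBits remain:
      std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(50000, 16);
      return cache != nullptr ? 0 : 1;
    }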
@@ -1,7 +1,7 @@
 # Rocksdb Change Log
 
 ### Unreleased Features
-* Changed the LRU caching algorithm so that referenced blocks (by iterators) are never evicted
+* Changed the LRU caching algorithm so that referenced blocks (by iterators) are never evicted. This change made parameter removeScanCountLimit obsolete. Because of that NewLRUCache doesn't take three arguments anymore. table_cache_remove_scan_limit option is also removed
 * By default we now optimize the compilation for the compilation platform (using -march=native). If you want to build portable binary, use 'PORTABLE=1' before the make command.
 * We now allow level-compaction to place files in different paths by
   specifying them in db_paths along with the target_size.
db/c.cc

@@ -1613,7 +1613,7 @@ void rocksdb_options_set_table_cache_numshardbits(
 
 void rocksdb_options_set_table_cache_remove_scan_count_limit(
     rocksdb_options_t* opt, int v) {
-  opt->rep.table_cache_remove_scan_count_limit = v;
+  // this option is deprecated
 }
 
 void rocksdb_options_set_arena_block_size(
@@ -26,7 +26,7 @@ class CompactionJobTest : public testing::Test {
       : env_(Env::Default()),
         dbname_(test::TmpDir() + "/compaction_job_test"),
         mutable_cf_options_(Options(), ImmutableCFOptions(Options())),
-        table_cache_(NewLRUCache(50000, 16, 8)),
+        table_cache_(NewLRUCache(50000, 16)),
         write_buffer_(db_options_.db_write_buffer_size),
         versions_(new VersionSet(dbname_, &db_options_, env_options_,
                                  table_cache_.get(), &write_buffer_,
@@ -307,8 +307,6 @@ DEFINE_int32(cache_numshardbits, -1, "Number of shards for the block cache"
             " is 2 ** cache_numshardbits. Negative means use default settings."
             " This is applied only if FLAGS_cache_size is non-negative.");
 
-DEFINE_int32(cache_remove_scan_count_limit, 32, "");
-
 DEFINE_bool(verify_checksum, false, "Verify checksum for every block read"
             " from storage");
 
@@ -1357,8 +1355,7 @@ class Benchmark {
       : cache_(
             FLAGS_cache_size >= 0
                 ? (FLAGS_cache_numshardbits >= 1
-                       ? NewLRUCache(FLAGS_cache_size, FLAGS_cache_numshardbits,
-                                     FLAGS_cache_remove_scan_count_limit)
+                       ? NewLRUCache(FLAGS_cache_size, FLAGS_cache_numshardbits)
                        : NewLRUCache(FLAGS_cache_size))
                 : nullptr),
         compressed_cache_(FLAGS_compressed_cache_size >= 0
@@ -240,8 +240,7 @@ DBImpl::DBImpl(const DBOptions& options, const std::string& dbname)
                                4194304 : db_options_.max_open_files - 10;
   // Reserve ten files or so for other uses and give the rest to TableCache.
   table_cache_ =
-      NewLRUCache(table_cache_size, db_options_.table_cache_numshardbits,
-                  db_options_.table_cache_remove_scan_count_limit);
+      NewLRUCache(table_cache_size, db_options_.table_cache_numshardbits);
 
   versions_.reset(new VersionSet(dbname_, &db_options_, env_options_,
                                  table_cache_.get(), &write_buffer_,
@@ -25,7 +25,7 @@ class FlushJobTest : public testing::Test {
   FlushJobTest()
       : env_(Env::Default()),
         dbname_(test::TmpDir() + "/flush_job_test"),
-        table_cache_(NewLRUCache(50000, 16, 8)),
+        table_cache_(NewLRUCache(50000, 16)),
         write_buffer_(db_options_.db_write_buffer_size),
         versions_(new VersionSet(dbname_, &db_options_, env_options_,
                                  table_cache_.get(), &write_buffer_,
@@ -69,8 +69,7 @@ class Repairer {
         raw_table_cache_(
             // TableCache can be small since we expect each table to be opened
             // once.
-            NewLRUCache(10, options_.table_cache_numshardbits,
-                        options_.table_cache_remove_scan_count_limit)),
+            NewLRUCache(10, options_.table_cache_numshardbits)),
         next_file_number_(1) {
     table_cache_ =
         new TableCache(ioptions_, env_options_, raw_table_cache_.get());
@@ -2370,9 +2370,8 @@ Status VersionSet::ReduceNumberOfLevels(const std::string& dbname,
   }
 
   ColumnFamilyOptions cf_options(*options);
-  std::shared_ptr<Cache> tc(NewLRUCache(
-      options->max_open_files - 10, options->table_cache_numshardbits,
-      options->table_cache_remove_scan_count_limit));
+  std::shared_ptr<Cache> tc(NewLRUCache(options->max_open_files - 10,
+                                        options->table_cache_numshardbits));
   WriteController wc;
   WriteBuffer wb(options->db_write_buffer_size);
   VersionSet versions(dbname, options, env_options, tc.get(), &wb, &wc);
@@ -28,7 +28,7 @@ class WalManagerTest : public testing::Test {
   WalManagerTest()
       : env_(Env::Default()),
         dbname_(test::TmpDir() + "/wal_manager_test"),
-        table_cache_(NewLRUCache(50000, 16, 8)),
+        table_cache_(NewLRUCache(50000, 16)),
         write_buffer_(db_options_.db_write_buffer_size),
         current_log_number_(0) {
     DestroyDB(dbname_, Options());
@@ -34,20 +34,11 @@ class Cache;
 
 // Create a new cache with a fixed size capacity. The cache is sharded
 // to 2^numShardBits shards, by hash of the key. The total capacity
-// is divided and evenly assigned to each shard. Inside each shard,
-// the eviction is done in two passes: first try to free spaces by
-// evicting entries that are among the most least used removeScanCountLimit
-// entries and do not have reference other than by the cache itself, in
-// the least-used order. If not enough space is freed, further free the
-// entries in least used order.
+// is divided and evenly assigned to each shard.
 //
-// The functions without parameter numShardBits and/or removeScanCountLimit
-// use default values. removeScanCountLimit's default value is 0, which
-// means a strict LRU order inside each shard.
+// The functions without parameter numShardBits uses default value, which is 4
 extern shared_ptr<Cache> NewLRUCache(size_t capacity);
 extern shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits);
-extern shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits,
-                                     int removeScanCountLimit);
 
 class Cache {
  public:
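
The two remaining public overloads can be exercised as in this minimal sketch; the 8 MB capacity and the shard-bit value of 6 are illustration values only, while the nullptr result for numShardBits >= 20 comes from the NewLRUCache implementation hunk further down:

    #include <cassert>
    #include <memory>
    #include "rocksdb/cache.h"

    int main() {
      // Single-argument form: numShardBits falls back to the default (4).
      std::shared_ptr<rocksdb::Cache> c1 = rocksdb::NewLRUCache(8 * 1024 * 1024);

      // Two-argument form: capacity plus an explicit shard count of 2^6 = 64.
      std::shared_ptr<rocksdb::Cache> c2 = rocksdb::NewLRUCache(8 * 1024 * 1024, 6);

      // Asking for 2^20 or more shards is rejected and yields a null pointer.
      assert(rocksdb::NewLRUCache(8 * 1024 * 1024, 20) == nullptr);
      return (c1 && c2) ? 0 : 1;
    }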
@@ -897,14 +897,8 @@ struct DBOptions {
   // Number of shards used for table cache.
   int table_cache_numshardbits;
 
-  // During data eviction of table's LRU cache, it would be inefficient
-  // to strictly follow LRU because this piece of memory will not really
-  // be released unless its refcount falls to zero. Instead, make two
-  // passes: the first pass will release items with refcount = 1,
-  // and if not enough space releases after scanning the number of
-  // elements specified by this parameter, we will remove items in LRU
-  // order.
-  int table_cache_remove_scan_count_limit;
+  // DEPRECATED
+  // int table_cache_remove_scan_count_limit;
 
   // The following two fields affect how archived logs will be deleted.
   // 1. If both set to 0, logs will be deleted asap and will not get into
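
For applications that previously tuned this field, migration is just deleting the assignment. A hypothetical sketch (the specific option values are illustrative, not recommendations from this commit):

    #include "rocksdb/options.h"

    rocksdb::Options MakeOptions() {
      rocksdb::Options options;
      options.max_open_files = 5000;          // still honoured
      options.table_cache_numshardbits = 4;   // still honoured
      // options.table_cache_remove_scan_count_limit = 16;  // field removed by this commit
      return options;
    }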
@@ -702,8 +702,8 @@ void Java_org_rocksdb_Options_setTableCacheNumshardbits(
  */
 jint Java_org_rocksdb_Options_tableCacheRemoveScanCountLimit(
     JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::Options*>(
-      jhandle)->table_cache_remove_scan_count_limit;
+  // deprecated
+  return 0;
 }
 
 /*
@@ -713,8 +713,7 @@ jint Java_org_rocksdb_Options_tableCacheRemoveScanCountLimit(
  */
 void Java_org_rocksdb_Options_setTableCacheRemoveScanCountLimit(
     JNIEnv* env, jobject jobj, jlong jhandle, jint limit) {
-  reinterpret_cast<rocksdb::Options*>(
-      jhandle)->table_cache_remove_scan_count_limit = static_cast<int>(limit);
+  // deprecated
 }
 
 /*
@@ -3383,8 +3382,7 @@ jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits(
  */
 void Java_org_rocksdb_DBOptions_setTableCacheRemoveScanCountLimit(
     JNIEnv* env, jobject jobj, jlong jhandle, jint limit) {
-  reinterpret_cast<rocksdb::DBOptions*>(
-      jhandle)->table_cache_remove_scan_count_limit = static_cast<int>(limit);
+  // deprecated
 }
 
 /*
@@ -3394,8 +3392,8 @@ void Java_org_rocksdb_DBOptions_setTableCacheRemoveScanCountLimit(
  */
 jint Java_org_rocksdb_DBOptions_tableCacheRemoveScanCountLimit(
     JNIEnv* env, jobject jobj, jlong jhandle) {
-  return reinterpret_cast<rocksdb::DBOptions*>(
-      jhandle)->table_cache_remove_scan_count_limit;
+  // deprecated
+  return 0;
 }
 
 /*
@@ -193,9 +193,6 @@ class LRUCache {
 
   // Separate from constructor so caller can easily make an array of LRUCache
   void SetCapacity(size_t capacity) { capacity_ = capacity; }
-  void SetRemoveScanCountLimit(uint32_t remove_scan_count_limit) {
-    remove_scan_count_limit_ = remove_scan_count_limit;
-  }
 
   // Like Cache methods, but with an extra "hash" parameter.
   Cache::Handle* Insert(const Slice& key, uint32_t hash,
@@ -224,7 +221,6 @@ class LRUCache {
 
   // Initialized before use.
   size_t capacity_;
-  uint32_t remove_scan_count_limit_;
 
   // mutex_ protects the following state.
   // We don't count mutex_ as the cache's internal state so semantically we
@@ -426,7 +422,6 @@ void LRUCache::Erase(const Slice& key, uint32_t hash) {
 }
 
 static int kNumShardBits = 4;          // default values, can be overridden
-static int kRemoveScanCountLimit = 0;  // default values, can be overridden
 
 class ShardedLRUCache : public Cache {
  private:
@@ -445,28 +440,16 @@ class ShardedLRUCache : public Cache {
     return (num_shard_bits_ > 0) ? (hash >> (32 - num_shard_bits_)) : 0;
   }
 
-  void init(size_t capacity, int numbits, int removeScanCountLimit) {
-    num_shard_bits_ = numbits;
-    capacity_ = capacity;
+ public:
+  ShardedLRUCache(size_t capacity, int num_shard_bits)
+      : last_id_(0), num_shard_bits_(num_shard_bits), capacity_(capacity) {
     int num_shards = 1 << num_shard_bits_;
     shards_ = new LRUCache[num_shards];
     const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
     for (int s = 0; s < num_shards; s++) {
       shards_[s].SetCapacity(per_shard);
-      shards_[s].SetRemoveScanCountLimit(removeScanCountLimit);
     }
   }
-
- public:
-  explicit ShardedLRUCache(size_t capacity)
-      : last_id_(0) {
-    init(capacity, kNumShardBits, kRemoveScanCountLimit);
-  }
-  ShardedLRUCache(size_t capacity, int num_shard_bits,
-                  int removeScanCountLimit)
-      : last_id_(0) {
-    init(capacity, num_shard_bits, removeScanCountLimit);
-  }
   virtual ~ShardedLRUCache() {
     delete[] shards_;
   }
@@ -526,17 +509,10 @@ shared_ptr<Cache> NewLRUCache(size_t capacity) {
 }
 
 shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits) {
-  return NewLRUCache(capacity, num_shard_bits, kRemoveScanCountLimit);
-}
-
-shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
-                              int removeScanCountLimit) {
   if (num_shard_bits >= 20) {
     return nullptr;  // the cache cannot be sharded into too many fine pieces
   }
-  return std::make_shared<ShardedLRUCache>(capacity,
-                                           num_shard_bits,
-                                           removeScanCountLimit);
+  return std::make_shared<ShardedLRUCache>(capacity, num_shard_bits);
 }
 
 }  // namespace rocksdb
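
The constructor above splits the total capacity evenly across 2^num_shard_bits shards using ceiling division. With the kCacheSize=1000 and kNumShardBits=4 constants from the CacheTest hunks below, that works out to 16 shards of 63 capacity units each, as this small standalone sketch reproduces:

    #include <cstddef>
    #include <iostream>

    int main() {
      size_t capacity = 1000;   // kCacheSize in the CacheTest fixture below
      int num_shard_bits = 4;   // kNumShardBits in the CacheTest fixture below
      int num_shards = 1 << num_shard_bits;                            // 16 shards
      size_t per_shard = (capacity + (num_shards - 1)) / num_shards;   // ceil(1000 / 16) = 63
      std::cout << num_shards << " shards, " << per_shard << " capacity units per shard\n";
      return 0;
    }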
@@ -43,11 +43,9 @@ class CacheTest : public testing::Test {
 
   static const int kCacheSize = 1000;
   static const int kNumShardBits = 4;
-  static const int kRemoveScanCountLimit = 16;
 
   static const int kCacheSize2 = 100;
   static const int kNumShardBits2 = 2;
-  static const int kRemoveScanCountLimit2 = 200;
 
   std::vector<int> deleted_keys_;
   std::vector<int> deleted_values_;
@@ -55,9 +53,8 @@ class CacheTest : public testing::Test {
   shared_ptr<Cache> cache2_;
 
   CacheTest() :
-      cache_(NewLRUCache(kCacheSize, kNumShardBits, kRemoveScanCountLimit)),
-      cache2_(NewLRUCache(kCacheSize2, kNumShardBits2,
-                          kRemoveScanCountLimit2)) {
+      cache_(NewLRUCache(kCacheSize, kNumShardBits)),
+      cache2_(NewLRUCache(kCacheSize2, kNumShardBits2)) {
     current_ = this;
   }
 
@@ -116,7 +113,7 @@ void dumbDeleter(const Slice& key, void* value) { }
 TEST_F(CacheTest, UsageTest) {
   // cache is shared_ptr and will be automatically cleaned up.
   const uint64_t kCapacity = 100000;
-  auto cache = NewLRUCache(kCapacity, 8, 200);
+  auto cache = NewLRUCache(kCapacity, 8);
 
   size_t usage = 0;
   const char* value = "abcdef";
@@ -525,9 +525,8 @@ void DumpManifestFile(std::string file, bool verbose, bool hex) {
   Options options;
   EnvOptions sopt;
   std::string dbname("dummy");
-  std::shared_ptr<Cache> tc(
-      NewLRUCache(options.max_open_files - 10, options.table_cache_numshardbits,
-                  options.table_cache_remove_scan_count_limit));
+  std::shared_ptr<Cache> tc(NewLRUCache(options.max_open_files - 10,
+                                        options.table_cache_numshardbits));
   // Notice we are using the default options not through SanitizeOptions(),
   // if VersionSet::DumpManifest() depends on any option done by
   // SanitizeOptions(), we need to initialize it manually.
@@ -1134,8 +1133,7 @@ Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt,
                                                 int* levels) {
   EnvOptions soptions;
   std::shared_ptr<Cache> tc(
-      NewLRUCache(opt.max_open_files - 10, opt.table_cache_numshardbits,
-                  opt.table_cache_remove_scan_count_limit));
+      NewLRUCache(opt.max_open_files - 10, opt.table_cache_numshardbits));
   const InternalKeyComparator cmp(opt.comparator);
   WriteController wc;
   WriteBuffer wb(opt.db_write_buffer_size);
@@ -238,7 +238,6 @@ DBOptions::DBOptions()
       keep_log_file_num(1000),
       max_manifest_file_size(std::numeric_limits<uint64_t>::max()),
       table_cache_numshardbits(4),
-      table_cache_remove_scan_count_limit(16),
       WAL_ttl_seconds(0),
       WAL_size_limit_MB(0),
       manifest_preallocation_size(4 * 1024 * 1024),
@@ -282,8 +281,6 @@ DBOptions::DBOptions(const Options& options)
       keep_log_file_num(options.keep_log_file_num),
       max_manifest_file_size(options.max_manifest_file_size),
       table_cache_numshardbits(options.table_cache_numshardbits),
-      table_cache_remove_scan_count_limit(
-          options.table_cache_remove_scan_count_limit),
       WAL_ttl_seconds(options.WAL_ttl_seconds),
       WAL_size_limit_MB(options.WAL_size_limit_MB),
       manifest_preallocation_size(options.manifest_preallocation_size),
@@ -330,8 +327,6 @@ void DBOptions::Dump(Logger* log) const {
         wal_dir.c_str());
     Log(log, " Options.table_cache_numshardbits: %d",
         table_cache_numshardbits);
-    Log(log, " Options.table_cache_remove_scan_count_limit: %d",
-        table_cache_remove_scan_count_limit);
     Log(log, " Options.delete_obsolete_files_period_micros: %" PRIu64,
         delete_obsolete_files_period_micros);
     Log(log, " Options.max_background_compactions: %d",
@@ -509,8 +509,6 @@ bool ParseDBOption(const std::string& name, const std::string& value,
     new_options->max_manifest_file_size = ParseUint64(value);
   } else if (name == "table_cache_numshardbits") {
     new_options->table_cache_numshardbits = ParseInt(value);
-  } else if (name == "table_cache_remove_scan_count_limit") {
-    new_options->table_cache_remove_scan_count_limit = ParseInt(value);
   } else if (name == "WAL_ttl_seconds") {
     new_options->WAL_ttl_seconds = ParseUint64(value);
   } else if (name == "WAL_size_limit_MB") {
@@ -159,7 +159,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
     {"keep_log_file_num", "39"},
     {"max_manifest_file_size", "40"},
     {"table_cache_numshardbits", "41"},
-    {"table_cache_remove_scan_count_limit", "42"},
     {"WAL_ttl_seconds", "43"},
     {"WAL_size_limit_MB", "44"},
     {"manifest_preallocation_size", "45"},
@@ -266,7 +265,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
   ASSERT_EQ(new_db_opt.keep_log_file_num, 39U);
   ASSERT_EQ(new_db_opt.max_manifest_file_size, static_cast<uint64_t>(40));
   ASSERT_EQ(new_db_opt.table_cache_numshardbits, 41);
-  ASSERT_EQ(new_db_opt.table_cache_remove_scan_count_limit, 42);
   ASSERT_EQ(new_db_opt.WAL_ttl_seconds, static_cast<uint64_t>(43));
   ASSERT_EQ(new_db_opt.WAL_size_limit_MB, static_cast<uint64_t>(44));
   ASSERT_EQ(new_db_opt.manifest_preallocation_size, 45U);