rocksdb/utilities/memory/memory_test.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#ifndef ROCKSDB_LITE
#include "db/db_impl/db_impl.h"
#include "rocksdb/cache.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/memory_util.h"
#include "rocksdb/utilities/stackable_db.h"
#include "table/block_based/block_based_table_factory.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/random.h"
#include "util/string_util.h"
namespace ROCKSDB_NAMESPACE {
class MemoryTest : public testing::Test {
 public:
  MemoryTest() : kDbDir(test::PerThreadDBPath("memory_test")), rnd_(301) {
    assert(Env::Default()->CreateDirIfMissing(kDbDir).ok());
  }

  std::string GetDBName(int id) { return kDbDir + "db_" + ToString(id); }
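
  // Records the current per-type memory usage of all DBs in usage_history_
  // so that later assertions can compare consecutive snapshots.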
  void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
    std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
    ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type));
    for (int i = 0; i < MemoryUtil::kNumUsageTypes; ++i) {
      usage_history_[i].push_back(
          usage_by_type[static_cast<MemoryUtil::UsageType>(i)]);
    }
  }
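
  // Collects the block cache (and compressed block cache) pointers configured
  // in a BlockBasedTableOptions-based table factory, if any.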
  void GetCachePointersFromTableFactory(
      const TableFactory* factory,
      std::unordered_set<const Cache*>* cache_set) {
    const auto bbto = factory->GetOptions<BlockBasedTableOptions>();
    if (bbto != nullptr) {
      cache_set->insert(bbto->block_cache.get());
      cache_set->insert(bbto->block_cache_compressed.get());
    }
  }
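
  // Gathers every cache instance reachable from the given DBs: the internal
  // table cache, the row cache from DBOptions, and any block caches owned by
  // the table factories of each column family.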
  void GetCachePointers(const std::vector<DB*>& dbs,
                        std::unordered_set<const Cache*>* cache_set) {
    cache_set->clear();

    for (auto* db : dbs) {
      assert(db);

      // Cache from DBImpl
      StackableDB* sdb = dynamic_cast<StackableDB*>(db);
      DBImpl* db_impl = dynamic_cast<DBImpl*>(sdb ? sdb->GetBaseDB() : db);
      if (db_impl != nullptr) {
        cache_set->insert(db_impl->TEST_table_cache());
      }

      // Cache from DBOptions
      cache_set->insert(db->GetDBOptions().row_cache.get());

      // Cache from table factories
      std::unordered_map<std::string, const ImmutableCFOptions*> iopts_map;
      if (db_impl != nullptr) {
        ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map));
      }
      for (auto pair : iopts_map) {
        GetCachePointersFromTableFactory(pair.second->table_factory,
                                         cache_set);
      }
    }
  }
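
  // Convenience wrapper: discovers all cache pointers for the given DBs and
  // forwards them to MemoryUtil::GetApproximateMemoryUsageByType().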
  Status GetApproximateMemoryUsageByType(
      const std::vector<DB*>& dbs,
      std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type) {
    std::unordered_set<const Cache*> cache_set;
    GetCachePointers(dbs, &cache_set);
    return MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
                                                       usage_by_type);
  }

  const std::string kDbDir;
  Random rnd_;
  std::vector<uint64_t> usage_history_[MemoryUtil::kNumUsageTypes];
};
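
// Opens several DBs, writes and flushes data, then reads every key back and
// checks that the table-reader memory usage reported by MemoryUtil stays
// constant across the read-only phase, during which no new files are flushed.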
TEST_F(MemoryTest, SharedBlockCacheTotal) {
  std::vector<DB*> dbs;
  std::vector<uint64_t> usage_by_type;
  const int kNumDBs = 10;
  const int kKeySize = 100;
  const int kValueSize = 500;

  Options opt;
  opt.create_if_missing = true;
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;

  BlockBasedTableOptions bbt_opts;
  bbt_opts.block_cache = NewLRUCache(4096 * 1000 * 10);

  for (int i = 0; i < kNumDBs; ++i) {
    ASSERT_OK(DestroyDB(GetDBName(i), opt));
    DB* db = nullptr;
    ASSERT_OK(DB::Open(opt, GetDBName(i), &db));
    dbs.push_back(db);
  }
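
  // Remember the keys written to each DB so they can be read back below.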
  std::vector<std::string> keys_by_db[kNumDBs];

  // Fill roughly one memtable per Put to make the memtables use more memory.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (int j = 0; j < 100; ++j) {
        keys_by_db[i].emplace_back(rnd_.RandomString(kKeySize));
        ASSERT_OK(dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
                              rnd_.RandomString(kValueSize)));
      }
      ASSERT_OK(dbs[i]->Flush(FlushOptions()));
    }
  }
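
  // Read every key back and snapshot the memory usage after each DB's reads.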
  for (int i = 0; i < kNumDBs; ++i) {
    for (auto& key : keys_by_db[i]) {
      std::string value;
      ASSERT_OK(dbs[i]->Get(ReadOptions(), key, &value));
    }
    UpdateUsagesHistory(dbs);
  }

  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    // Expect EQ since no memtables were flushed after the initial fill, so
    // the table readers' memory usage should not change.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  for (int i = 0; i < kNumDBs; ++i) {
    delete dbs[i];
  }
}
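
// Verifies that memtable, un-flushed-memtable, table-reader, and cache usage
// reported by MemoryUtil move in the expected directions as data is written,
// memtables are flushed while pinned by iterators, and iterators are deleted.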
TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
  std::vector<DB*> dbs;
  std::vector<uint64_t> usage_by_type;
  std::vector<std::vector<ColumnFamilyHandle*>> vec_handles;
  const int kNumDBs = 10;
  // These key/value sizes ensure each KV has its own memtable. Note that the
  // minimum write_buffer_size allowed is 64 KB.
  const int kKeySize = 100;
  const int kValueSize = 1 << 16;

  Options opt;
  opt.create_if_missing = true;
  opt.create_missing_column_families = true;
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;

  std::vector<ColumnFamilyDescriptor> cf_descs = {
      {kDefaultColumnFamilyName, ColumnFamilyOptions(opt)},
      {"one", ColumnFamilyOptions(opt)},
      {"two", ColumnFamilyOptions(opt)},
  };

  for (int i = 0; i < kNumDBs; ++i) {
    ASSERT_OK(DestroyDB(GetDBName(i), opt));
    std::vector<ColumnFamilyHandle*> handles;
    dbs.emplace_back();
    vec_handles.emplace_back();
    ASSERT_OK(DB::Open(DBOptions(opt), GetDBName(i), cf_descs,
                       &vec_handles.back(), &dbs.back()));
  }

  // Fill roughly one memtable per Put to make the memtables use more memory,
  // recording the memory usage after every Put.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (auto* handle : vec_handles[i]) {
        ASSERT_OK(dbs[i]->Put(WriteOptions(), handle,
                              rnd_.RandomString(kKeySize),
                              rnd_.RandomString(kValueSize)));
        UpdateUsagesHistory(dbs);
      }
    }
  }

  // Expect the usage history to be monotonically increasing while no flushes
  // have happened: total and un-flushed memtable usage grow with every Put,
  // and table-reader usage stays flat because no SST files exist yet.
  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }
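
  // Mark where the history ends before iterators are created and memtables
  // are flushed; the assertions below compare only entries recorded after
  // this point.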
  size_t usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  std::vector<Iterator*> iters;

  // Create an iterator and flush all memtables for each db
  for (int i = 0; i < kNumDBs; ++i) {
    iters.push_back(dbs[i]->NewIterator(ReadOptions()));
    ASSERT_OK(dbs[i]->Flush(FlushOptions()));
    for (int j = 0; j < 100; ++j) {
      std::string value;
      ASSERT_NOK(
          dbs[i]->Get(ReadOptions(), rnd_.RandomString(kKeySize), &value));
    }
    UpdateUsagesHistory(dbs);
  }
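
  // Flushing while the iterators are open leaves the flushed memtables pinned,
  // so total memtable usage should hold steady even as un-flushed usage drops;
  // the random-key Gets above also pull blocks of the new files into the block
  // cache. The assertions below check these expectations.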
  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // Since the flushed memtables are still pinned by the iterators, we do
    // not expect the total memtable usage to decrease.
    ASSERT_GE(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // Expect the un-flushed memtable usage to decrease monotonically after
    // the check point, as memtables get flushed.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect the table readers' usage to increase as tables are flushed,
    // and the block cache usage to increase as their blocks are read.
    ASSERT_GT(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kCacheTotal][i],
              usage_history_[MemoryUtil::kCacheTotal][i - 1]);
  }

  usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
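
  // Delete the iterators; the memtables that were pinned only by them can now
  // be released, which should show up as a drop in total memtable usage.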
  for (int i = 0; i < kNumDBs; ++i) {
    // The iterator is never used; just make sure its status is OK before
    // deleting it.
    ASSERT_OK(iters[i]->status());
    delete iters[i];
    UpdateUsagesHistory(dbs);
  }

  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // Expect the total memtable usage to decrease as iterators are deleted.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // The memory usage of un-flushed memtables is only affected by Put and
    // Flush, so we expect EQ here as we only delete iterators.
    ASSERT_EQ(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect EQ as no more memtables were flushed.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  for (int i = 0; i < kNumDBs; ++i) {
    for (auto* handle : vec_handles[i]) {
      delete handle;
    }
    delete dbs[i];
  }
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
#if !(defined NDEBUG) || !defined(OS_WIN)
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
#else
  return 0;
#endif
}

#else
#include <cstdio>

int main(int /*argc*/, char** /*argv*/) {
  printf("Skipped in RocksDBLite as utilities are not supported.\n");
  return 0;
}
#endif  // !ROCKSDB_LITE