Auto-format two recent diffs and add a HISTORY.md entry (#6685)

Summary:
Two recent diffs can be auto-formatted; apply the formatter to them.
Also add the HISTORY.md entry for https://github.com/facebook/rocksdb/pull/6214
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6685

Test Plan: Run all existing tests

Reviewed By: cheng-chang

Differential Revision: D20965780

fbshipit-source-id: 195b08d7849513d42fe14073112cd19fdda6af95
Authored by sdong on 2020-04-10 11:30:49 -07:00; committed by Facebook GitHub Bot
parent f08630b914
commit 1be3be5522
6 changed files with 18 additions and 14 deletions

View File

@@ -5,6 +5,7 @@
 ### New Features
 * Added support for pipelined & parallel compression optimization for `BlockBasedTableBuilder`. This optimization makes block building, block compression and block appending a pipeline, and uses multiple threads to accelerate block compression. Users can set `CompressionOptions::parallel_threads` greater than 1 to enable compression parallelism.
+* Provide an allocator for memkind to be used with block cache. This is to work with memory technologies (Intel DCPMM is one such technology currently available) that require different libraries for allocation and management (such as PMDK and memkind). The high capacities available make it possible to provision large caches (up to several TBs in size) beyond what is achievable with DRAM.
 ### Bug Fixes
 * Fix a bug when making options.bottommost_compression, options.compression_opts and options.bottommost_compression_opts dynamically changeable: the modified values are not written to option files or returned back to users when being queried.
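
As context for the New Features entries above, here is a minimal, illustrative C++ sketch (not part of this commit) of opting into the pipelined & parallel compression path. It assumes a RocksDB build in which `CompressionOptions::parallel_threads` is available; the database path, compression type, and thread count below are placeholder choices. The memkind-backed block cache from the second entry is exercised directly by the memkind_kmem_allocator_test.cc hunk further down, so it is not repeated here.

#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Any compression type supported by the build works; the parallelism applies
  // to the block compression stage inside BlockBasedTableBuilder.
  options.compression = rocksdb::kSnappyCompression;
  // Values greater than 1 enable pipelined & parallel block compression.
  options.compression_opts.parallel_threads = 4;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/parallel_compression_demo", &db);
  assert(s.ok());
  // ... normal writes; flushes and compactions now compress data blocks using
  // multiple compression threads per table builder.
  delete db;
  return 0;
}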

View File

@@ -13,7 +13,7 @@ namespace rocksdb {
 void* MemkindKmemAllocator::Allocate(size_t size) {
   void* p = memkind_malloc(MEMKIND_DAX_KMEM, size);
   if (p == NULL) {
-    throw std::bad_alloc();
+    throw std::bad_alloc();
   }
   return p;
 }
@@ -23,7 +23,8 @@ void MemkindKmemAllocator::Deallocate(void* p) {
 }
 #ifdef ROCKSDB_MALLOC_USABLE_SIZE
-size_t MemkindKmemAllocator::UsableSize(void* p, size_t /*allocation_size*/) const {
+size_t MemkindKmemAllocator::UsableSize(void* p,
+                                        size_t /*allocation_size*/) const {
   return memkind_malloc_usable_size(MEMKIND_DAX_KMEM, p);
 }
 #endif // ROCKSDB_MALLOC_USABLE_SIZE

View File

@@ -25,4 +25,3 @@ class MemkindKmemAllocator : public MemoryAllocator {
 } // namespace rocksdb
 #endif // MEMKIND

View File

@@ -8,11 +8,11 @@
 #ifdef MEMKIND
 #include "memkind_kmem_allocator.h"
-#include "test_util/testharness.h"
 #include "rocksdb/cache.h"
 #include "rocksdb/db.h"
 #include "rocksdb/options.h"
 #include "table/block_based/block_based_table_factory.h"
+#include "test_util/testharness.h"
 namespace rocksdb {
 TEST(MemkindKmemAllocatorTest, Allocate) {
@@ -44,8 +44,8 @@ TEST(MemkindKmemAllocatorTest, DatabaseBlockCache) {
   ASSERT_OK(DestroyDB(dbname, options));
   options.create_if_missing = true;
-  std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024, 6, false, false,
-                                             std::make_shared<MemkindKmemAllocator>());
+  std::shared_ptr<Cache> cache = NewLRUCache(
+      1024 * 1024, 6, false, false, std::make_shared<MemkindKmemAllocator>());
   BlockBasedTableOptions table_options;
   table_options.block_cache = cache;
   options.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -65,7 +65,8 @@ TEST(MemkindKmemAllocatorTest, DatabaseBlockCache) {
     s = db->Put(wo, Slice(key), Slice(val));
     ASSERT_OK(s);
   }
-  ASSERT_OK(db->Flush(FlushOptions()));  // Flush all data from memtable so that reads are from block cache
+  ASSERT_OK(db->Flush(FlushOptions()));  // Flush all data from memtable so that
+                                         // reads are from block cache
   // Read and check block cache usage
   ReadOptions ro;
@@ -93,7 +94,9 @@ int main(int argc, char** argv) {
 #else
 int main(int /*argc*/, char** /*argv*/) {
-  printf("Skip memkind_kmem_allocator_test as the required library memkind is missing.");
+  printf(
+      "Skip memkind_kmem_allocator_test as the required library memkind is "
+      "missing.");
 }
 #endif // MEMKIND

View File

@@ -2638,7 +2638,7 @@ class Benchmark {
       }
       return cache;
     } else {
-      if(FLAGS_use_cache_memkind_kmem_allocator) {
+      if (FLAGS_use_cache_memkind_kmem_allocator) {
 #ifdef MEMKIND
         return NewLRUCache(
             static_cast<size_t>(capacity), FLAGS_cache_numshardbits,

View File

@@ -5,12 +5,12 @@
 #ifndef ROCKSDB_LITE
+#include "utilities/transactions/transaction_lock_mgr.h"
 #include "port/port.h"
 #include "port/stack_trace.h"
+#include "rocksdb/utilities/transaction_db.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
-#include "rocksdb/utilities/transaction_db.h"
-#include "utilities/transactions/transaction_lock_mgr.h"
 #include "utilities/transactions/transaction_db_mutex_impl.h"
 namespace ROCKSDB_NAMESPACE {
@@ -29,9 +29,9 @@ class TransactionLockMgrTest : public testing::Test {
     txn_opt.transaction_lock_timeout = 0;
     ASSERT_OK(TransactionDB::Open(opt, txn_opt, db_dir_, &db_));
-    locker_.reset(new TransactionLockMgr(
-        db_, txn_opt.num_stripes, txn_opt.max_num_locks,
-        txn_opt.max_num_deadlocks, mutex_factory_));
+    locker_.reset(
+        new TransactionLockMgr(db_, txn_opt.num_stripes, txn_opt.max_num_locks,
+                               txn_opt.max_num_deadlocks, mutex_factory_));
   }
   void TearDown() override {