d9cfaa2b16
Summary: The persistent cache tier is the tier abstraction that can work for any block-device-based device mounted on a file system. The design/implementation can handle any generic block device. Generic block support is achieved by generalizing the access pattern as {io-size, q-depth, direct-io/buffered}. We have specifically tested and adapted the IO path for NVM and SSD.

The persistent cache tier consists of three parts:
1) File layout: implements the IO path for reading and writing data (key/value pairs).
2) Meta-data: implements the index for the persistent read cache.
3) Implementation: binds (1) and (2) and fleshes out the PersistentCacheTier interface.

This patch provides the implementation for (1) and (2); a follow-up patch will provide (3) and tests.

Test Plan: Compile and run check

Subscribers: andrewkr, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D57117
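As a rough illustration of the access-pattern generalization described above, the {io-size, q-depth, direct-io/buffered} tuple could be modeled as a small struct. This is a hypothetical sketch only; the type and field names below are invented for illustration and are not the interface introduced by this patch:

    // Hypothetical sketch: models the {io-size, q-depth, direct-io/buffered}
    // access-pattern tuple from the summary; names are invented here.
    struct BlockDeviceIOPattern {
      size_t io_size_bytes = 4 << 10;  // preferred unit of IO to the device
      size_t queue_depth = 32;         // number of IOs kept in flight
      bool direct_io = true;           // true = direct IO, false = buffered
    };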
132 lines
3.3 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#include <assert.h>
#include <atomic>
#include <mutex>
#include <thread>
#include "port/port.h"

namespace rocksdb {

// Helper class that locks a mutex on construction and unlocks the mutex when
// the destructor of the MutexLock object is invoked.
//
// Typical usage:
//
//   void MyClass::MyMethod() {
//     MutexLock l(&mu_);       // mu_ is an instance variable
//     ... some complex code, possibly with multiple return paths ...
//   }

class MutexLock {
 public:
  explicit MutexLock(port::Mutex *mu) : mu_(mu) {
    this->mu_->Lock();
  }
  ~MutexLock() { this->mu_->Unlock(); }

 private:
  port::Mutex *const mu_;
  // No copying allowed
  MutexLock(const MutexLock&);
  void operator=(const MutexLock&);
};

//
// Acquire a ReadLock on the specified RWMutex.
// The Lock will be automatically released when the
// object goes out of scope.
//
class ReadLock {
 public:
  explicit ReadLock(port::RWMutex *mu) : mu_(mu) {
    this->mu_->ReadLock();
  }
  ~ReadLock() { this->mu_->ReadUnlock(); }

 private:
  port::RWMutex *const mu_;
  // No copying allowed
  ReadLock(const ReadLock&);
  void operator=(const ReadLock&);
};

//
// Automatically release a held read lock on the specified RWMutex
// when the object is destroyed.
//
class ReadUnlock {
 public:
  explicit ReadUnlock(port::RWMutex *mu) : mu_(mu) { mu->AssertHeld(); }
  ~ReadUnlock() { mu_->ReadUnlock(); }

 private:
  port::RWMutex *const mu_;
  // No copying allowed
  ReadUnlock(const ReadUnlock &) = delete;
  ReadUnlock &operator=(const ReadUnlock &) = delete;
};

//
// Acquire a WriteLock on the specified RWMutex.
// The Lock will be automatically released when the
// object goes out of scope.
//
class WriteLock {
 public:
  explicit WriteLock(port::RWMutex *mu) : mu_(mu) {
    this->mu_->WriteLock();
  }
  ~WriteLock() { this->mu_->WriteUnlock(); }

 private:
  port::RWMutex *const mu_;
  // No copying allowed
  WriteLock(const WriteLock&);
  void operator=(const WriteLock&);
};
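
// Illustrative usage of ReadLock/WriteLock (a sketch, not part of the
// original header; Counter and its members are hypothetical):
//
//   class Counter {
//    public:
//     uint64_t Get() {
//       ReadLock l(&rwlock_);    // shared: concurrent readers are allowed
//       return value_;
//     }
//     void Increment() {
//       WriteLock l(&rwlock_);   // exclusive: blocks readers and writers
//       ++value_;
//     }
//
//    private:
//     port::RWMutex rwlock_;
//     uint64_t value_ = 0;
//   };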

//
// SpinMutex has very low overhead for low-contention cases. Method names
// are chosen so you can use std::unique_lock or std::lock_guard with it.
//
class SpinMutex {
 public:
  SpinMutex() : locked_(false) {}

  bool try_lock() {
    auto currently_locked = locked_.load(std::memory_order_relaxed);
    return !currently_locked &&
           locked_.compare_exchange_weak(currently_locked, true,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed);
  }

  void lock() {
    for (size_t tries = 0;; ++tries) {
      if (try_lock()) {
        // success
        break;
      }
      port::AsmVolatilePause();
      if (tries > 100) {
        std::this_thread::yield();
      }
    }
  }

  void unlock() { locked_.store(false, std::memory_order_release); }

 private:
  std::atomic<bool> locked_;
};
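
// Typical usage of SpinMutex with a standard guard (a sketch; since
// lock()/try_lock()/unlock() satisfy the standard Lockable requirements,
// std::lock_guard and std::unique_lock both work with it):
//
//   SpinMutex spin;
//   {
//     std::lock_guard<SpinMutex> guard(spin);  // spin.lock() on entry,
//                                              // spin.unlock() on scope exit
//     // ... keep the critical section short; avoid blocking while held
//   }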

}  // namespace rocksdb