Prevent the table cache from opening the same file multiple times (#6707)
Summary: Under highly concurrent requests, the table cache can open the same file multiple times, which defeats the purpose of max_open_files. Fixes (https://github.com/facebook/rocksdb/issues/6699) Pull Request resolved: https://github.com/facebook/rocksdb/pull/6707 Reviewed By: ltamasi Differential Revision: D21044965 fbshipit-source-id: f6e91d90b60dad86e518b5147021da42460ee1d2
This commit is contained in:
parent
f9155a3404
commit
6ee66cf8f0
@ -62,6 +62,8 @@ void AppendVarint64(IterKey* key, uint64_t v) {
|
|||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
|
const int kLoadConcurency = 128;
|
||||||
|
|
||||||
TableCache::TableCache(const ImmutableCFOptions& ioptions,
|
TableCache::TableCache(const ImmutableCFOptions& ioptions,
|
||||||
const FileOptions& file_options, Cache* const cache,
|
const FileOptions& file_options, Cache* const cache,
|
||||||
BlockCacheTracer* const block_cache_tracer)
|
BlockCacheTracer* const block_cache_tracer)
|
||||||
@ -69,7 +71,8 @@ TableCache::TableCache(const ImmutableCFOptions& ioptions,
|
|||||||
file_options_(file_options),
|
file_options_(file_options),
|
||||||
cache_(cache),
|
cache_(cache),
|
||||||
immortal_tables_(false),
|
immortal_tables_(false),
|
||||||
block_cache_tracer_(block_cache_tracer) {
|
block_cache_tracer_(block_cache_tracer),
|
||||||
|
loader_mutex_(kLoadConcurency, GetSliceNPHash64) {
|
||||||
if (ioptions_.row_cache) {
|
if (ioptions_.row_cache) {
|
||||||
// If the same cache is shared by multiple instances, we need to
|
// If the same cache is shared by multiple instances, we need to
|
||||||
// disambiguate its entries.
|
// disambiguate its entries.
|
||||||
@ -155,6 +158,13 @@ Status TableCache::FindTable(const FileOptions& file_options,
|
|||||||
if (no_io) { // Don't do IO and return a not-found status
|
if (no_io) { // Don't do IO and return a not-found status
|
||||||
return Status::Incomplete("Table not found in table_cache, no_io is set");
|
return Status::Incomplete("Table not found in table_cache, no_io is set");
|
||||||
}
|
}
|
||||||
|
MutexLock load_lock(loader_mutex_.get(key));
|
||||||
|
// We check the cache again under loading mutex
|
||||||
|
*handle = cache_->Lookup(key);
|
||||||
|
if (*handle != nullptr) {
|
||||||
|
return s;
|
||||||
|
}
|
||||||
|
|
||||||
std::unique_ptr<TableReader> table_reader;
|
std::unique_ptr<TableReader> table_reader;
|
||||||
s = GetTableReader(file_options, internal_comparator, fd,
|
s = GetTableReader(file_options, internal_comparator, fd,
|
||||||
false /* sequential mode */, record_read_stats,
|
false /* sequential mode */, record_read_stats,
|
||||||
|
@ -221,6 +221,7 @@ class TableCache {
|
|||||||
std::string row_cache_id_;
|
std::string row_cache_id_;
|
||||||
bool immortal_tables_;
|
bool immortal_tables_;
|
||||||
BlockCacheTracer* const block_cache_tracer_;
|
BlockCacheTracer* const block_cache_tracer_;
|
||||||
|
Striped<port::Mutex, Slice> loader_mutex_;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace ROCKSDB_NAMESPACE
|
} // namespace ROCKSDB_NAMESPACE
|
||||||
|
@ -132,4 +132,55 @@ class SpinMutex {
|
|||||||
std::atomic<bool> locked_;
|
std::atomic<bool> locked_;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// We want to prevent false sharing
|
||||||
|
template <class T>
|
||||||
|
struct ALIGN_AS(CACHE_LINE_SIZE) LockData {
|
||||||
|
T lock_;
|
||||||
|
};
|
||||||
|
|
||||||
|
//
|
||||||
|
// Inspired by Guava: https://github.com/google/guava/wiki/StripedExplained
|
||||||
|
// A striped Lock. This offers the underlying lock striping similar
|
||||||
|
// to that of ConcurrentHashMap in a reusable form, and extends it for
|
||||||
|
// semaphores and read-write locks. Conceptually, lock striping is the technique
|
||||||
|
// of dividing a lock into many <i>stripes</i>, increasing the granularity of a
|
||||||
|
// single lock and allowing independent operations to lock different stripes and
|
||||||
|
// proceed concurrently, instead of creating contention for a single lock.
|
||||||
|
//
|
||||||
|
template <class T, class P>
|
||||||
|
class Striped {
|
||||||
|
public:
|
||||||
|
Striped(size_t stripes, std::function<uint64_t(const P &)> hash)
|
||||||
|
: stripes_(stripes), hash_(hash) {
|
||||||
|
|
||||||
|
locks_ = reinterpret_cast<LockData<T> *>(
|
||||||
|
port::cacheline_aligned_alloc(sizeof(LockData<T>) * stripes));
|
||||||
|
for (size_t i = 0; i < stripes; i++) {
|
||||||
|
new (&locks_[i]) LockData<T>();
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual ~Striped() {
|
||||||
|
if (locks_ != nullptr) {
|
||||||
|
assert(stripes_ > 0);
|
||||||
|
for (size_t i = 0; i < stripes_; i++) {
|
||||||
|
locks_[i].~LockData<T>();
|
||||||
|
}
|
||||||
|
port::cacheline_aligned_free(locks_);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
T *get(const P &key) {
|
||||||
|
uint64_t h = hash_(key);
|
||||||
|
size_t index = h % stripes_;
|
||||||
|
return &reinterpret_cast<LockData<T> *>(&locks_[index])->lock_;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
size_t stripes_;
|
||||||
|
LockData<T> *locks_;
|
||||||
|
std::function<uint64_t(const P &)> hash_;
|
||||||
|
};
|
||||||
|
|
||||||
} // namespace ROCKSDB_NAMESPACE
|
} // namespace ROCKSDB_NAMESPACE
|
||||||
|
Loading…
Reference in New Issue
Block a user