rocksdb/port/win/port_win.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#if !defined(OS_WIN) && !defined(WIN32) && !defined(_WIN32)
#error Windows Specific Code
#endif
#include "port/win/port_win.h"

#include <io.h>

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <chrono>
#include <cstdlib>
#include <exception>
#include <memory>
#include <new>

#include "port/dirent.h"
#include "port/sys_time.h"
#include "util/logging.h"

namespace rocksdb {
namespace port {
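
// Emulates POSIX gettimeofday() via std::chrono, since Windows does not
// provide it natively; the timezone argument is ignored.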
void gettimeofday(struct timeval* tv, struct timezone* /* tz */) {
  using namespace std::chrono;

  microseconds usNow(
      duration_cast<microseconds>(system_clock::now().time_since_epoch()));

  seconds secNow(duration_cast<seconds>(usNow));

  tv->tv_sec = static_cast<long>(secNow.count());
  tv->tv_usec = static_cast<long>(
      usNow.count() - duration_cast<microseconds>(secNow).count());
}

Mutex::~Mutex() {}

CondVar::~CondVar() {}
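
// While a thread is blocked in the condition variable the mutex is
// logically released, so the debug-only locked_ flag is cleared around the
// wait and restored afterwards for the mutex's held-lock assertions.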
void CondVar::Wait() {
  // Caller must ensure that mutex is held prior to calling this method
  std::unique_lock<std::mutex> lk(mu_->getLock(), std::adopt_lock);
#ifndef NDEBUG
  mu_->locked_ = false;
#endif

  cv_.wait(lk);

#ifndef NDEBUG
  mu_->locked_ = true;
#endif

  // Release ownership of the lock as we don't want it to be unlocked when
  // it goes out of scope (as we adopted the lock and didn't lock it ourselves)
  lk.release();
}
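
// Returns true if the wait timed out before a signal arrived, false
// otherwise. abs_time_us is an absolute timestamp in microseconds since the
// system clock epoch.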
bool CondVar::TimedWait(uint64_t abs_time_us) {
  using namespace std::chrono;

  // MSVC++ library implements wait_until in terms of wait_for so
  // we need to convert absolute wait into relative wait.
  microseconds usAbsTime(abs_time_us);

  microseconds usNow(
      duration_cast<microseconds>(system_clock::now().time_since_epoch()));
  microseconds relTimeUs =
      (usAbsTime > usNow) ? (usAbsTime - usNow) : microseconds::zero();

  // Caller must ensure that mutex is held prior to calling this method
  std::unique_lock<std::mutex> lk(mu_->getLock(), std::adopt_lock);
#ifndef NDEBUG
  mu_->locked_ = false;
#endif

  std::cv_status cvStatus = cv_.wait_for(lk, relTimeUs);

#ifndef NDEBUG
  mu_->locked_ = true;
#endif

  // Release ownership of the lock as we don't want it to be unlocked when
  // it goes out of scope (as we adopted the lock and didn't lock it ourselves)
  lk.release();

  return cvStatus == std::cv_status::timeout;
}

void CondVar::Signal() { cv_.notify_one(); }

void CondVar::SignalAll() { cv_.notify_all(); }
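
// Best-effort: GetCurrentProcessorNumber() reports which processor the
// calling thread was running on at the time of the call; on machines with
// more than 64 logical processors the number is relative to the current
// processor group.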
int PhysicalCoreID() { return GetCurrentProcessorNumber(); }

void InitOnce(OnceType* once, void (*initializer)()) {
  std::call_once(once->flag_, initializer);
}
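
// Minimal emulation of the POSIX opendir()/readdir()/closedir() API on top
// of the CRT _findfirst64()/_findnext64() functions.
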
// Private structure, exposed only by pointer
struct DIR {
  intptr_t handle_;
  bool firstread_;
  struct __finddata64_t data_;
  dirent entry_;

  DIR() : handle_(-1), firstread_(true) {}

  DIR(const DIR&) = delete;
  DIR& operator=(const DIR&) = delete;

  ~DIR() {
    if (-1 != handle_) {
      _findclose(handle_);
    }
  }
};

DIR* opendir(const char* name) {
  if (!name || *name == 0) {
    errno = ENOENT;
    return nullptr;
  }

  std::string pattern(name);
  pattern.append("\\").append("*");

  std::unique_ptr<DIR> dir(new DIR);

  dir->handle_ = _findfirst64(pattern.c_str(), &dir->data_);

  if (dir->handle_ == -1) {
    return nullptr;
  }

  strncpy_s(dir->entry_.d_name, dir->data_.name, strlen(dir->data_.name));

  return dir.release();
}
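
// opendir() already fetched the first entry via _findfirst64(), so the
// first readdir() call returns that cached entry before advancing with
// _findnext64().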
struct dirent* readdir(DIR* dirp) {
  if (!dirp || dirp->handle_ == -1) {
    errno = EBADF;
    return nullptr;
  }

  if (dirp->firstread_) {
    dirp->firstread_ = false;
    return &dirp->entry_;
  }

  auto ret = _findnext64(dirp->handle_, &dirp->data_);

  if (ret != 0) {
    return nullptr;
  }

  strncpy_s(dirp->entry_.d_name, dirp->data_.name, strlen(dirp->data_.name));

  return &dirp->entry_;
}

int closedir(DIR* dirp) {
  delete dirp;
  return 0;
}
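
// Emulation of POSIX truncate(): opens the existing file (OPEN_EXISTING, so
// nothing is created) and moves its end-of-file marker with
// SetFileInformationByHandle(), mapping common Win32 errors onto errno.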
int truncate(const char* path, int64_t len) {
  if (path == nullptr) {
    errno = EFAULT;
    return -1;
  }

  if (len < 0) {
    errno = EINVAL;
    return -1;
  }

  HANDLE hFile =
      CreateFile(path, GENERIC_READ | GENERIC_WRITE,
                 FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                 NULL,           // Security attrs
                 OPEN_EXISTING,  // Truncate existing file only
                 FILE_ATTRIBUTE_NORMAL, NULL);

  if (INVALID_HANDLE_VALUE == hFile) {
    auto lastError = GetLastError();
    if (lastError == ERROR_FILE_NOT_FOUND) {
      errno = ENOENT;
    } else if (lastError == ERROR_ACCESS_DENIED) {
      errno = EACCES;
    } else {
      errno = EIO;
    }
    return -1;
  }

  int result = 0;
  FILE_END_OF_FILE_INFO end_of_file;
  end_of_file.EndOfFile.QuadPart = len;

  if (!SetFileInformationByHandle(hFile, FileEndOfFileInfo, &end_of_file,
                                  sizeof(FILE_END_OF_FILE_INFO))) {
    errno = EIO;
    result = -1;
  }

  CloseHandle(hFile);

  return result;
}

void Crash(const std::string& srcfile, int srcline) {
  fprintf(stdout, "Crashing at %s:%d\n", srcfile.c_str(), srcline);
  fflush(stdout);
  abort();
}
int GetMaxOpenFiles() { return -1; }

} // namespace port
} // namespace rocksdb

#ifdef JEMALLOC

#include "jemalloc/jemalloc.h"

#ifndef JEMALLOC_NON_INIT

namespace rocksdb {
namespace port {
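
// Run jemalloc initialization from a CRT startup hook (see the .CRT$XCT
// machinery below) so the allocator is ready before any C++ static
// constructors that might allocate.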
__declspec(noinline) void WINAPI InitializeJemalloc() {
  je_init();
  atexit(je_uninit);
}

} // namespace port
} // namespace rocksdb

extern "C" {

#ifdef _WIN64

#pragma comment(linker, "/INCLUDE:p_rocksdb_init_jemalloc")

typedef void(WINAPI* CRT_Startup_Routine)(void);

// The .CRT section is merged with .rdata on x64, so the pointer must be
// constant data and must have external linkage. We place it into .CRT$XCT
// so that it runs earlier than C++ static constructors, which are placed
// into .CRT$XCU.
#pragma const_seg(".CRT$XCT")
extern const CRT_Startup_Routine p_rocksdb_init_jemalloc;
const CRT_Startup_Routine p_rocksdb_init_jemalloc =
    rocksdb::port::InitializeJemalloc;
#pragma const_seg()

#else // _WIN64

// x86 untested
#pragma comment(linker, "/INCLUDE:_p_rocksdb_init_jemalloc")
#pragma section(".CRT$XCT", read)
JEMALLOC_SECTION(".CRT$XCT") JEMALLOC_ATTR(used) static const void(
    WINAPI* p_rocksdb_init_jemalloc)(void) = rocksdb::port::InitializeJemalloc;

#endif // _WIN64

} // extern "C"

#endif // JEMALLOC_NON_INIT

// Global operator new/delete overrides. At link time these definitions
// replace the CRT versions so that every allocation is routed through
// jemalloc.
void* operator new(size_t size) {
  void* p = je_malloc(size);
  if (!p) {
    throw std::bad_alloc();
  }
  return p;
}

void* operator new[](size_t size) {
  void* p = je_malloc(size);
  if (!p) {
    throw std::bad_alloc();
  }
  return p;
}

void operator delete(void* p) { je_free(p); }

void operator delete[](void* p) { je_free(p); }

#endif // JEMALLOC