// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#if defined(OS_LINUX)
#include <linux/fs.h>
#endif
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#ifdef OS_LINUX
#include <sys/statfs.h>
#include <sys/syscall.h>
#include <sys/sysmacros.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <algorithm>
// Get nano time includes
#if defined(OS_LINUX) || defined(OS_FREEBSD)
#elif defined(__MACH__)
#include <mach/clock.h>
#include <mach/mach.h>
#else
#include <chrono>
#endif
#include <deque>
#include <set>
#include <vector>

#include "env/io_posix.h"
#include "env/posix_logger.h"
#include "monitoring/iostats_context_imp.h"
#include "monitoring/thread_status_updater.h"
#include "port/port.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/random.h"
#include "util/string_util.h"
#include "util/sync_point.h"
#include "util/thread_local.h"
#include "util/threadpool_imp.h"

#if !defined(TMPFS_MAGIC)
#define TMPFS_MAGIC 0x01021994
#endif
#if !defined(XFS_SUPER_MAGIC)
#define XFS_SUPER_MAGIC 0x58465342
#endif
#if !defined(EXT4_SUPER_MAGIC)
#define EXT4_SUPER_MAGIC 0xEF53
#endif

namespace rocksdb {

namespace {

ThreadStatusUpdater* CreateThreadStatusUpdater() {
  return new ThreadStatusUpdater();
}

inline mode_t GetDBFileMode(bool allow_non_owner_access) {
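  // 0644: owner read/write, group/other read-only; 0600: owner read/write only.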
  return allow_non_owner_access ? 0644 : 0600;
}

// list of pathnames that are locked
static std::set<std::string> lockedFiles;
static port::Mutex mutex_lockedFiles;

static int LockOrUnlock(const std::string& fname, int fd, bool lock) {
  mutex_lockedFiles.Lock();
  if (lock) {
    // If it already exists in the lockedFiles set, then it is already locked
    // and we fail this lock attempt. Otherwise, insert it into lockedFiles.
    // This check is needed because fcntl() does not detect lock conflict
    // if the fcntl is issued by the same thread that earlier acquired
    // this lock.
    if (lockedFiles.insert(fname).second == false) {
      mutex_lockedFiles.Unlock();
      errno = ENOLCK;
      return -1;
    }
  } else {
    // If we are unlocking, then verify that we had locked it earlier;
    // it should already exist in lockedFiles. Remove it from lockedFiles.
    if (lockedFiles.erase(fname) != 1) {
      mutex_lockedFiles.Unlock();
      errno = ENOLCK;
      return -1;
    }
  }
  errno = 0;
  struct flock f;
  memset(&f, 0, sizeof(f));
  f.l_type = (lock ? F_WRLCK : F_UNLCK);
  f.l_whence = SEEK_SET;
  f.l_start = 0;
  f.l_len = 0;  // Lock/unlock entire file
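  // F_SETLK is non-blocking: if another process holds a conflicting lock,
  // fcntl() fails immediately with EACCES or EAGAIN instead of waiting.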
  int value = fcntl(fd, F_SETLK, &f);
  if (value == -1 && lock) {
    // if there is an error in locking, then remove the pathname from lockedFiles
    lockedFiles.erase(fname);
  }
  mutex_lockedFiles.Unlock();
  return value;
}

class PosixFileLock : public FileLock {
 public:
  int fd_;
  std::string filename;
};

class PosixEnv : public Env {
 public:
  PosixEnv();

  virtual ~PosixEnv() {
    for (const auto tid : threads_to_join_) {
      pthread_join(tid, nullptr);
    }
    for (int pool_id = 0; pool_id < Env::Priority::TOTAL; ++pool_id) {
      thread_pools_[pool_id].JoinAllThreads();
    }
    // Delete the thread_status_updater_ only when the current Env is not
    // Env::Default(). This is to avoid the free-after-use error when
    // Env::Default() is destructed while some other child threads are
    // still trying to update thread status.
    if (this != Env::Default()) {
      delete thread_status_updater_;
    }
  }

  void SetFD_CLOEXEC(int fd, const EnvOptions* options) {
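    // Mark the descriptor close-on-exec so it is not leaked into child
    // processes spawned by the application.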
    if ((options == nullptr || options->set_fd_cloexec) && fd > 0) {
      fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
    }
  }

  virtual Status NewSequentialFile(const std::string& fname,
                                   unique_ptr<SequentialFile>* result,
                                   const EnvOptions& options) override {
    result->reset();
    int fd = -1;
    int flags = O_RDONLY;
    FILE* file = nullptr;

    if (options.use_direct_reads && !options.use_mmap_reads) {
#ifdef ROCKSDB_LITE
      return Status::IOError(fname, "Direct I/O not supported in RocksDB lite");
#endif  // !ROCKSDB_LITE
#if !defined(OS_MACOSX) && !defined(OS_OPENBSD) && !defined(OS_SOLARIS)
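      // O_DIRECT is unavailable on the excluded platforms; on macOS the page
      // cache is bypassed with fcntl(F_NOCACHE) after open() instead.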
      flags |= O_DIRECT;
#endif
    }

    do {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(fname.c_str(), flags, GetDBFileMode(allow_non_owner_access_));
    } while (fd < 0 && errno == EINTR);
    if (fd < 0) {
      return IOError("While opening a file for sequentially reading", fname,
                     errno);
    }

    SetFD_CLOEXEC(fd, &options);

    if (options.use_direct_reads && !options.use_mmap_reads) {
#ifdef OS_MACOSX
      if (fcntl(fd, F_NOCACHE, 1) == -1) {
        close(fd);
        return IOError("While fcntl NoCache", fname, errno);
      }
#endif
    } else {
      do {
        IOSTATS_TIMER_GUARD(open_nanos);
        file = fdopen(fd, "r");
      } while (file == nullptr && errno == EINTR);
      if (file == nullptr) {
        close(fd);
        return IOError("While opening file for sequentially read", fname,
                       errno);
      }
    }
    result->reset(new PosixSequentialFile(fname, file, fd, options));
    return Status::OK();
  }

  virtual Status NewRandomAccessFile(const std::string& fname,
                                     unique_ptr<RandomAccessFile>* result,
                                     const EnvOptions& options) override {
    result->reset();
    Status s;
    int fd;
    int flags = O_RDONLY;
    if (options.use_direct_reads && !options.use_mmap_reads) {
#ifdef ROCKSDB_LITE
      return Status::IOError(fname, "Direct I/O not supported in RocksDB lite");
#endif  // !ROCKSDB_LITE
#if !defined(OS_MACOSX) && !defined(OS_OPENBSD) && !defined(OS_SOLARIS)
      flags |= O_DIRECT;
      TEST_SYNC_POINT_CALLBACK("NewRandomAccessFile:O_DIRECT", &flags);
#endif
    }

    do {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(fname.c_str(), flags, GetDBFileMode(allow_non_owner_access_));
    } while (fd < 0 && errno == EINTR);
    if (fd < 0) {
      return IOError("While open a file for random read", fname, errno);
    }
    SetFD_CLOEXEC(fd, &options);

    if (options.use_mmap_reads && sizeof(void*) >= 8) {
      // Use of mmap for random reads has been removed because it
      // kills performance when storage is fast.
      // Use mmap when virtual address-space is plentiful.
      uint64_t size;
      s = GetFileSize(fname, &size);
      if (s.ok()) {
        void* base = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
        if (base != MAP_FAILED) {
          result->reset(new PosixMmapReadableFile(fd, fname, base,
                                                  size, options));
        } else {
          s = IOError("while mmap file for read", fname, errno);
        }
      }
      close(fd);
    } else {
      if (options.use_direct_reads && !options.use_mmap_reads) {
#ifdef OS_MACOSX
        if (fcntl(fd, F_NOCACHE, 1) == -1) {
          close(fd);
          return IOError("while fcntl NoCache", fname, errno);
        }
#endif
      }
      result->reset(new PosixRandomAccessFile(fname, fd, options));
    }
    return s;
  }

  virtual Status OpenWritableFile(const std::string& fname,
                                  unique_ptr<WritableFile>* result,
                                  const EnvOptions& options,
                                  bool reopen = false) {
    result->reset();
    Status s;
    int fd = -1;
    int flags = (reopen) ? (O_CREAT | O_APPEND) : (O_CREAT | O_TRUNC);
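    // Reopening appends to an existing file; a fresh open truncates it.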
    // Direct IO mode with O_DIRECT flag or F_NOCACHE (MAC OSX)
    if (options.use_direct_writes && !options.use_mmap_writes) {
      // Note: we should avoid O_APPEND here due to the following bug:
      // POSIX requires that opening a file with the O_APPEND flag should
      // have no effect on the location at which pwrite() writes data.
      // However, on Linux, if a file is opened with O_APPEND, pwrite()
      // appends data to the end of the file, regardless of the value of
      // offset.
      // More info here: https://linux.die.net/man/2/pwrite
#ifdef ROCKSDB_LITE
      return Status::IOError(fname, "Direct I/O not supported in RocksDB lite");
#endif  // ROCKSDB_LITE
      flags |= O_WRONLY;
#if !defined(OS_MACOSX) && !defined(OS_OPENBSD) && !defined(OS_SOLARIS)
      flags |= O_DIRECT;
#endif
      TEST_SYNC_POINT_CALLBACK("NewWritableFile:O_DIRECT", &flags);
    } else if (options.use_mmap_writes) {
      // non-direct I/O
      flags |= O_RDWR;
    } else {
      flags |= O_WRONLY;
    }

    do {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(fname.c_str(), flags, GetDBFileMode(allow_non_owner_access_));
    } while (fd < 0 && errno == EINTR);

    if (fd < 0) {
      s = IOError("While open a file for appending", fname, errno);
      return s;
    }
    SetFD_CLOEXEC(fd, &options);

    if (options.use_mmap_writes) {
      if (!checkedDiskForMmap_) {
        // this will be executed once in the program's lifetime.
        // do not use mmapWrite on non ext-3/xfs/tmpfs systems.
        if (!SupportsFastAllocate(fname)) {
          forceMmapOff_ = true;
        }
        checkedDiskForMmap_ = true;
      }
    }
    if (options.use_mmap_writes && !forceMmapOff_) {
      result->reset(new PosixMmapFile(fname, fd, page_size_, options));
    } else if (options.use_direct_writes && !options.use_mmap_writes) {
#ifdef OS_MACOSX
      if (fcntl(fd, F_NOCACHE, 1) == -1) {
        close(fd);
        s = IOError("While fcntl NoCache an opened file for appending", fname,
                    errno);
        return s;
      }
#elif defined(OS_SOLARIS)
      if (directio(fd, DIRECTIO_ON) == -1) {
        if (errno != ENOTTY) {  // ZFS filesystems don't support DIRECTIO_ON
          close(fd);
          s = IOError("While calling directio()", fname, errno);
          return s;
        }
      }
#endif
      result->reset(new PosixWritableFile(fname, fd, options));
    } else {
      // disable mmap writes
      EnvOptions no_mmap_writes_options = options;
      no_mmap_writes_options.use_mmap_writes = false;
      result->reset(new PosixWritableFile(fname, fd, no_mmap_writes_options));
    }
    return s;
  }

  virtual Status NewWritableFile(const std::string& fname,
                                 unique_ptr<WritableFile>* result,
                                 const EnvOptions& options) override {
    return OpenWritableFile(fname, result, options, false);
  }

  virtual Status ReopenWritableFile(const std::string& fname,
                                    unique_ptr<WritableFile>* result,
                                    const EnvOptions& options) override {
    return OpenWritableFile(fname, result, options, true);
  }

  virtual Status ReuseWritableFile(const std::string& fname,
                                   const std::string& old_fname,
                                   unique_ptr<WritableFile>* result,
                                   const EnvOptions& options) override {
    result->reset();
    Status s;
    int fd = -1;

    int flags = 0;
    // Direct IO mode with O_DIRECT flag or F_NOCACHE (MAC OSX)
    if (options.use_direct_writes && !options.use_mmap_writes) {
#ifdef ROCKSDB_LITE
      return Status::IOError(fname, "Direct I/O not supported in RocksDB lite");
#endif  // !ROCKSDB_LITE
      flags |= O_WRONLY;
#if !defined(OS_MACOSX) && !defined(OS_OPENBSD) && !defined(OS_SOLARIS)
      flags |= O_DIRECT;
#endif
      TEST_SYNC_POINT_CALLBACK("NewWritableFile:O_DIRECT", &flags);
    } else if (options.use_mmap_writes) {
      // mmap needs O_RDWR mode
      flags |= O_RDWR;
    } else {
      flags |= O_WRONLY;
    }

    do {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(old_fname.c_str(), flags,
                GetDBFileMode(allow_non_owner_access_));
    } while (fd < 0 && errno == EINTR);
    if (fd < 0) {
      s = IOError("while reopen file for write", fname, errno);
      return s;
    }

    SetFD_CLOEXEC(fd, &options);
    // rename into place
    if (rename(old_fname.c_str(), fname.c_str()) != 0) {
      s = IOError("while rename file to " + fname, old_fname, errno);
      close(fd);
      return s;
    }

    if (options.use_mmap_writes) {
      if (!checkedDiskForMmap_) {
        // this will be executed once in the program's lifetime.
        // do not use mmapWrite on non ext-3/xfs/tmpfs systems.
        if (!SupportsFastAllocate(fname)) {
          forceMmapOff_ = true;
        }
        checkedDiskForMmap_ = true;
      }
    }
    if (options.use_mmap_writes && !forceMmapOff_) {
      result->reset(new PosixMmapFile(fname, fd, page_size_, options));
    } else if (options.use_direct_writes && !options.use_mmap_writes) {
#ifdef OS_MACOSX
      if (fcntl(fd, F_NOCACHE, 1) == -1) {
        close(fd);
        s = IOError("while fcntl NoCache for reopened file for append", fname,
                    errno);
        return s;
      }
#elif defined(OS_SOLARIS)
      if (directio(fd, DIRECTIO_ON) == -1) {
        if (errno != ENOTTY) {  // ZFS filesystems don't support DIRECTIO_ON
          close(fd);
          s = IOError("while calling directio()", fname, errno);
          return s;
        }
      }
#endif
      result->reset(new PosixWritableFile(fname, fd, options));
    } else {
      // disable mmap writes
      EnvOptions no_mmap_writes_options = options;
      no_mmap_writes_options.use_mmap_writes = false;
      result->reset(new PosixWritableFile(fname, fd, no_mmap_writes_options));
    }
    return s;
  }

  virtual Status NewRandomRWFile(const std::string& fname,
                                 unique_ptr<RandomRWFile>* result,
                                 const EnvOptions& options) override {
    int fd = -1;
    while (fd < 0) {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(fname.c_str(), O_CREAT | O_RDWR,
                GetDBFileMode(allow_non_owner_access_));
      if (fd < 0) {
        // Error while opening the file
        if (errno == EINTR) {
          continue;
        }
        return IOError("While open file for random read/write", fname, errno);
      }
    }

    SetFD_CLOEXEC(fd, &options);
    result->reset(new PosixRandomRWFile(fname, fd, options));
    return Status::OK();
  }

  virtual Status NewMemoryMappedFileBuffer(
      const std::string& fname,
      unique_ptr<MemoryMappedFileBuffer>* result) override {
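    // The file is mapped read/write with MAP_SHARED, so writes through the
    // returned buffer are visible in (and flushed back to) the underlying file.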
    int fd = -1;
    Status status;
    while (fd < 0) {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(fname.c_str(), O_RDWR, 0644);
      if (fd < 0) {
        // Error while opening the file
        if (errno == EINTR) {
          continue;
        }
        status =
            IOError("While open file for raw mmap buffer access", fname, errno);
        break;
      }
    }
    uint64_t size;
    if (status.ok()) {
      status = GetFileSize(fname, &size);
    }
    void* base = nullptr;
    if (status.ok()) {
      base = mmap(nullptr, static_cast<size_t>(size), PROT_READ | PROT_WRITE,
                  MAP_SHARED, fd, 0);
      if (base == MAP_FAILED) {
        status = IOError("while mmap file for read", fname, errno);
      }
    }
    if (status.ok()) {
      result->reset(
          new PosixMemoryMappedFileBuffer(base, static_cast<size_t>(size)));
    }
    if (fd >= 0) {
      // don't need to keep it open after mmap has been called
      close(fd);
    }
    return status;
  }

  virtual Status NewDirectory(const std::string& name,
                              unique_ptr<Directory>* result) override {
    result->reset();
    int fd;
    {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(name.c_str(), 0);
    }
    if (fd < 0) {
      return IOError("While open directory", name, errno);
    } else {
      result->reset(new PosixDirectory(fd));
    }
    return Status::OK();
  }

  virtual Status FileExists(const std::string& fname) override {
    int result = access(fname.c_str(), F_OK);

    if (result == 0) {
      return Status::OK();
    }

    switch (errno) {
      case EACCES:
      case ELOOP:
      case ENAMETOOLONG:
      case ENOENT:
      case ENOTDIR:
        return Status::NotFound();
      default:
        assert(result == EIO || result == ENOMEM);
        return Status::IOError("Unexpected error(" + ToString(result) +
                               ") accessing file `" + fname + "' ");
    }
  }

  virtual Status GetChildren(const std::string& dir,
                             std::vector<std::string>* result) override {
    result->clear();
    DIR* d = opendir(dir.c_str());
    if (d == nullptr) {
      switch (errno) {
        case EACCES:
        case ENOENT:
        case ENOTDIR:
          return Status::NotFound();
        default:
          return IOError("While opendir", dir, errno);
      }
    }
    struct dirent* entry;
    while ((entry = readdir(d)) != nullptr) {
      result->push_back(entry->d_name);
    }
    closedir(d);
    return Status::OK();
  }

  virtual Status DeleteFile(const std::string& fname) override {
    Status result;
    if (unlink(fname.c_str()) != 0) {
      result = IOError("while unlink() file", fname, errno);
    }
    return result;
  };

  virtual Status CreateDir(const std::string& name) override {
    Status result;
    if (mkdir(name.c_str(), 0755) != 0) {
      result = IOError("While mkdir", name, errno);
    }
    return result;
  };

  virtual Status CreateDirIfMissing(const std::string& name) override {
    Status result;
    if (mkdir(name.c_str(), 0755) != 0) {
      if (errno != EEXIST) {
        result = IOError("While mkdir if missing", name, errno);
      } else if (!DirExists(name)) {  // Check that name is actually a
                                      // directory.
        // Message is taken from mkdir
        result = Status::IOError("`" + name + "' exists but is not a directory");
      }
    }
    return result;
  };

  virtual Status DeleteDir(const std::string& name) override {
    Status result;
    if (rmdir(name.c_str()) != 0) {
      result = IOError("file rmdir", name, errno);
    }
    return result;
  };

  virtual Status GetFileSize(const std::string& fname,
                             uint64_t* size) override {
    Status s;
    struct stat sbuf;
    if (stat(fname.c_str(), &sbuf) != 0) {
      *size = 0;
      s = IOError("while stat a file for size", fname, errno);
    } else {
      *size = sbuf.st_size;
    }
    return s;
  }

  virtual Status GetFileModificationTime(const std::string& fname,
                                         uint64_t* file_mtime) override {
    struct stat s;
    if (stat(fname.c_str(), &s) != 0) {
      return IOError("while stat a file for modification time", fname, errno);
    }
    *file_mtime = static_cast<uint64_t>(s.st_mtime);
    return Status::OK();
  }

  virtual Status RenameFile(const std::string& src,
                            const std::string& target) override {
    Status result;
    if (rename(src.c_str(), target.c_str()) != 0) {
      result = IOError("While renaming a file to " + target, src, errno);
    }
    return result;
  }

  virtual Status LinkFile(const std::string& src,
                          const std::string& target) override {
    Status result;
    if (link(src.c_str(), target.c_str()) != 0) {
      if (errno == EXDEV) {
        return Status::NotSupported("No cross FS links allowed");
      }
      result = IOError("while link file to " + target, src, errno);
    }
    return result;
  }

  virtual Status AreFilesSame(const std::string& first,
                              const std::string& second, bool* res) override {
    struct stat statbuf[2];
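    // Two names refer to the same file iff they live on the same device
    // (same major/minor numbers) and share an inode number.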
    if (stat(first.c_str(), &statbuf[0]) != 0) {
      return IOError("stat file", first, errno);
    }
    if (stat(second.c_str(), &statbuf[1]) != 0) {
      return IOError("stat file", second, errno);
    }

    if (major(statbuf[0].st_dev) != major(statbuf[1].st_dev) ||
        minor(statbuf[0].st_dev) != minor(statbuf[1].st_dev) ||
        statbuf[0].st_ino != statbuf[1].st_ino) {
      *res = false;
    } else {
      *res = true;
    }
    return Status::OK();
  }

  virtual Status LockFile(const std::string& fname, FileLock** lock) override {
    *lock = nullptr;
    Status result;
    int fd;
    {
      IOSTATS_TIMER_GUARD(open_nanos);
      fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
    }
    if (fd < 0) {
      result = IOError("while open a file for lock", fname, errno);
    } else if (LockOrUnlock(fname, fd, true) == -1) {
      result = IOError("While lock file", fname, errno);
      close(fd);
    } else {
      SetFD_CLOEXEC(fd, nullptr);
      PosixFileLock* my_lock = new PosixFileLock;
      my_lock->fd_ = fd;
      my_lock->filename = fname;
      *lock = my_lock;
    }
    return result;
  }

  virtual Status UnlockFile(FileLock* lock) override {
    PosixFileLock* my_lock = reinterpret_cast<PosixFileLock*>(lock);
    Status result;
    if (LockOrUnlock(my_lock->filename, my_lock->fd_, false) == -1) {
      result = IOError("unlock", my_lock->filename, errno);
    }
    close(my_lock->fd_);
    delete my_lock;
    return result;
  }

  virtual void Schedule(void (*function)(void* arg1), void* arg,
                        Priority pri = LOW, void* tag = nullptr,
                        void (*unschedFunction)(void* arg) = nullptr) override;

  virtual int UnSchedule(void* arg, Priority pri) override;

  virtual void StartThread(void (*function)(void* arg), void* arg) override;

  virtual void WaitForJoin() override;

  virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const override;

  virtual Status GetTestDirectory(std::string* result) override {
    const char* env = getenv("TEST_TMPDIR");
    if (env && env[0] != '\0') {
      *result = env;
    } else {
      char buf[100];
      snprintf(buf, sizeof(buf), "/tmp/rocksdbtest-%d", int(geteuid()));
      *result = buf;
    }
    // Directory may already exist
    CreateDir(*result);
    return Status::OK();
  }

  virtual Status GetThreadList(
      std::vector<ThreadStatus>* thread_list) override {
    assert(thread_status_updater_);
    return thread_status_updater_->GetThreadList(thread_list);
  }

  static uint64_t gettid(pthread_t tid) {
    uint64_t thread_id = 0;
    memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid)));
    return thread_id;
  }

  static uint64_t gettid() {
    pthread_t tid = pthread_self();
    return gettid(tid);
  }

  virtual uint64_t GetThreadID() const override {
    return gettid(pthread_self());
  }

  virtual Status NewLogger(const std::string& fname,
                           shared_ptr<Logger>* result) override {
    FILE* f;
    {
      IOSTATS_TIMER_GUARD(open_nanos);
      f = fopen(fname.c_str(), "w");
    }
    if (f == nullptr) {
      result->reset();
      return IOError("when fopen a file for new logger", fname, errno);
    } else {
      int fd = fileno(f);
#ifdef ROCKSDB_FALLOCATE_PRESENT
      fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 4 * 1024);
#endif
      SetFD_CLOEXEC(fd, nullptr);
      result->reset(new PosixLogger(f, &PosixEnv::gettid, this));
      return Status::OK();
    }
  }

  virtual uint64_t NowMicros() override {
    struct timeval tv;
    gettimeofday(&tv, nullptr);
    return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
  }

  virtual uint64_t NowNanos() override {
#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_AIX)
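    // CLOCK_MONOTONIC is immune to wall-clock adjustments, so it is the
    // preferred source for measuring elapsed time on these platforms.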
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return static_cast<uint64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(OS_SOLARIS)
    return gethrtime();
#elif defined(__MACH__)
    clock_serv_t cclock;
    mach_timespec_t ts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &ts);
    mach_port_deallocate(mach_task_self(), cclock);
    return static_cast<uint64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#else
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
#endif
  }

  virtual void SleepForMicroseconds(int micros) override { usleep(micros); }

  virtual Status GetHostName(char* name, uint64_t len) override {
    int ret = gethostname(name, static_cast<size_t>(len));
    if (ret < 0) {
      if (errno == EFAULT || errno == EINVAL)
        return Status::InvalidArgument(strerror(errno));
      else
        return IOError("GetHostName", name, errno);
    }
    return Status::OK();
  }

  virtual Status GetCurrentTime(int64_t* unix_time) override {
    time_t ret = time(nullptr);
    if (ret == (time_t) -1) {
      return IOError("GetCurrentTime", "", errno);
    }
    *unix_time = (int64_t) ret;
    return Status::OK();
  }

  virtual Status GetAbsolutePath(const std::string& db_path,
                                 std::string* output_path) override {
    if (!db_path.empty() && db_path[0] == '/') {
      *output_path = db_path;
      return Status::OK();
    }

    char the_path[256];
    char* ret = getcwd(the_path, 256);
    if (ret == nullptr) {
      return Status::IOError(strerror(errno));
    }

    *output_path = ret;
    return Status::OK();
  }

  // Allow increasing the number of worker threads.
  virtual void SetBackgroundThreads(int num, Priority pri) override {
    assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
    thread_pools_[pri].SetBackgroundThreads(num);
  }

  virtual int GetBackgroundThreads(Priority pri) override {
    assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
    return thread_pools_[pri].GetBackgroundThreads();
  }

  virtual Status SetAllowNonOwnerAccess(bool allow_non_owner_access) override {
    allow_non_owner_access_ = allow_non_owner_access;
    return Status::OK();
  }

  // Allow increasing the number of worker threads.
  virtual void IncBackgroundThreadsIfNeeded(int num, Priority pri) override {
    assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
    thread_pools_[pri].IncBackgroundThreadsIfNeeded(num);
  }

  virtual void LowerThreadPoolIOPriority(Priority pool = LOW) override {
    assert(pool >= Priority::BOTTOM && pool <= Priority::HIGH);
#ifdef OS_LINUX
    thread_pools_[pool].LowerIOPriority();
#else
    (void)pool;
#endif
  }

  virtual void LowerThreadPoolCPUPriority(Priority pool = LOW) override {
    assert(pool >= Priority::BOTTOM && pool <= Priority::HIGH);
#ifdef OS_LINUX
    thread_pools_[pool].LowerCPUPriority();
#else
    (void)pool;
#endif
  }

  virtual std::string TimeToString(uint64_t secondsSince1970) override {
    const time_t seconds = (time_t)secondsSince1970;
    struct tm t;
    int maxsize = 64;
    std::string dummy;
    dummy.reserve(maxsize);
    dummy.resize(maxsize);
    char* p = &dummy[0];
    localtime_r(&seconds, &t);
    snprintf(p, maxsize,
             "%04d/%02d/%02d-%02d:%02d:%02d ",
             t.tm_year + 1900,
             t.tm_mon + 1,
             t.tm_mday,
             t.tm_hour,
             t.tm_min,
             t.tm_sec);
    return dummy;
  }

  EnvOptions OptimizeForLogWrite(const EnvOptions& env_options,
                                 const DBOptions& db_options) const override {
    EnvOptions optimized = env_options;
    optimized.use_mmap_writes = false;
    optimized.use_direct_writes = false;
    optimized.bytes_per_sync = db_options.wal_bytes_per_sync;
    // TODO(icanadi) it's faster if fallocate_with_keep_size is false, but it
    // breaks TransactionLogIteratorStallAtLastRecord unit test. Fix the unit
    // test and make this false
    optimized.fallocate_with_keep_size = true;
    optimized.writable_file_max_buffer_size =
        db_options.writable_file_max_buffer_size;
    return optimized;
  }

  EnvOptions OptimizeForManifestWrite(
      const EnvOptions& env_options) const override {
    EnvOptions optimized = env_options;
    optimized.use_mmap_writes = false;
    optimized.use_direct_writes = false;
    optimized.fallocate_with_keep_size = true;
    return optimized;
  }

 private:
  bool checkedDiskForMmap_;
  bool forceMmapOff_;  // do we override Env options?

  // Returns true iff the named directory exists and is a directory.
  virtual bool DirExists(const std::string& dname) {
    struct stat statbuf;
    if (stat(dname.c_str(), &statbuf) == 0) {
      return S_ISDIR(statbuf.st_mode);
    }
    return false;  // stat() failed return false
  }

  bool SupportsFastAllocate(const std::string& path) {
#ifdef ROCKSDB_FALLOCATE_PRESENT
    struct statfs s;
    if (statfs(path.c_str(), &s)) {
      return false;
    }
    switch (s.f_type) {
      case EXT4_SUPER_MAGIC:
        return true;
      case XFS_SUPER_MAGIC:
        return true;
      case TMPFS_MAGIC:
        return true;
      default:
        return false;
    }
#else
    (void)path;
    return false;
#endif
  }

  size_t page_size_;

  std::vector<ThreadPoolImpl> thread_pools_;
  pthread_mutex_t mu_;
  std::vector<pthread_t> threads_to_join_;
  // If true, allow non owner read access for db files. Otherwise, non-owner
  // has no access to db files.
  bool allow_non_owner_access_;
};

PosixEnv::PosixEnv()
    : checkedDiskForMmap_(false),
      forceMmapOff_(false),
      page_size_(getpagesize()),
      thread_pools_(Priority::TOTAL),
      allow_non_owner_access_(true) {
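  // One thread pool is created per Env::Priority level.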
  ThreadPoolImpl::PthreadCall("mutex_init", pthread_mutex_init(&mu_, nullptr));
  for (int pool_id = 0; pool_id < Env::Priority::TOTAL; ++pool_id) {
    thread_pools_[pool_id].SetThreadPriority(
        static_cast<Env::Priority>(pool_id));
    // This allows later initializing the thread-local-env of each thread.
    thread_pools_[pool_id].SetHostEnv(this);
  }
  thread_status_updater_ = CreateThreadStatusUpdater();
}

void PosixEnv::Schedule(void (*function)(void* arg1), void* arg, Priority pri,
                        void* tag, void (*unschedFunction)(void* arg)) {
  assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
  thread_pools_[pri].Schedule(function, arg, tag, unschedFunction);
}

int PosixEnv::UnSchedule(void* arg, Priority pri) {
  return thread_pools_[pri].UnSchedule(arg);
}

unsigned int PosixEnv::GetThreadPoolQueueLen(Priority pri) const {
  assert(pri >= Priority::BOTTOM && pri <= Priority::HIGH);
  return thread_pools_[pri].GetQueueLen();
}

struct StartThreadState {
  void (*user_function)(void*);
  void* arg;
};

static void* StartThreadWrapper(void* arg) {
  StartThreadState* state = reinterpret_cast<StartThreadState*>(arg);
  state->user_function(state->arg);
  delete state;
  return nullptr;
}

void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
  pthread_t t;
  StartThreadState* state = new StartThreadState;
  state->user_function = function;
  state->arg = arg;
  ThreadPoolImpl::PthreadCall(
      "start thread", pthread_create(&t, nullptr, &StartThreadWrapper, state));
  ThreadPoolImpl::PthreadCall("lock", pthread_mutex_lock(&mu_));
  threads_to_join_.push_back(t);
  ThreadPoolImpl::PthreadCall("unlock", pthread_mutex_unlock(&mu_));
}

void PosixEnv::WaitForJoin() {
  for (const auto tid : threads_to_join_) {
    pthread_join(tid, nullptr);
  }
  threads_to_join_.clear();
}

}  // namespace

std::string Env::GenerateUniqueId() {
  std::string uuid_file = "/proc/sys/kernel/random/uuid";
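  // Prefer the kernel-provided random UUID when it is available; otherwise
  // fall back to the "nanos + random" identifier generated below.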
  Status s = FileExists(uuid_file);
  if (s.ok()) {
    std::string uuid;
    s = ReadFileToString(this, uuid_file, &uuid);
    if (s.ok()) {
      return uuid;
    }
  }
  // Could not read uuid_file - generate uuid using "nanos-random"
  Random64 r(time(nullptr));
  uint64_t random_uuid_portion =
      r.Uniform(std::numeric_limits<uint64_t>::max());
  uint64_t nanos_uuid_portion = NowNanos();
  char uuid2[200];
  snprintf(uuid2,
           200,
           "%lx-%lx",
           (unsigned long)nanos_uuid_portion,
           (unsigned long)random_uuid_portion);
  return uuid2;
}

//
// Default Posix Env
//
Env* Env::Default() {
  // The following function call initializes the singletons of ThreadLocalPtr
  // right before the static default_env. This guarantees default_env will
  // always be destructed before the ThreadLocalPtr singletons get
  // destructed, as C++ guarantees that the destruction of static variables
  // happens in the reverse order of their construction.
  //
  // Since static members are destructed in the reverse order
  // of their construction, having this call here guarantees that
  // the destructor of the static PosixEnv will run first, then the
  // singletons of ThreadLocalPtr.
  ThreadLocalPtr::InitSingletons();
  static PosixEnv default_env;
  return &default_env;
}

}  // namespace rocksdb