"make format" against last 10 commits
Summary: This helps Windows port to format their changes, as discussed. Might have formatted some other codes too becasue last 10 commits include more. Test Plan: Build it. Reviewers: anthony, IslamAbdelRahman, kradhakrishnan, yhchiang, igor Reviewed By: igor Subscribers: leveldb, dhruba Differential Revision: https://reviews.facebook.net/D41961
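A change like this is normally produced mechanically rather than by hand. As a rough sketch of the workflow (assuming LLVM's clang-format and its clang-format-diff.py helper are installed; the exact script behind RocksDB's "make format" target may differ), one would reformat only the lines touched by the recent commits:

    # Reformat, in place, only the lines changed in the last 10 commits.
    git diff -U0 HEAD~10 | clang-format-diff.py -i -p1

    # Or use the repository's own target, which is expected to wrap a similar script.
    make format

Feeding a zero-context diff to clang-format-diff.py keeps the cleanup limited to recently touched lines, which is why unrelated files from those commits can still pick up formatting changes.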
This commit is contained in:
parent 76d3cd3286
commit f9728640f3
db/c.cc (15 changed lines)
@ -2449,19 +2449,16 @@ extern void rocksdb_livefiles_destroy(
delete lf;
}

void rocksdb_get_options_from_string(
const rocksdb_options_t* base_options,
const char* opts_str, rocksdb_options_t* new_options,
void rocksdb_get_options_from_string(const rocksdb_options_t* base_options,
const char* opts_str,
rocksdb_options_t* new_options,
char** errptr) {
SaveError(errptr,
GetOptionsFromString(base_options->rep,
std::string(opts_str), &new_options->rep));
GetOptionsFromString(base_options->rep, std::string(opts_str),
&new_options->rep));
}

void rocksdb_free(
void* ptr){
free(ptr);
}
void rocksdb_free(void* ptr) { free(ptr); }

} // end extern "C"

@ -270,7 +270,8 @@ const char* Compaction::InputLevelSummary(
is_first = false;
}
len += snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
"%" ROCKSDB_PRIszt "@%d", input_level.size(), input_level.level);
"%" ROCKSDB_PRIszt "@%d", input_level.size(),
input_level.level);
}
snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
" files to L%d", output_level());

@ -122,9 +122,7 @@ class Compaction {
bool IsTrivialMove() const;

// If true, then the compaction can be done by simply deleting input files.
bool deletion_compaction() const {
return deletion_compaction_;
}
bool deletion_compaction() const { return deletion_compaction_; }

// Add all inputs to this compaction as delete operations to *edit.
void AddInputDeletions(VersionEdit* edit);

@ -777,7 +777,5 @@ int main(int argc, char** argv) {

#else

int main(int argc, char** argv) {
return 0;
}
int main(int argc, char** argv) { return 0; }
#endif // !defined(IOS_CROSS_COMPILE)

@ -165,8 +165,7 @@ void VerifyInitializationOfCompactionJobStats(
#endif // !defined(IOS_CROSS_COMPILE)
}

void VerifyCompactionJobStats(
const CompactionJobStats& compaction_job_stats,
void VerifyCompactionJobStats(const CompactionJobStats& compaction_job_stats,
const std::vector<FileMetaData*>& files,
size_t num_output_files) {
ASSERT_GE(compaction_job_stats.elapsed_micros, 0U);
@ -219,9 +218,7 @@ TEST_F(CompactionJobTest, Simple) {
ASSERT_OK(s);
mutex_.Unlock();

VerifyCompactionJobStats(
compaction_job_stats,
files, 1);
VerifyCompactionJobStats(compaction_job_stats, files, 1);

mock_table_factory_->AssertLatestFile(expected_results);
ASSERT_EQ(yield_callback_called, 20000);

@ -401,8 +401,9 @@ bool CompactionPicker::SetupOtherInputs(
if (expanded1.size() == output_level_inputs->size() &&
!FilesInCompaction(expanded1)) {
Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
"[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt "(%" PRIu64 "+%" PRIu64
" bytes) to %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt " (%" PRIu64 "+%" PRIu64 "bytes)\n",
"[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt "(%" PRIu64
"+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt
" (%" PRIu64 "+%" PRIu64 "bytes)\n",
cf_name.c_str(), input_level, inputs->size(),
output_level_inputs->size(), inputs0_size, inputs1_size,
expanded0.size(), expanded1.size(), expanded0_size, inputs1_size);
@ -1225,7 +1226,8 @@ Compaction* UniversalCompactionPicker::PickCompaction(
return nullptr;
}
VersionStorageInfo::LevelSummaryStorage tmp;
LogToBuffer(log_buffer, 3072, "[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n",
LogToBuffer(log_buffer, 3072,
"[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n",
cf_name.c_str(), sorted_runs.size(),
vstorage->LevelSummary(&tmp));

@ -3544,8 +3544,8 @@ class Benchmark {

char msg[100];
snprintf(msg, sizeof(msg),
"(reads:%" PRIu64 " merges:%" PRIu64 " total:%" PRIu64 " hits:%" \
PRIu64 " maxlength:%" ROCKSDB_PRIszt ")",
"(reads:%" PRIu64 " merges:%" PRIu64 " total:%" PRIu64
" hits:%" PRIu64 " maxlength:%" ROCKSDB_PRIszt ")",
num_gets, num_merges, readwrites_, num_hits, max_length);
thread->stats.AddMessage(msg);
}

@ -1631,9 +1631,9 @@ Status DBImpl::CompactFilesImpl(
CompactionJob compaction_job(
job_context->job_id, c.get(), db_options_, env_options_, versions_.get(),
&shutting_down_, log_buffer, directories_.GetDbDir(),
directories_.GetDataDir(c->output_path_id()), stats_,
snapshots_.GetAll(), table_cache_, std::move(yield_callback),
&event_logger_, c->mutable_cf_options()->paranoid_file_checks, dbname_,
directories_.GetDataDir(c->output_path_id()), stats_, snapshots_.GetAll(),
table_cache_, std::move(yield_callback), &event_logger_,
c->mutable_cf_options()->paranoid_file_checks, dbname_,
nullptr); // Here we pass a nullptr for CompactionJobStats because
// CompactFiles does not trigger OnCompactionCompleted(),
// which is the only place where CompactionJobStats is
@ -2602,8 +2602,8 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress, JobContext* job_context,
versions_.get(), &shutting_down_, log_buffer, directories_.GetDbDir(),
directories_.GetDataDir(c->output_path_id()), stats_,
snapshots_.GetAll(), table_cache_, std::move(yield_callback),
&event_logger_, c->mutable_cf_options()->paranoid_file_checks,
dbname_, &compaction_job_stats);
&event_logger_, c->mutable_cf_options()->paranoid_file_checks, dbname_,
&compaction_job_stats);
compaction_job.Prepare();

mutex_.Unlock();

@ -7,20 +7,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Introduction of SyncPoint effectively disabled building and running this test in Release build.
// Introduction of SyncPoint effectively disabled building and running this test
// in Release build.
// which is a pity, it is a good test
#if !(defined NDEBUG) || !defined(OS_WIN)

#include <algorithm>
#include <iostream>
#include <set>
#ifndef OS_WIN
# include <unistd.h>
#endif
#include <thread>
#include <unordered_set>
#include <utility>
#include <fcntl.h>
#ifndef OS_WIN
#include <unistd.h>
#endif

#include "db/filename.h"
#include "db/dbformat.h"
@ -8716,7 +8717,6 @@ TEST_F(DBTest, TransactionLogIteratorCorruptedLog) {
//
class RecoveryTestHelper {
public:

// Number of WAL files to generate
static const int kWALFilesCount = 10;
// Starting number for the WAL file name like 00010.log
@ -8727,8 +8727,8 @@ class RecoveryTestHelper {
static const int kValueSize = 10;

// Create WAL files with values filled in
static void FillData(DBTest* test, Options& options,
const size_t wal_count, size_t & count) {
static void FillData(DBTest* test, Options& options, const size_t wal_count,
size_t& count) {
DBOptions& db_options = options;

count = 0;
@ -8793,9 +8793,9 @@ class RecoveryTestHelper {
}

// Manuall corrupt the specified WAL
static void CorruptWAL(DBTest * test, Options& options,
const double off, const double len,
const int wal_file_id, const bool trunc = false) {
static void CorruptWAL(DBTest* test, Options& options, const double off,
const double len, const int wal_file_id,
const bool trunc = false) {
Env* env = options.env;
std::string fname = LogFileName(test->dbname_, wal_file_id);
uint64_t size;
@ -8912,8 +8912,8 @@ TEST_F(DBTest, kAbsoluteConsistency) {
TEST_F(DBTest, kPointInTimeRecovery) {
const int jstart = RecoveryTestHelper::kWALFileOffset;
const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
const int maxkeys = RecoveryTestHelper::kWALFilesCount *
RecoveryTestHelper::kKeysPerWALFile;
const int maxkeys =
RecoveryTestHelper::kWALFilesCount * RecoveryTestHelper::kKeysPerWALFile;

for (auto trunc : {true, false}) { /* Corruption style */
for (int i = 0; i < 4; i++) { /* Offset of corruption */

@ -216,11 +216,8 @@ TEST_F(EventListenerTest, OnSingleDBCompactionTest) {
// This simple Listener can only handle one flush at a time.
class TestFlushListener : public EventListener {
public:
explicit TestFlushListener(Env* env) :
slowdown_count(0),
stop_count(0),
db_closed(),
env_(env) {
explicit TestFlushListener(Env* env)
: slowdown_count(0), stop_count(0), db_closed(), env_(env) {
db_closed = false;
}
void OnTableFileCreated(

@ -90,7 +90,8 @@ class TransactionLogIteratorImpl : public TransactionLogIterator {
Env* env;
Logger* info_log;
virtual void Corruption(size_t bytes, const Status& s) override {
Log(InfoLogLevel::ERROR_LEVEL, info_log, "dropping %" ROCKSDB_PRIszt " bytes; %s", bytes,
Log(InfoLogLevel::ERROR_LEVEL, info_log,
"dropping %" ROCKSDB_PRIszt " bytes; %s", bytes,
s.ToString().c_str());
}
virtual void Info(const char* s) {

File diff suppressed because it is too large
@ -62,19 +62,17 @@ struct BatchResult {
|
||||
// Add empty __ctor and __dtor for the rule of five
|
||||
// However, preserve the original semantics and prohibit copying
|
||||
// as the unique_ptr member does not copy.
|
||||
BatchResult() {
|
||||
}
|
||||
BatchResult() {}
|
||||
|
||||
~BatchResult() {
|
||||
}
|
||||
~BatchResult() {}
|
||||
|
||||
BatchResult(const BatchResult&) = delete;
|
||||
|
||||
BatchResult& operator=(const BatchResult&) = delete;
|
||||
|
||||
BatchResult(BatchResult && bResult) :
|
||||
sequence(std::move(bResult.sequence)), writeBatchPtr(std::move(bResult.writeBatchPtr)) {
|
||||
}
|
||||
BatchResult(BatchResult&& bResult)
|
||||
: sequence(std::move(bResult.sequence)),
|
||||
writeBatchPtr(std::move(bResult.writeBatchPtr)) {}
|
||||
|
||||
BatchResult& operator=(BatchResult&& bResult) {
|
||||
sequence = std::move(bResult.sequence);
|
||||
|
@ -57,21 +57,15 @@ struct Variant {
|
||||
new (&data_.s) std::string(s);
|
||||
}
|
||||
|
||||
Variant(const Variant& v) : type_(v.type_) {
|
||||
Init(v, data_);
|
||||
}
|
||||
Variant(const Variant& v) : type_(v.type_) { Init(v, data_); }
|
||||
|
||||
Variant& operator=(const Variant& v);
|
||||
|
||||
Variant(Variant&& rhs) : type_(kNull) {
|
||||
*this = std::move(rhs);
|
||||
}
|
||||
Variant(Variant&& rhs) : type_(kNull) { *this = std::move(rhs); }
|
||||
|
||||
Variant& operator=(Variant&& v);
|
||||
|
||||
~Variant() {
|
||||
Destroy(type_, data_);
|
||||
}
|
||||
~Variant() { Destroy(type_, data_); }
|
||||
|
||||
Type type() const { return type_; }
|
||||
bool get_bool() const { return data_.b; }
|
||||
@ -83,14 +77,14 @@ struct Variant {
|
||||
bool operator!=(const Variant& other) const { return !(*this == other); }
|
||||
|
||||
private:
|
||||
|
||||
Type type_;
|
||||
|
||||
union Data {
|
||||
bool b;
|
||||
uint64_t i;
|
||||
double d;
|
||||
// Current version of MS compiler not C++11 compliant so can not put std::string
|
||||
// Current version of MS compiler not C++11 compliant so can not put
|
||||
// std::string
|
||||
// however, even then we still need the rest of the maintenance.
|
||||
char s[sizeof(std::string)];
|
||||
} data_;
|
||||
|
@ -11,6 +11,7 @@
|
||||
|
||||
#include "pragma_error.h"
|
||||
|
||||
ROCKSDB_WARNING("Warning: This file was moved to rocksdb/utilities/backupable_db.h")
|
||||
ROCKSDB_WARNING(
|
||||
"Warning: This file was moved to rocksdb/utilities/backupable_db.h")
|
||||
|
||||
#include "rocksdb/utilities/backupable_db.h"
|
||||
|
@ -13,25 +13,24 @@
|
||||
#define RDB_STR__(x) #x
|
||||
#define RDB_STR(x) RDB_STR__(x)
|
||||
|
||||
|
||||
#if defined(ROCKSDB_PLATFORM_POSIX)
|
||||
// Wrap unportable warning macro
|
||||
|
||||
#define ROCKSDB_WARNING(x) _Pragma(RDB_STR(GCC warning(x)))
|
||||
|
||||
|
||||
#elif defined(OS_WIN)
|
||||
|
||||
// Wrap unportable warning macro
|
||||
#if defined(_MSC_VER)
|
||||
// format it according to visual studio output (to get source lines and warnings in the IDE)
|
||||
#define ROCKSDB_WARNING(x) __pragma( message(__FILE__ "(" RDB_STR(__LINE__) ") : warning: " x) )
|
||||
// format it according to visual studio output (to get source lines and warnings
|
||||
// in the IDE)
|
||||
#define ROCKSDB_WARNING(x) \
|
||||
__pragma(message(__FILE__ "(" RDB_STR(__LINE__) ") : warning: " x))
|
||||
#else
|
||||
// make #warning into #pragma GCC warning gcc 4.7+ and clang 3.2+ supported
|
||||
#define ROCKSDB_WARNING(x) _Pragma(RDB_STR(GCC warning(x)))
|
||||
#endif
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
#endif // STORAGE_LEVELDB_UTILITIES_PRAGMA_ERROR_H_
|
||||
|
@ -13,8 +13,8 @@
|
||||
#define STORAGE_LEVELDB_PORT_DIRENT_H_
|
||||
|
||||
#ifdef ROCKSDB_PLATFORM_POSIX
|
||||
# include <sys/types.h>
|
||||
#include <dirent.h>
|
||||
#include <sys/types.h>
|
||||
#elif defined(OS_WIN)
|
||||
|
||||
namespace rocksdb {
|
||||
@ -42,10 +42,6 @@ using port::closedir;
|
||||
|
||||
} // namespace rocksdb
|
||||
|
||||
#endif
|
||||
|
||||
#endif // OS_WIN
|
||||
|
||||
#endif // STORAGE_LEVELDB_PORT_DIRENT_H_
|
||||
|
||||
|
||||
|
||||
|
@ -11,7 +11,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
// size_t printf formatting named in the manner of C99 standard formatting strings such as PRIu64
|
||||
// size_t printf formatting named in the manner of C99 standard formatting
|
||||
// strings such as PRIu64
|
||||
// in fact, we could use that one
|
||||
#define ROCKSDB_PRIszt "zu"
|
||||
|
||||
|
@ -7,7 +7,8 @@
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
// This file is a portable substitute for sys/time.h which does not exist on Windows
|
||||
// This file is a portable substitute for sys/time.h which does not exist on
|
||||
// Windows
|
||||
|
||||
#ifndef STORAGE_LEVELDB_PORT_SYS_TIME_H_
|
||||
#define STORAGE_LEVELDB_PORT_SYS_TIME_H_
|
||||
@ -28,12 +29,10 @@ typedef struct timeval {
|
||||
|
||||
void gettimeofday(struct timeval* tv, struct timezone* tz);
|
||||
|
||||
inline
|
||||
struct tm* localtime_r(const time_t *timep, struct tm *result) {
|
||||
inline struct tm* localtime_r(const time_t* timep, struct tm* result) {
|
||||
errno_t ret = localtime_s(result, timep);
|
||||
return (ret == 0) ? result : NULL;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
using port::timeval;
|
||||
|
@ -14,7 +14,6 @@
|
||||
// porting to a new platform, see "port_example.h" for documentation
|
||||
// of what the new port_<platform>.h file must provide.
|
||||
|
||||
|
||||
#if defined(ROCKSDB_PLATFORM_POSIX)
|
||||
#include "util/posix_logger.h"
|
||||
#elif defined(OS_WIN)
|
||||
|
File diff suppressed because it is too large
@ -28,16 +28,14 @@
|
||||
|
||||
#include "util/logging.h"
|
||||
|
||||
namespace rocksdb
|
||||
{
|
||||
namespace port
|
||||
{
|
||||
namespace rocksdb {
|
||||
namespace port {
|
||||
|
||||
void gettimeofday(struct timeval* tv, struct timezone* /* tz */) {
|
||||
|
||||
using namespace std::chrono;
|
||||
|
||||
microseconds usNow (duration_cast<microseconds>(system_clock::now().time_since_epoch()));
|
||||
microseconds usNow(
|
||||
duration_cast<microseconds>(system_clock::now().time_since_epoch()));
|
||||
|
||||
seconds secNow(duration_cast<seconds>(usNow));
|
||||
|
||||
@ -45,15 +43,11 @@ void gettimeofday(struct timeval* tv, struct timezone* /* tz */) {
|
||||
tv->tv_usec = usNow.count() - duration_cast<microseconds>(secNow).count();
|
||||
}
|
||||
|
||||
Mutex::Mutex(bool adaptive) : lock(m_mutex, std::defer_lock) {}
|
||||
|
||||
Mutex::Mutex(bool adaptive) : lock(m_mutex, std::defer_lock) {
|
||||
}
|
||||
|
||||
Mutex::~Mutex() {
|
||||
}
|
||||
Mutex::~Mutex() {}
|
||||
|
||||
void Mutex::Lock() {
|
||||
|
||||
lock.lock();
|
||||
#ifndef NDEBUG
|
||||
locked_ = true;
|
||||
@ -61,7 +55,6 @@ void Mutex::Lock() {
|
||||
}
|
||||
|
||||
void Mutex::Unlock() {
|
||||
|
||||
#ifndef NDEBUG
|
||||
locked_ = false;
|
||||
#endif
|
||||
@ -74,11 +67,9 @@ void Mutex::AssertHeld() {
|
||||
#endif
|
||||
}
|
||||
|
||||
CondVar::CondVar(Mutex* mu) : mu_(mu) {
|
||||
}
|
||||
CondVar::CondVar(Mutex* mu) : mu_(mu) {}
|
||||
|
||||
CondVar::~CondVar() {
|
||||
}
|
||||
CondVar::~CondVar() {}
|
||||
|
||||
void CondVar::Wait() {
|
||||
#ifndef NDEBUG
|
||||
@ -90,7 +81,6 @@ void CondVar::Wait() {
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
bool CondVar::TimedWait(uint64_t abs_time_us) {
|
||||
#ifndef NDEBUG
|
||||
mu_->locked_ = false;
|
||||
@ -99,8 +89,10 @@ bool CondVar::TimedWait(uint64_t abs_time_us) {
|
||||
using namespace std::chrono;
|
||||
|
||||
microseconds usAbsTime(abs_time_us);
|
||||
microseconds usNow(duration_cast<microseconds>(system_clock::now().time_since_epoch()));
|
||||
microseconds relTimeUs = (usAbsTime > usNow) ? (usAbsTime - usNow) : microseconds::zero();
|
||||
microseconds usNow(
|
||||
duration_cast<microseconds>(system_clock::now().time_since_epoch()));
|
||||
microseconds relTimeUs =
|
||||
(usAbsTime > usNow) ? (usAbsTime - usNow) : microseconds::zero();
|
||||
|
||||
std::_Cv_status cvStatus = cv_.wait_for(mu_->getLock(), relTimeUs);
|
||||
|
||||
@ -115,17 +107,11 @@ bool CondVar::TimedWait(uint64_t abs_time_us) {
|
||||
return false;
|
||||
}
|
||||
|
||||
void CondVar::Signal() {
|
||||
void CondVar::Signal() { cv_.notify_one(); }
|
||||
|
||||
cv_.notify_one();
|
||||
}
|
||||
|
||||
void CondVar::SignalAll() {
|
||||
cv_.notify_all ();
|
||||
}
|
||||
void CondVar::SignalAll() { cv_.notify_all(); }
|
||||
|
||||
void InitOnce(OnceType* once, void (*initializer)()) {
|
||||
|
||||
std::call_once(*once, initializer);
|
||||
}
|
||||
|
||||
@ -142,16 +128,13 @@ struct DIR {
|
||||
DIR& operator=(const DIR&) = delete;
|
||||
|
||||
~DIR() {
|
||||
|
||||
if (-1 != handle_) {
|
||||
_findclose(handle_);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
DIR* opendir(const char* name) {
|
||||
|
||||
if (!name || *name == 0) {
|
||||
errno = ENOENT;
|
||||
return nullptr;
|
||||
@ -174,7 +157,6 @@ DIR* opendir(const char* name) {
|
||||
}
|
||||
|
||||
struct dirent* readdir(DIR* dirp) {
|
||||
|
||||
if (!dirp || dirp->handle_ == -1) {
|
||||
errno = EBADF;
|
||||
return nullptr;
|
||||
@ -202,7 +184,6 @@ int closedir(DIR* dirp) {
|
||||
}
|
||||
|
||||
int truncate(const char* path, int64_t len) {
|
||||
|
||||
if (path == nullptr) {
|
||||
errno = EFAULT;
|
||||
return -1;
|
||||
@ -213,13 +194,12 @@ int truncate(const char* path, int64_t len) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
HANDLE hFile = CreateFile(path,
|
||||
GENERIC_READ | GENERIC_WRITE,
|
||||
HANDLE hFile =
|
||||
CreateFile(path, GENERIC_READ | GENERIC_WRITE,
|
||||
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
|
||||
NULL, // Security attrs
|
||||
OPEN_EXISTING, // Truncate existing file only
|
||||
FILE_ATTRIBUTE_NORMAL,
|
||||
NULL);
|
||||
FILE_ATTRIBUTE_NORMAL, NULL);
|
||||
|
||||
if (INVALID_HANDLE_VALUE == hFile) {
|
||||
auto lastError = GetLastError();
|
||||
@ -237,9 +217,7 @@ int truncate(const char* path, int64_t len) {
|
||||
FILE_END_OF_FILE_INFO end_of_file;
|
||||
end_of_file.EndOfFile.QuadPart = len;
|
||||
|
||||
if (!SetFileInformationByHandle(hFile,
|
||||
FileEndOfFileInfo,
|
||||
&end_of_file,
|
||||
if (!SetFileInformationByHandle(hFile, FileEndOfFileInfo, &end_of_file,
|
||||
sizeof(FILE_END_OF_FILE_INFO))) {
|
||||
errno = EIO;
|
||||
result = -1;
|
||||
@ -260,8 +238,7 @@ namespace rocksdb {
|
||||
|
||||
namespace port {
|
||||
|
||||
__declspec(noinline)
|
||||
void WINAPI InitializeJemalloc() {
|
||||
__declspec(noinline) void WINAPI InitializeJemalloc() {
|
||||
je_init();
|
||||
atexit(je_uninit);
|
||||
}
|
||||
@ -279,11 +256,13 @@ typedef void (WINAPI *CRT_Startup_Routine)(void);
|
||||
|
||||
// .CRT section is merged with .rdata on x64 so it must be constant data.
|
||||
// must be of external linkage
|
||||
// We put this into XCT since we want to run this earlier than C++ static constructors
|
||||
// We put this into XCT since we want to run this earlier than C++ static
|
||||
// constructors
|
||||
// which are placed into XCU
|
||||
#pragma const_seg(".CRT$XCT")
|
||||
extern const CRT_Startup_Routine p_rocksdb_init_jemalloc;
|
||||
const CRT_Startup_Routine p_rocksdb_init_jemalloc = rocksdb::port::InitializeJemalloc;
|
||||
const CRT_Startup_Routine p_rocksdb_init_jemalloc =
|
||||
rocksdb::port::InitializeJemalloc;
|
||||
#pragma const_seg()
|
||||
|
||||
#else // _WIN64
|
||||
@ -293,8 +272,8 @@ const CRT_Startup_Routine p_rocksdb_init_jemalloc = rocksdb::port::InitializeJem
|
||||
#pragma comment(linker, "/INCLUDE:_p_rocksdb_init_jemalloc")
|
||||
|
||||
#pragma section(".CRT$XCT", read)
|
||||
JEMALLOC_SECTION(".CRT$XCT") JEMALLOC_ATTR(used)
|
||||
static const void (WINAPI *p_rocksdb_init_jemalloc)(void) = rocksdb::port::InitializeJemalloc;
|
||||
JEMALLOC_SECTION(".CRT$XCT") JEMALLOC_ATTR(used) static const void(
|
||||
WINAPI* p_rocksdb_init_jemalloc)(void) = rocksdb::port::InitializeJemalloc;
|
||||
|
||||
#endif // _WIN64
|
||||
|
||||
@ -318,13 +297,8 @@ void* operator new[](size_t size) {
|
||||
return p;
|
||||
}
|
||||
|
||||
void operator delete(void* p) {
|
||||
je_free(p);
|
||||
}
|
||||
void operator delete(void* p) { je_free(p); }
|
||||
|
||||
void operator delete[](void* p) {
|
||||
je_free(p);
|
||||
}
|
||||
void operator delete[](void* p) { je_free(p); }
|
||||
|
||||
#endif // JEMALLOC
|
||||
|
||||
|
@ -42,7 +42,8 @@
|
||||
|
||||
typedef SSIZE_T ssize_t;
|
||||
|
||||
// size_t printf formatting named in the manner of C99 standard formatting strings such as PRIu64
|
||||
// size_t printf formatting named in the manner of C99 standard formatting
|
||||
// strings such as PRIu64
|
||||
// in fact, we could use that one
|
||||
#define ROCKSDB_PRIszt "Iu"
|
||||
|
||||
@ -77,8 +78,7 @@ namespace rocksdb {
|
||||
|
||||
#define PREFETCH(addr, rw, locality)
|
||||
|
||||
namespace port
|
||||
{
|
||||
namespace port {
|
||||
|
||||
// For use at db/file_indexer.h kLevelMaxIndex
|
||||
const int kMaxInt32 = INT32_MAX;
|
||||
@ -88,8 +88,7 @@ const bool kLittleEndian = true;
|
||||
|
||||
class CondVar;
|
||||
|
||||
class Mutex
|
||||
{
|
||||
class Mutex {
|
||||
public:
|
||||
/* implicit */ Mutex(bool adaptive = false);
|
||||
~Mutex();
|
||||
@ -101,9 +100,7 @@ public:
|
||||
// it does NOT verify that mutex is held by a calling thread
|
||||
void AssertHeld();
|
||||
|
||||
std::unique_lock<std::mutex>& getLock() {
|
||||
return lock;
|
||||
}
|
||||
std::unique_lock<std::mutex>& getLock() { return lock; }
|
||||
|
||||
private:
|
||||
friend class CondVar;
|
||||
@ -118,42 +115,29 @@ private:
|
||||
void operator=(const Mutex&);
|
||||
};
|
||||
|
||||
class RWMutex
|
||||
{
|
||||
class RWMutex {
|
||||
public:
|
||||
RWMutex() {
|
||||
InitializeSRWLock(&srwLock_);
|
||||
}
|
||||
RWMutex() { InitializeSRWLock(&srwLock_); }
|
||||
|
||||
void ReadLock() {
|
||||
AcquireSRWLockShared(&srwLock_);
|
||||
}
|
||||
void ReadLock() { AcquireSRWLockShared(&srwLock_); }
|
||||
|
||||
void WriteLock() {
|
||||
AcquireSRWLockExclusive(&srwLock_);
|
||||
}
|
||||
void WriteLock() { AcquireSRWLockExclusive(&srwLock_); }
|
||||
|
||||
void ReadUnlock() {
|
||||
ReleaseSRWLockShared(&srwLock_);
|
||||
}
|
||||
void ReadUnlock() { ReleaseSRWLockShared(&srwLock_); }
|
||||
|
||||
void WriteUnlock() {
|
||||
ReleaseSRWLockExclusive(&srwLock_);
|
||||
}
|
||||
void WriteUnlock() { ReleaseSRWLockExclusive(&srwLock_); }
|
||||
|
||||
// Empty as in POSIX
|
||||
void AssertHeld() {}
|
||||
|
||||
private:
|
||||
|
||||
SRWLOCK srwLock_;
|
||||
// No copying allowed
|
||||
RWMutex(const RWMutex&);
|
||||
void operator=(const RWMutex&);
|
||||
};
|
||||
|
||||
class CondVar
|
||||
{
|
||||
class CondVar {
|
||||
public:
|
||||
explicit CondVar(Mutex* mu);
|
||||
~CondVar();
|
||||
@ -161,6 +145,7 @@ public:
|
||||
bool TimedWait(uint64_t expiration_time);
|
||||
void Signal();
|
||||
void SignalAll();
|
||||
|
||||
private:
|
||||
std::condition_variable cv_;
|
||||
Mutex* mu_;
|
||||
@ -171,8 +156,7 @@ typedef std::once_flag OnceType;
|
||||
extern void InitOnce(OnceType* once, void (*initializer)());
|
||||
|
||||
inline bool Snappy_Compress(const CompressionOptions& opts, const char* input,
|
||||
size_t length, ::std::string* output)
|
||||
{
|
||||
size_t length, ::std::string* output) {
|
||||
#ifdef SNAPPY
|
||||
output->resize(snappy::MaxCompressedLength(length));
|
||||
size_t outlen;
|
||||
@ -192,8 +176,7 @@ inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
|
||||
#endif
|
||||
}
|
||||
|
||||
inline bool Snappy_Uncompress(const char* input, size_t length,
|
||||
char* output) {
|
||||
inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
|
||||
#ifdef SNAPPY
|
||||
return snappy::RawUncompress(input, length, output);
|
||||
#else
|
||||
@ -272,8 +255,8 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
|
||||
// For raw inflate, the windowBits should be -8..-15.
|
||||
// If windowBits is bigger than zero, it will use either zlib
|
||||
// header or gzip header. Adding 32 to it will do automatic detection.
|
||||
int st = inflateInit2(&_stream,
|
||||
windowBits > 0 ? windowBits + 32 : windowBits);
|
||||
int st =
|
||||
inflateInit2(&_stream, windowBits > 0 ? windowBits + 32 : windowBits);
|
||||
if (st != Z_OK) {
|
||||
return nullptr;
|
||||
}
|
||||
@ -515,8 +498,7 @@ inline bool LZ4HC_Compress(const CompressionOptions &opts, const char* input,
|
||||
// For Thread Local Storage abstraction
|
||||
typedef DWORD pthread_key_t;
|
||||
|
||||
inline
|
||||
int pthread_key_create(pthread_key_t *key, void(*destructor)(void*)) {
|
||||
inline int pthread_key_create(pthread_key_t* key, void (*destructor)(void*)) {
|
||||
// Not used
|
||||
(void)destructor;
|
||||
|
||||
@ -529,24 +511,21 @@ int pthread_key_create(pthread_key_t *key, void(*destructor)(void*)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
inline
|
||||
int pthread_key_delete(pthread_key_t key) {
|
||||
inline int pthread_key_delete(pthread_key_t key) {
|
||||
if (!TlsFree(key)) {
|
||||
return EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
inline
|
||||
int pthread_setspecific(pthread_key_t key, const void *value) {
|
||||
inline int pthread_setspecific(pthread_key_t key, const void* value) {
|
||||
if (!TlsSetValue(key, const_cast<void*>(value))) {
|
||||
return ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
inline
|
||||
void* pthread_getspecific(pthread_key_t key) {
|
||||
inline void* pthread_getspecific(pthread_key_t key) {
|
||||
void* result = TlsGetValue(key);
|
||||
if (!result) {
|
||||
if (GetLastError() != ERROR_SUCCESS) {
|
||||
|
@ -24,15 +24,15 @@
|
||||
|
||||
namespace rocksdb {
|
||||
|
||||
WinLogger::WinLogger(uint64_t (*gettid)(), Env* env, FILE * file, const InfoLogLevel log_level)
|
||||
WinLogger::WinLogger(uint64_t (*gettid)(), Env* env, FILE* file,
|
||||
const InfoLogLevel log_level)
|
||||
: Logger(log_level),
|
||||
gettid_(gettid),
|
||||
log_size_(0),
|
||||
last_flush_micros_(0),
|
||||
env_(env),
|
||||
flush_pending_(false),
|
||||
file_(file) {
|
||||
}
|
||||
file_(file) {}
|
||||
|
||||
void WinLogger::DebugWriter(const char* str, int len) {
|
||||
size_t sz = fwrite(str, 1, len, file_);
|
||||
@ -41,13 +41,9 @@ void WinLogger::DebugWriter(const char* str, int len) {
|
||||
}
|
||||
}
|
||||
|
||||
WinLogger::~WinLogger() {
|
||||
close();
|
||||
}
|
||||
WinLogger::~WinLogger() { close(); }
|
||||
|
||||
void WinLogger::close() {
|
||||
fclose(file_);
|
||||
}
|
||||
void WinLogger::close() { fclose(file_); }
|
||||
|
||||
void WinLogger::Flush() {
|
||||
if (flush_pending_) {
|
||||
@ -59,7 +55,6 @@ void WinLogger::Flush() {
|
||||
}
|
||||
|
||||
void WinLogger::Logv(const char* format, va_list ap) {
|
||||
|
||||
IOSTATS_TIMER_GUARD(logger_nanos);
|
||||
|
||||
const uint64_t thread_id = (*gettid_)();
|
||||
@ -88,11 +83,9 @@ void WinLogger::Logv(const char* format, va_list ap) {
|
||||
const time_t seconds = now_tv.tv_sec;
|
||||
struct tm t;
|
||||
localtime_s(&t, &seconds);
|
||||
p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
|
||||
t.tm_hour,
|
||||
t.tm_min,
|
||||
t.tm_sec,
|
||||
static_cast<int>(now_tv.tv_usec),
|
||||
p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
|
||||
t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
|
||||
t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec),
|
||||
static_cast<long long unsigned int>(thread_id));
|
||||
|
||||
// Print the message
|
||||
@ -110,8 +103,7 @@ void WinLogger::Logv(const char* format, va_list ap) {
|
||||
|
||||
// Truncate to available space if necessary
|
||||
if (p >= limit) {
|
||||
if (iter == 0)
|
||||
{
|
||||
if (iter == 0) {
|
||||
continue; // Try again with larger buffer
|
||||
} else {
|
||||
p = limit - 1;
|
||||
@ -137,8 +129,8 @@ void WinLogger::Logv(const char* format, va_list ap) {
|
||||
log_size_ += write_size;
|
||||
}
|
||||
|
||||
uint64_t now_micros = static_cast<uint64_t>(now_tv.tv_sec) * 1000000 +
|
||||
now_tv.tv_usec;
|
||||
uint64_t now_micros =
|
||||
static_cast<uint64_t>(now_tv.tv_sec) * 1000000 + now_tv.tv_usec;
|
||||
if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
|
||||
flush_pending_ = false;
|
||||
fflush(file_);
|
||||
@ -148,9 +140,6 @@ void WinLogger::Logv(const char* format, va_list ap) {
|
||||
}
|
||||
}
|
||||
|
||||
size_t WinLogger::GetLogFileSize() const {
|
||||
return log_size_;
|
||||
}
|
||||
|
||||
size_t WinLogger::GetLogFileSize() const { return log_size_; }
|
||||
|
||||
} // namespace rocksdb
|
||||
|
@ -44,7 +44,6 @@ public:
|
||||
void DebugWriter(const char* str, int len);
|
||||
|
||||
private:
|
||||
|
||||
FILE* file_;
|
||||
uint64_t (*gettid_)(); // Return the thread id for the current thread
|
||||
std::atomic_size_t log_size_;
|
||||
|
@ -374,8 +374,10 @@ Slice CompressBlock(const Slice& raw,
|
||||
// kBlockBasedTableMagicNumber was picked by running
|
||||
// echo rocksdb.table.block_based | sha1sum
|
||||
// and taking the leading 64 bits.
|
||||
// Please note that kBlockBasedTableMagicNumber may also be accessed by other .cc files
|
||||
// for that reason we declare it extern in the header but to get the space allocated
|
||||
// Please note that kBlockBasedTableMagicNumber may also be accessed by other
|
||||
// .cc files
|
||||
// for that reason we declare it extern in the header but to get the space
|
||||
// allocated
|
||||
// it must be not extern in one place.
|
||||
const uint64_t kBlockBasedTableMagicNumber = 0x88e241b785f4cff7ull;
|
||||
// We also support reading and writing legacy block based table format (for
|
||||
|
@ -123,7 +123,8 @@ std::string BlockBasedTableFactory::GetPrintableTableOptions() const {
|
||||
table_options_.block_cache_compressed.get());
|
||||
ret.append(buffer);
|
||||
if (table_options_.block_cache_compressed) {
|
||||
snprintf(buffer, kBufferSize, " block_cache_compressed_size: %" ROCKSDB_PRIszt "\n",
|
||||
snprintf(buffer, kBufferSize,
|
||||
" block_cache_compressed_size: %" ROCKSDB_PRIszt "\n",
|
||||
table_options_.block_cache_compressed->GetCapacity());
|
||||
ret.append(buffer);
|
||||
}
|
||||
|
@ -146,14 +146,13 @@ TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
|
||||
uint32_t num_hash_fun = 4;
|
||||
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
||||
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm =
|
||||
{
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{user_keys[0], {0, 1, 2, 3}},
|
||||
{user_keys[1], {1, 2, 3, 4}},
|
||||
{user_keys[2], {2, 3, 4, 5}},
|
||||
{user_keys[3], {3, 4, 5, 6}}
|
||||
};
|
||||
{user_keys[3], {3, 4, 5, 6}}};
|
||||
hash_map = std::move(hm);
|
||||
|
||||
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
|
||||
@ -190,9 +189,9 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
|
||||
uint32_t num_hash_fun = 4;
|
||||
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
||||
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm =
|
||||
{
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{user_keys[0], {0, 1, 2, 3}},
|
||||
{user_keys[1], {0, 1, 2, 3}},
|
||||
{user_keys[2], {0, 1, 2, 3}},
|
||||
@ -234,9 +233,9 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
|
||||
uint32_t num_hash_fun = 4;
|
||||
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
||||
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm =
|
||||
{
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{user_keys[0], {0, 1, 2, 3}},
|
||||
{user_keys[1], {0, 1, 2, 3}},
|
||||
{user_keys[2], {0, 1, 2, 3}},
|
||||
@ -284,9 +283,9 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
|
||||
std::vector<std::string> user_keys = {"key01", "key02", "key03",
|
||||
"key04", "key05"};
|
||||
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm =
|
||||
{
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{user_keys[0], {0, 1}},
|
||||
{user_keys[1], {1, 2}},
|
||||
{user_keys[2], {2, 3}},
|
||||
@ -330,9 +329,9 @@ TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
|
||||
std::vector<std::string> user_keys = {"key01", "key02", "key03",
|
||||
"key04", "key05"};
|
||||
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm =
|
||||
{
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{user_keys[0], {0, 1}},
|
||||
{user_keys[1], {1, 2}},
|
||||
{user_keys[2], {3, 4}},
|
||||
@ -375,14 +374,13 @@ TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
|
||||
uint32_t num_hash_fun = 4;
|
||||
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
||||
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm =
|
||||
{
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{user_keys[0], {0, 1, 2, 3}},
|
||||
{user_keys[1], {1, 2, 3, 4}},
|
||||
{user_keys[2], {2, 3, 4, 5}},
|
||||
{user_keys[3], {3, 4, 5, 6}}
|
||||
};
|
||||
{user_keys[3], {3, 4, 5, 6}}};
|
||||
hash_map = std::move(hm);
|
||||
|
||||
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
|
||||
@ -415,9 +413,9 @@ TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
|
||||
uint32_t num_hash_fun = 4;
|
||||
std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
|
||||
std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm =
|
||||
{
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{user_keys[0], {0, 1, 2, 3}},
|
||||
{user_keys[1], {0, 1, 2, 3}},
|
||||
{user_keys[2], {0, 1, 2, 3}},
|
||||
@ -456,9 +454,9 @@ TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) {
|
||||
std::vector<std::string> user_keys = {"key01", "key02", "key03",
|
||||
"key04", "key05"};
|
||||
std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm =
|
||||
{
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{user_keys[0], {0, 1}},
|
||||
{user_keys[1], {1, 2}},
|
||||
{user_keys[2], {2, 3}},
|
||||
@ -500,9 +498,9 @@ TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
|
||||
uint32_t num_hash_fun = 2;
|
||||
std::vector<std::string> user_keys = {"key01", "key02", "key03",
|
||||
"key04", "key05"};
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm =
|
||||
{
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{user_keys[0], {0, 1}},
|
||||
{user_keys[1], {1, 2}},
|
||||
{user_keys[2], {2, 3}},
|
||||
@ -527,8 +525,10 @@ TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
|
||||
}
|
||||
|
||||
TEST_F(CuckooBuilderTest, FailWhenSameKeyInserted) {
|
||||
// Need to have a temporary variable here as VS compiler does not currently support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = { { "repeatedkey", { 0, 1, 2, 3 } } };
|
||||
// Need to have a temporary variable here as VS compiler does not currently
|
||||
// support operator= with initializer_list as a parameter
|
||||
std::unordered_map<std::string, std::vector<uint64_t>> hm = {
|
||||
{"repeatedkey", {0, 1, 2, 3}}};
|
||||
hash_map = std::move(hm);
|
||||
uint32_t num_hash_fun = 4;
|
||||
std::string user_key = "repeatedkey";
|
||||
|
@ -18,9 +18,9 @@ static inline uint64_t CuckooHash(
|
||||
const Slice& user_key, uint32_t hash_cnt, bool use_module_hash,
|
||||
uint64_t table_size_, bool identity_as_first_hash,
|
||||
uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t)) {
|
||||
|
||||
#if !defined NDEBUG || defined OS_WIN
|
||||
// This part is used only in unit tests but we have to keep it for Windows build as we run test in both debug and release modes under Windows.
|
||||
// This part is used only in unit tests but we have to keep it for Windows
|
||||
// build as we run test in both debug and release modes under Windows.
|
||||
if (get_slice_hash != nullptr) {
|
||||
return get_slice_hash(user_key, hash_cnt, table_size_);
|
||||
}
|
||||
|
@ -192,9 +192,7 @@ struct BlockContents {
|
||||
compression_type(_compression_type),
|
||||
allocation(std::move(_data)) {}
|
||||
|
||||
BlockContents(BlockContents&& other) {
|
||||
*this = std::move(other);
|
||||
}
|
||||
BlockContents(BlockContents&& other) { *this = std::move(other); }
|
||||
|
||||
BlockContents& operator=(BlockContents&& other) {
|
||||
data = std::move(other.data);
|
||||
|
@ -203,8 +203,8 @@ Slice PlainTableIndexBuilder::FillIndexes(
|
||||
assert(sub_index_offset == sub_index_size_);
|
||||
|
||||
Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log,
|
||||
"hash table size: %d, suffix_map length %" ROCKSDB_PRIszt,
|
||||
index_size_, sub_index_size_);
|
||||
"hash table size: %d, suffix_map length %" ROCKSDB_PRIszt, index_size_,
|
||||
sub_index_size_);
|
||||
return Slice(allocated, GetTotalSize());
|
||||
}
|
||||
|
||||
|
third-party/fbson/FbsonStream.h (vendored, 1 changed line)
@ -34,7 +34,6 @@
|
||||
#define snprintf _snprintf
|
||||
#endif
|
||||
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <iostream>
|
||||
|
||||
|
@ -137,8 +137,10 @@ int main(int argc, const char** argv) {
|
||||
replThread.stop.store(true, std::memory_order_release);
|
||||
if (replThread.no_read < dataPump.no_records) {
|
||||
// no. read should be => than inserted.
|
||||
fprintf(stderr, "No. of Record's written and read not same\nRead : %" ROCKSDB_PRIszt
|
||||
" Written : %" ROCKSDB_PRIszt "\n", replThread.no_read, dataPump.no_records);
|
||||
fprintf(stderr,
|
||||
"No. of Record's written and read not same\nRead : %" ROCKSDB_PRIszt
|
||||
" Written : %" ROCKSDB_PRIszt "\n",
|
||||
replThread.no_read, dataPump.no_records);
|
||||
exit(1);
|
||||
}
|
||||
fprintf(stderr, "Successful!\n");
|
||||
|
@ -939,34 +939,23 @@ class StressTest {
|
||||
|
||||
std::unordered_map<std::string, std::vector<std::string> > options_tbl = {
|
||||
{"write_buffer_size",
|
||||
{
|
||||
ToString(FLAGS_write_buffer_size),
|
||||
{ToString(FLAGS_write_buffer_size),
|
||||
ToString(FLAGS_write_buffer_size * 2),
|
||||
ToString(FLAGS_write_buffer_size * 4)
|
||||
}
|
||||
},
|
||||
ToString(FLAGS_write_buffer_size * 4)}},
|
||||
{"max_write_buffer_number",
|
||||
{
|
||||
ToString(FLAGS_max_write_buffer_number),
|
||||
{ToString(FLAGS_max_write_buffer_number),
|
||||
ToString(FLAGS_max_write_buffer_number * 2),
|
||||
ToString(FLAGS_max_write_buffer_number * 4)
|
||||
}
|
||||
},
|
||||
ToString(FLAGS_max_write_buffer_number * 4)}},
|
||||
{"arena_block_size",
|
||||
{
|
||||
ToString(Options().arena_block_size),
|
||||
ToString(FLAGS_write_buffer_size / 4),
|
||||
ToString(FLAGS_write_buffer_size / 8),
|
||||
}
|
||||
},
|
||||
}},
|
||||
{"memtable_prefix_bloom_bits", {"0", "8", "10"}},
|
||||
{"memtable_prefix_bloom_probes", {"4", "5", "6"}},
|
||||
{"memtable_prefix_bloom_huge_page_tlb_size",
|
||||
{
|
||||
"0",
|
||||
ToString(2 * 1024 * 1024)
|
||||
}
|
||||
},
|
||||
{"0", ToString(2 * 1024 * 1024)}},
|
||||
{"max_successive_merges", {"0", "2", "4"}},
|
||||
{"filter_deletes", {"0", "1"}},
|
||||
{"inplace_update_num_locks", {"100", "200", "300"}},
|
||||
@ -979,71 +968,57 @@ class StressTest {
|
||||
ToString(FLAGS_level0_file_num_compaction_trigger),
|
||||
ToString(FLAGS_level0_file_num_compaction_trigger + 2),
|
||||
ToString(FLAGS_level0_file_num_compaction_trigger + 4),
|
||||
}
|
||||
},
|
||||
}},
|
||||
{"level0_slowdown_writes_trigger",
|
||||
{
|
||||
ToString(FLAGS_level0_slowdown_writes_trigger),
|
||||
ToString(FLAGS_level0_slowdown_writes_trigger + 2),
|
||||
ToString(FLAGS_level0_slowdown_writes_trigger + 4),
|
||||
}
|
||||
},
|
||||
}},
|
||||
{"level0_stop_writes_trigger",
|
||||
{
|
||||
ToString(FLAGS_level0_stop_writes_trigger),
|
||||
ToString(FLAGS_level0_stop_writes_trigger + 2),
|
||||
ToString(FLAGS_level0_stop_writes_trigger + 4),
|
||||
}
|
||||
},
|
||||
}},
|
||||
{"max_grandparent_overlap_factor",
|
||||
{
|
||||
ToString(Options().max_grandparent_overlap_factor - 5),
|
||||
ToString(Options().max_grandparent_overlap_factor),
|
||||
ToString(Options().max_grandparent_overlap_factor + 5),
|
||||
}
|
||||
},
|
||||
}},
|
||||
{"expanded_compaction_factor",
|
||||
{
|
||||
ToString(Options().expanded_compaction_factor - 5),
|
||||
ToString(Options().expanded_compaction_factor),
|
||||
ToString(Options().expanded_compaction_factor + 5),
|
||||
}
|
||||
},
|
||||
}},
|
||||
{"source_compaction_factor",
|
||||
{
|
||||
ToString(Options().source_compaction_factor),
|
||||
ToString(Options().source_compaction_factor * 2),
|
||||
ToString(Options().source_compaction_factor * 4),
|
||||
}
|
||||
},
|
||||
}},
|
||||
{"target_file_size_base",
|
||||
{
|
||||
ToString(FLAGS_target_file_size_base),
|
||||
ToString(FLAGS_target_file_size_base * 2),
|
||||
ToString(FLAGS_target_file_size_base * 4),
|
||||
}
|
||||
},
|
||||
}},
|
||||
{"target_file_size_multiplier",
|
||||
{
|
||||
ToString(FLAGS_target_file_size_multiplier),
|
||||
"1",
|
||||
"2",
|
||||
}
|
||||
},
|
||||
ToString(FLAGS_target_file_size_multiplier), "1", "2",
|
||||
}},
|
||||
{"max_bytes_for_level_base",
|
||||
{
|
||||
ToString(FLAGS_max_bytes_for_level_base / 2),
|
||||
ToString(FLAGS_max_bytes_for_level_base),
|
||||
ToString(FLAGS_max_bytes_for_level_base * 2),
|
||||
}
|
||||
},
|
||||
}},
|
||||
{"max_bytes_for_level_multiplier",
|
||||
{
|
||||
ToString(FLAGS_max_bytes_for_level_multiplier),
|
||||
"1",
|
||||
"2",
|
||||
}
|
||||
},
|
||||
ToString(FLAGS_max_bytes_for_level_multiplier), "1", "2",
|
||||
}},
|
||||
{"max_mem_compaction_level", {"0", "1", "2"}},
|
||||
{"max_sequential_skip_in_iterations", {"4", "8", "12"}},
|
||||
};
|
||||
|
@ -205,7 +205,8 @@ TEST_F(AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger) {
|
||||
}
|
||||
|
||||
#ifndef OS_WIN
|
||||
//TODO: does not build for Windows because of PosixLogger use below. Need to port
|
||||
// TODO: does not build for Windows because of PosixLogger use below. Need to
|
||||
// port
|
||||
TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) {
|
||||
DBOptions options;
|
||||
shared_ptr<Logger> logger;
|
||||
|
@ -242,8 +242,7 @@ class autovector {
|
||||
void push_back(const T& item) {
|
||||
if (num_stack_items_ < kSize) {
|
||||
values_[num_stack_items_++] = item;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
vect_.push_back(item);
|
||||
}
|
||||
}
|
||||
|
@ -1827,7 +1827,8 @@ class PosixEnv : public Env {
|
||||
#if defined(_GNU_SOURCE) && defined(__GLIBC_PREREQ)
|
||||
#if __GLIBC_PREREQ(2, 12)
|
||||
char name_buf[16];
|
||||
snprintf(name_buf, sizeof name_buf, "rocksdb:bg%" ROCKSDB_PRIszt, bgthreads_.size());
|
||||
snprintf(name_buf, sizeof name_buf, "rocksdb:bg%" ROCKSDB_PRIszt,
|
||||
bgthreads_.size());
|
||||
name_buf[sizeof name_buf - 1] = '\0';
|
||||
pthread_setname_np(t, name_buf);
|
||||
#endif
|
||||
|
@ -7,11 +7,10 @@
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
|
||||
#include <sys/types.h>
|
||||
#ifndef OS_WIN
|
||||
#include <sys/ioctl.h>
|
||||
#endif
|
||||
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <unordered_set>
|
||||
|
@ -40,13 +40,9 @@ struct CuckooStep {
|
||||
CuckooStep() : bucket_id_(-1), prev_step_id_(kNullStep), depth_(1) {}
|
||||
|
||||
// MSVC does not support = default yet
|
||||
CuckooStep(CuckooStep&& o)
|
||||
{
|
||||
*this = std::move(o);
|
||||
}
|
||||
CuckooStep(CuckooStep&& o) { *this = std::move(o); }
|
||||
|
||||
CuckooStep& operator=(CuckooStep&& rhs)
|
||||
{
|
||||
CuckooStep& operator=(CuckooStep&& rhs) {
|
||||
bucket_id_ = std::move(rhs.bucket_id_);
|
||||
prev_step_id_ = std::move(rhs.prev_step_id_);
|
||||
depth_ = std::move(rhs.depth_);
|
||||
@ -411,8 +407,8 @@ bool HashCuckooRep::QuickInsert(const char* internal_key, const Slice& user_key,
|
||||
}
|
||||
|
||||
if (cuckoo_bucket_id != -1) {
|
||||
cuckoo_array_[cuckoo_bucket_id]
|
||||
.store(const_cast<char*>(internal_key), std::memory_order_release);
|
||||
cuckoo_array_[cuckoo_bucket_id].store(const_cast<char*>(internal_key),
|
||||
std::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -594,8 +594,8 @@ void HashLinkListRep::Insert(KeyHandle handle) {
|
||||
if (bucket_entries_logging_threshold_ > 0 &&
|
||||
header->GetNumEntries() ==
|
||||
static_cast<uint32_t>(bucket_entries_logging_threshold_)) {
|
||||
Info(logger_,
|
||||
"HashLinkedList bucket %" ROCKSDB_PRIszt " has more than %d "
|
||||
Info(logger_, "HashLinkedList bucket %" ROCKSDB_PRIszt
|
||||
" has more than %d "
|
||||
"entries. Key to insert: %s",
|
||||
GetHash(transformed), header->GetNumEntries(),
|
||||
GetLengthPrefixedSlice(x->key).ToString(true).c_str());
|
||||
|
@ -54,9 +54,7 @@ class HistogramBucketMapper {
|
||||
|
||||
class HistogramImpl {
|
||||
public:
|
||||
HistogramImpl() {
|
||||
memset(buckets_, 0, sizeof(buckets_));
|
||||
}
|
||||
HistogramImpl() { memset(buckets_, 0, sizeof(buckets_)); }
|
||||
virtual void Clear();
|
||||
virtual bool Empty();
|
||||
virtual void Add(uint64_t value);
|
||||
|
@ -590,7 +590,8 @@ void ManifestDumpCommand::DoCommand() {
|
||||
// containing the db for files of the form MANIFEST_[0-9]+
|
||||
|
||||
auto CloseDir = [](DIR* p) { closedir(p); };
|
||||
std::unique_ptr<DIR, decltype(CloseDir)> d(opendir(db_path_.c_str()), CloseDir);
|
||||
std::unique_ptr<DIR, decltype(CloseDir)> d(opendir(db_path_.c_str()),
|
||||
CloseDir);
|
||||
|
||||
if (d == nullptr) {
|
||||
exec_state_ =
|
||||
|
@ -357,12 +357,8 @@ private:
|
||||
* Otherwise an exception is thrown.
|
||||
*/
|
||||
bool StringToBool(string val) {
|
||||
|
||||
std::transform(val.begin(), val.end(), val.begin(),
|
||||
[](char ch) -> char
|
||||
{
|
||||
return ::tolower(ch);
|
||||
});
|
||||
[](char ch) -> char { return ::tolower(ch); });
|
||||
|
||||
if (val == "true") {
|
||||
return true;
|
||||
|
@ -63,10 +63,12 @@ uint64_t MutableCFOptions::ExpandedCompactionByteSizeLimit(int level) const {
|
||||
|
||||
void MutableCFOptions::Dump(Logger* log) const {
|
||||
// Memtable related options
|
||||
Log(log, " write_buffer_size: %" ROCKSDB_PRIszt, write_buffer_size);
|
||||
Log(log, " write_buffer_size: %" ROCKSDB_PRIszt,
|
||||
write_buffer_size);
|
||||
Log(log, " max_write_buffer_number: %d",
|
||||
max_write_buffer_number);
|
||||
Log(log, " arena_block_size: %" ROCKSDB_PRIszt, arena_block_size);
|
||||
Log(log, " arena_block_size: %" ROCKSDB_PRIszt,
|
||||
arena_block_size);
|
||||
Log(log, " memtable_prefix_bloom_bits: %" PRIu32,
|
||||
memtable_prefix_bloom_bits);
|
||||
Log(log, " memtable_prefix_bloom_probes: %" PRIu32,
|
||||
|
@ -307,11 +307,14 @@ void DBOptions::Dump(Logger* log) const {
|
||||
Warn(log, " Options.max_total_wal_size: %" PRIu64, max_total_wal_size);
|
||||
Warn(log, " Options.disableDataSync: %d", disableDataSync);
|
||||
Warn(log, " Options.use_fsync: %d", use_fsync);
|
||||
Warn(log, " Options.max_log_file_size: %" ROCKSDB_PRIszt, max_log_file_size);
|
||||
Warn(log, "Options.max_manifest_file_size: %lu",
|
||||
(unsigned long)max_manifest_file_size);
|
||||
Warn(log, " Options.log_file_time_to_roll: %" ROCKSDB_PRIszt, log_file_time_to_roll);
|
||||
Warn(log, " Options.keep_log_file_num: %" ROCKSDB_PRIszt, keep_log_file_num);
|
||||
Warn(log, " Options.max_log_file_size: %" ROCKSDB_PRIszt,
|
||||
max_log_file_size);
|
||||
Warn(log, "Options.max_manifest_file_size: %" PRIu64,
|
||||
max_manifest_file_size);
|
||||
Warn(log, " Options.log_file_time_to_roll: %" ROCKSDB_PRIszt,
|
||||
log_file_time_to_roll);
Warn(log, " Options.keep_log_file_num: %" ROCKSDB_PRIszt,
keep_log_file_num);
Warn(log, " Options.allow_os_buffer: %d", allow_os_buffer);
Warn(log, " Options.allow_mmap_reads: %d", allow_mmap_reads);
Warn(log, " Options.allow_mmap_writes: %d", allow_mmap_writes);
@ -333,7 +336,8 @@ void DBOptions::Dump(Logger* log) const {
WAL_ttl_seconds);
Warn(log, " Options.WAL_size_limit_MB: %" PRIu64,
WAL_size_limit_MB);
Warn(log, " Options.manifest_preallocation_size: %" ROCKSDB_PRIszt,
Warn(log,
" Options.manifest_preallocation_size: %" ROCKSDB_PRIszt,
manifest_preallocation_size);
Warn(log, " Options.allow_os_buffer: %d",
allow_os_buffer);
@ -347,7 +351,9 @@ void DBOptions::Dump(Logger* log) const {
stats_dump_period_sec);
Warn(log, " Options.advise_random_on_open: %d",
advise_random_on_open);
Warn(log, " Options.db_write_buffer_size: %" ROCKSDB_PRIszt "d",
Warn(log,
" Options.db_write_buffer_size: %" ROCKSDB_PRIszt
"d",
db_write_buffer_size);
Warn(log, " Options.access_hint_on_compaction_start: %s",
access_hints[access_hint_on_compaction_start]);
@ -384,7 +390,8 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
Warn(log, " Options.table_factory: %s", table_factory->Name());
Warn(log, " table_factory options: %s",
table_factory->GetPrintableTableOptions().c_str());
Warn(log, " Options.write_buffer_size: %" ROCKSDB_PRIszt, write_buffer_size);
Warn(log, " Options.write_buffer_size: %" ROCKSDB_PRIszt,
write_buffer_size);
Warn(log, " Options.max_write_buffer_number: %d", max_write_buffer_number);
if (!compression_per_level.empty()) {
for (unsigned int i = 0; i < compression_per_level.size(); i++) {
@ -430,8 +437,9 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
max_bytes_for_level_multiplier);
for (size_t i = 0; i < max_bytes_for_level_multiplier_additional.size();
i++) {
Warn(log, "Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt "]: %d", i,
max_bytes_for_level_multiplier_additional[i]);
Warn(log, "Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt
"]: %d",
i, max_bytes_for_level_multiplier_additional[i]);
}
Warn(log, " Options.max_sequential_skip_in_iterations: %" PRIu64,
max_sequential_skip_in_iterations);
@ -442,7 +450,8 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
Warn(log, " Options.max_grandparent_overlap_factor: %d",
max_grandparent_overlap_factor);

Warn(log, " Options.arena_block_size: %" ROCKSDB_PRIszt,
Warn(log,
" Options.arena_block_size: %" ROCKSDB_PRIszt,
arena_block_size);
Warn(log, " Options.soft_rate_limit: %.2f",
soft_rate_limit);
@ -483,7 +492,8 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
collector_names.c_str());
Warn(log, " Options.inplace_update_support: %d",
inplace_update_support);
Warn(log, " Options.inplace_update_num_locks: %" ROCKSDB_PRIszt,
Warn(log,
" Options.inplace_update_num_locks: %" ROCKSDB_PRIszt,
inplace_update_num_locks);
Warn(log, " Options.min_partial_merge_operands: %u",
min_partial_merge_operands);
@ -493,12 +503,14 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
Warn(log, " Options.memtable_prefix_bloom_probes: %d",
memtable_prefix_bloom_probes);

Warn(log, " Options.memtable_prefix_bloom_huge_page_tlb_size: %" ROCKSDB_PRIszt,
Warn(log,
" Options.memtable_prefix_bloom_huge_page_tlb_size: %" ROCKSDB_PRIszt,
memtable_prefix_bloom_huge_page_tlb_size);
Warn(log, " Options.bloom_locality: %d",
bloom_locality);

Warn(log, " Options.max_successive_merges: %" ROCKSDB_PRIszt,
Warn(log,
" Options.max_successive_merges: %" ROCKSDB_PRIszt,
max_successive_merges);
Warn(log, " Options.optimize_fllters_for_hits: %d",
optimize_filters_for_hits);
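
Most of the Dump() hunks above exist only to re-wrap lines that use the ROCKSDB_PRIszt macro, a portable printf format for size_t. As a hedged illustration of the idea (EXAMPLE_PRIszt below is an invented stand-in, not RocksDB's actual definition in its port headers), adjacent string literals splice the platform-specific specifier into the format string:

#include <cstddef>
#include <cstdio>

#if defined(_MSC_VER)
#define EXAMPLE_PRIszt "Iu"  // older MSVC printf spells size_t as %Iu
#else
#define EXAMPLE_PRIszt "zu"  // C99-conforming printf uses %zu
#endif

int main() {
  size_t write_buffer_size = static_cast<size_t>(64) << 20;
  // Adjacent string literals concatenate, producing "%zu" or "%Iu" at compile time.
  std::printf("Options.write_buffer_size: %" EXAMPLE_PRIszt "\n", write_buffer_size);
  return 0;
}
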
@ -277,8 +277,7 @@ Status GetMutableOptionsFromStrings(
namespace {

std::string trim(const std::string& str) {
if (str.empty())
return std::string();
if (str.empty()) return std::string();
size_t start = 0;
size_t end = str.size() - 1;
while (isspace(str[start]) != 0 && start <= end) {
@ -564,8 +563,7 @@ bool ParseDBOption(const std::string& name, const std::string& value,
} else {
return false;
}
}
catch (const std::exception& e) {
} catch (const std::exception& e) {
return false;
}
return true;
@ -680,7 +678,8 @@ Status GetPlainTableOptionsFromMap(
} else if (o.first == "full_scan_mode") {
new_table_options->full_scan_mode = ParseBoolean(o.first, o.second);
} else if (o.first == "store_index_in_file") {
new_table_options->store_index_in_file = ParseBoolean(o.first, o.second);
new_table_options->store_index_in_file =
ParseBoolean(o.first, o.second);
} else {
return Status::InvalidArgument("Unrecognized option: " + o.first);
}
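
The try/catch reshuffle in ParseDBOption above reflects how the option parsers signal failure: helpers such as ParseBoolean throw on malformed input and the caller converts that into a false return. A minimal sketch of the pattern, with hypothetical names (the exact helper signatures in options_helper may differ):

#include <stdexcept>
#include <string>

// Throwing parse helper, in the spirit of options_helper's ParseBoolean.
bool ParseBooleanSketch(const std::string& name, const std::string& value) {
  if (value == "true" || value == "1") return true;
  if (value == "false" || value == "0") return false;
  throw std::invalid_argument("bad boolean for option " + name);
}

// Caller mirrors ParseDBOption's shape: swallow the exception, report failure.
bool ParseOneOption(const std::string& name, const std::string& value, bool* out) {
  try {
    *out = ParseBooleanSketch(name, value);
  } catch (const std::exception& e) {
    return false;
  }
  return true;
}
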
@ -51,8 +51,8 @@ Options PrintAndGetOptions(size_t total_write_buffer_limit,
StderrLogger logger;

if (FLAGS_enable_print) {
printf(
"---- total_write_buffer_limit: %" ROCKSDB_PRIszt " "
printf("---- total_write_buffer_limit: %" ROCKSDB_PRIszt
" "
"read_amplification_threshold: %d write_amplification_threshold: %d "
"target_db_size %" PRIu64 " ----\n",
total_write_buffer_limit, read_amplification_threshold,

@ -115,7 +115,6 @@ std::string Slice::ToString(bool hex) const {
}
}

const SliceTransform* NewFixedPrefixTransform(size_t prefix_len) {
return new FixedPrefixTransform(prefix_len);
}
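
NewFixedPrefixTransform, touched above, is the factory callers use to install a prefix extractor on their options; a brief usage sketch (the 8-byte prefix length is an arbitrary choice for illustration):

#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"

int main() {
  rocksdb::Options options;
  // Keys sharing their first 8 bytes fall into the same prefix domain for
  // prefix bloom filters and prefix seeks.
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(8));
  return 0;
}
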
@ -25,15 +25,19 @@ __thread ThreadLocalPtr::ThreadData* ThreadLocalPtr::StaticMeta::tls_ = nullptr;
// See http://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way
// and http://www.nynaeve.net/?p=183
//
// really we do this to have clear conscience since using TLS with thread-pools is iffy
// although OK within a request. But otherwise, threads have no identity in its modern use.
// really we do this to have clear conscience since using TLS with thread-pools
// is iffy
// although OK within a request. But otherwise, threads have no identity in its
// modern use.

// This runs on windows only called from the System Loader
#ifdef OS_WIN

// Windows cleanup routine is invoked from a System Loader with a different
// signature so we can not directly hookup the original OnThreadExit which is private member
// so we make StaticMeta class share with the us the address of the function so we can invoke it.
// signature so we can not directly hookup the original OnThreadExit which is
// private member
// so we make StaticMeta class share with the us the address of the function so
// we can invoke it.
namespace wintlscleanup {

// This is set to OnThreadExit in StaticMeta singleton constructor
@ -81,7 +85,8 @@ extern "C" {
// When defining a const variable, it must have external linkage to be sure the
// linker doesn't discard it.
extern const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit;
const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit = wintlscleanup::WinOnThreadExit;
const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit =
wintlscleanup::WinOnThreadExit;
// Reset the default section.
#pragma const_seg()

@ -17,7 +17,6 @@
#include "util/autovector.h"
#include "port/port.h"

namespace rocksdb {

// Cleanup function that will be called for a stored thread local
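
The comments above describe the Windows TLS-callback trick: a PIMAGE_TLS_CALLBACK pointer placed in the .CRT$XLB section is invoked by the loader on thread exit, which is how the port runs per-thread cleanup without pthread destructors. A compilable sketch of that registration, assuming MSVC on x64 (RocksDB's real callback forwards to ThreadLocalPtr's private OnThreadExit rather than printing):

#include <windows.h>
#include <cstdio>
#include <thread>

static void NTAPI WinOnThreadExitSketch(PVOID /*module*/, DWORD reason,
                                        PVOID /*reserved*/) {
  if (reason == DLL_THREAD_DETACH) {
    // RocksDB would run the stored per-thread cleanup functions here.
    std::printf("thread detaching\n");
  }
}

// Keep the TLS directory and our callback entry from being discarded by the
// linker (symbol decoration differs on x86, hence the x64 assumption).
#pragma comment(linker, "/INCLUDE:_tls_used")
#pragma comment(linker, "/INCLUDE:p_thread_callback_on_exit_sketch")
#pragma const_seg(".CRT$XLB")
extern "C" const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit_sketch =
    WinOnThreadExitSketch;
#pragma const_seg()  // reset to the default const section

int main() {
  std::thread t([] { /* any work */ });
  t.join();  // the callback fires with DLL_THREAD_DETACH as t exits
  return 0;
}
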
@ -333,9 +333,7 @@ class BackupEngineImpl : public BackupEngine {
CopyWorkItem(const CopyWorkItem&) = delete;
CopyWorkItem& operator=(const CopyWorkItem&) = delete;

CopyWorkItem(CopyWorkItem&& o) {
*this = std::move(o);
}
CopyWorkItem(CopyWorkItem&& o) { *this = std::move(o); }

CopyWorkItem& operator=(CopyWorkItem&& o) {
src_path = std::move(o.src_path);
@ -390,12 +388,9 @@ class BackupEngineImpl : public BackupEngine {
return *this;
}

BackupAfterCopyWorkItem(std::future<CopyResult>&& _result,
bool _shared,
bool _needed_to_copy,
Env* _backup_env,
std::string _dst_path_tmp,
std::string _dst_path,
BackupAfterCopyWorkItem(std::future<CopyResult>&& _result, bool _shared,
bool _needed_to_copy, Env* _backup_env,
std::string _dst_path_tmp, std::string _dst_path,
std::string _dst_relative)
: result(std::move(_result)),
shared(_shared),
@ -412,8 +407,7 @@ class BackupEngineImpl : public BackupEngine {
RestoreAfterCopyWorkItem() {}
RestoreAfterCopyWorkItem(std::future<CopyResult>&& _result,
uint32_t _checksum_value)
: result(std::move(_result)),
checksum_value(_checksum_value) {}
: result(std::move(_result)), checksum_value(_checksum_value) {}
RestoreAfterCopyWorkItem(RestoreAfterCopyWorkItem&& o) {
*this = std::move(o);
}
@ -1561,7 +1555,8 @@ Status BackupEngineImpl::BackupMeta::StoreToFile(bool sync) {
len += snprintf(buf.get(), buf_size, "%" PRId64 "\n", timestamp_);
len += snprintf(buf.get() + len, buf_size - len, "%" PRIu64 "\n",
sequence_number_);
len += snprintf(buf.get() + len, buf_size - len, "%" ROCKSDB_PRIszt "\n", files_.size());
len += snprintf(buf.get() + len, buf_size - len, "%" ROCKSDB_PRIszt "\n",
files_.size());
for (const auto& file : files_) {
// use crc32 for now, switch to something else if needed
len += snprintf(buf.get() + len, buf_size - len, "%s crc32 %u\n",
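
The StoreToFile hunk above uses the usual accumulate-with-snprintf idiom: every call writes at buf.get() + len and is bounded by buf_size - len. A standalone sketch of the same pattern with made-up values (plain %zu stands in for ROCKSDB_PRIszt, since this snippet sits outside the port layer):

#include <cinttypes>
#include <cstdio>
#include <memory>

int main() {
  const size_t buf_size = 256;
  std::unique_ptr<char[]> buf(new char[buf_size]);

  int64_t timestamp = 1436300000;  // illustrative values only
  uint64_t sequence_number = 42;
  size_t num_files = 3;

  size_t len = 0;
  len += std::snprintf(buf.get() + len, buf_size - len, "%" PRId64 "\n", timestamp);
  len += std::snprintf(buf.get() + len, buf_size - len, "%" PRIu64 "\n",
                       sequence_number);
  len += std::snprintf(buf.get() + len, buf_size - len, "%zu\n", num_files);

  std::printf("%s", buf.get());  // three lines, one field per line
  return 0;
}
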
@ -42,7 +42,6 @@ const double GeoDBImpl::MaxLatitude = 85.05112878;
const double GeoDBImpl::MinLongitude = -180;
const double GeoDBImpl::MaxLongitude = 180;

GeoDBImpl::GeoDBImpl(DB* db, const GeoDBOptions& options) :
GeoDB(db, options), db_(db), options_(options) {
}

@ -51,7 +51,8 @@ class UInt64AddOperator : public AssociativeMergeOperator {
} else if (logger != nullptr) {
// If value is corrupted, treat it as 0
Log(InfoLogLevel::ERROR_LEVEL, logger,
"uint64 value corruption, size: %" ROCKSDB_PRIszt " > %" ROCKSDB_PRIszt,
"uint64 value corruption, size: %" ROCKSDB_PRIszt
" > %" ROCKSDB_PRIszt,
value.size(), sizeof(uint64_t));
}
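
For context on the UInt64AddOperator hunk: an AssociativeMergeOperator combines the existing value with one operand at a time, and this one treats anything that is not exactly 8 bytes as corruption (logged above) and adds 0 instead. A hedged sketch of such an operator, using memcpy of native-endian integers in place of RocksDB's fixed64 coding helpers:

#include <cstdint>
#include <cstring>
#include <string>

#include "rocksdb/merge_operator.h"
#include "rocksdb/slice.h"

class Uint64AddSketch : public rocksdb::AssociativeMergeOperator {
 public:
  virtual bool Merge(const rocksdb::Slice& /*key*/,
                     const rocksdb::Slice* existing_value,
                     const rocksdb::Slice& value, std::string* new_value,
                     rocksdb::Logger* /*logger*/) const override {
    uint64_t existing = 0;
    if (existing_value != nullptr && existing_value->size() == sizeof(uint64_t)) {
      std::memcpy(&existing, existing_value->data(), sizeof(uint64_t));
    }
    uint64_t operand = 0;
    if (value.size() == sizeof(uint64_t)) {
      std::memcpy(&operand, value.data(), sizeof(uint64_t));
    }  // else: a corrupted operand counts as 0, matching the comment above
    uint64_t result = existing + operand;
    new_value->assign(reinterpret_cast<const char*>(&result), sizeof(result));
    return true;
  }

  virtual const char* Name() const override { return "Uint64AddSketch"; }
};
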
@ -87,7 +87,6 @@ void Variant::Init(const Variant& v, Data& d) {
}

Variant& Variant::operator=(const Variant& v) {

// Construct first a temp so exception from a string ctor
// does not change this object
Data tmp;
@ -104,7 +103,6 @@ Variant& Variant::operator=(const Variant& v) {
}

Variant& Variant::operator=(Variant&& rhs) {

Destroy(type_, data_);
if (rhs.type_ == kString) {
new (data_.s) std::string(std::move(*GetStringPtr(rhs.data_)));
@ -116,9 +114,7 @@ Variant& Variant::operator=(Variant&& rhs) {
return *this;
}

bool Variant::operator==(const Variant& rhs) const {

if (type_ != rhs.type_) {
return false;
}
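
The "construct first a temp" comment in Variant::operator= above is the standard strong-exception-safety move: do the throwing work (the string copy) into a temporary, then commit with operations that do not throw. A stripped-down sketch of the same idiom without the tagged union:

#include <string>
#include <utility>

class Holder {
 public:
  Holder& operator=(const Holder& rhs) {
    std::string tmp(rhs.name_);  // may throw; *this is still untouched
    name_ = std::move(tmp);      // commit; effectively non-throwing with the default allocator
    return *this;
  }

 private:
  std::string name_;
};

int main() {
  Holder a, b;
  a = b;  // either fully succeeds or leaves a unchanged
  return 0;
}
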
@ -18,10 +18,7 @@ namespace {

typedef std::map<std::string, std::string> KVMap;

enum BatchOperation {
OP_PUT = 0,
OP_DELETE = 1
};
enum BatchOperation { OP_PUT = 0, OP_DELETE = 1 };
}

class SpecialTimeEnv : public EnvWrapper {