Fix CLANG errors introduced by 7d87f02799
Summary: Fix some CLANG errors introduced in 7d87f02799
Test Plan: Build with both CLANG and gcc
Reviewers: rven, yhchiang, kradhakrishnan, anthony, IslamAbdelRahman, ngbronson
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D52329
parent 7fafd52dce
commit 11672df19a
db/column_family.h
@@ -229,7 +229,7 @@ class ColumnFamilyData {
   MemTable* mem() { return mem_; }
   Version* current() { return current_; }
   Version* dummy_versions() { return dummy_versions_; }
-  void SetCurrent(Version* current);
+  void SetCurrent(Version* _current);
   uint64_t GetNumLiveVersions() const;    // REQUIRE: DB mutex held
   uint64_t GetTotalSstFilesSize() const;  // REQUIRE: DB mutex held
   void SetMemtable(MemTable* new_mem) { mem_ = new_mem; }
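Note (context, not part of the commit): renaming the parameter to _current is the usual fix for a shadow diagnostic, since the old name collided with the current() accessor declared a few lines above and RocksDB promotes warnings to errors. A minimal sketch of the pattern, using a hypothetical Holder class and assuming a -Wshadow-style warning under -Werror:

class Holder {
 public:
  int current() const { return current_; }

  // A parameter named `current` shadows the accessor above and can be
  // rejected under -Wshadow -Werror:
  //   void SetCurrent(int current);

  // The fix mirrors the commit: rename the parameter.
  void SetCurrent(int _current) { current_ = _current; }

 private:
  int current_ = 0;
};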
@@ -531,7 +531,7 @@ class ColumnFamilyMemTablesImpl : public ColumnFamilyMemTables {
   // Cannot be called while another thread is calling Seek().
   // REQUIRES: use this function of DBImpl::column_family_memtables_ should be
   // under a DB mutex OR from a write thread
-  virtual ColumnFamilyData* current() { return current_; }
+  virtual ColumnFamilyData* current() override { return current_; }

  private:
   ColumnFamilySet* column_family_set_;
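Note: adding override here is the kind of change clang asks for via -Winconsistent-missing-override, which fires (and is fatal under -Werror) once any other member of the class already uses the keyword; gcc has no comparable default-on diagnostic, matching the CLANG-only errors in the summary. A minimal sketch with hypothetical Base/Derived types:

struct Base {
  virtual ~Base() {}
  virtual void Seek() = 0;
  virtual int current() { return 0; }
};

struct Derived : public Base {
  void Seek() override {}
  // Without `override` on the next line, clang's
  // -Winconsistent-missing-override warns, because Seek() above
  // already uses the keyword.
  int current() override { return 1; }
};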
db/db_impl.cc
@@ -4387,7 +4387,8 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
       pg.leader = &w;
       pg.last_writer = last_writer;
       pg.early_exit_allowed = !need_log_sync;
-      pg.running.store(write_batch_group.size(), std::memory_order_relaxed);
+      pg.running.store(static_cast<uint32_t>(write_batch_group.size()),
+                       std::memory_order_relaxed);
       write_thread_.LaunchParallelFollowers(&pg, current_sequence);

       ColumnFamilyMemTablesImpl column_family_memtables(
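Note: the new static_cast fixes an implicit size_t-to-uint32_t narrowing: write_batch_group.size() is 64-bit, while pg.running is (per this diff) a 32-bit atomic. Clang's -Wshorten-64-to-32 diagnoses exactly this and has no gcc equivalent, which is presumably why only clang builds broke. A self-contained sketch of the pattern, with made-up names:

#include <atomic>
#include <cstdint>
#include <vector>

int main() {
  std::vector<int> write_batch_group(3);
  std::atomic<uint32_t> running;

  // Implicit narrowing; an error under clang -Werror -Wshorten-64-to-32:
  //   running.store(write_batch_group.size(), std::memory_order_relaxed);

  // Explicit cast, as in the commit, marks the narrowing as intentional.
  running.store(static_cast<uint32_t>(write_batch_group.size()),
                std::memory_order_relaxed);
  return static_cast<int>(running.load());
}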
db/write_thread.cc
@@ -420,7 +420,6 @@ void WriteThread::ExitAsBatchGroupLeader(Writer* leader, Writer* last_writer,

 void WriteThread::EnterUnbatched(Writer* w, InstrumentedMutex* mu) {
   static AdaptationContext ctx{"EnterUnbatched"};
-  static std::atomic<uint32_t> adaptation_history{};

   assert(w->batch == nullptr);
   bool linked_as_leader;
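Note: the deleted adaptation_history atomic is declared but never used in this function; it appears to be leftover scaffolding from 7d87f02799, and a never-referenced variable is rejected by clang's unused-variable diagnostics once -Werror is in effect.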
util/concurrent_arena.cc
@@ -36,12 +36,13 @@ ConcurrentArena::Shard* ConcurrentArena::Repick() {
   int cpuid = port::PhysicalCoreID();
   if (UNLIKELY(cpuid < 0)) {
     // cpu id unavailable, just pick randomly
-    cpuid = Random::GetTLSInstance()->Uniform(index_mask_ + 1);
+    cpuid =
+        Random::GetTLSInstance()->Uniform(static_cast<int>(index_mask_) + 1);
   }
 #if ROCKSDB_SUPPORT_THREAD_LOCAL
   // even if we are cpu 0, use a non-zero tls_cpuid so we can tell we
   // have repicked
-  tls_cpuid = cpuid | (index_mask_ + 1);
+  tls_cpuid = cpuid | (static_cast<int>(index_mask_) + 1);
 #endif
   return &shards_[cpuid & index_mask_];
 }
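Note: these two casts are the same class of fix as the one in DBImpl::WriteImpl above. index_mask_ is a size_t member (see util/concurrent_arena.h below), so index_mask_ + 1 is a 64-bit value being passed where an int or uint32_t is expected; clang's 64-to-32 narrowing diagnostics reject the implicit conversion, and static_cast<int> makes the truncation explicit.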
util/concurrent_arena.h
@@ -78,7 +78,7 @@ class ConcurrentArena : public Allocator {

  private:
   struct Shard {
-    char padding[40];
+    char padding[40] __attribute__((__unused__));
     mutable SpinMutex mutex;
     char* free_begin_;
     std::atomic<size_t> allocated_and_unused_;
@@ -92,7 +92,7 @@ class ConcurrentArena : public Allocator {
   enum ZeroFirstEnum : uint32_t { tls_cpuid = 0 };
 #endif

-  char padding0[56];
+  char padding0[56] __attribute__((__unused__));

   size_t shard_block_size_;

@@ -106,7 +106,7 @@ class ConcurrentArena : public Allocator {
   std::atomic<size_t> memory_allocated_bytes_;
   std::atomic<size_t> irregular_block_num_;

-  char padding1[56];
+  char padding1[56] __attribute__((__unused__));

   Shard* Repick();

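Note: the three padding changes all silence the same diagnostic. The byte arrays exist only to keep hot fields on separate cache lines (avoiding false sharing between the spin mutex, the free pointers, and the atomics), so by design they are never referenced; clang's -Wunused-private-field rejects never-referenced private members under -Werror, while gcc compiles them silently. __attribute__((__unused__)) declares the non-use intentional. A minimal sketch, assuming a clang build with -Werror and a hypothetical ShardedCounter class:

#include <atomic>
#include <cstddef>

class ShardedCounter {
 public:
  void Add(size_t n) { value_.fetch_add(n, std::memory_order_relaxed); }
  size_t Get() const { return value_.load(std::memory_order_relaxed); }

 private:
  // Never referenced: exists only to push value_ onto its own cache line.
  // Without the attribute, clang -Wunused-private-field -Werror rejects it.
  char padding_[56] __attribute__((__unused__));
  std::atomic<size_t> value_{0};
};

int main() {
  ShardedCounter c;
  c.Add(1);
  return static_cast<int>(c.Get());
}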