Fix unused var warnings in Release mode

Summary:
MSVC does not support the GCC-style `__attribute__((unused))` annotation at this time. Splitting the declaration and the assignment onto separate lines fixes the issue, most likely because MSVC counts the standalone assignment as a use of the variable, so it no longer emits an unused-variable warning.
Closes https://github.com/facebook/rocksdb/pull/3048

Differential Revision: D6126272

Pulled By: maysamyabandeh

fbshipit-source-id: 4907865db45fd75a39a15725c0695aaa17509c1f
This commit is contained in:
Dmitri Smirnov 2017-10-23 14:20:53 -07:00 committed by Facebook Github Bot
parent 63822eb761
commit d2a65c59e1
12 changed files with 31 additions and 19 deletions

View File

@ -463,7 +463,8 @@ ColumnFamilyData::~ColumnFamilyData() {
if (dummy_versions_ != nullptr) {
// List must be empty
assert(dummy_versions_->TEST_Next() == dummy_versions_);
bool deleted __attribute__((unused)) = dummy_versions_->Unref();
bool deleted __attribute__((unused));
deleted = dummy_versions_->Unref();
assert(deleted);
}

View File

@ -137,8 +137,8 @@ void CompactionIterator::Next() {
if (merge_out_iter_.Valid()) {
key_ = merge_out_iter_.key();
value_ = merge_out_iter_.value();
bool valid_key __attribute__((__unused__)) =
ParseInternalKey(key_, &ikey_);
bool valid_key __attribute__((__unused__));
valid_key = ParseInternalKey(key_, &ikey_);
// MergeUntil stops when it encounters a corrupt key and does not
// include them in the result, so we expect the keys here to be valid.
assert(valid_key);
@ -334,8 +334,8 @@ void CompactionIterator::NextFromInput() {
// If there are no snapshots, then this kv affect visibility at tip.
// Otherwise, search though all existing snapshots to find the earliest
// snapshot that is affected by this kv.
SequenceNumber last_sequence __attribute__((__unused__)) =
current_user_key_sequence_;
SequenceNumber last_sequence __attribute__((__unused__));
last_sequence = current_user_key_sequence_;
current_user_key_sequence_ = ikey_.sequence;
SequenceNumber last_snapshot = current_user_key_snapshot_;
SequenceNumber prev_snapshot = 0; // 0 means no previous snapshot
@ -538,8 +538,8 @@ void CompactionIterator::NextFromInput() {
// These will be correctly set below.
key_ = merge_out_iter_.key();
value_ = merge_out_iter_.value();
bool valid_key __attribute__((__unused__)) =
ParseInternalKey(key_, &ikey_);
bool valid_key __attribute__((__unused__));
valid_key = ParseInternalKey(key_, &ikey_);
// MergeUntil stops when it encounters a corrupt key and does not
// include them in the result, so we expect the keys here to be valid.
assert(valid_key);

View File

@ -1678,10 +1678,10 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
env_->Schedule(&DBImpl::BGWorkBottomCompaction, ca, Env::Priority::BOTTOM,
this, &DBImpl::UnscheduleCallback);
} else {
int output_level __attribute__((unused)) = c->output_level();
int output_level __attribute__((unused));
output_level = c->output_level();
TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:NonTrivial",
&output_level);
SequenceNumber earliest_write_conflict_snapshot;
std::vector<SequenceNumber> snapshot_seqs =
snapshots_.GetAll(&earliest_write_conflict_snapshot);

View File

@ -1912,7 +1912,8 @@ void VersionStorageInfo::ExtendFileRangeOverlappingInterval(
#endif
*start_index = mid_index + 1;
*end_index = mid_index;
int count __attribute__((unused)) = 0;
int count __attribute__((unused));
count = 0;
// check backwards from 'mid' to lower indices
for (int i = mid_index; i >= 0 ; i--) {

3
env/mock_env.cc vendored
View File

@ -379,7 +379,8 @@ class TestMemLogger : public Logger {
const time_t seconds = now_tv.tv_sec;
struct tm t;
memset(&t, 0, sizeof(t));
auto ret __attribute__((__unused__)) = localtime_r(&seconds, &t);
struct tm* ret __attribute__((__unused__));
ret = localtime_r(&seconds, &t);
assert(ret);
p += snprintf(p, limit - p,
"%04d/%02d/%02d-%02d:%02d:%02d.%06d ",

View File

@ -252,7 +252,8 @@ void ThreadStatusUpdater::EraseColumnFamilyInfo(const void* cf_key) {
ConstantColumnFamilyInfo& cf_info = cf_pair->second;
auto db_pair = db_key_map_.find(cf_info.db_key);
assert(db_pair != db_key_map_.end());
size_t result __attribute__((unused)) = db_pair->second.erase(cf_key);
size_t result __attribute__((unused));
result = db_pair->second.erase(cf_key);
assert(result);
cf_info_map_.erase(cf_pair);
}

View File

@ -74,7 +74,9 @@ WinEnvIO::WinEnvIO(Env* hosted_env)
{
LARGE_INTEGER qpf;
BOOL ret = QueryPerformanceFrequency(&qpf);
// No init as the compiler complains about unused var
BOOL ret;
ret = QueryPerformanceFrequency(&qpf);
assert(ret == TRUE);
perf_counter_frequency_ = qpf.QuadPart;
}

View File

@ -279,7 +279,8 @@ Status WinMmapFile::MapNewRegion() {
if (hMap_ != NULL) {
// Unmap the previous one
BOOL ret = ::CloseHandle(hMap_);
BOOL ret;
ret = ::CloseHandle(hMap_);
assert(ret);
hMap_ = NULL;
}
@ -1020,7 +1021,8 @@ Status WinDirectory::Fsync() { return Status::OK(); }
/// WinFileLock
WinFileLock::~WinFileLock() {
BOOL ret = ::CloseHandle(hFile_);
BOOL ret;
ret = ::CloseHandle(hFile_);
assert(ret);
}

View File

@ -1312,7 +1312,8 @@ Status BlobDBImpl::CloseBlobFile(std::shared_ptr<BlobFile> bfile) {
WriteLock wl(&mutex_);
if (bfile->HasTTL()) {
size_t erased __attribute__((__unused__)) = open_blob_files_.erase(bfile);
size_t erased __attribute__((__unused__));
erased = open_blob_files_.erase(bfile);
assert(erased == 1);
} else {
auto iter = std::find(open_simple_files_.begin(),

View File

@ -48,7 +48,8 @@ void InitJSONDocument(std::unique_ptr<char[]>* data,
fbson::FbsonWriter writer;
bool res __attribute__((unused)) = writer.writeStartArray();
assert(res);
uint32_t bytesWritten __attribute__((unused)) = f(writer);
uint32_t bytesWritten __attribute__((unused));
bytesWritten = f(writer);
assert(bytesWritten != 0);
res = writer.writeEndArray();
assert(res);

View File

@ -63,7 +63,8 @@ bool BlockCacheTierMetadata::Lookup(const Slice& key, LBA* lba) {
BlockInfo* BlockCacheTierMetadata::Remove(const Slice& key) {
BlockInfo lookup_key(key);
BlockInfo* binfo = nullptr;
bool ok __attribute__((__unused__)) = block_index_.Erase(&lookup_key, &binfo);
bool ok __attribute__((__unused__));
ok = block_index_.Erase(&lookup_key, &binfo);
assert(ok);
return binfo;
}

View File

@ -480,7 +480,8 @@ void WriteBatchWithIndex::Rep::AddNewEntry(uint32_t column_family_id) {
wb_data.size() - last_entry_offset);
// Extract key
Slice key;
bool success __attribute__((__unused__)) =
bool success __attribute__((__unused__));
success =
ReadKeyFromWriteBatchEntry(&entry_ptr, &key, column_family_id != 0);
assert(success);