Enable C4267 warning

* 'conversion from size_t to type' warnings fixed by adding static_cast (or widening the destination type where appropriate)

Tested:
* built the solution locally on Windows and Linux
* ran the tests
* CI build succeeded
Vasili Svirski 2015-11-15 21:49:14 +03:00 committed by Vasily Svirsky
parent 890f44f46e
commit 41b32c6059
13 changed files with 31 additions and 21 deletions

View File

@@ -59,7 +59,7 @@ add_custom_target(GenerateBuildVersion DEPENDS ${BUILD_VERSION_CC})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /WX /wd4127 /wd4244 /wd4267 /wd4800 /wd4996")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /WX /wd4127 /wd4244 /wd4800 /wd4996")
# Used to run CI build and tests so we can run faster
set(OPTIMIZE_DEBUG_DEFAULT 0) # Debug build is unoptimized by default use -DOPTDBG=1 to optimize
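
For context: under /W3 with /WX, removing /wd4267 from the suppression list above turns every C4267 ("conversion from 'size_t' to 'type', possible loss of data") into a build error, so the remaining files in this commit fix each occurrence. A minimal sketch of the pattern and of the two fixes used throughout; the code below is illustrative, not taken from this commit:

    #include <cstddef>
    #include <vector>

    int CountImplicit(const std::vector<int>& v) {
      int n = v.size();  // C4267 on 64-bit MSVC: size_t narrowed to int
      return n;
    }

    size_t CountWiden(const std::vector<int>& v) {
      size_t n = v.size();  // fix 1: widen the destination type
      return n;
    }

    int CountCast(const std::vector<int>& v) {
      // fix 2: make the narrowing explicit where the value is known to fit
      int n = static_cast<int>(v.size());
      return n;
    }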

View File

@@ -128,8 +128,8 @@ bool Compaction::TEST_IsBottommostLevel(
bool Compaction::IsFullCompaction(
VersionStorageInfo* vstorage,
const std::vector<CompactionInputFiles>& inputs) {
int num_files_in_compaction = 0;
int total_num_files = 0;
size_t num_files_in_compaction = 0;
size_t total_num_files = 0;
for (int l = 0; l < vstorage->num_levels(); l++) {
total_num_files += vstorage->NumLevelFiles(l);
}
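
The counters here are widened to size_t so that adding per-level file counts never narrows inside the loop; a rough standalone sketch of the same shape (names and types are illustrative, not RocksDB's):

    #include <cstddef>
    #include <vector>

    // Keep the accumulator in size_t: size_t += size_t needs no cast,
    // and the two totals can be compared without mixing signedness.
    size_t TotalFiles(const std::vector<std::vector<int>>& levels) {
      size_t total = 0;
      for (const auto& level : levels) {
        total += level.size();
      }
      return total;
    }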

View File

@@ -160,7 +160,7 @@ const SstFileMetaData* PickFileRandomly(
auto result = rand->Uniform(file_id);
return &(level_meta.files[result]);
}
file_id -= level_meta.files.size();
file_id -= static_cast<uint32_t>(level_meta.files.size());
}
assert(false);
return nullptr;
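
Here the running counter file_id is already a uint32_t, so the fix is a single explicit cast at the subtraction; the implicit assumption is that a level never holds more files than fit in 32 bits. A generic sketch of the same idiom (hypothetical helper, not the test's code):

    #include <cassert>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // Walk buckets with a 32-bit counter; the static_cast documents the
    // narrowing and the assert makes the "fits in uint32_t" assumption checkable.
    int PickBucket(const std::vector<std::vector<int>>& buckets, uint32_t id) {
      for (size_t i = 0; i < buckets.size(); ++i) {
        if (id < buckets[i].size()) {
          return static_cast<int>(i);
        }
        assert(buckets[i].size() <= std::numeric_limits<uint32_t>::max());
        id -= static_cast<uint32_t>(buckets[i].size());
      }
      return -1;
    }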

View File

@@ -2171,7 +2171,7 @@ TEST_F(DBTest, RecoverWithTableHandle) {
std::vector<std::vector<FileMetaData>> files;
dbfull()->TEST_GetFilesMetaData(handles_[1], &files);
int total_files = 0;
size_t total_files = 0;
for (const auto& level : files) {
total_files += level.size();
}

View File

@@ -661,14 +661,14 @@ uint64_t DBTestBase::SizeAtLevel(int level) {
return sum;
}
int DBTestBase::TotalLiveFiles(int cf) {
size_t DBTestBase::TotalLiveFiles(int cf) {
ColumnFamilyMetaData cf_meta;
if (cf == 0) {
db_->GetColumnFamilyMetaData(&cf_meta);
} else {
db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
}
int num_files = 0;
size_t num_files = 0;
for (auto& level : cf_meta.levels) {
num_files += level.files.size();
}

View File

@@ -583,7 +583,7 @@ class DBTestBase : public testing::Test {
uint64_t SizeAtLevel(int level);
int TotalLiveFiles(int cf = 0);
size_t TotalLiveFiles(int cf = 0);
size_t CountLiveFiles();
#endif // ROCKSDB_LITE
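
Since TotalLiveFiles only ever returns a sum of .size() values, widening its return type (in both the definition and the declaration above) removes the narrowing at the source; the trade-off is that callers that stored the result in an int would now see C4267 themselves. A hedged sketch of the caller side, with a stub standing in for the real function:

    #include <cstddef>

    // Hypothetical stand-in for the widened API above.
    size_t TotalLiveFiles(int /*cf*/ = 0) { return 0; }

    void Caller() {
      // int n = TotalLiveFiles();   // would now raise C4267 at the call site
      size_t n = TotalLiveFiles();   // take the count as size_t ...
      auto m = TotalLiveFiles();     // ... or let auto follow the return type
      (void)n;
      (void)m;
    }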

View File

@@ -47,7 +47,7 @@ Status Writer::AddRecord(const Slice& slice) {
Status s;
bool begin = true;
do {
const int leftover = kBlockSize - block_offset_;
const int64_t leftover = kBlockSize - block_offset_;
assert(leftover >= 0);
if (leftover < header_size) {
// Switch to a new block
@@ -62,7 +62,7 @@ Status Writer::AddRecord(const Slice& slice) {
}
// Invariant: we never leave < header_size bytes in a block.
assert(static_cast<int>(kBlockSize) - block_offset_ >= header_size);
assert(static_cast<int64_t>(kBlockSize - block_offset_) >= header_size);
const size_t avail = kBlockSize - block_offset_ - header_size;
const size_t fragment_length = (left < avail) ? left : avail;

View File

@@ -83,7 +83,7 @@ class Writer {
private:
unique_ptr<WritableFileWriter> dest_;
int block_offset_; // Current offset in block
size_t block_offset_; // Current offset in block
uint64_t log_number_;
bool recycle_log_files_;
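
With block_offset_ now a size_t, the expression kBlockSize - block_offset_ is unsigned arithmetic, which can never be negative, only wrap; that is why leftover stays signed (int64_t) and why the assert above compares in int64_t. A small illustration of the distinction, not the writer's actual code:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Convert before subtracting so a violated invariant (offset > block_size)
    // shows up as a negative value the assert can catch, instead of a huge
    // wrapped size_t.
    int64_t Leftover(size_t block_size, size_t offset) {
      int64_t leftover =
          static_cast<int64_t>(block_size) - static_cast<int64_t>(offset);
      assert(leftover >= 0);
      return leftover;
    }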

View File

@@ -101,6 +101,7 @@ typedef std::unique_ptr<void, decltype(CloseHandleFunc)> UniqueCloseHandlePtr;
// rely on the current file offset.
SSIZE_T pwrite(HANDLE hFile, const char* src, size_t numBytes,
uint64_t offset) {
assert(numBytes <= std::numeric_limits<DWORD>::max());
OVERLAPPED overlapped = {0};
ULARGE_INTEGER offsetUnion;
offsetUnion.QuadPart = offset;
@@ -112,7 +113,8 @@ SSIZE_T pwrite(HANDLE hFile, const char* src, size_t numBytes,
unsigned long bytesWritten = 0;
if (FALSE == WriteFile(hFile, src, numBytes, &bytesWritten, &overlapped)) {
if (FALSE == WriteFile(hFile, src, static_cast<DWORD>(numBytes), &bytesWritten,
&overlapped)) {
result = -1;
} else {
result = bytesWritten;
@@ -123,6 +125,7 @@ SSIZE_T pread(HANDLE hFile, char* src, size_t numBytes, uint64_t offset) {
// See comments for pwrite above
SSIZE_T pread(HANDLE hFile, char* src, size_t numBytes, uint64_t offset) {
assert(numBytes <= std::numeric_limits<DWORD>::max());
OVERLAPPED overlapped = {0};
ULARGE_INTEGER offsetUnion;
offsetUnion.QuadPart = offset;
@@ -134,7 +137,8 @@ SSIZE_T pread(HANDLE hFile, char* src, size_t numBytes, uint64_t offset) {
unsigned long bytesRead = 0;
if (FALSE == ReadFile(hFile, src, numBytes, &bytesRead, &overlapped)) {
if (FALSE == ReadFile(hFile, src, static_cast<DWORD>(numBytes), &bytesRead,
&overlapped)) {
return -1;
} else {
result = bytesRead;
@@ -948,13 +952,13 @@ class WinWritableFile : public WritableFile {
// Used for buffered access ONLY
assert(use_os_buffer_);
assert(data.size() < std::numeric_limits<int>::max());
assert(data.size() < std::numeric_limits<DWORD>::max());
Status s;
DWORD bytesWritten = 0;
if (!WriteFile(hFile_, data.data(),
data.size(), &bytesWritten, NULL)) {
static_cast<DWORD>(data.size()), &bytesWritten, NULL)) {
auto lastError = GetLastError();
s = IOErrorFromWindowsError(
"Failed to WriteFile: " + filename_,

View File

@@ -124,7 +124,8 @@ void WinLogger::Logv(const char* format, va_list ap) {
const size_t write_size = p - base;
DWORD bytesWritten = 0;
BOOL ret = WriteFile(file_, base, write_size, &bytesWritten, NULL);
BOOL ret = WriteFile(file_, base, static_cast<DWORD>(write_size),
&bytesWritten, NULL);
if (ret == FALSE) {
std::string errSz = GetWindowsErrSz(GetLastError());
fprintf(stderr, errSz.c_str());

View File

@@ -184,11 +184,11 @@ inline void PutLengthPrefixedSlice(std::string* dst, const Slice& value) {
inline void PutLengthPrefixedSliceParts(std::string* dst,
const SliceParts& slice_parts) {
uint32_t total_bytes = 0;
size_t total_bytes = 0;
for (int i = 0; i < slice_parts.num_parts; ++i) {
total_bytes += slice_parts.parts[i].size();
}
PutVarint32(dst, total_bytes);
PutVarint32(dst, static_cast<uint32_t>(total_bytes));
for (int i = 0; i < slice_parts.num_parts; ++i) {
dst->append(slice_parts.parts[i].data(), slice_parts.parts[i].size());
}
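
Same idea in the encoding helper: accumulate the total in size_t, then narrow once, explicitly, at the point where the format requires a 32-bit length prefix. A self-contained sketch; the simplified varint writer below only stands in for the real PutVarint32:

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Simplified LEB128-style varint writer (stand-in for PutVarint32).
    void PutVarint32Sketch(std::string* dst, uint32_t v) {
      while (v >= 0x80) {
        dst->push_back(static_cast<char>(v | 0x80));
        v >>= 7;
      }
      dst->push_back(static_cast<char>(v));
    }

    // Sum the part sizes in size_t; cast once when emitting the 32-bit prefix.
    void PutLengthPrefixedParts(std::string* dst,
                                const std::vector<std::string>& parts) {
      size_t total_bytes = 0;
      for (const auto& p : parts) {
        total_bytes += p.size();
      }
      PutVarint32Sketch(dst, static_cast<uint32_t>(total_bytes));
      for (const auto& p : parts) {
        dst->append(p.data(), p.size());
      }
    }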

View File

@@ -127,8 +127,8 @@ void OptimizeForLevel(int read_amplification_threshold,
const int kMaxFileNumCompactionTrigger = 4;
const int kMinLevel0StopTrigger = 3;
int file_num_buffer =
kInitialLevel0TotalSize / options->write_buffer_size + 1;
int file_num_buffer = static_cast<int>(
kInitialLevel0TotalSize / options->write_buffer_size + 1);
if (level0_stop_writes_trigger > file_num_buffer) {
// Have sufficient room for multiple level 0 files

View File

@@ -4,6 +4,9 @@
// of patent rights can be found in the PATENTS file in the same directory.
#ifndef ROCKSDB_LITE
#include <assert.h>
#include <limits>
#include <stdint.h>
#include "rocksdb/utilities/json_document.h"
#include "third-party/fbson/FbsonWriter.h"
@@ -38,7 +41,9 @@ bool JSONDocumentBuilder::WriteEndObject() {
bool JSONDocumentBuilder::WriteKeyValue(const std::string& key,
const JSONDocument& value) {
size_t bytesWritten = writer_->writeKey(key.c_str(), key.size());
assert(key.size() <= std::numeric_limits<uint8_t>::max());
size_t bytesWritten = writer_->writeKey(key.c_str(),
static_cast<uint8_t>(key.size()));
if (bytesWritten == 0) {
return false;
}
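
writeKey takes the key length as a uint8_t here, the narrowest destination in the commit, so the new assert (together with the added <limits> and <stdint.h> includes) turns silent truncation of keys longer than 255 bytes into a debug-time failure. A self-contained sketch of the same guard, with a stub standing in for the fbson writer:

    #include <cassert>
    #include <cstdint>
    #include <limits>
    #include <string>

    // Hypothetical writer whose key-length parameter is only 8 bits wide,
    // mirroring the writeKey call in the hunk above.
    size_t WriteKeyStub(const char* /*key*/, uint8_t len) { return len; }

    bool WriteKeyChecked(const std::string& key) {
      // Without the assert, a key longer than 255 bytes would be silently
      // truncated by the cast below.
      assert(key.size() <= std::numeric_limits<uint8_t>::max());
      size_t bytes_written =
          WriteKeyStub(key.c_str(), static_cast<uint8_t>(key.size()));
      return bytes_written != 0;
    }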