Fix potential overflow of unsigned type in for loop (#6902)
Summary: x.size() - 1 or y - 1 can wrap around to an extremely large value when x.size() or y is 0 and they are of an unsigned type. The end condition of i in the for loop will then be extremely large, potentially causing a segmentation fault. Fix them. Pull Request resolved: https://github.com/facebook/rocksdb/pull/6902 Test Plan: pass make asan_check Reviewed By: ajkr Differential Revision: D21843767 Pulled By: zhichao-cao fbshipit-source-id: 5b8b88155ac5a93d86246d832e89905a783bb5a1
This commit is contained in:
parent
556972e964
commit
2adb7e3768
@ -542,7 +542,7 @@ void CompactionJob::GenSubcompactionBoundaries() {
|
||||
// Greedily add ranges to the subcompaction until the sum of the ranges'
|
||||
// sizes becomes >= the expected mean size of a subcompaction
|
||||
sum = 0;
|
||||
for (size_t i = 0; i < ranges.size() - 1; i++) {
|
||||
for (size_t i = 0; i + 1 < ranges.size(); i++) {
|
||||
sum += ranges[i].size;
|
||||
if (subcompactions == 1) {
|
||||
// If there's only one left to schedule then it goes to the end so no
|
||||
|
@ -765,7 +765,7 @@ Compaction* UniversalCompactionBuilder::PickCompactionToReduceSizeAmp() {
|
||||
}
|
||||
|
||||
// Skip files that are already being compacted
|
||||
for (size_t loop = 0; loop < sorted_runs_.size() - 1; loop++) {
|
||||
for (size_t loop = 0; loop + 1 < sorted_runs_.size(); loop++) {
|
||||
sr = &sorted_runs_[loop];
|
||||
if (!sr->being_compacted) {
|
||||
start_index = loop; // Consider this as the first candidate.
|
||||
@ -793,7 +793,7 @@ Compaction* UniversalCompactionBuilder::PickCompactionToReduceSizeAmp() {
|
||||
}
|
||||
|
||||
// keep adding up all the remaining files
|
||||
for (size_t loop = start_index; loop < sorted_runs_.size() - 1; loop++) {
|
||||
for (size_t loop = start_index; loop + 1 < sorted_runs_.size(); loop++) {
|
||||
sr = &sorted_runs_[loop];
|
||||
if (sr->being_compacted) {
|
||||
char file_num_buf[kFormatFileNumberBufSize];
|
||||
|
@ -1900,13 +1900,13 @@ TEST_F(DBBasicTest, GetAllKeyVersions) {
|
||||
ASSERT_EQ(kNumInserts + kNumDeletes + kNumUpdates, key_versions.size());
|
||||
|
||||
// Check non-default column family
|
||||
for (size_t i = 0; i != kNumInserts - 1; ++i) {
|
||||
for (size_t i = 0; i + 1 != kNumInserts; ++i) {
|
||||
ASSERT_OK(Put(1, std::to_string(i), "value"));
|
||||
}
|
||||
for (size_t i = 0; i != kNumUpdates - 1; ++i) {
|
||||
for (size_t i = 0; i + 1 != kNumUpdates; ++i) {
|
||||
ASSERT_OK(Put(1, std::to_string(i), "value1"));
|
||||
}
|
||||
for (size_t i = 0; i != kNumDeletes - 1; ++i) {
|
||||
for (size_t i = 0; i + 1 != kNumDeletes; ++i) {
|
||||
ASSERT_OK(Delete(1, std::to_string(i)));
|
||||
}
|
||||
ASSERT_OK(ROCKSDB_NAMESPACE::GetAllKeyVersions(
|
||||
|
@ -187,7 +187,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
|
||||
Iterator* iter = nullptr;
|
||||
|
||||
// Load blocks into cache.
|
||||
for (size_t i = 0; i < kNumBlocks - 1; i++) {
|
||||
for (size_t i = 0; i + 1 < kNumBlocks; i++) {
|
||||
iter = db_->NewIterator(read_options);
|
||||
iter->Seek(ToString(i));
|
||||
ASSERT_OK(iter->status());
|
||||
@ -209,12 +209,12 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
|
||||
iter = nullptr;
|
||||
|
||||
// Release iterators and access cache again.
|
||||
for (size_t i = 0; i < kNumBlocks - 1; i++) {
|
||||
for (size_t i = 0; i + 1 < kNumBlocks; i++) {
|
||||
iterators[i].reset();
|
||||
CheckCacheCounters(options, 0, 0, 0, 0);
|
||||
}
|
||||
ASSERT_EQ(0, cache->GetPinnedUsage());
|
||||
for (size_t i = 0; i < kNumBlocks - 1; i++) {
|
||||
for (size_t i = 0; i + 1 < kNumBlocks; i++) {
|
||||
iter = db_->NewIterator(read_options);
|
||||
iter->Seek(ToString(i));
|
||||
ASSERT_OK(iter->status());
|
||||
@ -243,7 +243,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
|
||||
Iterator* iter = nullptr;
|
||||
|
||||
// Load blocks into cache.
|
||||
for (size_t i = 0; i < kNumBlocks - 1; i++) {
|
||||
for (size_t i = 0; i + 1 < kNumBlocks; i++) {
|
||||
iter = db_->NewIterator(read_options);
|
||||
iter->Seek(ToString(i));
|
||||
ASSERT_OK(iter->status());
|
||||
|
@ -499,13 +499,13 @@ TEST_P(DBAtomicFlushTest, AtomicFlushTriggeredByMemTableFull) {
|
||||
TEST_SYNC_POINT(
|
||||
"DBAtomicFlushTest::AtomicFlushTriggeredByMemTableFull:BeforeCheck");
|
||||
if (options.atomic_flush) {
|
||||
for (size_t i = 0; i != num_cfs - 1; ++i) {
|
||||
for (size_t i = 0; i + 1 != num_cfs; ++i) {
|
||||
auto cfh = static_cast<ColumnFamilyHandleImpl*>(handles_[i]);
|
||||
ASSERT_EQ(0, cfh->cfd()->imm()->NumNotFlushed());
|
||||
ASSERT_TRUE(cfh->cfd()->mem()->IsEmpty());
|
||||
}
|
||||
} else {
|
||||
for (size_t i = 0; i != num_cfs - 1; ++i) {
|
||||
for (size_t i = 0; i + 1 != num_cfs; ++i) {
|
||||
auto cfh = static_cast<ColumnFamilyHandleImpl*>(handles_[i]);
|
||||
ASSERT_EQ(0, cfh->cfd()->imm()->NumNotFlushed());
|
||||
ASSERT_FALSE(cfh->cfd()->mem()->IsEmpty());
|
||||
|
@ -189,7 +189,7 @@ TEST_F(DBRangeDelTest, MaxCompactionBytesCutsOutputFiles) {
|
||||
std::vector<std::vector<FileMetaData>> files;
|
||||
dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
|
||||
|
||||
for (size_t i = 0; i < files[1].size() - 1; ++i) {
|
||||
for (size_t i = 0; i + 1 < files[1].size(); ++i) {
|
||||
ASSERT_TRUE(InternalKeyComparator(opts.comparator)
|
||||
.Compare(files[1][i].largest, files[1][i + 1].smallest) <
|
||||
0);
|
||||
|
@ -68,7 +68,7 @@ Status ExternalSstFileIngestionJob::Prepare(
|
||||
info2->smallest_internal_key) < 0;
|
||||
});
|
||||
|
||||
for (size_t i = 0; i < num_files - 1; i++) {
|
||||
for (size_t i = 0; i + 1 < num_files; i++) {
|
||||
if (sstableKeyCompare(ucmp, sorted_files[i]->largest_internal_key,
|
||||
sorted_files[i + 1]->smallest_internal_key) >= 0) {
|
||||
files_overlap_ = true;
|
||||
|
@ -245,7 +245,7 @@ int db_stress_tool(int argc, char** argv) {
|
||||
}
|
||||
} else {
|
||||
uint64_t keys_per_level = key_gen_ctx.window / levels;
|
||||
for (unsigned int level = 0; level < levels - 1; ++level) {
|
||||
for (unsigned int level = 0; level + 1 < levels; ++level) {
|
||||
key_gen_ctx.weights.emplace_back(keys_per_level);
|
||||
}
|
||||
key_gen_ctx.weights.emplace_back(key_gen_ctx.window -
|
||||
|
2
env/env_test.cc
vendored
2
env/env_test.cc
vendored
@ -1242,7 +1242,7 @@ TEST_F(EnvPosixTest, MultiReadNonAlignedLargeNum) {
|
||||
for (int so: start_offsets) {
|
||||
offsets.push_back(so);
|
||||
}
|
||||
for (size_t i = 0; i < offsets.size() - 1; i++) {
|
||||
for (size_t i = 0; i + 1 < offsets.size(); i++) {
|
||||
lens.push_back(static_cast<size_t>(rnd.Uniform(static_cast<int>(offsets[i + 1] - offsets[i])) + 1));
|
||||
}
|
||||
lens.push_back(static_cast<size_t>(rnd.Uniform(static_cast<int>(kTotalSize - offsets.back())) + 1));
|
||||
|
@ -110,7 +110,7 @@ class CuckooBuilderTest : public testing::Test {
|
||||
// Check contents of the bucket.
|
||||
std::vector<bool> keys_found(keys.size(), false);
|
||||
size_t bucket_size = expected_unused_bucket.size();
|
||||
for (uint32_t i = 0; i < table_size + cuckoo_block_size - 1; ++i) {
|
||||
for (uint32_t i = 0; i + 1 < table_size + cuckoo_block_size; ++i) {
|
||||
Slice read_slice;
|
||||
ASSERT_OK(file_reader->Read(IOOptions(), i * bucket_size, bucket_size,
|
||||
&read_slice, nullptr, nullptr));
|
||||
|
@ -161,7 +161,7 @@ TEST(Coding, Varint32Truncation) {
|
||||
std::string s;
|
||||
PutVarint32(&s, large_value);
|
||||
uint32_t result;
|
||||
for (unsigned int len = 0; len < s.size() - 1; len++) {
|
||||
for (unsigned int len = 0; len + 1 < s.size(); len++) {
|
||||
ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
|
||||
}
|
||||
ASSERT_TRUE(
|
||||
@ -181,7 +181,7 @@ TEST(Coding, Varint64Truncation) {
|
||||
std::string s;
|
||||
PutVarint64(&s, large_value);
|
||||
uint64_t result;
|
||||
for (unsigned int len = 0; len < s.size() - 1; len++) {
|
||||
for (unsigned int len = 0; len + 1 < s.size(); len++) {
|
||||
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
|
||||
}
|
||||
ASSERT_TRUE(
|
||||
|
@ -255,7 +255,7 @@ void BlobDumpTool::DumpSlice(const Slice s, DisplayType type) {
|
||||
snprintf(buf + j * 3 + 16, 2, "%x", c & 0xf);
|
||||
snprintf(buf + j + 65, 2, "%c", (0x20 <= c && c <= 0x7e) ? c : '.');
|
||||
}
|
||||
for (size_t p = 0; p < sizeof(buf) - 1; p++) {
|
||||
for (size_t p = 0; p + 1 < sizeof(buf); p++) {
|
||||
if (buf[p] == 0) {
|
||||
buf[p] = ' ';
|
||||
}
|
||||
|
@ -722,7 +722,7 @@ TEST_P(TransactionStressTest, DeadlockCycle) {
|
||||
// We want the last transaction in the chain to block and hold everyone
|
||||
// back.
|
||||
std::vector<port::Thread> threads;
|
||||
for (uint32_t i = 0; i < len - 1; i++) {
|
||||
for (uint32_t i = 0; i + 1 < len; i++) {
|
||||
std::function<void()> blocking_thread = [&, i] {
|
||||
auto s = txns[i]->GetForUpdate(read_options, ToString(i + 1), nullptr);
|
||||
ASSERT_OK(s);
|
||||
|
@ -1107,7 +1107,7 @@ TEST_P(WritePreparedTransactionTest, CheckAgainstSnapshots) {
|
||||
355l, 450l, 455l, 550l, 555l, 650l, 655l,
|
||||
750l, 755l, 850l, 855l, 950l, 955l};
|
||||
assert(seqs.size() > 1);
|
||||
for (size_t i = 0; i < seqs.size() - 1; i++) {
|
||||
for (size_t i = 0; i + 1 < seqs.size(); i++) {
|
||||
wp_db->old_commit_map_empty_ = true; // reset
|
||||
CommitEntry commit_entry = {seqs[i], seqs[i + 1]};
|
||||
wp_db->CheckAgainstSnapshots(commit_entry);
|
||||
|
Loading…
Reference in New Issue
Block a user