Apply formatter to some recent commits (#6138)

Summary:
The formatter complains that some recently changed lines are mis-formatted. Apply its suggested fixes to keep the formatter happy.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6138

Test Plan: See that CI passes.

Differential Revision: D18895950

fbshipit-source-id: 7d1696cf3e3a682bc10a30cdca748a23c6565255
Author: sdong, 2019-12-09 15:48:04 -08:00 (committed by Facebook Github Bot)
Parent: a960287dee
Commit: a68dff5c35
5 changed files with 20 additions and 15 deletions
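For context, every hunk below is a mechanical rewrite produced by clang-format under RocksDB's Google-derived configuration: empty function bodies collapse to {}, trailing // comments get two preceding spaces, and statements past the column limit are re-wrapped at the continuation indent. A minimal illustration of the pattern (made up for this note, not taken from the diff):

// Unformatted input:
static void DoNothing(void*) { } // one space before this trailing comment
// What clang-format emits:
static void DoNothing(void*) {}  // two spaces, empty braces closed up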


@@ -3057,7 +3057,7 @@ rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom_format(int bits_per_key
         const Slice& contents) const override {
       return rep_->GetFilterBitsReader(contents);
     }
-    static void DoNothing(void*) { }
+    static void DoNothing(void*) {}
   };
   Wrapper* wrapper = new Wrapper;
   wrapper->rep_ = NewBloomFilterPolicy(bits_per_key, original_format);
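Aside: the Wrapper above adapts the C++ FilterPolicy returned by NewBloomFilterPolicy to the C API struct. On the C++ side, the same policy is normally installed through BlockBasedTableOptions; a minimal sketch (the function name MakeOptionsWithBloom is ours, the RocksDB calls are the public API):

#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

rocksdb::Options MakeOptionsWithBloom() {
  rocksdb::BlockBasedTableOptions table_options;
  // ~10 bits per key gives roughly a 1% false-positive rate; `true` selects
  // the original block-based filter format, as in the C shim above.
  table_options.filter_policy.reset(
      rocksdb::NewBloomFilterPolicy(10, /*use_block_based_builder=*/true));
  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}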


@@ -1058,8 +1058,8 @@ int main(int argc, char** argv) {
     CheckNoError(err);
     rocksdb_filterpolicy_t* policy;
     if (run == 0) {
-      policy = rocksdb_filterpolicy_create(
-          NULL, FilterDestroy, FilterCreate, FilterKeyMatch, NULL, FilterName);
+      policy = rocksdb_filterpolicy_create(NULL, FilterDestroy, FilterCreate,
+                                           FilterKeyMatch, NULL, FilterName);
     } else if (run == 1) {
       policy = rocksdb_filterpolicy_create_bloom(8);
     } else {
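Aside: FilterDestroy, FilterCreate, FilterKeyMatch, and FilterName are test-local callbacks. For readers unfamiliar with the C API, here is a stub of the shape rocksdb_filterpolicy_create expects, with illustrative names of our own (NULL is accepted for the state pointer and the delete_filter callback, as the call in the test shows):

#include <cstdlib>

#include "rocksdb/c.h"

// Trivial callbacks: a one-byte "filter" that reports every key as a
// possible match (always safe for a filter, just useless).
static void StubDestroy(void* /*state*/) {}
static char* StubCreateFilter(void* /*state*/, const char* const* /*keys*/,
                              const size_t* /*key_lens*/, int /*num_keys*/,
                              size_t* filter_len) {
  char* filter = static_cast<char*>(malloc(1));
  filter[0] = 0;
  *filter_len = 1;
  return filter;  // RocksDB takes ownership of this buffer
}
static unsigned char StubKeyMayMatch(void* /*state*/, const char* /*key*/,
                                     size_t /*key_len*/,
                                     const char* /*filter*/,
                                     size_t /*filter_len*/) {
  return 1;
}
static const char* StubName(void* /*state*/) { return "stub_filter"; }

rocksdb_filterpolicy_t* MakeStubPolicy() {
  // NULL state and NULL delete_filter, matching the call in the test.
  return rocksdb_filterpolicy_create(nullptr, StubDestroy, StubCreateFilter,
                                     StubKeyMayMatch, nullptr, StubName);
}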


@@ -2054,14 +2054,14 @@ TEST_P(DBBasicTestWithTimestampWithParam, PutAndGet) {
 #if LZ4_VERSION_NUMBER >= 10400 // r124+
   compression_types.push_back(kLZ4Compression);
   compression_types.push_back(kLZ4HCCompression);
-#endif // LZ4_VERSION_NUMBER >= 10400
+#endif  // LZ4_VERSION_NUMBER >= 10400
   if (ZSTD_Supported()) {
     compression_types.push_back(kZSTD);
   }
   // Switch compression dictionary on/off to check key extraction
   // correctness in kBuffered state
-  std::vector<uint32_t> max_dict_bytes_list = {0, 1 << 14}; // 0 or 16KB
+  std::vector<uint32_t> max_dict_bytes_list = {0, 1 << 14};  // 0 or 16KB
   for (auto compression_type : compression_types) {
     for (uint32_t max_dict_bytes : max_dict_bytes_list) {
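Aside: the inner loop above toggles the compression dictionary via max_dict_bytes. A sketch of what one kZSTD iteration configures, mirroring the test's values rather than recommending them:

rocksdb::Options options;
options.compression = rocksdb::kZSTD;
// 0 disables the dictionary; 1 << 14 allows a 16KB dictionary per file.
options.compression_opts.max_dict_bytes = 1 << 14;
// For ZSTD, also bound the sample bytes used to train the dictionary.
options.compression_opts.zstd_max_train_bytes = 1 << 14;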
@@ -2070,7 +2070,7 @@ TEST_P(DBBasicTestWithTimestampWithParam, PutAndGet) {
       if (compression_type == kZSTD) {
         options.compression_opts.zstd_max_train_bytes = max_dict_bytes;
       }
-      options.target_file_size_base = 1 << 26; // 64MB
+      options.target_file_size_base = 1 << 26;  // 64MB
       DestroyAndReopen(options);
       CreateAndReopenWithCF({"pikachu"}, options);
@@ -2082,16 +2082,18 @@ TEST_P(DBBasicTestWithTimestampWithParam, PutAndGet) {
       std::vector<Slice> read_ts_list;
       for (size_t i = 0; i != kNumTimestamps; ++i) {
-        write_ts_list.emplace_back(EncodeTimestamp(i * 2, 0, &write_ts_strs[i]));
-        read_ts_list.emplace_back(EncodeTimestamp(1 + i * 2, 0, &read_ts_strs[i]));
+        write_ts_list.emplace_back(
+            EncodeTimestamp(i * 2, 0, &write_ts_strs[i]));
+        read_ts_list.emplace_back(
+            EncodeTimestamp(1 + i * 2, 0, &read_ts_strs[i]));
         const Slice& write_ts = write_ts_list.back();
         WriteOptions wopts;
         wopts.timestamp = &write_ts;
         for (int cf = 0; cf != static_cast<int>(num_cfs); ++cf) {
           for (size_t j = 0; j != (kNumKeysPerFile - 1) / kNumTimestamps; ++j) {
-            ASSERT_OK(Put(cf, "key" + std::to_string(j),
-                          "value_" + std::to_string(j) + "_" + std::to_string(i),
-                          wopts));
+            ASSERT_OK(Put(
+                cf, "key" + std::to_string(j),
+                "value_" + std::to_string(j) + "_" + std::to_string(i), wopts));
           }
           if (!memtable_only) {
             ASSERT_OK(Flush(cf));
@@ -2104,9 +2106,11 @@ TEST_P(DBBasicTestWithTimestampWithParam, PutAndGet) {
         ropts.timestamp = &read_ts_list[i];
         for (int cf = 0; cf != static_cast<int>(num_cfs); ++cf) {
           ColumnFamilyHandle* cfh = handles_[cf];
-          for (size_t j = 0; j != (kNumKeysPerFile - 1) / kNumTimestamps; ++j) {
+          for (size_t j = 0; j != (kNumKeysPerFile - 1) / kNumTimestamps;
+               ++j) {
             std::string value;
-            ASSERT_OK(db_->Get(ropts, cfh, "key" + std::to_string(j), &value));
+            ASSERT_OK(
+                db_->Get(ropts, cfh, "key" + std::to_string(j), &value));
             ASSERT_EQ("value_" + std::to_string(j) + "_" + std::to_string(i),
                       value);
           }
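Aside: the test writes at timestamp 2*i and reads back at 2*i + 1, so each read sees exactly the preceding write. A condensed sketch of one such round trip (user-defined timestamps were an experimental API at the time; EncodeTimestamp is a test helper, and db_, handles_, ASSERT_OK come from the test fixture):

std::string write_ts_str;
rocksdb::Slice write_ts = EncodeTimestamp(0, 0, &write_ts_str);
rocksdb::WriteOptions wopts;
wopts.timestamp = &write_ts;  // stamp the write with timestamp 0
ASSERT_OK(db_->Put(wopts, handles_[0], "key0", "value_0_0"));

std::string read_ts_str;
rocksdb::Slice read_ts = EncodeTimestamp(1, 0, &read_ts_str);
rocksdb::ReadOptions ropts;
ropts.timestamp = &read_ts;  // read as of timestamp 1, sees the write above
std::string value;
ASSERT_OK(db_->Get(ropts, handles_[0], "key0", &value));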


@@ -1108,7 +1108,8 @@ void BlockBasedTableBuilder::EnterUnbuffered() {
   for (const auto& key : keys) {
     if (r->filter_builder != nullptr) {
-      size_t ts_sz = r->internal_comparator.user_comparator()->timestamp_size();
+      size_t ts_sz =
+          r->internal_comparator.user_comparator()->timestamp_size();
       r->filter_builder->Add(ExtractUserKeyAndStripTimestamp(key, ts_sz));
     }
     r->index_builder->OnKeyAdded(key);
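Aside: ExtractUserKeyAndStripTimestamp drops the trailing ts_sz timestamp bytes (along with the internal-key footer) before the key goes into the filter, so filter probes are timestamp-agnostic. An illustrative sketch of the stripping idea, not the internal definition:

#include <cassert>

#include "rocksdb/slice.h"

// Given a key carrying a ts_sz-byte timestamp suffix, return the key alone.
inline rocksdb::Slice StripTimestamp(const rocksdb::Slice& key, size_t ts_sz) {
  assert(key.size() >= ts_sz);
  return rocksdb::Slice(key.data(), key.size() - ts_sz);
}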


@@ -1127,7 +1127,7 @@ TEST(DistributedMutex, StressBigValueReturnSixtyFourThreads) {
   concurrentBigValueReturnStress(64, std::chrono::seconds{kStressTestSeconds});
 }
-} // namespace folly
+}  // namespace folly
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);