Add rocksdb::ToString() to address cases where std::to_string is not available.
Summary: In some environments, such as Android, the C++ library does not provide std::to_string. This patch adds rocksdb::ToString(), which wraps std::to_string where it is available and supplies an equivalent implementation otherwise.

Test Plan:
make dbg -j32
./db_test
make clean
make dbg OPT=-DOS_ANDROID -j32
./db_test

Reviewers: ljin, sdong, igor

Reviewed By: igor

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D29181
commit 13de000f07
parent 90ee85f8e1
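For reference, a minimal sketch of what such a wrapper could look like (this is an illustrative approximation, not the exact contents of the patch; the OS_ANDROID switch and the std::ostringstream fallback shown here are assumptions based on the summary and test plan above):

// Illustrative sketch only -- assumes an OS_ANDROID macro selects the fallback.
#include <sstream>
#include <string>

namespace rocksdb {

template <typename T>
inline std::string ToString(T value) {
#ifndef OS_ANDROID
  // The standard library provides std::to_string; forward to it directly.
  return std::to_string(value);
#else
  // Fallback for toolchains without std::to_string (e.g. some Android
  // builds): format the value through a string stream instead.
  std::ostringstream oss;
  oss << value;
  return oss.str();
#endif
}

}  // namespace rocksdb

Call sites then switch mechanically from std::to_string(x) to ToString(x), which is what the hunks below do.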
@@ -218,7 +218,7 @@ class ColumnFamilyTest {

 int NumTableFilesAtLevel(int level, int cf) {
 return GetProperty(cf,
-"rocksdb.num-files-at-level" + std::to_string(level));
+"rocksdb.num-files-at-level" + ToString(level));
 }

 // Return spread of files per level
@@ -387,7 +387,7 @@ TEST(ColumnFamilyTest, DropTest) {
 Open({"default"});
 CreateColumnFamiliesAndReopen({"pikachu"});
 for (int i = 0; i < 100; ++i) {
-ASSERT_OK(Put(1, std::to_string(i), "bar" + std::to_string(i)));
+ASSERT_OK(Put(1, ToString(i), "bar" + ToString(i)));
 }
 ASSERT_OK(Flush(1));

@@ -774,14 +774,14 @@ TEST(ColumnFamilyTest, DifferentCompactionStyles) {
 for (int i = 0; i < one.level0_file_num_compaction_trigger - 1; ++i) {
 PutRandomData(1, 11, 10000);
 WaitForFlush(1);
-ASSERT_EQ(std::to_string(i + 1), FilesPerLevel(1));
+ASSERT_EQ(ToString(i + 1), FilesPerLevel(1));
 }

 // SETUP column family "two" -- level style with 4 levels
 for (int i = 0; i < two.level0_file_num_compaction_trigger - 1; ++i) {
 PutRandomData(2, 15, 10000);
 WaitForFlush(2);
-ASSERT_EQ(std::to_string(i + 1), FilesPerLevel(2));
+ASSERT_EQ(ToString(i + 1), FilesPerLevel(2));
 }

 // TRIGGER compaction "one"
@@ -60,8 +60,8 @@ class CompactionJobTest {
 SequenceNumber smallest_seqno = 0, largest_seqno = 0;
 InternalKey smallest, largest;
 for (int k = 0; k < kKeysPerFile; ++k) {
-auto key = std::to_string(i * (kKeysPerFile / 2) + k);
-auto value = std::to_string(i * kKeysPerFile + k);
+auto key = ToString(i * (kKeysPerFile / 2) + k);
+auto value = ToString(i * kKeysPerFile + k);
 InternalKey internal_key(key, ++sequence_number, kTypeValue);
 if (k == 0) {
 smallest = internal_key;
@@ -19,6 +19,7 @@
 #include "db/filename.h"
 #include "util/log_buffer.h"
 #include "util/statistics.h"
+#include "util/string_util.h"

 namespace rocksdb {

@@ -262,7 +263,7 @@ Status CompactionPicker::GetCompactionInputsFromFileNumbers(
 "Cannot find matched SST files for the following file numbers:");
 for (auto fn : *input_set) {
 message += " ";
-message += std::to_string(fn);
+message += ToString(fn);
 }
 return Status::InvalidArgument(message);
 }
@@ -616,7 +617,7 @@ Status CompactionPicker::SanitizeCompactionInputFiles(
 return Status::InvalidArgument(
 "Output level for column family " + cf_meta.name +
 " must between [0, " +
-std::to_string(cf_meta.levels[cf_meta.levels.size() - 1].level) +
+ToString(cf_meta.levels[cf_meta.levels.size() - 1].level) +
 "].");
 }

@@ -624,7 +625,7 @@ Status CompactionPicker::SanitizeCompactionInputFiles(
 return Status::InvalidArgument(
 "Exceed the maximum output level defined by "
 "the current compaction algorithm --- " +
-std::to_string(MaxOutputLevel()));
+ToString(MaxOutputLevel()));
 }

 if (output_level < 0) {
@@ -192,8 +192,8 @@ TEST(CompactionPickerTest, NeedsCompactionLevel) {
 // start a brand new version in each test.
 NewVersionStorage(kLevels, kCompactionStyleLevel);
 for (int i = 0; i < file_count; ++i) {
-Add(level, i, std::to_string((i + 100) * 1000).c_str(),
-std::to_string((i + 100) * 1000 + 999).c_str(),
+Add(level, i, ToString((i + 100) * 1000).c_str(),
+ToString((i + 100) * 1000 + 999).c_str(),
 file_size, 0, i * 100, i * 100 + 99);
 }
 UpdateVersionStorageInfo();
@@ -217,8 +217,8 @@ TEST(CompactionPickerTest, NeedsCompactionUniversal) {
 // verify the trigger given different number of L0 files.
 for (int i = 1;
 i <= mutable_cf_options_.level0_file_num_compaction_trigger * 2; ++i) {
-Add(0, i, std::to_string((i + 100) * 1000).c_str(),
-std::to_string((i + 100) * 1000 + 999).c_str(), 1000000, 0, i * 100,
+Add(0, i, ToString((i + 100) * 1000).c_str(),
+ToString((i + 100) * 1000 + 999).c_str(), 1000000, 0, i * 100,
 i * 100 + 99);
 ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()),
 vstorage_->CompactionScore(0) >= 1);
@@ -243,8 +243,8 @@ TEST(CompactionPickerTest, NeedsCompactionFIFO) {
 // size of L0 files.
 uint64_t current_size = 0;
 for (int i = 1; i <= kFileCount; ++i) {
-Add(0, i, std::to_string((i + 100) * 1000).c_str(),
-std::to_string((i + 100) * 1000 + 999).c_str(),
+Add(0, i, ToString((i + 100) * 1000).c_str(),
+ToString((i + 100) * 1000 + 999).c_str(),
 kFileSize, 0, i * 100, i * 100 + 99);
 current_size += kFileSize;
 ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()),
@@ -377,7 +377,7 @@ TEST(ComparatorDBTest, DoubleComparator) {
 for (uint32_t j = 0; j < divide_order; j++) {
 to_divide *= 10.0;
 }
-source_strings.push_back(std::to_string(r / to_divide));
+source_strings.push_back(ToString(r / to_divide));
 }

 DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66);
@@ -1350,7 +1350,7 @@ class Benchmark {
 }

 std::string GetDbNameForMultiple(std::string base_name, size_t id) {
-return base_name + std::to_string(id);
+return base_name + ToString(id);
 }

 std::string ColumnFamilyName(int i) {
@@ -75,6 +75,7 @@
 #include "util/iostats_context_imp.h"
 #include "util/stop_watch.h"
 #include "util/sync_point.h"
+#include "util/string_util.h"
 #include "util/thread_status_impl.h"

 namespace rocksdb {
@@ -3121,7 +3122,7 @@ bool DBImpl::GetProperty(ColumnFamilyHandle* column_family,
 bool ret_value = GetIntPropertyInternal(column_family, property_type,
 need_out_of_mutex, &int_value);
 if (ret_value) {
-*value = std::to_string(int_value);
+*value = ToString(int_value);
 }
 return ret_value;
 } else {
@@ -3378,8 +3379,8 @@ Status DBImpl::CheckConsistency() {
 } else if (fsize != md.size) {
 corruption_messages += "Sst file size mismatch: " + file_path +
 ". Size recorded in manifest " +
-std::to_string(md.size) + ", actual size " +
-std::to_string(fsize) + "\n";
+ToString(md.size) + ", actual size " +
+ToString(fsize) + "\n";
 }
 }
 if (corruption_messages.size() == 0) {
@@ -366,7 +366,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
 internal_iter->AddMerge("b", "merge_1");
 internal_iter->AddMerge("a", "merge_2");
 for (size_t k = 0; k < 200; ++k) {
-internal_iter->AddPut("c", std::to_string(k));
+internal_iter->AddPut("c", ToString(k));
 }
 internal_iter->Finish();

@@ -379,7 +379,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
 ASSERT_TRUE(db_iter->Valid());

 ASSERT_EQ(db_iter->key().ToString(), "c");
-ASSERT_EQ(db_iter->value().ToString(), std::to_string(i));
+ASSERT_EQ(db_iter->value().ToString(), ToString(i));
 db_iter->Prev();
 ASSERT_TRUE(db_iter->Valid());

@@ -513,11 +513,11 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
 internal_iter->AddMerge("b", "merge_1");
 internal_iter->AddMerge("a", "merge_2");
 for (size_t k = 0; k < 200; ++k) {
-internal_iter->AddPut("d", std::to_string(k));
+internal_iter->AddPut("d", ToString(k));
 }

 for (size_t k = 0; k < 200; ++k) {
-internal_iter->AddPut("c", std::to_string(k));
+internal_iter->AddPut("c", ToString(k));
 }
 internal_iter->Finish();

@@ -529,7 +529,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
 ASSERT_TRUE(db_iter->Valid());

 ASSERT_EQ(db_iter->key().ToString(), "d");
-ASSERT_EQ(db_iter->value().ToString(), std::to_string(i));
+ASSERT_EQ(db_iter->value().ToString(), ToString(i));
 db_iter->Prev();
 ASSERT_TRUE(db_iter->Valid());

@@ -552,7 +552,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
 internal_iter->AddMerge("b", "b");
 internal_iter->AddMerge("a", "a");
 for (size_t k = 0; k < 200; ++k) {
-internal_iter->AddMerge("c", std::to_string(k));
+internal_iter->AddMerge("c", ToString(k));
 }
 internal_iter->Finish();

@@ -566,7 +566,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
 ASSERT_EQ(db_iter->key().ToString(), "c");
 std::string merge_result = "0";
 for (size_t j = 1; j <= i; ++j) {
-merge_result += "," + std::to_string(j);
+merge_result += "," + ToString(j);
 }
 ASSERT_EQ(db_iter->value().ToString(), merge_result);
@@ -50,6 +50,7 @@
 #include "util/sync_point.h"
 #include "util/testutil.h"
 #include "util/mock_env.h"
+#include "util/string_util.h"
 #include "util/thread_status_impl.h"

 namespace rocksdb {
@@ -1494,7 +1495,7 @@ TEST(DBTest, GetPropertiesOfAllTablesTest) {
 // Create 4 tables
 for (int table = 0; table < 4; ++table) {
 for (int i = 0; i < 10 + table; ++i) {
-db_->Put(WriteOptions(), std::to_string(table * 100 + i), "val");
+db_->Put(WriteOptions(), ToString(table * 100 + i), "val");
 }
 db_->Flush(FlushOptions());
 }
@@ -1508,7 +1509,7 @@ TEST(DBTest, GetPropertiesOfAllTablesTest) {
 // fetch key from 1st and 2nd table, which will internally place that table to
 // the table cache.
 for (int i = 0; i < 2; ++i) {
-Get(std::to_string(i * 100 + 0));
+Get(ToString(i * 100 + 0));
 }

 VerifyTableProperties(db_, 10 + 11 + 12 + 13);
@@ -1518,7 +1519,7 @@ TEST(DBTest, GetPropertiesOfAllTablesTest) {
 // fetch key from 1st and 2nd table, which will internally place that table to
 // the table cache.
 for (int i = 0; i < 4; ++i) {
-Get(std::to_string(i * 100 + 0));
+Get(ToString(i * 100 + 0));
 }
 VerifyTableProperties(db_, 10 + 11 + 12 + 13);
 }
@@ -4747,7 +4748,7 @@ TEST(DBTest, CompactionFilterDeletesAll) {
 // put some data
 for (int table = 0; table < 4; ++table) {
 for (int i = 0; i < 10 + table; ++i) {
-Put(std::to_string(table * 100 + i), "val");
+Put(ToString(table * 100 + i), "val");
 }
 Flush();
 }
@@ -6994,7 +6995,7 @@ TEST(DBTest, TransactionLogIteratorCorruptedLog) {
 Options options = OptionsForLogIterTest();
 DestroyAndReopen(options);
 for (int i = 0; i < 1024; i++) {
-Put("key"+std::to_string(i), DummyString(10));
+Put("key"+ToString(i), DummyString(10));
 }
 dbfull()->Flush(FlushOptions());
 // Corrupt this log to create a gap
@@ -7062,20 +7063,20 @@ TEST(DBTest, TransactionLogIteratorBlobs) {
 struct Handler : public WriteBatch::Handler {
 std::string seen;
 virtual Status PutCF(uint32_t cf, const Slice& key, const Slice& value) {
-seen += "Put(" + std::to_string(cf) + ", " + key.ToString() + ", " +
-std::to_string(value.size()) + ")";
+seen += "Put(" + ToString(cf) + ", " + key.ToString() + ", " +
+ToString(value.size()) + ")";
 return Status::OK();
 }
 virtual Status MergeCF(uint32_t cf, const Slice& key, const Slice& value) {
-seen += "Merge(" + std::to_string(cf) + ", " + key.ToString() + ", " +
-std::to_string(value.size()) + ")";
+seen += "Merge(" + ToString(cf) + ", " + key.ToString() + ", " +
+ToString(value.size()) + ")";
 return Status::OK();
 }
 virtual void LogData(const Slice& blob) {
 seen += "LogData(" + blob.ToString() + ")";
 }
 virtual Status DeleteCF(uint32_t cf, const Slice& key) {
-seen += "Delete(" + std::to_string(cf) + ", " + key.ToString() + ")";
+seen += "Delete(" + ToString(cf) + ", " + key.ToString() + ")";
 return Status::OK();
 }
 } handler;
@@ -7203,7 +7204,7 @@ TEST(DBTest, MultiThreaded) {
 do {
 std::vector<std::string> cfs;
 for (int i = 1; i < kColumnFamilies; ++i) {
-cfs.push_back(std::to_string(i));
+cfs.push_back(ToString(i));
 }
 CreateAndReopenWithCF(cfs, CurrentOptions());
 // Initialize state
@@ -7256,7 +7257,7 @@ static void GCThreadBody(void* arg) {
 WriteOptions wo;

 for (int i = 0; i < kGCNumKeys; ++i) {
-std::string kv(std::to_string(i + id * kGCNumKeys));
+std::string kv(ToString(i + id * kGCNumKeys));
 ASSERT_OK(db->Put(wo, kv, kv));
 }
 t->done = true;
@@ -7292,7 +7293,7 @@ TEST(DBTest, GroupCommitTest) {

 std::vector<std::string> expected_db;
 for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) {
-expected_db.push_back(std::to_string(i));
+expected_db.push_back(ToString(i));
 }
 sort(expected_db.begin(), expected_db.end());

@@ -8176,7 +8177,7 @@ TEST(DBTest, FIFOCompactionTest) {
 Random rnd(301);
 for (int i = 0; i < 6; ++i) {
 for (int j = 0; j < 100; ++j) {
-ASSERT_OK(Put(std::to_string(i * 100 + j), RandomString(&rnd, 1024)));
+ASSERT_OK(Put(ToString(i * 100 + j), RandomString(&rnd, 1024)));
 }
 // flush should happen here
 }
@@ -8189,7 +8190,7 @@ TEST(DBTest, FIFOCompactionTest) {
 ASSERT_EQ(NumTableFilesAtLevel(0), 5);
 for (int i = 0; i < 50; ++i) {
 // these keys should be deleted in previous compaction
-ASSERT_EQ("NOT_FOUND", Get(std::to_string(i)));
+ASSERT_EQ("NOT_FOUND", Get(ToString(i)));
 }
 }
 }
@@ -8517,7 +8518,7 @@ TEST(DBTest, CompactFilesOnLevelCompaction) {

 Random rnd(301);
 for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
-ASSERT_OK(Put(1, std::to_string(key), RandomString(&rnd, kTestValueSize)));
+ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize)));
 }
 dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
 dbfull()->TEST_WaitForCompact();
@@ -8549,7 +8550,7 @@ TEST(DBTest, CompactFilesOnLevelCompaction) {

 // make sure all key-values are still there.
 for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
-ASSERT_NE(Get(1, std::to_string(key)), "NOT_FOUND");
+ASSERT_NE(Get(1, ToString(key)), "NOT_FOUND");
 }
 }

@@ -8571,7 +8572,7 @@ TEST(DBTest, CompactFilesOnUniversalCompaction) {
 ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);
 Random rnd(301);
 for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) {
-ASSERT_OK(Put(1, std::to_string(key), RandomString(&rnd, kTestValueSize)));
+ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize)));
 }
 dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
 dbfull()->TEST_WaitForCompact();
@@ -9112,7 +9113,7 @@ TEST(DBTest, DynamicCompactionOptions) {
 // result in 2 32KB L1 files.
 ASSERT_OK(dbfull()->SetOptions({
 {"level0_file_num_compaction_trigger", "2"},
-{"target_file_size_base", std::to_string(k32KB) }
+{"target_file_size_base", ToString(k32KB) }
 }));

 gen_l0_kb(0, 64, 1);
@@ -9133,7 +9134,7 @@ TEST(DBTest, DynamicCompactionOptions) {
 // fill L1 and L2. L1 size should be around 256KB while L2 size should be
 // around 256KB x 4.
 ASSERT_OK(dbfull()->SetOptions({
-{"max_bytes_for_level_base", std::to_string(k1MB) }
+{"max_bytes_for_level_base", ToString(k1MB) }
 }));

 // writing 96 x 64KB => 6 * 1024KB
@@ -9155,7 +9156,7 @@ TEST(DBTest, DynamicCompactionOptions) {
 // reduces to 128KB from 256KB which was asserted previously. Same for L2.
 ASSERT_OK(dbfull()->SetOptions({
 {"max_bytes_for_level_multiplier", "2"},
-{"max_bytes_for_level_base", std::to_string(k128KB) }
+{"max_bytes_for_level_base", ToString(k128KB) }
 }));

 // writing 20 x 64KB = 10 x 128KB
@@ -9255,7 +9256,7 @@ TEST(DBTest, DynamicCompactionOptions) {
 // L1 - L3. Then thrink max_bytes_for_level_base and disable auto compaction
 // at the same time, we should see some level with score greater than 2.
 ASSERT_OK(dbfull()->SetOptions({
-{"max_bytes_for_level_base", std::to_string(k1MB) }
+{"max_bytes_for_level_base", ToString(k1MB) }
 }));
 // writing 40 x 64KB = 10 x 256KB
 // (L1 + L2 + L3) = (1 + 2 + 4) * 256KB
@@ -79,7 +79,7 @@ class DeleteFileTest {
 options.sync = false;
 ReadOptions roptions;
 for (int i = startkey; i < (numkeys + startkey) ; i++) {
-std::string temp = std::to_string(i);
+std::string temp = ToString(i);
 Slice key(temp);
 Slice value(temp);
 ASSERT_OK(db_->Put(options, key, value));
@@ -97,8 +97,8 @@ TEST(FlushJobTest, NonEmpty) {
 new_mem->Ref();
 std::map<std::string, std::string> inserted_keys;
 for (int i = 1; i < 10000; ++i) {
-std::string key(std::to_string(i));
-std::string value("value" + std::to_string(i));
+std::string key(ToString(i));
+std::string value("value" + ToString(i));
 new_mem->Add(SequenceNumber(i), kTypeValue, key, value);
 InternalKey internal_key(key, SequenceNumber(i), kTypeValue);
 inserted_keys.insert({internal_key.Encode().ToString(), value});
@@ -16,6 +16,7 @@
 #include <vector>
 #include "db/column_family.h"
 #include "db/db_impl.h"
+#include "util/string_util.h"

 namespace rocksdb {

@@ -433,7 +434,7 @@ void InternalStats::DumpCFStats(std::string* value) {
 double w_amp = (comp_stats_[level].bytes_readn == 0) ? 0.0
 : comp_stats_[level].bytes_written /
 static_cast<double>(comp_stats_[level].bytes_readn);
-PrintLevelStats(buf, sizeof(buf), "L" + std::to_string(level), files,
+PrintLevelStats(buf, sizeof(buf), "L" + ToString(level), files,
 files_being_compacted[level],
 vstorage->NumLevelBytes(level), compaction_score[level],
 rw_amp, w_amp, stall_us, stalls, comp_stats_[level]);
@@ -258,10 +258,10 @@ TEST(EventListenerTest, MultiDBMultiListeners) {
 std::vector<std::vector<ColumnFamilyHandle *>> vec_handles;

 for (int d = 0; d < kNumDBs; ++d) {
-ASSERT_OK(DestroyDB(dbname_ + std::to_string(d), options));
+ASSERT_OK(DestroyDB(dbname_ + ToString(d), options));
 DB* db;
 std::vector<ColumnFamilyHandle*> handles;
-ASSERT_OK(DB::Open(options, dbname_ + std::to_string(d), &db));
+ASSERT_OK(DB::Open(options, dbname_ + ToString(d), &db));
 for (size_t c = 0; c < cf_names.size(); ++c) {
 ColumnFamilyHandle* handle;
 db->CreateColumnFamily(cf_opts, cf_names[c], &handle);
@@ -331,7 +331,7 @@ TEST(EventListenerTest, DisableBGCompaction) {
 db_->GetColumnFamilyMetaData(handles_[1], &cf_meta);
 // keep writing until writes are forced to stop.
 for (int i = 0; static_cast<int>(cf_meta.file_count) < kStopTrigger; ++i) {
-Put(1, std::to_string(i), std::string(100000, 'x'), wopts);
+Put(1, ToString(i), std::string(100000, 'x'), wopts);
 db_->GetColumnFamilyMetaData(handles_[1], &cf_meta);
 }
 ASSERT_GE(listener->slowdown_count, kStopTrigger - kSlowdownTrigger);
@@ -14,6 +14,7 @@
 #include "util/histogram.h"
 #include "util/stop_watch.h"
 #include "util/testharness.h"
+#include "util/string_util.h"


 bool FLAGS_random_key = false;
@@ -66,21 +67,21 @@ TEST(PerfContextTest, SeekIntoDeletion) {
 ReadOptions read_options;

 for (int i = 0; i < FLAGS_total_keys; ++i) {
-std::string key = "k" + std::to_string(i);
-std::string value = "v" + std::to_string(i);
+std::string key = "k" + ToString(i);
+std::string value = "v" + ToString(i);

 db->Put(write_options, key, value);
 }

 for (int i = 0; i < FLAGS_total_keys -1 ; ++i) {
-std::string key = "k" + std::to_string(i);
+std::string key = "k" + ToString(i);
 db->Delete(write_options, key);
 }

 HistogramImpl hist_get;
 HistogramImpl hist_get_time;
 for (int i = 0; i < FLAGS_total_keys - 1; ++i) {
-std::string key = "k" + std::to_string(i);
+std::string key = "k" + ToString(i);
 std::string value;

 perf_context.Reset();
@@ -118,7 +119,7 @@ TEST(PerfContextTest, SeekIntoDeletion) {
 HistogramImpl hist_seek;
 for (int i = 0; i < FLAGS_total_keys; ++i) {
 std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
-std::string key = "k" + std::to_string(i);
+std::string key = "k" + ToString(i);

 perf_context.Reset();
 StopWatchNano timer(Env::Default(), true);
@@ -231,8 +232,8 @@ void ProfileQueries(bool enabled_time = false) {
 db->Flush(fo);
 continue;
 }
-std::string key = "k" + std::to_string(i);
-std::string value = "v" + std::to_string(i);
+std::string key = "k" + ToString(i);
+std::string value = "v" + ToString(i);

 std::vector<std::string> values;

@@ -245,8 +246,8 @@ void ProfileQueries(bool enabled_time = false) {
 }

 for (const int i : keys) {
-std::string key = "k" + std::to_string(i);
-std::string value = "v" + std::to_string(i);
+std::string key = "k" + ToString(i);
+std::string value = "v" + ToString(i);

 std::vector<Slice> multiget_keys = {Slice(key)};
 std::vector<std::string> values;
@@ -335,8 +336,8 @@ void ProfileQueries(bool enabled_time = false) {
 hist_mget_num_memtable_checked.Clear();

 for (const int i : keys) {
-std::string key = "k" + std::to_string(i);
-std::string value = "v" + std::to_string(i);
+std::string key = "k" + ToString(i);
+std::string value = "v" + ToString(i);

 std::vector<Slice> multiget_keys = {Slice(key)};
 std::vector<std::string> values;
@@ -451,8 +452,8 @@ TEST(PerfContextTest, SeekKeyComparison) {
 SetPerfLevel(kEnableTime);
 StopWatchNano timer(Env::Default());
 for (const int i : keys) {
-std::string key = "k" + std::to_string(i);
-std::string value = "v" + std::to_string(i);
+std::string key = "k" + ToString(i);
+std::string value = "v" + ToString(i);

 perf_context.Reset();
 timer.Start();
@@ -471,8 +472,8 @@ TEST(PerfContextTest, SeekKeyComparison) {
 HistogramImpl hist_next;

 for (int i = 0; i < FLAGS_total_keys; ++i) {
-std::string key = "k" + std::to_string(i);
-std::string value = "v" + std::to_string(i);
+std::string key = "k" + ToString(i);
+std::string value = "v" + ToString(i);

 std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
 perf_context.Reset();
@@ -628,7 +628,7 @@ TEST(PlainTableDBTest, IteratorLargeKeys) {
 };

 for (size_t i = 0; i < 7; i++) {
-ASSERT_OK(Put(key_list[i], std::to_string(i)));
+ASSERT_OK(Put(key_list[i], ToString(i)));
 }

 dbfull()->TEST_FlushMemTable();
@@ -639,7 +639,7 @@ TEST(PlainTableDBTest, IteratorLargeKeys) {
 for (size_t i = 0; i < 7; i++) {
 ASSERT_TRUE(iter->Valid());
 ASSERT_EQ(key_list[i], iter->key().ToString());
-ASSERT_EQ(std::to_string(i), iter->value().ToString());
+ASSERT_EQ(ToString(i), iter->value().ToString());
 iter->Next();
 }

@@ -676,7 +676,7 @@ TEST(PlainTableDBTest, IteratorLargeKeysWithPrefix) {
 MakeLongKeyWithPrefix(26, '6')};

 for (size_t i = 0; i < 7; i++) {
-ASSERT_OK(Put(key_list[i], std::to_string(i)));
+ASSERT_OK(Put(key_list[i], ToString(i)));
 }

 dbfull()->TEST_FlushMemTable();
@@ -687,7 +687,7 @@ TEST(PlainTableDBTest, IteratorLargeKeysWithPrefix) {
 for (size_t i = 0; i < 7; i++) {
 ASSERT_TRUE(iter->Valid());
 ASSERT_EQ(key_list[i], iter->key().ToString());
-ASSERT_EQ(std::to_string(i), iter->value().ToString());
+ASSERT_EQ(ToString(i), iter->value().ToString());
 iter->Next();
 }
@@ -442,7 +442,7 @@ TEST(PrefixTest, DynamicPrefixIterator) {
 for (auto prefix : prefixes) {
 TestKey test_key(prefix, FLAGS_items_per_prefix / 2);
 Slice key = TestKeyToSlice(test_key);
-std::string value = "v" + std::to_string(0);
+std::string value = "v" + ToString(0);

 perf_context.Reset();
 StopWatchNano timer(Env::Default(), true);
@@ -7,6 +7,7 @@

 #include "db/dbformat.h"
 #include "util/coding.h"
+#include "util/string_util.h"

 namespace rocksdb {

@@ -40,7 +41,7 @@ Status InternalKeyPropertiesCollector::Finish(
 UserCollectedProperties
 InternalKeyPropertiesCollector::GetReadableProperties() const {
 return {
-{ "kDeletedKeys", std::to_string(deleted_keys_) }
+{ "kDeletedKeys", ToString(deleted_keys_) }
 };
 }
@@ -135,8 +135,8 @@ TEST(VersionBuilderTest, EstimatedActiveKeys) {
 const uint32_t kDeletionsPerFile = 100;
 for (uint32_t i = 0; i < kNumFiles; ++i) {
 Add(static_cast<int>(i / kFilesPerLevel), i + 1,
-std::to_string((i + 100) * 1000).c_str(),
-std::to_string((i + 100) * 1000 + 999).c_str(),
+ToString((i + 100) * 1000).c_str(),
+ToString((i + 100) * 1000 + 999).c_str(),
 100U, 0, 100, 100,
 kEntriesPerFile, kDeletionsPerFile,
 (i < kTotalSamples));
@@ -31,6 +31,7 @@
 #include "util/logging.h"
 #include "util/mutexlock.h"
 #include "util/sync_point.h"
+#include "util/string_util.h"

 namespace rocksdb {

@@ -355,9 +356,9 @@ Status WalManager::ReadFirstRecord(const WalFileType type,
 SequenceNumber* sequence) {
 if (type != kAliveLogFile && type != kArchivedLogFile) {
 Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
-"[WalManger] Unknown file type %s", std::to_string(type).c_str());
+"[WalManger] Unknown file type %s", ToString(type).c_str());
 return Status::NotSupported(
-"File Type Not Known " + std::to_string(type));
+"File Type Not Known " + ToString(type));
 }
 {
 MutexLock l(&read_first_record_cache_mutex_);
@@ -73,7 +73,7 @@ class WalManagerTest {
 for (int i = 1; i <= num_logs; ++i) {
 RollTheLog(true);
 for (int k = 0; k < entries_per_log; ++k) {
-Put(std::to_string(k), std::string(1024, 'a'));
+Put(ToString(k), std::string(1024, 'a'));
 }
 }
 }
@@ -155,7 +155,7 @@ namespace {
 if (column_family_id == 0) {
 seen += "Put(" + key.ToString() + ", " + value.ToString() + ")";
 } else {
-seen += "PutCF(" + std::to_string(column_family_id) + ", " +
+seen += "PutCF(" + ToString(column_family_id) + ", " +
 key.ToString() + ", " + value.ToString() + ")";
 }
 return Status::OK();
@@ -165,7 +165,7 @@ namespace {
 if (column_family_id == 0) {
 seen += "Merge(" + key.ToString() + ", " + value.ToString() + ")";
 } else {
-seen += "MergeCF(" + std::to_string(column_family_id) + ", " +
+seen += "MergeCF(" + ToString(column_family_id) + ", " +
 key.ToString() + ", " + value.ToString() + ")";
 }
 return Status::OK();
@@ -177,7 +177,7 @@ namespace {
 if (column_family_id == 0) {
 seen += "Delete(" + key.ToString() + ")";
 } else {
-seen += "DeleteCF(" + std::to_string(column_family_id) + ", " +
+seen += "DeleteCF(" + ToString(column_family_id) + ", " +
 key.ToString() + ")";
 }
 return Status::OK();
@@ -156,14 +156,14 @@ int main() {
 // if background compaction is not working, write will stall
 // because of options.level0_stop_writes_trigger
 for (int i = 1000; i < 99999; ++i) {
-db->Put(WriteOptions(), std::to_string(i),
+db->Put(WriteOptions(), ToString(i),
 std::string(500, 'a' + (i % 26)));
 }

 // verify the values are still there
 std::string value;
 for (int i = 1000; i < 99999; ++i) {
-db->Get(ReadOptions(), std::to_string(i),
+db->Get(ReadOptions(), ToString(i),
 &value);
 assert(value == std::string(500, 'a' + (i % 26)));
 }
@@ -10,13 +10,13 @@
 #ifndef STORAGE_LEVELDB_PORT_PORT_H_
 #define STORAGE_LEVELDB_PORT_PORT_H_

-#include <string.h>
+#include <string>

 // Include the appropriate platform specific file below. If you are
 // porting to a new platform, see "port_example.h" for documentation
 // of what the new port_<platform>.h file must provide.
 #if defined(ROCKSDB_PLATFORM_POSIX)
-# include "port/port_posix.h"
+#include "port/port_posix.h"
 #endif

 #endif // STORAGE_LEVELDB_PORT_PORT_H_
@@ -38,6 +38,7 @@
 #include "util/coding.h"
 #include "util/perf_context_imp.h"
 #include "util/stop_watch.h"
+#include "util/string_util.h"

 namespace rocksdb {

@@ -1264,7 +1265,7 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader,
 }
 default: {
 std::string error_message =
-"Unrecognized index type: " + std::to_string(rep_->index_type);
+"Unrecognized index type: " + ToString(rep_->index_type);
 return Status::InvalidArgument(error_message.c_str());
 }
 }
@@ -21,6 +21,7 @@
 #include "table/meta_blocks.h"
 #include "util/autovector.h"
 #include "util/random.h"
+#include "util/string_util.h"

 namespace rocksdb {
 const std::string CuckooTablePropertyNames::kEmptyKey =
@@ -88,7 +89,7 @@ void CuckooTableBuilder::Add(const Slice& key, const Slice& value) {
 }
 if (ikey.type != kTypeDeletion && ikey.type != kTypeValue) {
 status_ = Status::NotSupported("Unsupported key type " +
-std::to_string(ikey.type));
+ToString(ikey.type));
 return;
 }
@@ -387,7 +387,7 @@ std::string GetFileName(uint64_t num) {
 FLAGS_file_dir = test::TmpDir();
 }
 return FLAGS_file_dir + "/cuckoo_read_benchmark" +
-std::to_string(num/1000000) + "Mkeys";
+ToString(num/1000000) + "Mkeys";
 }

 // Create last level file as we are interested in measuring performance of
@@ -36,6 +36,7 @@
 #include "util/murmurhash.h"
 #include "util/perf_context_imp.h"
 #include "util/stop_watch.h"
+#include "util/string_util.h"


 namespace rocksdb {
@@ -380,14 +381,14 @@ Status PlainTableReader::PopulateIndex(TableProperties* props,
 // Fill two table properties.
 if (!index_in_file) {
 props->user_collected_properties["plain_table_hash_table_size"] =
-std::to_string(index_.GetIndexSize() * PlainTableIndex::kOffsetLen);
+ToString(index_.GetIndexSize() * PlainTableIndex::kOffsetLen);
 props->user_collected_properties["plain_table_sub_index_size"] =
-std::to_string(index_.GetSubIndexSize());
+ToString(index_.GetSubIndexSize());
 } else {
 props->user_collected_properties["plain_table_hash_table_size"] =
-std::to_string(0);
+ToString(0);
 props->user_collected_properties["plain_table_sub_index_size"] =
-std::to_string(0);
+ToString(0);
 }

 return Status::OK();
@@ -7,6 +7,8 @@
 #include "rocksdb/table_properties.h"
 #include "rocksdb/iterator.h"
 #include "rocksdb/env.h"
+#include "port/port.h"
+#include "util/string_util.h"

 namespace rocksdb {

@@ -31,7 +33,7 @@ namespace {
 const std::string& prop_delim,
 const std::string& kv_delim) {
 AppendProperty(
-props, key, std::to_string(value), prop_delim, kv_delim
+props, key, ToString(value), prop_delim, kv_delim
 );
 }
 }
@@ -1039,9 +1039,9 @@ TEST(TablePropertyTest, PrefixScanTest) {
 pos->first.compare(0, prefix.size(), prefix) == 0;
 ++pos) {
 ++num;
-auto key = prefix + "." + std::to_string(num);
+auto key = prefix + "." + ToString(num);
 ASSERT_EQ(key, pos->first);
-ASSERT_EQ(std::to_string(num), pos->second);
+ASSERT_EQ(ToString(num), pos->second);
 }
 ASSERT_EQ(3, num);
 }
@@ -17,6 +17,8 @@
 #include "rocksdb/table.h"
 #include "rocksdb/slice_transform.h"
 #include "rocksdb/filter_policy.h"
+#include "port/port.h"
+#include "util/string_util.h"

 namespace rocksdb {

@@ -43,8 +45,8 @@ class SanityTest {
 return s;
 }
 for (int i = 0; i < 1000000; ++i) {
-std::string k = "key" + std::to_string(i);
-std::string v = "value" + std::to_string(i);
+std::string k = "key" + ToString(i);
+std::string v = "value" + ToString(i);
 s = db->Put(WriteOptions(), Slice(k), Slice(v));
 if (!s.ok()) {
 return s;
@@ -61,8 +63,8 @@ class SanityTest {
 return s;
 }
 for (int i = 0; i < 1000000; ++i) {
-std::string k = "key" + std::to_string(i);
-std::string v = "value" + std::to_string(i);
+std::string k = "key" + ToString(i);
+std::string v = "value" + ToString(i);
 std::string result;
 s = db->Get(ReadOptions(), Slice(k), &result);
 if (!s.ok()) {
@@ -52,6 +52,7 @@ int main() {
 #include "util/logging.h"
 #include "hdfs/env_hdfs.h"
 #include "utilities/merge_operators.h"
+#include "util/string_util.h"

 using GFLAGS::ParseCommandLineFlags;
 using GFLAGS::RegisterFlagValidator;
@@ -801,23 +802,23 @@ class StressTest {
 options_table_ = {
 {"write_buffer_size",
 {
-std::to_string(FLAGS_write_buffer_size),
-std::to_string(FLAGS_write_buffer_size * 2),
-std::to_string(FLAGS_write_buffer_size * 4)
+ToString(FLAGS_write_buffer_size),
+ToString(FLAGS_write_buffer_size * 2),
+ToString(FLAGS_write_buffer_size * 4)
 }
 },
 {"max_write_buffer_number",
 {
-std::to_string(FLAGS_max_write_buffer_number),
-std::to_string(FLAGS_max_write_buffer_number * 2),
-std::to_string(FLAGS_max_write_buffer_number * 4)
+ToString(FLAGS_max_write_buffer_number),
+ToString(FLAGS_max_write_buffer_number * 2),
+ToString(FLAGS_max_write_buffer_number * 4)
 }
 },
 {"arena_block_size",
 {
-std::to_string(Options().arena_block_size),
-std::to_string(FLAGS_write_buffer_size / 4),
-std::to_string(FLAGS_write_buffer_size / 8),
+ToString(Options().arena_block_size),
+ToString(FLAGS_write_buffer_size / 4),
+ToString(FLAGS_write_buffer_size / 8),
 }
 },
 {"memtable_prefix_bloom_bits", {"0", "8", "10"}},
@@ -825,7 +826,7 @@ class StressTest {
 {"memtable_prefix_bloom_huge_page_tlb_size",
 {
 "0",
-std::to_string(2 * 1024 * 1024)
+ToString(2 * 1024 * 1024)
 }
 },
 {"max_successive_merges", {"0", "2", "4"}},
@@ -837,70 +838,70 @@ class StressTest {
 {"hard_rate_limit", {"0", "1.1", "2.0"}},
 {"level0_file_num_compaction_trigger",
 {
-std::to_string(FLAGS_level0_file_num_compaction_trigger),
-std::to_string(FLAGS_level0_file_num_compaction_trigger + 2),
-std::to_string(FLAGS_level0_file_num_compaction_trigger + 4),
+ToString(FLAGS_level0_file_num_compaction_trigger),
+ToString(FLAGS_level0_file_num_compaction_trigger + 2),
+ToString(FLAGS_level0_file_num_compaction_trigger + 4),
 }
 },
 {"level0_slowdown_writes_trigger",
 {
-std::to_string(FLAGS_level0_slowdown_writes_trigger),
-std::to_string(FLAGS_level0_slowdown_writes_trigger + 2),
-std::to_string(FLAGS_level0_slowdown_writes_trigger + 4),
+ToString(FLAGS_level0_slowdown_writes_trigger),
+ToString(FLAGS_level0_slowdown_writes_trigger + 2),
+ToString(FLAGS_level0_slowdown_writes_trigger + 4),
 }
 },
 {"level0_stop_writes_trigger",
 {
-std::to_string(FLAGS_level0_stop_writes_trigger),
-std::to_string(FLAGS_level0_stop_writes_trigger + 2),
-std::to_string(FLAGS_level0_stop_writes_trigger + 4),
+ToString(FLAGS_level0_stop_writes_trigger),
+ToString(FLAGS_level0_stop_writes_trigger + 2),
+ToString(FLAGS_level0_stop_writes_trigger + 4),
 }
 },
 {"max_grandparent_overlap_factor",
 {
-std::to_string(Options().max_grandparent_overlap_factor - 5),
-std::to_string(Options().max_grandparent_overlap_factor),
-std::to_string(Options().max_grandparent_overlap_factor + 5),
+ToString(Options().max_grandparent_overlap_factor - 5),
+ToString(Options().max_grandparent_overlap_factor),
+ToString(Options().max_grandparent_overlap_factor + 5),
 }
 },
 {"expanded_compaction_factor",
 {
-std::to_string(Options().expanded_compaction_factor - 5),
-std::to_string(Options().expanded_compaction_factor),
-std::to_string(Options().expanded_compaction_factor + 5),
+ToString(Options().expanded_compaction_factor - 5),
+ToString(Options().expanded_compaction_factor),
+ToString(Options().expanded_compaction_factor + 5),
 }
 },
 {"source_compaction_factor",
 {
-std::to_string(Options().source_compaction_factor),
-std::to_string(Options().source_compaction_factor * 2),
-std::to_string(Options().source_compaction_factor * 4),
+ToString(Options().source_compaction_factor),
+ToString(Options().source_compaction_factor * 2),
+ToString(Options().source_compaction_factor * 4),
 }
 },
 {"target_file_size_base",
 {
-std::to_string(FLAGS_target_file_size_base),
-std::to_string(FLAGS_target_file_size_base * 2),
-std::to_string(FLAGS_target_file_size_base * 4),
+ToString(FLAGS_target_file_size_base),
+ToString(FLAGS_target_file_size_base * 2),
+ToString(FLAGS_target_file_size_base * 4),
 }
 },
 {"target_file_size_multiplier",
 {
-std::to_string(FLAGS_target_file_size_multiplier),
+ToString(FLAGS_target_file_size_multiplier),
 "1",
 "2",
 }
 },
 {"max_bytes_for_level_base",
 {
-std::to_string(FLAGS_max_bytes_for_level_base / 2),
-std::to_string(FLAGS_max_bytes_for_level_base),
-std::to_string(FLAGS_max_bytes_for_level_base * 2),
+ToString(FLAGS_max_bytes_for_level_base / 2),
+ToString(FLAGS_max_bytes_for_level_base),
+ToString(FLAGS_max_bytes_for_level_base * 2),
 }
 },
 {"max_bytes_for_level_multiplier",
 {
-std::to_string(FLAGS_max_bytes_for_level_multiplier),
+ToString(FLAGS_max_bytes_for_level_multiplier),
 "1",
 "2",
 }
@@ -1377,7 +1378,7 @@ class StressTest {
       // drop column family and then create it again (can't drop default)
       int cf = thread->rand.Next() % (FLAGS_column_families - 1) + 1;
       std::string new_name =
-          std::to_string(new_column_family_name_.fetch_add(1));
+          ToString(new_column_family_name_.fetch_add(1));
       {
         MutexLock l(thread->shared->GetMutex());
         fprintf(
@@ -1881,7 +1882,7 @@ class StressTest {
       cf_descriptors.emplace_back(name, ColumnFamilyOptions(options_));
     }
     while (cf_descriptors.size() < (size_t)FLAGS_column_families) {
-      std::string name = std::to_string(new_column_family_name_.load());
+      std::string name = ToString(new_column_family_name_.load());
       new_column_family_name_++;
       cf_descriptors.emplace_back(name, ColumnFamilyOptions(options_));
       column_family_names_.push_back(name);
@@ -53,7 +53,7 @@ TEST(AutoVectorTest, EmplaceBack) {
   autovector<ValType, kSize> vec;

   for (size_t i = 0; i < 1000 * kSize; ++i) {
-    vec.emplace_back(i, std::to_string(i + 123));
+    vec.emplace_back(i, ToString(i + 123));
     ASSERT_TRUE(!vec.empty());
     if (i < kSize) {
       ASSERT_TRUE(vec.only_in_stack());
@@ -63,7 +63,7 @@ TEST(AutoVectorTest, EmplaceBack) {

     ASSERT_EQ(i + 1, vec.size());
     ASSERT_EQ(i, vec[i].first);
-    ASSERT_EQ(std::to_string(i + 123), vec[i].second);
+    ASSERT_EQ(ToString(i + 123), vec[i].second);
   }

   vec.clear();
@@ -129,7 +129,7 @@ TEST(AutoVectorTest, CopyAndAssignment) {
 TEST(AutoVectorTest, Iterators) {
   autovector<std::string, kSize> vec;
   for (size_t i = 0; i < kSize * 1000; ++i) {
-    vec.push_back(std::to_string(i));
+    vec.push_back(ToString(i));
   }

   // basic operator test
@@ -16,6 +16,7 @@
 #include <string>
 #include <utility>
 #include <vector>
+#include "util/string_util.h"

 #ifndef GFLAGS
 bool FLAGS_benchmark = false;
@@ -283,7 +284,7 @@ static const ScaleInfo kMetricSuffixes[] {
 static string HumanReadable(double n, unsigned int decimals,
                             const ScaleInfo* scales) {
   if (std::isinf(n) || std::isnan(n)) {
-    return std::to_string(n);
+    return ToString(n);
   }

   const double absValue = fabs(n);
@@ -133,7 +133,7 @@ TEST(CacheTest, UsageTest) {

   // make sure the cache will be overloaded
   for (uint64_t i = 1; i < kCapacity; ++i) {
-    auto key = std::to_string(i);
+    auto key = ToString(i);
     cache->Release(
         cache->Insert(key, (void*)value, key.size() + 5, dumbDeleter)
     );
@@ -402,13 +402,13 @@ TEST(CacheTest, BadEviction) {

   // Insert n+1 entries, but not releasing.
   for (int i = 0; i < n+1; i++) {
-    std::string key = std::to_string(i+1);
+    std::string key = ToString(i+1);
     handles[i] = cache->Insert(key, new Value(i+1), 1, &deleter);
   }

   // Guess what's in the cache now?
   for (int i = 0; i < n+1; i++) {
-    std::string key = std::to_string(i+1);
+    std::string key = ToString(i+1);
     auto h = cache->Lookup(key);
     std::cout << key << (h?" found\n":" not found\n");
     // Only the first entry should be missing
@@ -443,7 +443,7 @@ Status HdfsEnv::NewDirectory(const std::string& name,
     Log(InfoLogLevel::FATAL_LEVEL,
         mylog, "NewDirectory hdfsExists call failed");
     throw HdfsFatalException("hdfsExists call failed with error " +
-                             std::to_string(value) + " on path " + name +
+                             ToString(value) + " on path " + name +
                              ".\n");
   }
 }
@@ -460,7 +460,7 @@ bool HdfsEnv::FileExists(const std::string& fname) {
     Log(InfoLogLevel::FATAL_LEVEL,
         mylog, "FileExists hdfsExists call failed");
     throw HdfsFatalException("hdfsExists call failed with error " +
-                             std::to_string(value) + " on path " + fname +
+                             ToString(value) + " on path " + fname +
                              ".\n");
   }
 }
@@ -499,7 +499,7 @@ Status HdfsEnv::GetChildren(const std::string& path,
     Log(InfoLogLevel::FATAL_LEVEL, mylog,
         "GetChildren hdfsExists call failed");
     throw HdfsFatalException("hdfsExists call failed with error " +
-                             std::to_string(value) + ".\n");
+                             ToString(value) + ".\n");
   }
   return Status::OK();
 }
@@ -530,7 +530,7 @@ Status HdfsEnv::CreateDirIfMissing(const std::string& name) {
     Log(InfoLogLevel::FATAL_LEVEL, mylog,
         "CreateDirIfMissing hdfsExists call failed");
     throw HdfsFatalException("hdfsExists call failed with error " +
-                             std::to_string(value) + ".\n");
+                             ToString(value) + ".\n");
   }
 };

@@ -603,7 +603,8 @@ TEST(EnvPosixTest, RandomAccessUniqueIDConcurrent) {
   // Create the files
   std::vector<std::string> fnames;
   for (int i = 0; i < 1000; ++i) {
-    fnames.push_back(GetOnDiskTestDir() + "/" + "testfile" + std::to_string(i));
+    fnames.push_back(
+        GetOnDiskTestDir() + "/" + "testfile" + ToString(i));

     // Create file.
     unique_ptr<WritableFile> wfile;
@@ -1293,15 +1293,15 @@ void ChangeCompactionStyleCommand::DoCommand() {
     // level 0 should have only 1 file
     if (i == 0 && num_files != 1) {
       exec_state_ = LDBCommandExecuteResult::FAILED("Number of db files at "
-        "level 0 after compaction is " + std::to_string(num_files) +
+        "level 0 after compaction is " + ToString(num_files) +
         ", not 1.\n");
       return;
     }
     // other levels should have no file
     if (i > 0 && num_files != 0) {
       exec_state_ = LDBCommandExecuteResult::FAILED("Number of db files at "
-        "level " + std::to_string(i) + " after compaction is " +
-        std::to_string(num_files) + ", not 0.\n");
+        "level " + ToString(i) + " after compaction is " +
+        ToString(num_files) + ", not 0.\n");
       return;
     }
   }
@@ -9,6 +9,7 @@

 #include "rocksdb/slice_transform.h"
 #include "rocksdb/slice.h"
+#include "util/string_util.h"

 namespace rocksdb {

@@ -22,7 +23,7 @@ class FixedPrefixTransform : public SliceTransform {
  public:
  explicit FixedPrefixTransform(size_t prefix_len)
      : prefix_len_(prefix_len),
-       name_("rocksdb.FixedPrefix." + std::to_string(prefix_len_)) {}
+       name_("rocksdb.FixedPrefix." + ToString(prefix_len_)) {}

  virtual const char* Name() const { return name_.c_str(); }

@@ -12,4 +12,17 @@ namespace rocksdb {

 extern std::vector<std::string> StringSplit(const std::string& arg, char delim);

+template <typename T>
+inline std::string ToString(T value) {
+#ifndef OS_ANDROID
+  return std::to_string(value);
+#else
+  // Android doesn't support all of C++11, std::to_string() being
+  // one of the not supported features.
+  std::ostringstream os;
+  os << value;
+  return os.str();
+#endif
+}
+
 } // namespace rocksdb
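For readers following the change, a minimal usage sketch of the ToString() helper defined in the hunk above (illustrative only, not part of this diff; the MakeLevelProperty function name is hypothetical, and the include path assumes RocksDB's in-tree layout):

    #include <string>
    #include "util/string_util.h"  // in-tree path assumed

    // Builds a property name the same way the updated tests do,
    // but through rocksdb::ToString() instead of std::to_string().
    std::string MakeLevelProperty(int level) {
      // On OS_ANDROID builds this formats via std::ostringstream;
      // everywhere else it forwards to std::to_string().
      return "rocksdb.num-files-at-level" + rocksdb::ToString(level);
    }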
@@ -17,6 +17,7 @@
 #include "rocksdb/env.h"
 #include "rocksdb/slice.h"
 #include "util/random.h"
+#include "util/string_util.h"

 namespace rocksdb {
 namespace test {