From 736a7b5433025a84efc01836c145ee5afa5cffb1 Mon Sep 17 00:00:00 2001
From: sdong
Date: Fri, 6 May 2022 13:03:58 -0700
Subject: [PATCH] Remove own ToString() (#9955)

Summary:
ToString() was created because some platforms don't support std::to_string().
However, we've already been using std::to_string() by mistake for 16 months
(in db/db_info_dumper.cc). This commit just removes ToString().

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9955

Test Plan: Watch CI tests

Reviewed By: riversand963

Differential Revision: D36176799

fbshipit-source-id: bdb6dcd0e3a3ab96a1ac810f5d0188f684064471
---
 cache/cache_test.cc | 28 +++----
 db/arena_wrapped_db_iter.cc | 2 +-
 db/blob/blob_index.h | 6 +-
 db/blob/db_blob_index_test.cc | 14 ++--
 db/column_family_test.cc | 45 +++++-----
 db/compact_files_test.cc | 22 ++---
 db/compaction/compaction_iterator_test.cc | 2 +-
 db/compaction/compaction_job.cc | 6 +-
 db/compaction/compaction_job_stats_test.cc | 6 +-
 db/compaction/compaction_job_test.cc | 4 +-
 db/compaction/compaction_picker.cc | 10 +--
 db/compaction/compaction_picker_test.cc | 38 ++++-----
 db/compaction/compaction_service_test.cc | 57 +++++------
 db/comparator_db_test.cc | 2 +-
 db/cuckoo_table_db_test.cc | 4 +-
 db/db_basic_test.cc | 4 +-
 db/db_block_cache_test.cc | 46 +++++------
 db/db_bloom_filter_test.cc | 10 +--
 db/db_compaction_filter_test.cc | 6 +-
 db/db_compaction_test.cc | 24 +++---
 db/db_impl/db_impl.cc | 9 +-
 db/db_impl/db_impl_open.cc | 4 +-
 db/db_iter_stress_test.cc | 15 ++--
 db/db_iter_test.cc | 16 ++--
 db/db_iterator_test.cc | 4 +-
 db/db_log_iter_test.cc | 12 +--
 db/db_memtable_test.cc | 2 +-
 db/db_options_test.cc | 24 +++---
 db/db_properties_test.cc | 31 +++----
 db/db_range_del_test.cc | 7 +-
 db/db_secondary_test.cc | 12 +--
 db/db_sst_test.cc | 8 +-
 db/db_table_properties_test.cc | 11 +--
 db/db_test.cc | 82 ++++++++++---------
 db/db_test2.cc | 24 +++---
 db/db_test_util.cc | 16 ++--
 db/db_universal_compaction_test.cc | 2 +-
 db/db_wal_test.cc | 12 +--
 db/db_with_timestamp_basic_test.cc | 8 +-
 db/db_write_test.cc | 12 +--
 db/deletefile_test.cc | 2 +-
 db/error_handler_fs_test.cc | 16 ++--
 db/external_sst_file_basic_test.cc | 24 +++---
 db/external_sst_file_test.cc | 12 +--
 db/flush_job.cc | 4 +-
 db/flush_job_test.cc | 16 ++--
 db/forward_iterator.cc | 2 +-
 db/internal_stats.cc | 24 +++---
 db/listener_test.cc | 7 +-
 db/memtable_list_test.cc | 24 +++---
 db/obsolete_files_test.cc | 2 +-
 db/perf_context_test.cc | 36 ++++----
 db/plain_table_db_test.cc | 12 +--
 db/prefix_test.cc | 2 +-
 db/repair.cc | 2 +-
 db/repair_test.cc | 10 +--
 db/version_builder_test.cc | 8 +-
 db/version_edit.cc | 4 +-
 db/version_set.cc | 2 +-
 db/wal_manager.cc | 5 +-
 db/wal_manager_test.cc | 2 +-
 db/write_batch_test.cc | 12 +--
 db_stress_tool/db_stress_test_base.cc | 73 +++++++++--------
 db_stress_tool/expected_state.cc | 24 +++---
 db_stress_tool/no_batched_ops_stress.cc | 5 +-
 env/env_test.cc | 2 +-
 env/fs_posix.cc | 14 ++--
 env/io_posix.cc | 76 ++++++++---------
 file/delete_scheduler_test.cc | 18 ++--
 java/rocksjni/write_batch_test.cc | 2 +-
 memory/jemalloc_nodump_allocator.cc | 14 ++--
 memtable/skiplistrep.cc | 2 +-
 microbench/db_basic_bench.cc | 4 +-
 options/configurable_test.cc | 2 +-
 options/options_helper.cc | 18 ++--
 options/options_parser.cc | 20 ++---
 options/options_test.cc | 18 ++--
 table/block_based/block_based_filter_block.cc | 7 +-
 .../block_based/block_based_table_factory.cc | 3 +-
 table/block_based/block_based_table_reader.cc | 15 ++--
 table/block_based/filter_policy.cc | 4 +-
table/block_based/index_builder.h | 4 +- table/block_based/reader_common.cc | 8 +- table/block_fetcher.cc | 10 +-- table/cuckoo/cuckoo_table_builder.cc | 2 +- table/cuckoo/cuckoo_table_reader_test.cc | 2 +- table/format.cc | 34 ++++---- table/plain/plain_table_reader.cc | 8 +- table/sst_file_writer_collectors.h | 2 +- table/table_properties.cc | 6 +- table/table_test.cc | 6 +- test_util/testutil.cc | 2 +- test_util/transaction_test_util.cc | 4 +- tools/db_bench_tool.cc | 16 ++-- tools/db_sanity_test.cc | 8 +- tools/ldb_cmd.cc | 15 ++-- tools/ldb_cmd_test.cc | 17 ++-- tools/reduce_levels_test.cc | 4 +- util/autovector_test.cc | 8 +- util/bloom_test.cc | 2 +- util/build_version.cc.in | 11 ++- util/compression.h | 14 ++-- util/filelock_test.cc | 4 +- util/ribbon_test.cc | 6 +- util/slice.cc | 17 ++-- util/string_util.cc | 2 +- util/string_util.h | 13 --- utilities/backup/backup_engine.cc | 37 +++++---- utilities/backup/backup_engine_test.cc | 20 ++--- utilities/blob_db/blob_db_test.cc | 74 +++++++++-------- utilities/blob_db/blob_dump_tool.cc | 4 +- utilities/memory/memory_test.cc | 2 +- utilities/options/options_util_test.cc | 2 +- utilities/simulator_cache/sim_cache_test.cc | 10 +-- .../compact_on_deletion_collector.cc | 6 +- utilities/transactions/transaction_test.cc | 32 ++++---- utilities/transactions/transaction_util.cc | 4 +- .../write_prepared_transaction_test.cc | 22 ++--- .../transactions/write_prepared_txn_db.cc | 7 +- .../transactions/write_prepared_txn_db.h | 4 +- .../write_unprepared_transaction_test.cc | 35 ++++---- .../transactions/write_unprepared_txn.cc | 4 +- .../write_batch_with_index.cc | 5 +- .../write_batch_with_index_internal.cc | 4 +- .../write_batch_with_index_test.cc | 12 +-- 125 files changed, 854 insertions(+), 851 deletions(-) diff --git a/cache/cache_test.cc b/cache/cache_test.cc index d7efb6652..735f77744 100644 --- a/cache/cache_test.cc +++ b/cache/cache_test.cc @@ -193,7 +193,7 @@ TEST_P(CacheTest, UsageTest) { // make sure the cache will be overloaded for (uint64_t i = 1; i < kCapacity; ++i) { - auto key = ToString(i); + auto key = std::to_string(i); ASSERT_OK(cache->Insert(key, reinterpret_cast(value), key.size() + 5, dumbDeleter)); ASSERT_OK(precise_cache->Insert(key, reinterpret_cast(value), @@ -265,7 +265,7 @@ TEST_P(CacheTest, PinnedUsageTest) { // check that overloading the cache does not change the pinned usage for (uint64_t i = 1; i < 2 * kCapacity; ++i) { - auto key = ToString(i); + auto key = std::to_string(i); ASSERT_OK(cache->Insert(key, reinterpret_cast(value), key.size() + 5, dumbDeleter)); ASSERT_OK(precise_cache->Insert(key, reinterpret_cast(value), @@ -585,7 +585,7 @@ TEST_P(CacheTest, SetCapacity) { std::vector handles(10); // Insert 5 entries, but not releasing. 
for (size_t i = 0; i < 5; i++) { - std::string key = ToString(i+1); + std::string key = std::to_string(i + 1); Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]); ASSERT_TRUE(s.ok()); } @@ -600,7 +600,7 @@ TEST_P(CacheTest, SetCapacity) { // then decrease capacity to 7, final capacity should be 7 // and usage should be 7 for (size_t i = 5; i < 10; i++) { - std::string key = ToString(i+1); + std::string key = std::to_string(i + 1); Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]); ASSERT_TRUE(s.ok()); } @@ -631,7 +631,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) { std::vector handles(10); Status s; for (size_t i = 0; i < 10; i++) { - std::string key = ToString(i + 1); + std::string key = std::to_string(i + 1); s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]); ASSERT_OK(s); ASSERT_NE(nullptr, handles[i]); @@ -655,7 +655,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) { // test3: init with flag being true. std::shared_ptr cache2 = NewCache(5, 0, true); for (size_t i = 0; i < 5; i++) { - std::string key = ToString(i + 1); + std::string key = std::to_string(i + 1); s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]); ASSERT_OK(s); ASSERT_NE(nullptr, handles[i]); @@ -685,14 +685,14 @@ TEST_P(CacheTest, OverCapacity) { // Insert n+1 entries, but not releasing. for (size_t i = 0; i < n + 1; i++) { - std::string key = ToString(i+1); + std::string key = std::to_string(i + 1); Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]); ASSERT_TRUE(s.ok()); } // Guess what's in the cache now? for (size_t i = 0; i < n + 1; i++) { - std::string key = ToString(i+1); + std::string key = std::to_string(i + 1); auto h = cache->Lookup(key); ASSERT_TRUE(h != nullptr); if (h) cache->Release(h); @@ -713,7 +713,7 @@ TEST_P(CacheTest, OverCapacity) { // This is consistent with the LRU policy since the element 0 // was released first for (size_t i = 0; i < n + 1; i++) { - std::string key = ToString(i+1); + std::string key = std::to_string(i + 1); auto h = cache->Lookup(key); if (h) { ASSERT_NE(i, 0U); @@ -754,9 +754,9 @@ TEST_P(CacheTest, ApplyToAllEntriesTest) { std::vector callback_state; const auto callback = [&](const Slice& key, void* value, size_t charge, Cache::DeleterFn deleter) { - callback_state.push_back(ToString(DecodeKey(key)) + "," + - ToString(DecodeValue(value)) + "," + - ToString(charge)); + callback_state.push_back(std::to_string(DecodeKey(key)) + "," + + std::to_string(DecodeValue(value)) + "," + + std::to_string(charge)); assert(deleter == &CacheTest::Deleter); }; @@ -765,8 +765,8 @@ TEST_P(CacheTest, ApplyToAllEntriesTest) { for (int i = 0; i < 10; ++i) { Insert(i, i * 2, i + 1); - inserted.push_back(ToString(i) + "," + ToString(i * 2) + "," + - ToString(i + 1)); + inserted.push_back(std::to_string(i) + "," + std::to_string(i * 2) + "," + + std::to_string(i + 1)); } cache_->ApplyToAllEntries(callback, /*opts*/ {}); diff --git a/db/arena_wrapped_db_iter.cc b/db/arena_wrapped_db_iter.cc index 20d4655be..bbb2b7493 100644 --- a/db/arena_wrapped_db_iter.cc +++ b/db/arena_wrapped_db_iter.cc @@ -23,7 +23,7 @@ Status ArenaWrappedDBIter::GetProperty(std::string prop_name, if (prop_name == "rocksdb.iterator.super-version-number") { // First try to pass the value returned from inner iterator. 
if (!db_iter_->GetProperty(prop_name, prop).ok()) { - *prop = ToString(sv_number_); + *prop = std::to_string(sv_number_); } return Status::OK(); } diff --git a/db/blob/blob_index.h b/db/blob/blob_index.h index 5bac36627..67535472c 100644 --- a/db/blob/blob_index.h +++ b/db/blob/blob_index.h @@ -96,9 +96,9 @@ class BlobIndex { assert(slice.size() > 0); type_ = static_cast(*slice.data()); if (type_ >= Type::kUnknown) { - return Status::Corruption( - kErrorMessage, - "Unknown blob index type: " + ToString(static_cast(type_))); + return Status::Corruption(kErrorMessage, + "Unknown blob index type: " + + std::to_string(static_cast(type_))); } slice = Slice(slice.data() + 1, slice.size() - 1); if (HasTTL()) { diff --git a/db/blob/db_blob_index_test.cc b/db/blob/db_blob_index_test.cc index d93aa6bbc..64c550894 100644 --- a/db/blob/db_blob_index_test.cc +++ b/db/blob/db_blob_index_test.cc @@ -153,11 +153,11 @@ TEST_F(DBBlobIndexTest, Write) { key_values.reserve(num_key_values); for (size_t i = 1; i <= num_key_values; ++i) { - std::string key = "key" + ToString(i); + std::string key = "key" + std::to_string(i); std::string blob_index; BlobIndex::EncodeInlinedTTL(&blob_index, /* expiration */ 9876543210, - "blob" + ToString(i)); + "blob" + std::to_string(i)); key_values.emplace_back(std::move(key), std::move(blob_index)); } @@ -230,7 +230,7 @@ TEST_F(DBBlobIndexTest, Updated) { DestroyAndReopen(GetTestOptions()); WriteBatch batch; for (int i = 0; i < 10; i++) { - ASSERT_OK(PutBlobIndex(&batch, "key" + ToString(i), blob_index)); + ASSERT_OK(PutBlobIndex(&batch, "key" + std::to_string(i), blob_index)); } ASSERT_OK(Write(&batch)); // Avoid blob values from being purged. @@ -248,7 +248,7 @@ TEST_F(DBBlobIndexTest, Updated) { ASSERT_OK(dbfull()->DeleteRange(WriteOptions(), cfh(), "key6", "key9")); MoveDataTo(tier); for (int i = 0; i < 10; i++) { - ASSERT_EQ(blob_index, GetBlobIndex("key" + ToString(i), snapshot)); + ASSERT_EQ(blob_index, GetBlobIndex("key" + std::to_string(i), snapshot)); } ASSERT_EQ("new_value", Get("key1")); if (tier <= kImmutableMemtables) { @@ -260,7 +260,7 @@ TEST_F(DBBlobIndexTest, Updated) { ASSERT_EQ("NOT_FOUND", Get("key4")); ASSERT_EQ("a,b,c", GetImpl("key5")); for (int i = 6; i < 9; i++) { - ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i))); + ASSERT_EQ("NOT_FOUND", Get("key" + std::to_string(i))); } ASSERT_EQ(blob_index, GetBlobIndex("key9")); dbfull()->ReleaseSnapshot(snapshot); @@ -301,7 +301,7 @@ TEST_F(DBBlobIndexTest, Iterate) { }; auto get_value = [&](int index, int version) { - return get_key(index) + "_value" + ToString(version); + return get_key(index) + "_value" + std::to_string(version); }; auto check_iterator = [&](Iterator* iterator, Status::Code expected_status, @@ -501,7 +501,7 @@ TEST_F(DBBlobIndexTest, IntegratedBlobIterate) { auto get_key = [](size_t index) { return ("key" + std::to_string(index)); }; auto get_value = [&](size_t index, size_t version) { - return get_key(index) + "_value" + ToString(version); + return get_key(index) + "_value" + std::to_string(version); }; auto check_iterator = [&](Iterator* iterator, Status expected_status, diff --git a/db/column_family_test.cc b/db/column_family_test.cc index c55eb1290..3a1a3a508 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -383,7 +383,7 @@ class ColumnFamilyTestBase : public testing::Test { int NumTableFilesAtLevel(int level, int cf) { return GetProperty(cf, - "rocksdb.num-files-at-level" + ToString(level)); + "rocksdb.num-files-at-level" + std::to_string(level)); } #ifndef 
ROCKSDB_LITE @@ -783,7 +783,7 @@ TEST_P(ColumnFamilyTest, BulkAddDrop) { std::vector cf_names; std::vector cf_handles; for (int i = 1; i <= kNumCF; i++) { - cf_names.push_back("cf1-" + ToString(i)); + cf_names.push_back("cf1-" + std::to_string(i)); } ASSERT_OK(db_->CreateColumnFamilies(cf_options, cf_names, &cf_handles)); for (int i = 1; i <= kNumCF; i++) { @@ -796,7 +796,8 @@ TEST_P(ColumnFamilyTest, BulkAddDrop) { } cf_handles.clear(); for (int i = 1; i <= kNumCF; i++) { - cf_descriptors.emplace_back("cf2-" + ToString(i), ColumnFamilyOptions()); + cf_descriptors.emplace_back("cf2-" + std::to_string(i), + ColumnFamilyOptions()); } ASSERT_OK(db_->CreateColumnFamilies(cf_descriptors, &cf_handles)); for (int i = 1; i <= kNumCF; i++) { @@ -820,7 +821,7 @@ TEST_P(ColumnFamilyTest, DropTest) { Open({"default"}); CreateColumnFamiliesAndReopen({"pikachu"}); for (int i = 0; i < 100; ++i) { - ASSERT_OK(Put(1, ToString(i), "bar" + ToString(i))); + ASSERT_OK(Put(1, std::to_string(i), "bar" + std::to_string(i))); } ASSERT_OK(Flush(1)); @@ -1344,7 +1345,7 @@ TEST_P(ColumnFamilyTest, DifferentCompactionStyles) { PutRandomData(1, 10, 12000); PutRandomData(1, 1, 10); WaitForFlush(1); - AssertFilesPerLevel(ToString(i + 1), 1); + AssertFilesPerLevel(std::to_string(i + 1), 1); } // SETUP column family "two" -- level style with 4 levels @@ -1352,7 +1353,7 @@ TEST_P(ColumnFamilyTest, DifferentCompactionStyles) { PutRandomData(2, 10, 12000); PutRandomData(2, 1, 10); WaitForFlush(2); - AssertFilesPerLevel(ToString(i + 1), 2); + AssertFilesPerLevel(std::to_string(i + 1), 2); } // TRIGGER compaction "one" @@ -1416,7 +1417,7 @@ TEST_P(ColumnFamilyTest, MultipleManualCompactions) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(i + 1), 1); + AssertFilesPerLevel(std::to_string(i + 1), 1); } bool cf_1_1 = true; ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency( @@ -1446,7 +1447,7 @@ TEST_P(ColumnFamilyTest, MultipleManualCompactions) { PutRandomData(2, 10, 12000); PutRandomData(2, 1, 10); WaitForFlush(2); - AssertFilesPerLevel(ToString(i + 1), 2); + AssertFilesPerLevel(std::to_string(i + 1), 2); } threads.emplace_back([&] { TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:1"); @@ -1533,7 +1534,7 @@ TEST_P(ColumnFamilyTest, AutomaticAndManualCompactions) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(i + 1), 1); + AssertFilesPerLevel(std::to_string(i + 1), 1); } TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:1"); @@ -1543,7 +1544,7 @@ TEST_P(ColumnFamilyTest, AutomaticAndManualCompactions) { PutRandomData(2, 10, 12000); PutRandomData(2, 1, 10); WaitForFlush(2); - AssertFilesPerLevel(ToString(i + 1), 2); + AssertFilesPerLevel(std::to_string(i + 1), 2); } ROCKSDB_NAMESPACE::port::Thread threads([&] { CompactRangeOptions compact_options; @@ -1615,7 +1616,7 @@ TEST_P(ColumnFamilyTest, ManualAndAutomaticCompactions) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(i + 1), 1); + AssertFilesPerLevel(std::to_string(i + 1), 1); } bool cf_1_1 = true; bool cf_1_2 = true; @@ -1650,7 +1651,7 @@ TEST_P(ColumnFamilyTest, ManualAndAutomaticCompactions) { PutRandomData(2, 10, 12000); PutRandomData(2, 1, 10); WaitForFlush(2); - AssertFilesPerLevel(ToString(i + 1), 2); + AssertFilesPerLevel(std::to_string(i + 1), 2); } TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:5"); threads.join(); @@ -1709,7 +1710,7 @@ TEST_P(ColumnFamilyTest, 
SameCFManualManualCompactions) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(i + 1), 1); + AssertFilesPerLevel(std::to_string(i + 1), 1); } bool cf_1_1 = true; bool cf_1_2 = true; @@ -1748,8 +1749,8 @@ TEST_P(ColumnFamilyTest, SameCFManualManualCompactions) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i), - 1); + AssertFilesPerLevel( + std::to_string(one.level0_file_num_compaction_trigger + i), 1); } ROCKSDB_NAMESPACE::port::Thread threads1([&] { @@ -1811,7 +1812,7 @@ TEST_P(ColumnFamilyTest, SameCFManualAutomaticCompactions) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(i + 1), 1); + AssertFilesPerLevel(std::to_string(i + 1), 1); } bool cf_1_1 = true; bool cf_1_2 = true; @@ -1849,8 +1850,8 @@ TEST_P(ColumnFamilyTest, SameCFManualAutomaticCompactions) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i), - 1); + AssertFilesPerLevel( + std::to_string(one.level0_file_num_compaction_trigger + i), 1); } TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1"); @@ -1904,7 +1905,7 @@ TEST_P(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(i + 1), 1); + AssertFilesPerLevel(std::to_string(i + 1), 1); } bool cf_1_1 = true; bool cf_1_2 = true; @@ -1942,8 +1943,8 @@ TEST_P(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i), - 1); + AssertFilesPerLevel( + std::to_string(one.level0_file_num_compaction_trigger + i), 1); } TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1"); @@ -2024,7 +2025,7 @@ TEST_P(ColumnFamilyTest, SameCFAutomaticManualCompactions) { PutRandomData(1, 10, 12000, true); PutRandomData(1, 1, 10, true); WaitForFlush(1); - AssertFilesPerLevel(ToString(i + 1), 1); + AssertFilesPerLevel(std::to_string(i + 1), 1); } TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:5"); diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc index 29e3494ea..df716f639 100644 --- a/db/compact_files_test.cc +++ b/db/compact_files_test.cc @@ -91,8 +91,8 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) { // create couple files // Background compaction starts and waits in BackgroundCallCompaction:0 for (int i = 0; i < kLevel0Trigger * 4; ++i) { - ASSERT_OK(db->Put(WriteOptions(), ToString(i), "")); - ASSERT_OK(db->Put(WriteOptions(), ToString(100 - i), "")); + ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), "")); + ASSERT_OK(db->Put(WriteOptions(), std::to_string(100 - i), "")); ASSERT_OK(db->Flush(FlushOptions())); } @@ -136,7 +136,7 @@ TEST_F(CompactFilesTest, MultipleLevel) { // create couple files in L0, L3, L4 and L5 for (int i = 5; i > 2; --i) { collector->ClearFlushedFiles(); - ASSERT_OK(db->Put(WriteOptions(), ToString(i), "")); + ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), "")); ASSERT_OK(db->Flush(FlushOptions())); // Ensure background work is fully finished including listener callbacks // before accessing listener state. 
@@ -145,11 +145,11 @@ TEST_F(CompactFilesTest, MultipleLevel) { ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files, i)); std::string prop; - ASSERT_TRUE( - db->GetProperty("rocksdb.num-files-at-level" + ToString(i), &prop)); + ASSERT_TRUE(db->GetProperty( + "rocksdb.num-files-at-level" + std::to_string(i), &prop)); ASSERT_EQ("1", prop); } - ASSERT_OK(db->Put(WriteOptions(), ToString(0), "")); + ASSERT_OK(db->Put(WriteOptions(), std::to_string(0), "")); ASSERT_OK(db->Flush(FlushOptions())); ColumnFamilyMetaData meta; @@ -218,7 +218,7 @@ TEST_F(CompactFilesTest, ObsoleteFiles) { // create couple files for (int i = 1000; i < 2000; ++i) { - ASSERT_OK(db->Put(WriteOptions(), ToString(i), + ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), std::string(kWriteBufferSize / 10, 'a' + (i % 26)))); } @@ -257,14 +257,14 @@ TEST_F(CompactFilesTest, NotCutOutputOnLevel0) { // create couple files for (int i = 0; i < 500; ++i) { - ASSERT_OK(db->Put(WriteOptions(), ToString(i), + ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), std::string(1000, 'a' + (i % 26)))); } ASSERT_OK(static_cast_with_check(db)->TEST_WaitForFlushMemTable()); auto l0_files_1 = collector->GetFlushedFiles(); collector->ClearFlushedFiles(); for (int i = 0; i < 500; ++i) { - ASSERT_OK(db->Put(WriteOptions(), ToString(i), + ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), std::string(1000, 'a' + (i % 26)))); } ASSERT_OK(static_cast_with_check(db)->TEST_WaitForFlushMemTable()); @@ -295,7 +295,7 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) { // Create 5 files. for (int i = 0; i < 5; ++i) { - ASSERT_OK(db->Put(WriteOptions(), "key" + ToString(i), "value")); + ASSERT_OK(db->Put(WriteOptions(), "key" + std::to_string(i), "value")); ASSERT_OK(db->Flush(FlushOptions())); } @@ -465,7 +465,7 @@ TEST_F(CompactFilesTest, GetCompactionJobInfo) { // create couple files for (int i = 0; i < 500; ++i) { - ASSERT_OK(db->Put(WriteOptions(), ToString(i), + ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), std::string(1000, 'a' + (i % 26)))); } ASSERT_OK(static_cast_with_check(db)->TEST_WaitForFlushMemTable()); diff --git a/db/compaction/compaction_iterator_test.cc b/db/compaction/compaction_iterator_test.cc index 47289db65..d9d7ed121 100644 --- a/db/compaction/compaction_iterator_test.cc +++ b/db/compaction/compaction_iterator_test.cc @@ -313,7 +313,7 @@ class CompactionIteratorTest : public testing::TestWithParam { key_not_exists_beyond_output_level, full_history_ts_low); c_iter_->SeekToFirst(); for (size_t i = 0; i < expected_keys.size(); i++) { - std::string info = "i = " + ToString(i); + std::string info = "i = " + std::to_string(i); ASSERT_TRUE(c_iter_->Valid()) << info; ASSERT_OK(c_iter_->status()) << info; ASSERT_EQ(expected_keys[i], c_iter_->key().ToString()) << info; diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc index e83914647..d68d97960 100644 --- a/db/compaction/compaction_job.cc +++ b/db/compaction/compaction_job.cc @@ -2459,7 +2459,7 @@ void CompactionJob::LogCompaction() { << "compaction_reason" << GetCompactionReasonString(compaction->compaction_reason()); for (size_t i = 0; i < compaction->num_input_levels(); ++i) { - stream << ("files_L" + ToString(compaction->level(i))); + stream << ("files_L" + std::to_string(compaction->level(i))); stream.StartArray(); for (auto f : *compaction->inputs(i)) { stream << f->fd.GetNumber(); @@ -3009,7 +3009,7 @@ Status CompactionServiceInput::Read(const std::string& data_str, } else { return Status::NotSupported( "Compaction Service Input data 
version not supported: " + - ToString(format_version)); + std::to_string(format_version)); } } @@ -3038,7 +3038,7 @@ Status CompactionServiceResult::Read(const std::string& data_str, } else { return Status::NotSupported( "Compaction Service Result data version not supported: " + - ToString(format_version)); + std::to_string(format_version)); } } diff --git a/db/compaction/compaction_job_stats_test.cc b/db/compaction/compaction_job_stats_test.cc index 4aba6897c..b25191f22 100644 --- a/db/compaction/compaction_job_stats_test.cc +++ b/db/compaction/compaction_job_stats_test.cc @@ -268,10 +268,10 @@ class CompactionJobStatsTest : public testing::Test, if (cf == 0) { // default cfd EXPECT_TRUE(db_->GetProperty( - "rocksdb.num-files-at-level" + ToString(level), &property)); + "rocksdb.num-files-at-level" + std::to_string(level), &property)); } else { EXPECT_TRUE(db_->GetProperty( - handles_[cf], "rocksdb.num-files-at-level" + ToString(level), + handles_[cf], "rocksdb.num-files-at-level" + std::to_string(level), &property)); } return atoi(property.c_str()); @@ -672,7 +672,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) { snprintf(buf, kBufSize, "%d", ++num_L0_files); ASSERT_EQ(std::string(buf), FilesPerLevel(1)); } - ASSERT_EQ(ToString(num_L0_files), FilesPerLevel(1)); + ASSERT_EQ(std::to_string(num_L0_files), FilesPerLevel(1)); // 2nd Phase: perform L0 -> L1 compaction. int L0_compaction_count = 6; diff --git a/db/compaction/compaction_job_test.cc b/db/compaction/compaction_job_test.cc index 2032af44a..72b9bd273 100644 --- a/db/compaction/compaction_job_test.cc +++ b/db/compaction/compaction_job_test.cc @@ -236,8 +236,8 @@ class CompactionJobTestBase : public testing::Test { for (int i = 0; i < 2; ++i) { auto contents = mock::MakeMockFile(); for (int k = 0; k < kKeysPerFile; ++k) { - auto key = ToString(i * kMatchingKeys + k); - auto value = ToString(i * kKeysPerFile + k); + auto key = std::to_string(i * kMatchingKeys + k); + auto value = std::to_string(i * kKeysPerFile + k); InternalKey internal_key(key, ++sequence_number, kTypeValue); // This is how the key will look like once it's written in bottommost diff --git a/db/compaction/compaction_picker.cc b/db/compaction/compaction_picker.cc index e7e7e125b..1f29004ae 100644 --- a/db/compaction/compaction_picker.cc +++ b/db/compaction/compaction_picker.cc @@ -401,7 +401,7 @@ Status CompactionPicker::GetCompactionInputsFromFileNumbers( "Cannot find matched SST files for the following file numbers:"); for (auto fn : *input_set) { message += " "; - message += ToString(fn); + message += std::to_string(fn); } return Status::InvalidArgument(message); } @@ -1004,14 +1004,14 @@ Status CompactionPicker::SanitizeCompactionInputFiles( return Status::InvalidArgument( "Output level for column family " + cf_meta.name + " must between [0, " + - ToString(cf_meta.levels[cf_meta.levels.size() - 1].level) + "]."); + std::to_string(cf_meta.levels[cf_meta.levels.size() - 1].level) + "]."); } if (output_level > MaxOutputLevel()) { return Status::InvalidArgument( "Exceed the maximum output level defined by " "the current compaction algorithm --- " + - ToString(MaxOutputLevel())); + std::to_string(MaxOutputLevel())); } if (output_level < 0) { @@ -1061,8 +1061,8 @@ Status CompactionPicker::SanitizeCompactionInputFiles( return Status::InvalidArgument( "Cannot compact file to up level, input file: " + MakeTableFileName("", file_num) + " level " + - ToString(input_file_level) + " > output level " + - ToString(output_level)); + std::to_string(input_file_level) + " 
> output level " + + std::to_string(output_level)); } } diff --git a/db/compaction/compaction_picker_test.cc b/db/compaction/compaction_picker_test.cc index 03eccca8e..02f704088 100644 --- a/db/compaction/compaction_picker_test.cc +++ b/db/compaction/compaction_picker_test.cc @@ -273,9 +273,9 @@ TEST_F(CompactionPickerTest, NeedsCompactionLevel) { // start a brand new version in each test. NewVersionStorage(kLevels, kCompactionStyleLevel); for (int i = 0; i < file_count; ++i) { - Add(level, i, ToString((i + 100) * 1000).c_str(), - ToString((i + 100) * 1000 + 999).c_str(), - file_size, 0, i * 100, i * 100 + 99); + Add(level, i, std::to_string((i + 100) * 1000).c_str(), + std::to_string((i + 100) * 1000 + 999).c_str(), file_size, 0, + i * 100, i * 100 + 99); } UpdateVersionStorageInfo(); ASSERT_EQ(vstorage_->CompactionScoreLevel(0), level); @@ -439,8 +439,8 @@ TEST_F(CompactionPickerTest, NeedsCompactionUniversal) { for (int i = 1; i <= mutable_cf_options_.level0_file_num_compaction_trigger * 2; ++i) { NewVersionStorage(1, kCompactionStyleUniversal); - Add(0, i, ToString((i + 100) * 1000).c_str(), - ToString((i + 100) * 1000 + 999).c_str(), 1000000, 0, i * 100, + Add(0, i, std::to_string((i + 100) * 1000).c_str(), + std::to_string((i + 100) * 1000 + 999).c_str(), 1000000, 0, i * 100, i * 100 + 99); UpdateVersionStorageInfo(); ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()), @@ -852,17 +852,17 @@ TEST_F(CompactionPickerTest, UniversalIncrementalSpace4) { // L3: (1101, 1180) (1201, 1280) ... (7901, 7908) // L4: (1130, 1150) (1160, 1210) (1230, 1250) (1260 1310) ... (7960, 8010) for (int i = 11; i < 79; i++) { - Add(3, 100 + i * 3, ToString(i * 100).c_str(), - ToString(i * 100 + 80).c_str(), kFileSize, 0, 200, 251); + Add(3, 100 + i * 3, std::to_string(i * 100).c_str(), + std::to_string(i * 100 + 80).c_str(), kFileSize, 0, 200, 251); // Add a tie breaker if (i == 66) { Add(3, 10000U, "6690", "6699", kFileSize, 0, 200, 251); } - Add(4, 100 + i * 3 + 1, ToString(i * 100 + 30).c_str(), - ToString(i * 100 + 50).c_str(), kFileSize, 0, 200, 251); - Add(4, 100 + i * 3 + 2, ToString(i * 100 + 60).c_str(), - ToString(i * 100 + 110).c_str(), kFileSize, 0, 200, 251); + Add(4, 100 + i * 3 + 1, std::to_string(i * 100 + 30).c_str(), + std::to_string(i * 100 + 50).c_str(), kFileSize, 0, 200, 251); + Add(4, 100 + i * 3 + 2, std::to_string(i * 100 + 60).c_str(), + std::to_string(i * 100 + 110).c_str(), kFileSize, 0, 200, 251); } UpdateVersionStorageInfo(); @@ -899,14 +899,14 @@ TEST_F(CompactionPickerTest, UniversalIncrementalSpace5) { // L3: (1101, 1180) (1201, 1280) ... (7901, 7908) // L4: (1130, 1150) (1160, 1210) (1230, 1250) (1260 1310) ... (7960, 8010) for (int i = 11; i < 70; i++) { - Add(3, 100 + i * 3, ToString(i * 100).c_str(), - ToString(i * 100 + 80).c_str(), + Add(3, 100 + i * 3, std::to_string(i * 100).c_str(), + std::to_string(i * 100 + 80).c_str(), i % 10 == 9 ? 
kFileSize * 100 : kFileSize, 0, 200, 251); - Add(4, 100 + i * 3 + 1, ToString(i * 100 + 30).c_str(), - ToString(i * 100 + 50).c_str(), kFileSize, 0, 200, 251); - Add(4, 100 + i * 3 + 2, ToString(i * 100 + 60).c_str(), - ToString(i * 100 + 110).c_str(), kFileSize, 0, 200, 251); + Add(4, 100 + i * 3 + 1, std::to_string(i * 100 + 30).c_str(), + std::to_string(i * 100 + 50).c_str(), kFileSize, 0, 200, 251); + Add(4, 100 + i * 3 + 2, std::to_string(i * 100 + 60).c_str(), + std::to_string(i * 100 + 110).c_str(), kFileSize, 0, 200, 251); } UpdateVersionStorageInfo(); @@ -941,8 +941,8 @@ TEST_F(CompactionPickerTest, NeedsCompactionFIFO) { // size of L0 files. for (int i = 1; i <= kFileCount; ++i) { NewVersionStorage(1, kCompactionStyleFIFO); - Add(0, i, ToString((i + 100) * 1000).c_str(), - ToString((i + 100) * 1000 + 999).c_str(), kFileSize, 0, i * 100, + Add(0, i, std::to_string((i + 100) * 1000).c_str(), + std::to_string((i + 100) * 1000 + 999).c_str(), kFileSize, 0, i * 100, i * 100 + 99); UpdateVersionStorageInfo(); ASSERT_EQ(fifo_compaction_picker.NeedsCompaction(vstorage_.get()), diff --git a/db/compaction/compaction_service_test.cc b/db/compaction/compaction_service_test.cc index 926f80782..6fff7aa7a 100644 --- a/db/compaction/compaction_service_test.cc +++ b/db/compaction/compaction_service_test.cc @@ -82,8 +82,7 @@ class MyTestCompactionService : public CompactionService { options.canceled = &canceled_; Status s = DB::OpenAndCompact( - options, db_path_, - db_path_ + "/" + ROCKSDB_NAMESPACE::ToString(info.job_id), + options, db_path_, db_path_ + "/" + std::to_string(info.job_id), compaction_input, compaction_service_result, options_override); if (is_override_wait_result_) { *compaction_service_result = override_wait_result_; @@ -177,7 +176,7 @@ class CompactionServiceTest : public DBTestBase { for (int i = 0; i < 20; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 10 + j; - ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -187,7 +186,7 @@ class CompactionServiceTest : public DBTestBase { for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 20 + j * 2; - ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -199,9 +198,9 @@ class CompactionServiceTest : public DBTestBase { for (int i = 0; i < 200; i++) { auto result = Get(Key(i)); if (i % 2) { - ASSERT_EQ(result, "value" + ToString(i)); + ASSERT_EQ(result, "value" + std::to_string(i)); } else { - ASSERT_EQ(result, "value_new" + ToString(i)); + ASSERT_EQ(result, "value_new" + std::to_string(i)); } } } @@ -224,7 +223,7 @@ TEST_F(CompactionServiceTest, BasicCompactions) { for (int i = 0; i < 20; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 10 + j; - ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -232,7 +231,7 @@ TEST_F(CompactionServiceTest, BasicCompactions) { for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 20 + j * 2; - ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -242,9 +241,9 @@ TEST_F(CompactionServiceTest, BasicCompactions) { for (int i = 0; i < 200; i++) { auto result = Get(Key(i)); if (i % 2) { - ASSERT_EQ(result, "value" + ToString(i)); + 
ASSERT_EQ(result, "value" + std::to_string(i)); } else { - ASSERT_EQ(result, "value_new" + ToString(i)); + ASSERT_EQ(result, "value_new" + std::to_string(i)); } } auto my_cs = GetCompactionService(); @@ -281,7 +280,7 @@ TEST_F(CompactionServiceTest, BasicCompactions) { for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 20 + j * 2; - s = Put(Key(key_id), "value_new" + ToString(key_id)); + s = Put(Key(key_id), "value_new" + std::to_string(key_id)); if (s.IsAborted()) { break; } @@ -468,7 +467,7 @@ TEST_F(CompactionServiceTest, CompactionFilter) { for (int i = 0; i < 20; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 10 + j; - ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -476,7 +475,7 @@ TEST_F(CompactionServiceTest, CompactionFilter) { for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 20 + j * 2; - ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -490,9 +489,9 @@ TEST_F(CompactionServiceTest, CompactionFilter) { if (i > 5 && i <= 105) { ASSERT_EQ(result, "NOT_FOUND"); } else if (i % 2) { - ASSERT_EQ(result, "value" + ToString(i)); + ASSERT_EQ(result, "value" + std::to_string(i)); } else { - ASSERT_EQ(result, "value_new" + ToString(i)); + ASSERT_EQ(result, "value_new" + std::to_string(i)); } } auto my_cs = GetCompactionService(); @@ -547,9 +546,9 @@ TEST_F(CompactionServiceTest, ConcurrentCompaction) { for (int i = 0; i < 200; i++) { auto result = Get(Key(i)); if (i % 2) { - ASSERT_EQ(result, "value" + ToString(i)); + ASSERT_EQ(result, "value" + std::to_string(i)); } else { - ASSERT_EQ(result, "value_new" + ToString(i)); + ASSERT_EQ(result, "value_new" + std::to_string(i)); } } auto my_cs = GetCompactionService(); @@ -564,7 +563,7 @@ TEST_F(CompactionServiceTest, CompactionInfo) { for (int i = 0; i < 20; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 10 + j; - ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -572,7 +571,7 @@ TEST_F(CompactionServiceTest, CompactionInfo) { for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 20 + j * 2; - ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -617,7 +616,7 @@ TEST_F(CompactionServiceTest, CompactionInfo) { for (int i = 0; i < 20; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 10 + j; - ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -625,7 +624,7 @@ TEST_F(CompactionServiceTest, CompactionInfo) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 20 + j * 2; - ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -653,7 +652,7 @@ TEST_F(CompactionServiceTest, FallbackLocalAuto) { for (int i = 0; i < 20; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 10 + j; - ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -661,7 +660,7 @@ TEST_F(CompactionServiceTest, FallbackLocalAuto) { for (int i = 0; i < 10; 
i++) { for (int j = 0; j < 10; j++) { int key_id = i * 20 + j * 2; - ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -671,9 +670,9 @@ TEST_F(CompactionServiceTest, FallbackLocalAuto) { for (int i = 0; i < 200; i++) { auto result = Get(Key(i)); if (i % 2) { - ASSERT_EQ(result, "value" + ToString(i)); + ASSERT_EQ(result, "value" + std::to_string(i)); } else { - ASSERT_EQ(result, "value_new" + ToString(i)); + ASSERT_EQ(result, "value_new" + std::to_string(i)); } } @@ -796,7 +795,7 @@ TEST_F(CompactionServiceTest, RemoteEventListener) { for (int i = 0; i < 20; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 10 + j; - ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -804,7 +803,7 @@ TEST_F(CompactionServiceTest, RemoteEventListener) { for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 20 + j * 2; - ASSERT_OK(Put(Key(key_id), "value_new" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value_new" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -821,9 +820,9 @@ TEST_F(CompactionServiceTest, RemoteEventListener) { for (int i = 0; i < 200; i++) { auto result = Get(Key(i)); if (i % 2) { - ASSERT_EQ(result, "value" + ToString(i)); + ASSERT_EQ(result, "value" + std::to_string(i)); } else { - ASSERT_EQ(result, "value_new" + ToString(i)); + ASSERT_EQ(result, "value_new" + std::to_string(i)); } } } diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc index 672dffde2..c1473a0fe 100644 --- a/db/comparator_db_test.cc +++ b/db/comparator_db_test.cc @@ -397,7 +397,7 @@ TEST_P(ComparatorDBTest, DoubleComparator) { for (uint32_t j = 0; j < divide_order; j++) { to_divide *= 10.0; } - source_strings.push_back(ToString(r / to_divide)); + source_strings.push_back(std::to_string(r / to_divide)); } DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66); diff --git a/db/cuckoo_table_db_test.cc b/db/cuckoo_table_db_test.cc index a68f7ea59..eff0cdbf8 100644 --- a/db/cuckoo_table_db_test.cc +++ b/db/cuckoo_table_db_test.cc @@ -95,8 +95,8 @@ class CuckooTableDBTest : public testing::Test { int NumTableFilesAtLevel(int level) { std::string property; - EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level), - &property)); + EXPECT_TRUE(db_->GetProperty( + "rocksdb.num-files-at-level" + std::to_string(level), &property)); return atoi(property.c_str()); } diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc index 3833066dd..8fe39baf6 100644 --- a/db/db_basic_test.cc +++ b/db/db_basic_test.cc @@ -3783,7 +3783,7 @@ TEST_P(DBBasicTestDeadline, PointLookupDeadline) { Random rnd(301); for (int i = 0; i < 400; ++i) { - std::string key = "k" + ToString(i); + std::string key = "k" + std::to_string(i); ASSERT_OK(Put(key, rnd.RandomString(100))); } ASSERT_OK(Flush()); @@ -3866,7 +3866,7 @@ TEST_P(DBBasicTestDeadline, IteratorDeadline) { Random rnd(301); for (int i = 0; i < 400; ++i) { - std::string key = "k" + ToString(i); + std::string key = "k" + std::to_string(i); ASSERT_OK(Put(key, rnd.RandomString(100))); } ASSERT_OK(Flush()); diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index 518105af2..327be8bd9 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -76,7 +76,7 @@ class DBBlockCacheTest : public DBTestBase { void InitTable(const Options& /*options*/) { std::string value(kValueSize, 'a'); for 
(size_t i = 0; i < kNumBlocks; i++) { - ASSERT_OK(Put(ToString(i), value.c_str())); + ASSERT_OK(Put(std::to_string(i), value.c_str())); } } @@ -205,7 +205,7 @@ TEST_F(DBBlockCacheTest, IteratorBlockCacheUsage) { ASSERT_EQ(0, cache->GetUsage()); iter = db_->NewIterator(read_options); - iter->Seek(ToString(0)); + iter->Seek(std::to_string(0)); ASSERT_LT(0, cache->GetUsage()); delete iter; iter = nullptr; @@ -236,7 +236,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) { // Load blocks into cache. for (size_t i = 0; i + 1 < kNumBlocks; i++) { iter = db_->NewIterator(read_options); - iter->Seek(ToString(i)); + iter->Seek(std::to_string(i)); ASSERT_OK(iter->status()); CheckCacheCounters(options, 1, 0, 1, 0); iterators[i].reset(iter); @@ -249,7 +249,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) { // Test with strict capacity limit. cache->SetStrictCapacityLimit(true); iter = db_->NewIterator(read_options); - iter->Seek(ToString(kNumBlocks - 1)); + iter->Seek(std::to_string(kNumBlocks - 1)); ASSERT_TRUE(iter->status().IsIncomplete()); CheckCacheCounters(options, 1, 0, 0, 1); delete iter; @@ -263,7 +263,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) { ASSERT_EQ(0, cache->GetPinnedUsage()); for (size_t i = 0; i + 1 < kNumBlocks; i++) { iter = db_->NewIterator(read_options); - iter->Seek(ToString(i)); + iter->Seek(std::to_string(i)); ASSERT_OK(iter->status()); CheckCacheCounters(options, 0, 1, 0, 0); iterators[i].reset(iter); @@ -289,7 +289,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) { std::string value(kValueSize, 'a'); for (size_t i = 0; i < kNumBlocks; i++) { - ASSERT_OK(Put(ToString(i), value)); + ASSERT_OK(Put(std::to_string(i), value)); ASSERT_OK(Flush()); } @@ -313,7 +313,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) { // Load blocks into cache. for (size_t i = 0; i < kNumBlocks - 1; i++) { - ASSERT_EQ(value, Get(ToString(i))); + ASSERT_EQ(value, Get(std::to_string(i))); CheckCacheCounters(options, 1, 0, 1, 0); CheckCompressedCacheCounters(options, 1, 0, 1, 0); } @@ -334,7 +334,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) { // Load last key block. ASSERT_EQ("Result incomplete: Insert failed due to LRU cache being full.", - Get(ToString(kNumBlocks - 1))); + Get(std::to_string(kNumBlocks - 1))); // Failure will also record the miss counter. CheckCacheCounters(options, 1, 0, 0, 1); CheckCompressedCacheCounters(options, 1, 0, 1, 0); @@ -343,7 +343,7 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) { // cache and load into block cache. cache->SetStrictCapacityLimit(false); // Load last key block. 
- ASSERT_EQ(value, Get(ToString(kNumBlocks - 1))); + ASSERT_EQ(value, Get(std::to_string(kNumBlocks - 1))); CheckCacheCounters(options, 1, 0, 1, 0); CheckCompressedCacheCounters(options, 0, 1, 0, 0); } @@ -568,7 +568,7 @@ TEST_F(DBBlockCacheTest, FillCacheAndIterateDB) { Iterator* iter = nullptr; iter = db_->NewIterator(read_options); - iter->Seek(ToString(0)); + iter->Seek(std::to_string(0)); while (iter->Valid()) { iter->Next(); } @@ -646,10 +646,10 @@ TEST_F(DBBlockCacheTest, WarmCacheWithDataBlocksDuringFlush) { std::string value(kValueSize, 'a'); for (size_t i = 1; i <= kNumBlocks; i++) { - ASSERT_OK(Put(ToString(i), value)); + ASSERT_OK(Put(std::to_string(i), value)); ASSERT_OK(Flush()); ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_ADD)); - ASSERT_EQ(value, Get(ToString(i))); + ASSERT_EQ(value, Get(std::to_string(i))); ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_DATA_MISS)); ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_HIT)); } @@ -706,7 +706,7 @@ TEST_P(DBBlockCacheTest1, WarmCacheWithBlocksDuringFlush) { std::string value(kValueSize, 'a'); for (size_t i = 1; i <= kNumBlocks; i++) { - ASSERT_OK(Put(ToString(i), value)); + ASSERT_OK(Put(std::to_string(i), value)); ASSERT_OK(Flush()); ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_ADD)); if (filter_type == 1) { @@ -718,7 +718,7 @@ TEST_P(DBBlockCacheTest1, WarmCacheWithBlocksDuringFlush) { ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_INDEX_ADD)); ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_FILTER_ADD)); } - ASSERT_EQ(value, Get(ToString(i))); + ASSERT_EQ(value, Get(std::to_string(i))); ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_DATA_MISS)); ASSERT_EQ(i, options.statistics->getTickerCount(BLOCK_CACHE_DATA_HIT)); @@ -773,12 +773,12 @@ TEST_F(DBBlockCacheTest, DynamicallyWarmCacheDuringFlush) { std::string value(kValueSize, 'a'); for (size_t i = 1; i <= 5; i++) { - ASSERT_OK(Put(ToString(i), value)); + ASSERT_OK(Put(std::to_string(i), value)); ASSERT_OK(Flush()); ASSERT_EQ(1, options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD)); - ASSERT_EQ(value, Get(ToString(i))); + ASSERT_EQ(value, Get(std::to_string(i))); ASSERT_EQ(0, options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD)); ASSERT_EQ( @@ -791,12 +791,12 @@ TEST_F(DBBlockCacheTest, DynamicallyWarmCacheDuringFlush) { {{"block_based_table_factory", "{prepopulate_block_cache=kDisable;}"}})); for (size_t i = 6; i <= kNumBlocks; i++) { - ASSERT_OK(Put(ToString(i), value)); + ASSERT_OK(Put(std::to_string(i), value)); ASSERT_OK(Flush()); ASSERT_EQ(0, options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD)); - ASSERT_EQ(value, Get(ToString(i))); + ASSERT_EQ(value, Get(std::to_string(i))); ASSERT_EQ(1, options.statistics->getAndResetTickerCount(BLOCK_CACHE_DATA_ADD)); ASSERT_EQ( @@ -1409,7 +1409,7 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) { for (size_t i = 0; i < kNumCacheEntryRoles; ++i) { auto role = static_cast(i); - EXPECT_EQ(ToString(expected[i]), + EXPECT_EQ(std::to_string(expected[i]), values[BlockCacheEntryStatsMapKeys::EntryCount(role)]); } @@ -1422,7 +1422,7 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) { // re-scanning stats, but not totally aggressive. 
// Within some time window, we will get cached entry stats env_->MockSleepForSeconds(1); - EXPECT_EQ(ToString(prev_expected[static_cast( + EXPECT_EQ(std::to_string(prev_expected[static_cast( CacheEntryRole::kWriteBuffer)]), values[BlockCacheEntryStatsMapKeys::EntryCount( CacheEntryRole::kWriteBuffer)]); @@ -1432,7 +1432,7 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) { ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats, &values)); EXPECT_EQ( - ToString( + std::to_string( expected[static_cast(CacheEntryRole::kWriteBuffer)]), values[BlockCacheEntryStatsMapKeys::EntryCount( CacheEntryRole::kWriteBuffer)]); @@ -1640,7 +1640,7 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) { SstFileWriter sst_file_writer(EnvOptions(), options); std::vector external; for (int i = 0; i < 2; ++i) { - std::string f = dbname_ + "/external" + ToString(i) + ".sst"; + std::string f = dbname_ + "/external" + std::to_string(i) + ".sst"; external.push_back(f); ASSERT_OK(sst_file_writer.Open(f)); ASSERT_OK(sst_file_writer.Put(Key(key_count), "abc")); @@ -1724,7 +1724,7 @@ class CacheKeyTest : public testing::Test { // Like SemiStructuredUniqueIdGen::GenerateNext tp_.db_session_id = EncodeSessionId(base_session_upper_, base_session_lower_ ^ session_counter_); - tp_.db_id = ToString(db_id_); + tp_.db_id = std::to_string(db_id_); tp_.orig_file_number = file_number_; bool is_stable; std::string cur_session_id = ""; // ignored diff --git a/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc index 2bfdb61a0..94e710126 100644 --- a/db/db_bloom_filter_test.cc +++ b/db/db_bloom_filter_test.cc @@ -858,7 +858,7 @@ TEST_F(DBBloomFilterTest, BloomFilterCompatibility) { options.table_factory.reset(NewBlockBasedTableFactory(table_options)); Reopen(options); - std::string prefix = ToString(i) + "_"; + std::string prefix = std::to_string(i) + "_"; ASSERT_OK(Put(prefix + "A", "val")); ASSERT_OK(Put(prefix + "Z", "val")); ASSERT_OK(Flush()); @@ -873,7 +873,7 @@ TEST_F(DBBloomFilterTest, BloomFilterCompatibility) { options.table_factory.reset(NewBlockBasedTableFactory(table_options)); Reopen(options); for (size_t j = 0; j < kCompatibilityConfigs.size(); ++j) { - std::string prefix = ToString(j) + "_"; + std::string prefix = std::to_string(j) + "_"; ASSERT_EQ("val", Get(prefix + "A")); // Filter positive ASSERT_EQ("val", Get(prefix + "Z")); // Filter positive // Filter negative, with high probability @@ -1713,11 +1713,11 @@ class TestingContextCustomFilterPolicy test_report_ += OptionsHelper::compaction_style_to_string[context.compaction_style]; test_report_ += ",n="; - test_report_ += ROCKSDB_NAMESPACE::ToString(context.num_levels); + test_report_ += std::to_string(context.num_levels); test_report_ += ",l="; - test_report_ += ROCKSDB_NAMESPACE::ToString(context.level_at_creation); + test_report_ += std::to_string(context.level_at_creation); test_report_ += ",b="; - test_report_ += ROCKSDB_NAMESPACE::ToString(int{context.is_bottommost}); + test_report_ += std::to_string(int{context.is_bottommost}); test_report_ += ",r="; test_report_ += table_file_creation_reason_to_string[context.reason]; test_report_ += "\n"; diff --git a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc index 195615191..9ea8a350f 100644 --- a/db/db_compaction_filter_test.cc +++ b/db/db_compaction_filter_test.cc @@ -454,7 +454,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) { // put some data for (int table = 0; table < 4; ++table) { for (int i = 0; i < 10 + table; ++i) { - ASSERT_OK(Put(ToString(table * 100 + i), 
"val")); + ASSERT_OK(Put(std::to_string(table * 100 + i), "val")); } ASSERT_OK(Flush()); } @@ -755,7 +755,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) { #ifndef ROCKSDB_LITE // Compaction filters aplies to all records, regardless snapshots. TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) { - std::string five = ToString(5); + std::string five = std::to_string(5); Options options = CurrentOptions(); options.compaction_filter_factory = std::make_shared(); options.disable_auto_compactions = true; @@ -766,7 +766,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) { const Snapshot* snapshot = nullptr; for (int table = 0; table < 4; ++table) { for (int i = 0; i < 10; ++i) { - ASSERT_OK(Put(ToString(table * 100 + i), "val")); + ASSERT_OK(Put(std::to_string(table * 100 + i), "val")); } ASSERT_OK(Flush()); diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index 29180f224..c49b3c257 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -2817,7 +2817,7 @@ TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) { Random rnd(301); for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) { - ASSERT_OK(Put(1, ToString(key), rnd.RandomString(kTestValueSize))); + ASSERT_OK(Put(1, std::to_string(key), rnd.RandomString(kTestValueSize))); } ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1])); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -2849,7 +2849,7 @@ TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) { // make sure all key-values are still there. for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) { - ASSERT_NE(Get(1, ToString(key)), "NOT_FOUND"); + ASSERT_NE(Get(1, std::to_string(key)), "NOT_FOUND"); } } @@ -4668,9 +4668,9 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) { }); TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PreFlush"); - ASSERT_OK(Put(ToString(0), rnd.RandomString(1024))); + ASSERT_OK(Put(std::to_string(0), rnd.RandomString(1024))); ASSERT_OK(dbfull()->Flush(flush_opts)); - ASSERT_OK(Put(ToString(0), rnd.RandomString(1024))); + ASSERT_OK(Put(std::to_string(0), rnd.RandomString(1024))); TEST_SYNC_POINT("DBCompactionTest::CompactRangeSkipFlushAfterDelay:PostFlush"); manual_compaction_thread.join(); @@ -4679,7 +4679,7 @@ TEST_F(DBCompactionTest, CompactRangeSkipFlushAfterDelay) { std::string num_keys_in_memtable; ASSERT_TRUE(db_->GetProperty(DB::Properties::kNumEntriesActiveMemTable, &num_keys_in_memtable)); - ASSERT_EQ(ToString(1), num_keys_in_memtable); + ASSERT_EQ(std::to_string(1), num_keys_in_memtable); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); } @@ -4828,7 +4828,7 @@ TEST_F(DBCompactionTest, SubcompactionEvent) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 10 + j; - ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -4838,7 +4838,7 @@ TEST_F(DBCompactionTest, SubcompactionEvent) { for (int i = 0; i < 2; i++) { for (int j = 0; j < 10; j++) { int key_id = i * 20 + j * 2; - ASSERT_OK(Put(Key(key_id), "value" + ToString(key_id))); + ASSERT_OK(Put(Key(key_id), "value" + std::to_string(key_id))); } ASSERT_OK(Flush()); } @@ -5830,7 +5830,7 @@ TEST_P(DBCompactionTestWithBottommostParam, SequenceKeysManualCompaction) { } ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); - ASSERT_EQ(ToString(kSstNum), FilesPerLevel(0)); + ASSERT_EQ(std::to_string(kSstNum), FilesPerLevel(0)); 
auto cro = CompactRangeOptions(); cro.bottommost_level_compaction = bottommost_level_compaction_; @@ -5843,7 +5843,7 @@ TEST_P(DBCompactionTestWithBottommostParam, SequenceKeysManualCompaction) { ASSERT_EQ("0,1", FilesPerLevel(0)); } else { // Just trivial move from level 0 -> 1 - ASSERT_EQ("0," + ToString(kSstNum), FilesPerLevel(0)); + ASSERT_EQ("0," + std::to_string(kSstNum), FilesPerLevel(0)); } } @@ -7174,7 +7174,7 @@ TEST_F(DBCompactionTest, DisableManualCompactionThreadQueueFull) { ASSERT_OK(Put(Key(2), "value2")); ASSERT_OK(Flush()); } - ASSERT_EQ(ToString(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0)); + ASSERT_EQ(std::to_string(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0)); db_->DisableManualCompaction(); @@ -7231,7 +7231,7 @@ TEST_F(DBCompactionTest, DisableManualCompactionThreadQueueFullDBClose) { ASSERT_OK(Put(Key(2), "value2")); ASSERT_OK(Flush()); } - ASSERT_EQ(ToString(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0)); + ASSERT_EQ(std::to_string(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0)); db_->DisableManualCompaction(); @@ -7291,7 +7291,7 @@ TEST_F(DBCompactionTest, DBCloseWithManualCompaction) { ASSERT_OK(Put(Key(2), "value2")); ASSERT_OK(Flush()); } - ASSERT_EQ(ToString(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0)); + ASSERT_EQ(std::to_string(kNumL0Files + (kNumL0Files / 2)), FilesPerLevel(0)); // Close DB with manual compaction and auto triggered compaction in the queue. auto s = db_->Close(); diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc index 3b0fc9519..62589ba1d 100644 --- a/db/db_impl/db_impl.cc +++ b/db/db_impl/db_impl.cc @@ -849,7 +849,8 @@ void DBImpl::PersistStats() { if (stats_slice_.find(stat.first) != stats_slice_.end()) { uint64_t delta = stat.second - stats_slice_[stat.first]; s = batch.Put(persist_stats_cf_handle_, - Slice(key, std::min(100, length)), ToString(delta)); + Slice(key, std::min(100, length)), + std::to_string(delta)); } } } @@ -3355,7 +3356,7 @@ bool DBImpl::GetProperty(ColumnFamilyHandle* column_family, bool ret_value = GetIntPropertyInternal(cfd, *property_info, false, &int_value); if (ret_value) { - *value = ToString(int_value); + *value = std::to_string(int_value); } return ret_value; } else if (property_info->handle_string) { @@ -3990,8 +3991,8 @@ Status DBImpl::CheckConsistency() { } else if (fsize != md.size) { corruption_messages += "Sst file size mismatch: " + file_path + ". 
Size recorded in manifest " + - ToString(md.size) + ", actual size " + - ToString(fsize) + "\n"; + std::to_string(md.size) + ", actual size " + + std::to_string(fsize) + "\n"; } } } diff --git a/db/db_impl/db_impl_open.cc b/db/db_impl/db_impl_open.cc index 7a2ff8f49..3d770bc27 100644 --- a/db/db_impl/db_impl_open.cc +++ b/db/db_impl/db_impl_open.cc @@ -760,11 +760,11 @@ Status DBImpl::PersistentStatsProcessFormatVersion() { WriteBatch batch; if (s.ok()) { s = batch.Put(persist_stats_cf_handle_, kFormatVersionKeyString, - ToString(kStatsCFCurrentFormatVersion)); + std::to_string(kStatsCFCurrentFormatVersion)); } if (s.ok()) { s = batch.Put(persist_stats_cf_handle_, kCompatibleVersionKeyString, - ToString(kStatsCFCompatibleFormatVersion)); + std::to_string(kStatsCFCompatibleFormatVersion)); } if (s.ok()) { WriteOptions wo; diff --git a/db/db_iter_stress_test.cc b/db/db_iter_stress_test.cc index f2b200f68..2d26a7a64 100644 --- a/db/db_iter_stress_test.cc +++ b/db/db_iter_stress_test.cc @@ -414,7 +414,7 @@ TEST_F(DBIteratorStressTest, StressTest) { a /= 10; ++len; } - std::string s = ToString(rnd.Next() % static_cast(max_key)); + std::string s = std::to_string(rnd.Next() % static_cast(max_key)); s.insert(0, len - (int)s.size(), '0'); return s; }; @@ -444,12 +444,13 @@ TEST_F(DBIteratorStressTest, StressTest) { for (double mutation_probability : {0.01, 0.5}) { for (double target_hidden_fraction : {0.1, 0.5}) { std::string trace_str = - "entries: " + ToString(num_entries) + - ", key_space: " + ToString(key_space) + - ", error_probability: " + ToString(error_probability) + - ", mutation_probability: " + ToString(mutation_probability) + + "entries: " + std::to_string(num_entries) + + ", key_space: " + std::to_string(key_space) + + ", error_probability: " + std::to_string(error_probability) + + ", mutation_probability: " + + std::to_string(mutation_probability) + ", target_hidden_fraction: " + - ToString(target_hidden_fraction); + std::to_string(target_hidden_fraction); SCOPED_TRACE(trace_str); if (trace) { std::cout << trace_str << std::endl; @@ -470,7 +471,7 @@ TEST_F(DBIteratorStressTest, StressTest) { types[rnd.Next() % (sizeof(types) / sizeof(types[0]))]; } e.sequence = i; - e.value = "v" + ToString(i); + e.value = "v" + std::to_string(i); ParsedInternalKey internal_key(e.key, e.sequence, e.type); AppendInternalKey(&e.ikey, internal_key); diff --git a/db/db_iter_test.cc b/db/db_iter_test.cc index d6ffdbb55..c6f30a450 100644 --- a/db/db_iter_test.cc +++ b/db/db_iter_test.cc @@ -766,7 +766,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { internal_iter->AddMerge("b", "merge_1"); internal_iter->AddMerge("a", "merge_2"); for (size_t k = 0; k < 200; ++k) { - internal_iter->AddPut("c", ToString(k)); + internal_iter->AddPut("c", std::to_string(k)); } internal_iter->Finish(); @@ -780,7 +780,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "c"); - ASSERT_EQ(db_iter->value().ToString(), ToString(i)); + ASSERT_EQ(db_iter->value().ToString(), std::to_string(i)); db_iter->Prev(); ASSERT_TRUE(db_iter->Valid()); @@ -925,11 +925,11 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { internal_iter->AddMerge("b", "merge_1"); internal_iter->AddMerge("a", "merge_2"); for (size_t k = 0; k < 200; ++k) { - internal_iter->AddPut("d", ToString(k)); + internal_iter->AddPut("d", std::to_string(k)); } for (size_t k = 0; k < 200; ++k) { - internal_iter->AddPut("c", ToString(k)); + internal_iter->AddPut("c", std::to_string(k)); } internal_iter->Finish(); @@ 
-942,7 +942,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { ASSERT_TRUE(db_iter->Valid()); ASSERT_EQ(db_iter->key().ToString(), "d"); - ASSERT_EQ(db_iter->value().ToString(), ToString(i)); + ASSERT_EQ(db_iter->value().ToString(), std::to_string(i)); db_iter->Prev(); ASSERT_TRUE(db_iter->Valid()); @@ -966,7 +966,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { internal_iter->AddMerge("b", "b"); internal_iter->AddMerge("a", "a"); for (size_t k = 0; k < 200; ++k) { - internal_iter->AddMerge("c", ToString(k)); + internal_iter->AddMerge("c", std::to_string(k)); } internal_iter->Finish(); @@ -981,7 +981,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) { ASSERT_EQ(db_iter->key().ToString(), "c"); std::string merge_result = "0"; for (size_t j = 1; j <= i; ++j) { - merge_result += "," + ToString(j); + merge_result += "," + std::to_string(j); } ASSERT_EQ(db_iter->value().ToString(), merge_result); @@ -3156,7 +3156,7 @@ TEST_F(DBIteratorTest, ReverseToForwardWithDisappearingKeys) { internal_iter->AddPut("a", "A"); internal_iter->AddPut("b", "B"); for (int i = 0; i < 100; ++i) { - internal_iter->AddPut("c" + ToString(i), ""); + internal_iter->AddPut("c" + std::to_string(i), ""); } internal_iter->Finish(); diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc index 819c3f94e..b2d549250 100644 --- a/db/db_iterator_test.cc +++ b/db/db_iterator_test.cc @@ -3160,7 +3160,7 @@ TEST_F(DBIteratorWithReadCallbackTest, ReadCallback) { uint64_t num_versions = CurrentOptions().max_sequential_skip_in_iterations + 10; for (uint64_t i = 0; i < num_versions; i++) { - ASSERT_OK(Put("bar", ToString(i))); + ASSERT_OK(Put("bar", std::to_string(i))); } SequenceNumber seq3 = db_->GetLatestSequenceNumber(); TestReadCallback callback2(seq3); @@ -3189,7 +3189,7 @@ TEST_F(DBIteratorWithReadCallbackTest, ReadCallback) { ASSERT_TRUE(iter->Valid()); ASSERT_OK(iter->status()); ASSERT_EQ("bar", iter->key()); - ASSERT_EQ(ToString(num_versions - 1), iter->value()); + ASSERT_EQ(std::to_string(num_versions - 1), iter->value()); delete iter; } diff --git a/db/db_log_iter_test.cc b/db/db_log_iter_test.cc index 748aea455..f0cf215e1 100644 --- a/db/db_log_iter_test.cc +++ b/db/db_log_iter_test.cc @@ -187,7 +187,7 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) { DestroyAndReopen(options); for (int i = 0; i < 1024; i++) { - ASSERT_OK(Put("key" + ToString(i), DummyString(10))); + ASSERT_OK(Put("key" + std::to_string(i), DummyString(10))); } ASSERT_OK(Flush()); @@ -263,20 +263,20 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorBlobs) { struct Handler : public WriteBatch::Handler { std::string seen; Status PutCF(uint32_t cf, const Slice& key, const Slice& value) override { - seen += "Put(" + ToString(cf) + ", " + key.ToString() + ", " + - ToString(value.size()) + ")"; + seen += "Put(" + std::to_string(cf) + ", " + key.ToString() + ", " + + std::to_string(value.size()) + ")"; return Status::OK(); } Status MergeCF(uint32_t cf, const Slice& key, const Slice& value) override { - seen += "Merge(" + ToString(cf) + ", " + key.ToString() + ", " + - ToString(value.size()) + ")"; + seen += "Merge(" + std::to_string(cf) + ", " + key.ToString() + ", " + + std::to_string(value.size()) + ")"; return Status::OK(); } void LogData(const Slice& blob) override { seen += "LogData(" + blob.ToString() + ")"; } Status DeleteCF(uint32_t cf, const Slice& key) override { - seen += "Delete(" + ToString(cf) + ", " + key.ToString() + ")"; + seen += "Delete(" + std::to_string(cf) + ", " + key.ToString() + ")"; return Status::OK(); } } 
handler; diff --git a/db/db_memtable_test.cc b/db/db_memtable_test.cc index 13736daac..306feaa39 100644 --- a/db/db_memtable_test.cc +++ b/db/db_memtable_test.cc @@ -171,7 +171,7 @@ TEST_F(DBMemTableTest, DuplicateSeq) { if (!insert_dup) { seq++; } - Status s = mem->Add(seq, kTypeValue, "foo", "value" + ToString(seq), + Status s = mem->Add(seq, kTypeValue, "foo", "value" + std::to_string(seq), nullptr /* kv_prot_info */); if (insert_dup) { ASSERT_TRUE(s.IsTryAgain()); diff --git a/db/db_options_test.cc b/db/db_options_test.cc index 360a3c561..a6e011ffa 100644 --- a/db/db_options_test.cc +++ b/db/db_options_test.cc @@ -424,8 +424,8 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); int i = 0; for (; i < 3; i++) { - ASSERT_OK(Put("foo", ToString(i))); - ASSERT_OK(Put("bar", ToString(i))); + ASSERT_OK(Put("foo", std::to_string(i))); + ASSERT_OK(Put("bar", std::to_string(i))); ASSERT_OK(Flush()); } ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -442,8 +442,8 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) { dbfull()->GetDBOptions().writable_file_max_buffer_size); i = 0; for (; i < 3; i++) { - ASSERT_OK(Put("foo", ToString(i))); - ASSERT_OK(Put("bar", ToString(i))); + ASSERT_OK(Put("foo", std::to_string(i))); + ASSERT_OK(Put("bar", std::to_string(i))); ASSERT_OK(Flush()); } ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -573,8 +573,8 @@ TEST_F(DBOptionsTest, SetOptionsMayTriggerCompaction) { Reopen(options); for (int i = 0; i < 3; i++) { // Need to insert two keys to avoid trivial move. - ASSERT_OK(Put("foo", ToString(i))); - ASSERT_OK(Put("bar", ToString(i))); + ASSERT_OK(Put("foo", std::to_string(i))); + ASSERT_OK(Put("bar", std::to_string(i))); ASSERT_OK(Flush()); } ASSERT_EQ("3", FilesPerLevel()); @@ -717,8 +717,8 @@ TEST_F(DBOptionsTest, SetStatsDumpPeriodSec) { for (int i = 0; i < 20; i++) { unsigned int num = rand() % 5000 + 1; - ASSERT_OK( - dbfull()->SetDBOptions({{"stats_dump_period_sec", ToString(num)}})); + ASSERT_OK(dbfull()->SetDBOptions( + {{"stats_dump_period_sec", std::to_string(num)}})); ASSERT_EQ(num, dbfull()->GetDBOptions().stats_dump_period_sec); } Close(); @@ -909,7 +909,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); } @@ -940,7 +940,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); } @@ -972,7 +972,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); } @@ -1036,7 +1036,7 @@ TEST_F(DBOptionsTest, FIFOTtlBackwardCompatible) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. 
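The SetDBOptions()/SetOptions() calls in the surrounding tests take string-to-string maps, which is why every numeric setting touched by this patch is now formatted with std::to_string. A minimal sketch of that same pattern outside the test fixture follows; the helper name and the particular option are illustrative only, not part of the patch:

    #include <string>
    #include <unordered_map>
    #include "rocksdb/db.h"

    // Sketch: dynamic DB options are passed as strings, so numeric values
    // go through std::to_string() before the call.
    rocksdb::Status SetStatsDumpPeriod(rocksdb::DB* db, unsigned int seconds) {
      std::unordered_map<std::string, std::string> new_options;
      new_options["stats_dump_period_sec"] = std::to_string(seconds);
      return db->SetDBOptions(new_options);
    }
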
for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); } diff --git a/db/db_properties_test.cc b/db/db_properties_test.cc index a568bc117..d82c17107 100644 --- a/db/db_properties_test.cc +++ b/db/db_properties_test.cc @@ -593,9 +593,9 @@ TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) { ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ResetTableProperties(&sum_tp); for (int level = 0; level < kMaxLevel; ++level) { - db_->GetProperty( - DB::Properties::kAggregatedTablePropertiesAtLevel + ToString(level), - &level_tp_strings[level]); + db_->GetProperty(DB::Properties::kAggregatedTablePropertiesAtLevel + + std::to_string(level), + &level_tp_strings[level]); ParseTablePropertiesString(level_tp_strings[level], &level_tps[level]); sum_tp.data_size += level_tps[level].data_size; sum_tp.index_size += level_tps[level].index_size; @@ -1091,7 +1091,7 @@ TEST_F(DBPropertiesTest, EstimateCompressionRatio) { for (int j = 0; j < kNumEntriesPerFile; ++j) { // Put common data ("key") at end to prevent delta encoding from // compressing the key effectively - std::string key = ToString(i) + ToString(j) + "key"; + std::string key = std::to_string(i) + std::to_string(j) + "key"; ASSERT_OK(dbfull()->Put(WriteOptions(), key, kVal)); } ASSERT_OK(Flush()); @@ -1185,7 +1185,7 @@ class CountingDeleteTabPropCollector : public TablePropertiesCollector { Status Finish(UserCollectedProperties* properties) override { *properties = - UserCollectedProperties{{"num_delete", ToString(num_deletes_)}}; + UserCollectedProperties{{"num_delete", std::to_string(num_deletes_)}}; return Status::OK(); } @@ -1215,7 +1215,7 @@ class BlockCountingTablePropertiesCollector : public TablePropertiesCollector { Status Finish(UserCollectedProperties* properties) override { (*properties)[kNumSampledBlocksPropertyName] = - ToString(num_sampled_blocks_); + std::to_string(num_sampled_blocks_); return Status::OK(); } @@ -1235,7 +1235,7 @@ class BlockCountingTablePropertiesCollector : public TablePropertiesCollector { UserCollectedProperties GetReadableProperties() const override { return UserCollectedProperties{ - {kNumSampledBlocksPropertyName, ToString(num_sampled_blocks_)}, + {kNumSampledBlocksPropertyName, std::to_string(num_sampled_blocks_)}, }; } @@ -1272,7 +1272,8 @@ TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) { // Create 4 tables for (int table = 0; table < 4; ++table) { for (int i = 0; i < 10 + table; ++i) { - ASSERT_OK(db_->Put(WriteOptions(), ToString(table * 100 + i), "val")); + ASSERT_OK( + db_->Put(WriteOptions(), std::to_string(table * 100 + i), "val")); } ASSERT_OK(db_->Flush(FlushOptions())); } @@ -1312,7 +1313,7 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) { // Create 2 files for (int table = 0; table < 2; ++table) { for (int i = 0; i < 10 + table; ++i) { - ASSERT_OK(Put(1, ToString(table * 100 + i), "val")); + ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val")); } ASSERT_OK(Flush(1)); } @@ -1322,7 +1323,7 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) { // Trigger automatic compactions. 
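User-collected table properties are likewise string-to-string maps, so the collectors in db_properties_test.cc serialize their counters with std::to_string when Finish() is called. Below is a reduced sketch of that pattern, modeled on the collectors shown in the diff; the class name, property name, and counter are assumptions for illustration:

    #include <cstdint>
    #include <string>
    #include "rocksdb/table_properties.h"

    // Sketch: numeric counters must be stored as strings in the
    // UserCollectedProperties map, hence std::to_string() in Finish().
    class CounterCollector : public rocksdb::TablePropertiesCollector {
     public:
      rocksdb::Status AddUserKey(const rocksdb::Slice& /*key*/,
                                 const rocksdb::Slice& /*value*/,
                                 rocksdb::EntryType /*type*/,
                                 rocksdb::SequenceNumber /*seq*/,
                                 uint64_t /*file_size*/) override {
        ++num_entries_;
        return rocksdb::Status::OK();
      }
      rocksdb::Status Finish(rocksdb::UserCollectedProperties* props) override {
        (*props)["num_entries"] = std::to_string(num_entries_);
        return rocksdb::Status::OK();
      }
      rocksdb::UserCollectedProperties GetReadableProperties() const override {
        return {{"num_entries", std::to_string(num_entries_)}};
      }
      const char* Name() const override { return "CounterCollector"; }

     private:
      uint64_t num_entries_ = 0;
    };
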
for (int table = 0; table < 3; ++table) { for (int i = 0; i < 10 + table; ++i) { - ASSERT_OK(Put(1, ToString(table * 100 + i), "val")); + ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val")); } ASSERT_OK(Flush(1)); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -1339,7 +1340,7 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) { // Create 4 tables in default column family for (int table = 0; table < 2; ++table) { for (int i = 0; i < 10 + table; ++i) { - ASSERT_OK(Put(ToString(table * 100 + i), "val")); + ASSERT_OK(Put(std::to_string(table * 100 + i), "val")); } ASSERT_OK(Flush()); } @@ -1349,7 +1350,7 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) { // Trigger automatic compactions. for (int table = 0; table < 3; ++table) { for (int i = 0; i < 10 + table; ++i) { - ASSERT_OK(Put(ToString(table * 100 + i), "val")); + ASSERT_OK(Put(std::to_string(table * 100 + i), "val")); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -1545,7 +1546,7 @@ TEST_F(DBPropertiesTest, BlockAddForCompressionSampling) { user_props.end()); ASSERT_EQ(user_props.at(BlockCountingTablePropertiesCollector:: kNumSampledBlocksPropertyName), - ToString(sample_for_compression ? 1 : 0)); + std::to_string(sample_for_compression ? 1 : 0)); } } } @@ -1742,11 +1743,11 @@ TEST_F(DBPropertiesTest, SstFilesSize) { Reopen(options); for (int i = 0; i < 10; i++) { - ASSERT_OK(Put("key" + ToString(i), std::string(1000, 'v'))); + ASSERT_OK(Put("key" + std::to_string(i), std::string(1000, 'v'))); } ASSERT_OK(Flush()); for (int i = 0; i < 5; i++) { - ASSERT_OK(Delete("key" + ToString(i))); + ASSERT_OK(Delete("key" + std::to_string(i))); } ASSERT_OK(Flush()); uint64_t sst_size; diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc index 2b4fa3ba2..66fe2e892 100644 --- a/db/db_range_del_test.cc +++ b/db/db_range_del_test.cc @@ -190,9 +190,10 @@ TEST_F(DBRangeDelTest, MaxCompactionBytesCutsOutputFiles) { ASSERT_EQ(0, NumTableFilesAtLevel(0)); ASSERT_EQ(NumTableFilesAtLevel(2), 2); - ASSERT_OK(db_->SetOptions( - db_->DefaultColumnFamily(), - {{"target_file_size_base", ToString(100 * opts.max_compaction_bytes)}})); + ASSERT_OK( + db_->SetOptions(db_->DefaultColumnFamily(), + {{"target_file_size_base", + std::to_string(100 * opts.max_compaction_bytes)}})); // It spans the whole key-range, thus will be included in all output files ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), diff --git a/db/db_secondary_test.cc b/db/db_secondary_test.cc index dd9d8954e..c67e86167 100644 --- a/db/db_secondary_test.cc +++ b/db/db_secondary_test.cc @@ -212,20 +212,20 @@ TEST_F(DBSecondaryTest, InternalCompactionMultiLevels) { const int kRangeL2 = 10; const int kRangeL1 = 30; for (int i = 0; i < 10; i++) { - ASSERT_OK(Put(Key(i * kRangeL2), "value" + ToString(i))); - ASSERT_OK(Put(Key((i + 1) * kRangeL2 - 1), "value" + ToString(i))); + ASSERT_OK(Put(Key(i * kRangeL2), "value" + std::to_string(i))); + ASSERT_OK(Put(Key((i + 1) * kRangeL2 - 1), "value" + std::to_string(i))); ASSERT_OK(Flush()); } MoveFilesToLevel(2); for (int i = 0; i < 5; i++) { - ASSERT_OK(Put(Key(i * kRangeL1), "value" + ToString(i))); - ASSERT_OK(Put(Key((i + 1) * kRangeL1 - 1), "value" + ToString(i))); + ASSERT_OK(Put(Key(i * kRangeL1), "value" + std::to_string(i))); + ASSERT_OK(Put(Key((i + 1) * kRangeL1 - 1), "value" + std::to_string(i))); ASSERT_OK(Flush()); } MoveFilesToLevel(1); for (int i = 0; i < 4; i++) { - ASSERT_OK(Put(Key(i * 30), "value" + ToString(i))); - ASSERT_OK(Put(Key(i * 30 + 50), "value" + 
ToString(i))); + ASSERT_OK(Put(Key(i * 30), "value" + std::to_string(i))); + ASSERT_OK(Put(Key(i * 30 + 50), "value" + std::to_string(i))); ASSERT_OK(Flush()); } diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc index 9248814c8..64fe99f4f 100644 --- a/db/db_sst_test.cc +++ b/db/db_sst_test.cc @@ -999,7 +999,7 @@ TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) { // Create 4 files in L0 for (int i = 0; i < 4; i++) { - ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'A'), wo)); + ASSERT_OK(Put("Key" + std::to_string(i), DummyString(1024, 'A'), wo)); ASSERT_OK(Flush()); } // We created 4 sst files in L0 @@ -1015,7 +1015,7 @@ TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) { // Create 4 files in L0 for (int i = 4; i < 8; i++) { - ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'B'), wo)); + ASSERT_OK(Put("Key" + std::to_string(i), DummyString(1024, 'B'), wo)); ASSERT_OK(Flush()); } ASSERT_EQ("4,1", FilesPerLevel(0)); @@ -1061,7 +1061,7 @@ TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) { // Create 4 files in L0 for (int i = 0; i < 4; i++) { - ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'A'))); + ASSERT_OK(Put("Key" + std::to_string(i), DummyString(1024, 'A'))); ASSERT_OK(Flush()); } // We created 4 sst files in L0 @@ -1530,7 +1530,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) { // Generate 5 files in L0 for (int i = 0; i < 5; i++) { for (int j = 0; j < 10; j++) { - std::string val = "val_file_" + ToString(i); + std::string val = "val_file_" + std::to_string(i); ASSERT_OK(Put(Key(j), val)); } ASSERT_OK(Flush()); diff --git a/db/db_table_properties_test.cc b/db/db_table_properties_test.cc index b3ee8a41a..f71038577 100644 --- a/db/db_table_properties_test.cc +++ b/db/db_table_properties_test.cc @@ -84,7 +84,8 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) { } // Build file for (int i = 0; i < 10 + table; ++i) { - ASSERT_OK(db_->Put(WriteOptions(), ToString(table * 100 + i), "val")); + ASSERT_OK( + db_->Put(WriteOptions(), std::to_string(table * 100 + i), "val")); } ASSERT_OK(db_->Flush(FlushOptions())); } @@ -113,7 +114,7 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) { // fetch key from 1st and 2nd table, which will internally place that table to // the table cache. for (int i = 0; i < 2; ++i) { - Get(ToString(i * 100 + 0)); + Get(std::to_string(i * 100 + 0)); } VerifyTableProperties(db_, 10 + 11 + 12 + 13); @@ -122,7 +123,7 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) { Reopen(options); // fetch key from all tables, which will place them in table cache. 
for (int i = 0; i < 4; ++i) { - Get(ToString(i * 100 + 0)); + Get(std::to_string(i * 100 + 0)); } VerifyTableProperties(db_, 10 + 11 + 12 + 13); @@ -156,7 +157,7 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) { } else { bool found_corruption = false; for (int i = 0; i < 4; ++i) { - std::string result = Get(ToString(i * 100 + 0)); + std::string result = Get(std::to_string(i * 100 + 0)); if (result.find_first_of("Corruption: block checksum mismatch") != std::string::npos) { found_corruption = true; @@ -187,7 +188,7 @@ TEST_F(DBTablePropertiesTest, InvalidIgnored) { // Build file for (int i = 0; i < 10; ++i) { - ASSERT_OK(db_->Put(WriteOptions(), ToString(i), "val")); + ASSERT_OK(db_->Put(WriteOptions(), std::to_string(i), "val")); } ASSERT_OK(db_->Flush(FlushOptions())); diff --git a/db/db_test.cc b/db/db_test.cc index 97d6b2976..c8e394394 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -2734,7 +2734,7 @@ TEST_P(MultiThreadedDBTest, MultiThreaded) { Options options = CurrentOptions(options_override); std::vector cfs; for (int i = 1; i < kColumnFamilies; ++i) { - cfs.push_back(ToString(i)); + cfs.push_back(std::to_string(i)); } Reopen(options); CreateAndReopenWithCF(cfs, options); @@ -2786,7 +2786,7 @@ static void GCThreadBody(void* arg) { WriteOptions wo; for (int i = 0; i < kGCNumKeys; ++i) { - std::string kv(ToString(i + id * kGCNumKeys)); + std::string kv(std::to_string(i + id * kGCNumKeys)); ASSERT_OK(db->Put(wo, kv, kv)); } t->done = true; @@ -2822,7 +2822,7 @@ TEST_F(DBTest, GroupCommitTest) { std::vector expected_db; for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) { - expected_db.push_back(ToString(i)); + expected_db.push_back(std::to_string(i)); } std::sort(expected_db.begin(), expected_db.end()); @@ -3591,7 +3591,7 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) { Random rnd(301); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 110; ++j) { - ASSERT_OK(Put(ToString(i * 100 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 100 + j), rnd.RandomString(980))); } // flush should happen here ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable()); @@ -3607,7 +3607,7 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) { ASSERT_EQ(NumTableFilesAtLevel(0), 5); for (int i = 0; i < 50; ++i) { // these keys should be deleted in previous compaction - ASSERT_EQ("NOT_FOUND", Get(ToString(i))); + ASSERT_EQ("NOT_FOUND", Get(std::to_string(i))); } } } @@ -3629,7 +3629,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) { for (int i = 0; i < 60; i++) { // Generate and flush a file about 20KB. for (int j = 0; j < 20; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3640,7 +3640,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) { for (int i = 0; i < 60; i++) { // Generate and flush a file about 20KB. for (int j = 0; j < 20; j++) { - ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j + 2000), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3670,27 +3670,27 @@ TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) { Random rnd(301); for (int i = 0; i < 3; i++) { // Each file contains a different key which will be dropped later. 
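Most of the call sites changed in these tests follow one shape: a loop index is folded into a key or value string. A standalone illustration of that pattern is below; the function name and container choice are arbitrary:

    #include <string>
    #include <utility>
    #include <vector>

    // Sketch: the numbered keys/values used throughout these tests;
    // std::to_string() takes over from the removed ToString() helper.
    std::vector<std::pair<std::string, std::string>> MakeNumberedKVs(int n) {
      std::vector<std::pair<std::string, std::string>> kvs;
      kvs.reserve(static_cast<size_t>(n));
      for (int i = 0; i < n; ++i) {
        kvs.emplace_back("key" + std::to_string(i),
                         "value" + std::to_string(i));
      }
      return kvs;
    }
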
- ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500))); - ASSERT_OK(Put("key" + ToString(i), "")); - ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500))); + ASSERT_OK(Put("a" + std::to_string(i), rnd.RandomString(500))); + ASSERT_OK(Put("key" + std::to_string(i), "")); + ASSERT_OK(Put("z" + std::to_string(i), rnd.RandomString(500))); ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); } ASSERT_EQ(NumTableFilesAtLevel(0), 1); for (int i = 0; i < 3; i++) { - ASSERT_EQ("", Get("key" + ToString(i))); + ASSERT_EQ("", Get("key" + std::to_string(i))); } for (int i = 0; i < 3; i++) { // Each file contains a different key which will be dropped later. - ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500))); - ASSERT_OK(Delete("key" + ToString(i))); - ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500))); + ASSERT_OK(Put("a" + std::to_string(i), rnd.RandomString(500))); + ASSERT_OK(Delete("key" + std::to_string(i))); + ASSERT_OK(Put("z" + std::to_string(i), rnd.RandomString(500))); ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); } ASSERT_EQ(NumTableFilesAtLevel(0), 2); for (int i = 0; i < 3; i++) { - ASSERT_EQ("NOT_FOUND", Get("key" + ToString(i))); + ASSERT_EQ("NOT_FOUND", Get("key" + std::to_string(i))); } } @@ -3759,7 +3759,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3791,7 +3791,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3807,7 +3807,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { // Create 1 more file to trigger TTL compaction. The old files are dropped. for (int i = 0; i < 1; i++) { for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); } @@ -3833,7 +3833,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 3; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3848,7 +3848,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 5; i++) { for (int j = 0; j < 140; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3871,7 +3871,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 10; i++) { // Generate and flush a file about 10KB. for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3890,7 +3890,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { // Create 10 more files. The old 5 files are dropped as their ttl expired. 
for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3915,7 +3915,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 60; i++) { // Generate and flush a file about 20KB. for (int j = 0; j < 20; j++) { - ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980))); + ASSERT_OK(Put(std::to_string(i * 20 + j), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -3926,7 +3926,8 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) { for (int i = 0; i < 60; i++) { // Generate and flush a file about 20KB. for (int j = 0; j < 20; j++) { - ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980))); + ASSERT_OK( + Put(std::to_string(i * 20 + j + 2000), rnd.RandomString(980))); } ASSERT_OK(Flush()); ASSERT_OK(dbfull()->TEST_WaitForCompact()); @@ -4207,7 +4208,7 @@ TEST_F(DBTest, ConcurrentFlushWAL) { std::vector threads; threads.emplace_back([&] { for (size_t i = 0; i < cnt; i++) { - auto istr = ToString(i); + auto istr = std::to_string(i); ASSERT_OK(db_->Put(wopt, db_->DefaultColumnFamily(), "a" + istr, "b" + istr)); } @@ -4215,7 +4216,7 @@ TEST_F(DBTest, ConcurrentFlushWAL) { if (two_write_queues) { threads.emplace_back([&] { for (size_t i = cnt; i < 2 * cnt; i++) { - auto istr = ToString(i); + auto istr = std::to_string(i); WriteBatch batch; ASSERT_OK(batch.Put("a" + istr, "b" + istr)); ASSERT_OK( @@ -4236,7 +4237,7 @@ TEST_F(DBTest, ConcurrentFlushWAL) { Reopen(options); for (size_t i = 0; i < cnt; i++) { PinnableSlice pval; - auto istr = ToString(i); + auto istr = std::to_string(i); ASSERT_OK( db_->Get(ropt, db_->DefaultColumnFamily(), "a" + istr, &pval)); ASSERT_TRUE(pval == ("b" + istr)); @@ -4259,7 +4260,7 @@ TEST_F(DBTest, ManualFlushWalAndWriteRace) { port::Thread writeThread([&]() { for (int i = 0; i < 100; i++) { - auto istr = ToString(i); + auto istr = std::to_string(i); ASSERT_OK(dbfull()->Put(wopts, "key_" + istr, "value_" + istr)); } }); @@ -4607,7 +4608,7 @@ TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) { // The Put Phase. for (int file = 0; file < kNumL0Files; ++file) { for (int key = 0; key < kEntriesPerBuffer; ++key) { - ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer), + ASSERT_OK(Put(std::to_string(key + file * kEntriesPerBuffer), rnd.RandomString(kTestValueSize))); } ASSERT_OK(Flush()); @@ -4758,7 +4759,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) { int operation_count[ThreadStatus::NUM_OP_TYPES] = {0}; for (int file = 0; file < 16 * kNumL0Files; ++file) { for (int k = 0; k < kEntriesPerBuffer; ++k) { - ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize))); + ASSERT_OK(Put(std::to_string(key++), rnd.RandomString(kTestValueSize))); } ASSERT_OK(env_->GetThreadList(&thread_list)); @@ -4845,7 +4846,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) { int operation_count[ThreadStatus::NUM_OP_TYPES] = {0}; for (int file = 0; file < 16 * kNumL0Files; ++file) { for (int k = 0; k < kEntriesPerBuffer; ++k) { - ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize))); + ASSERT_OK(Put(std::to_string(key++), rnd.RandomString(kTestValueSize))); } ASSERT_OK(env_->GetThreadList(&thread_list)); @@ -5156,8 +5157,9 @@ TEST_F(DBTest, DynamicCompactionOptions) { // Writing to 64KB L0 files should trigger a compaction. 
Since these // 2 L0 files have the same key range, compaction merge them and should // result in 2 32KB L1 files. - ASSERT_OK(dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"}, - {"target_file_size_base", ToString(k32KB)}})); + ASSERT_OK( + dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"}, + {"target_file_size_base", std::to_string(k32KB)}})); gen_l0_kb(0, 64, 1); ASSERT_EQ("1,1", FilesPerLevel()); @@ -5176,8 +5178,8 @@ TEST_F(DBTest, DynamicCompactionOptions) { // Increase level base size to 256KB and write enough data that will // fill L1 and L2. L1 size should be around 256KB while L2 size should be // around 256KB x 4. - ASSERT_OK( - dbfull()->SetOptions({{"max_bytes_for_level_base", ToString(k1MB)}})); + ASSERT_OK(dbfull()->SetOptions( + {{"max_bytes_for_level_base", std::to_string(k1MB)}})); // writing 96 x 64KB => 6 * 1024KB // (L1 + L2) = (1 + 4) * 1024KB @@ -5196,9 +5198,9 @@ TEST_F(DBTest, DynamicCompactionOptions) { // max_bytes_for_level_base. Now, reduce both mulitplier and level base, // After filling enough data that can fit in L1 - L3, we should see L1 size // reduces to 128KB from 256KB which was asserted previously. Same for L2. - ASSERT_OK( - dbfull()->SetOptions({{"max_bytes_for_level_multiplier", "2"}, - {"max_bytes_for_level_base", ToString(k128KB)}})); + ASSERT_OK(dbfull()->SetOptions( + {{"max_bytes_for_level_multiplier", "2"}, + {"max_bytes_for_level_base", std::to_string(k128KB)}})); // writing 20 x 64KB = 10 x 128KB // (L1 + L2 + L3) = (1 + 2 + 4) * 128KB @@ -5854,7 +5856,7 @@ TEST_P(DBTestWithParam, FilterCompactionTimeTest) { // put some data for (int table = 0; table < 4; ++table) { for (int i = 0; i < 10 + table; ++i) { - ASSERT_OK(Put(ToString(table * 100 + i), "val")); + ASSERT_OK(Put(std::to_string(table * 100 + i), "val")); ++n; } ASSERT_OK(Flush()); @@ -6238,7 +6240,7 @@ TEST_F(DBTest, LargeBatchWithColumnFamilies) { (write_size / 1024 / 1024), pass); for (;;) { std::string data(3000, j++ % 127 + 20); - data += ToString(j); + data += std::to_string(j); ASSERT_OK(batch.Put(handles_[0], Slice(data), Slice(data))); if (batch.GetDataSize() > write_size) { break; diff --git a/db/db_test2.cc b/db/db_test2.cc index 4ff979962..8cde53f9c 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -38,10 +38,10 @@ class DBTest2 : public DBTestBase { #ifndef ROCKSDB_LITE uint64_t GetSstSizeHelper(Temperature temperature) { std::string prop; - EXPECT_TRUE( - dbfull()->GetProperty(DB::Properties::kLiveSstFilesSizeAtTemperature + - ToString(static_cast(temperature)), - &prop)); + EXPECT_TRUE(dbfull()->GetProperty( + DB::Properties::kLiveSstFilesSizeAtTemperature + + std::to_string(static_cast(temperature)), + &prop)); return static_cast(std::atoi(prop.c_str())); } #endif // ROCKSDB_LITE @@ -1694,9 +1694,9 @@ class CompactionCompressionListener : public EventListener { int bottommost_level = 0; for (int level = 0; level < db->NumberLevels(); level++) { std::string files_at_level; - ASSERT_TRUE(db->GetProperty( - "rocksdb.num-files-at-level" + ROCKSDB_NAMESPACE::ToString(level), - &files_at_level)); + ASSERT_TRUE( + db->GetProperty("rocksdb.num-files-at-level" + std::to_string(level), + &files_at_level)); if (files_at_level != "0") { bottommost_level = level; } @@ -2492,14 +2492,14 @@ TEST_F(DBTest2, TestPerfContextIterCpuTime) { const size_t kNumEntries = 10; for (size_t i = 0; i < kNumEntries; ++i) { - ASSERT_OK(Put("k" + ToString(i), "v" + ToString(i))); + ASSERT_OK(Put("k" + std::to_string(i), "v" + std::to_string(i))); } ASSERT_OK(Flush()); 
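Several helpers in this patch build per-level property names such as "rocksdb.num-files-at-level<N>" by appending std::to_string(level) to the prefix. A standalone version of that lookup is sketched below, assuming an already-open DB handle; the helper name and the -1 fallback are illustrative:

    #include <cstdlib>
    #include <string>
    #include "rocksdb/db.h"

    // Sketch: per-level properties are addressed by appending the level
    // number, formatted with std::to_string(), to the property prefix.
    int NumFilesAtLevel(rocksdb::DB* db, int level) {
      std::string value;
      if (!db->GetProperty(
              "rocksdb.num-files-at-level" + std::to_string(level), &value)) {
        return -1;  // property unavailable
      }
      return std::atoi(value.c_str());
    }
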
for (size_t i = 0; i < kNumEntries; ++i) { - ASSERT_EQ("v" + ToString(i), Get("k" + ToString(i))); + ASSERT_EQ("v" + std::to_string(i), Get("k" + std::to_string(i))); } - std::string last_key = "k" + ToString(kNumEntries - 1); - std::string last_value = "v" + ToString(kNumEntries - 1); + std::string last_key = "k" + std::to_string(kNumEntries - 1); + std::string last_value = "v" + std::to_string(kNumEntries - 1); env_->now_cpu_count_.store(0); env_->SetMockSleep(); @@ -5553,7 +5553,7 @@ TEST_F(DBTest2, MultiDBParallelOpenTest) { Options options = CurrentOptions(); std::vector dbnames; for (int i = 0; i < kNumDbs; ++i) { - dbnames.emplace_back(test::PerThreadDBPath(env_, "db" + ToString(i))); + dbnames.emplace_back(test::PerThreadDBPath(env_, "db" + std::to_string(i))); ASSERT_OK(DestroyDB(dbnames.back(), options)); } diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 2fa5c0997..9529b2489 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -1086,12 +1086,12 @@ int DBTestBase::NumTableFilesAtLevel(int level, int cf) { std::string property; if (cf == 0) { // default cfd - EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level), - &property)); + EXPECT_TRUE(db_->GetProperty( + "rocksdb.num-files-at-level" + std::to_string(level), &property)); } else { - EXPECT_TRUE(db_->GetProperty(handles_[cf], - "rocksdb.num-files-at-level" + ToString(level), - &property)); + EXPECT_TRUE(db_->GetProperty( + handles_[cf], "rocksdb.num-files-at-level" + std::to_string(level), + &property)); } return atoi(property.c_str()); } @@ -1101,10 +1101,12 @@ double DBTestBase::CompressionRatioAtLevel(int level, int cf) { if (cf == 0) { // default cfd EXPECT_TRUE(db_->GetProperty( - "rocksdb.compression-ratio-at-level" + ToString(level), &property)); + "rocksdb.compression-ratio-at-level" + std::to_string(level), + &property)); } else { EXPECT_TRUE(db_->GetProperty( - handles_[cf], "rocksdb.compression-ratio-at-level" + ToString(level), + handles_[cf], + "rocksdb.compression-ratio-at-level" + std::to_string(level), &property)); } return std::stod(property); diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc index 95e3e6609..9b63e6e16 100644 --- a/db/db_universal_compaction_test.cc +++ b/db/db_universal_compaction_test.cc @@ -549,7 +549,7 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) { ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal); Random rnd(301); for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) { - ASSERT_OK(Put(1, ToString(key), rnd.RandomString(kTestValueSize))); + ASSERT_OK(Put(1, std::to_string(key), rnd.RandomString(kTestValueSize))); } ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1])); ASSERT_OK(dbfull()->TEST_WaitForCompact()); diff --git a/db/db_wal_test.cc b/db/db_wal_test.cc index 5483fcad7..10668b9f9 100644 --- a/db/db_wal_test.cc +++ b/db/db_wal_test.cc @@ -1288,7 +1288,7 @@ class RecoveryTestHelper { WriteBatch batch; for (int i = 0; i < kKeysPerWALFile; i++) { - std::string key = "key" + ToString((*count)++); + std::string key = "key" + std::to_string((*count)++); std::string value = test->DummyString(kValueSize); ASSERT_NE(current_log_writer.get(), nullptr); uint64_t seq = versions->LastSequence() + 1; @@ -1319,7 +1319,7 @@ class RecoveryTestHelper { static size_t GetData(DBWALTestBase* test) { size_t count = 0; for (size_t i = 0; i < kWALFilesCount * kKeysPerWALFile; i++) { - if (test->Get("key" + ToString(i)) != "NOT_FOUND") { + if (test->Get("key" + std::to_string(i)) != 
"NOT_FOUND") { ++count; } } @@ -1616,7 +1616,7 @@ TEST_P(DBWALTestWithParams, kPointInTimeRecovery) { if (!trunc || corrupt_offset != 0) { bool expect_data = true; for (size_t k = 0; k < maxkeys; ++k) { - bool found = Get("key" + ToString(k)) != "NOT_FOUND"; + bool found = Get("key" + std::to_string(k)) != "NOT_FOUND"; if (expect_data && !found) { expect_data = false; } @@ -1752,7 +1752,7 @@ TEST_F(DBWALTest, RecoverWithoutFlush) { size_t count = RecoveryTestHelper::FillData(this, &options); auto validateData = [this, count]() { for (size_t i = 0; i < count; i++) { - ASSERT_NE(Get("key" + ToString(i)), "NOT_FOUND"); + ASSERT_NE(Get("key" + std::to_string(i)), "NOT_FOUND"); } }; Reopen(options); @@ -1891,7 +1891,7 @@ TEST_P(DBWALTestWithParamsVaryingRecoveryMode, ASSERT_OK(TryReopen(options)); // Append some more data. for (int k = 0; k < kAppendKeys; k++) { - std::string key = "extra_key" + ToString(k); + std::string key = "extra_key" + std::to_string(k); std::string value = DummyString(RecoveryTestHelper::kValueSize); ASSERT_OK(Put(key, value)); } @@ -1925,7 +1925,7 @@ TEST_F(DBWALTest, RestoreTotalLogSizeAfterRecoverWithoutFlush) { std::string value_300k(300 * kKB, 'v'); ASSERT_OK(Put(0, "foo", "v1")); for (int i = 0; i < 9; i++) { - ASSERT_OK(Put(1, "key" + ToString(i), value_100k)); + ASSERT_OK(Put(1, "key" + std::to_string(i), value_100k)); } // Get log files before reopen. VectorLogPtr log_files_before; diff --git a/db/db_with_timestamp_basic_test.cc b/db/db_with_timestamp_basic_test.cc index 0c14e4333..cd7465b59 100644 --- a/db/db_with_timestamp_basic_test.cc +++ b/db/db_with_timestamp_basic_test.cc @@ -1492,8 +1492,8 @@ TEST_F(DBBasicTestWithTimestamp, MultiGetRangeFiltering) { // random data for (int i = 0; i < 3; i++) { - auto key = ToString(i * 10); - auto value = ToString(i * 10); + auto key = std::to_string(i * 10); + auto value = std::to_string(i * 10); Slice key_slice = key; Slice value_slice = value; ASSERT_OK(db_->Put(write_opts, key_slice, ts, value_slice)); @@ -1824,8 +1824,8 @@ class DataVisibilityTest : public DBBasicTestWithTimestampBase { DataVisibilityTest() : DBBasicTestWithTimestampBase("data_visibility_test") { // Initialize test data for (int i = 0; i < kTestDataSize; i++) { - test_data_[i].key = "key" + ToString(i); - test_data_[i].value = "value" + ToString(i); + test_data_[i].key = "key" + std::to_string(i); + test_data_[i].value = "value" + std::to_string(i); test_data_[i].timestamp = Timestamp(i, 0); test_data_[i].ts = i; test_data_[i].seq_num = kMaxSequenceNumber; diff --git a/db/db_write_test.cc b/db/db_write_test.cc index c2dfd9dcd..aae97ef0c 100644 --- a/db/db_write_test.cc +++ b/db/db_write_test.cc @@ -289,7 +289,7 @@ TEST_P(DBWriteTest, IOErrorOnWALWritePropagateToWriteThreadFollower) { threads.push_back(port::Thread( [&](int index) { // All threads should fail. 
- auto res = Put("key" + ToString(index), "value"); + auto res = Put("key" + std::to_string(index), "value"); if (options.manual_wal_flush) { ASSERT_TRUE(res.ok()); // we should see fs error when we do the flush @@ -322,13 +322,13 @@ TEST_P(DBWriteTest, ManualWalFlushInEffect) { Options options = GetOptions(); Reopen(options); // try the 1st WAL created during open - ASSERT_TRUE(Put("key" + ToString(0), "value").ok()); + ASSERT_TRUE(Put("key" + std::to_string(0), "value").ok()); ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty()); ASSERT_TRUE(dbfull()->FlushWAL(false).ok()); ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty()); // try the 2nd wal created during SwitchWAL ASSERT_OK(dbfull()->TEST_SwitchWAL()); - ASSERT_TRUE(Put("key" + ToString(0), "value").ok()); + ASSERT_TRUE(Put("key" + std::to_string(0), "value").ok()); ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty()); ASSERT_TRUE(dbfull()->FlushWAL(false).ok()); ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty()); @@ -344,7 +344,7 @@ TEST_P(DBWriteTest, IOErrorOnWALWriteTriggersReadOnlyMode) { // Forcibly fail WAL write for the first Put only. Subsequent Puts should // fail due to read-only mode mock_env->SetFilesystemActive(i != 0); - auto res = Put("key" + ToString(i), "value"); + auto res = Put("key" + std::to_string(i), "value"); // TSAN reports a false alarm for lock-order-inversion but Open and // FlushWAL are not run concurrently. Disabling this until TSAN is // fixed. @@ -398,14 +398,14 @@ TEST_P(DBWriteTest, LockWalInEffect) { Options options = GetOptions(); Reopen(options); // try the 1st WAL created during open - ASSERT_OK(Put("key" + ToString(0), "value")); + ASSERT_OK(Put("key" + std::to_string(0), "value")); ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty()); ASSERT_OK(dbfull()->LockWAL()); ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty(false)); ASSERT_OK(dbfull()->UnlockWAL()); // try the 2nd wal created during SwitchWAL ASSERT_OK(dbfull()->TEST_SwitchWAL()); - ASSERT_OK(Put("key" + ToString(0), "value")); + ASSERT_OK(Put("key" + std::to_string(0), "value")); ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty()); ASSERT_OK(dbfull()->LockWAL()); ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty(false)); diff --git a/db/deletefile_test.cc b/db/deletefile_test.cc index b4cf0cbb7..18f2577e9 100644 --- a/db/deletefile_test.cc +++ b/db/deletefile_test.cc @@ -56,7 +56,7 @@ class DeleteFileTest : public DBTestBase { options.sync = false; ReadOptions roptions; for (int i = startkey; i < (numkeys + startkey) ; i++) { - std::string temp = ToString(i); + std::string temp = std::to_string(i); Slice key(temp); Slice value(temp); ASSERT_OK(db_->Put(options, key, value)); diff --git a/db/error_handler_fs_test.cc b/db/error_handler_fs_test.cc index 547d08732..41f93bc64 100644 --- a/db/error_handler_fs_test.cc +++ b/db/error_handler_fs_test.cc @@ -1583,11 +1583,11 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) { std::string prop; ASSERT_EQ(listener[i]->WaitForRecovery(5000000), true); ASSERT_OK(static_cast(db[i])->TEST_WaitForCompact(true)); - EXPECT_TRUE( - db[i]->GetProperty("rocksdb.num-files-at-level" + ToString(0), &prop)); + EXPECT_TRUE(db[i]->GetProperty( + "rocksdb.num-files-at-level" + std::to_string(0), &prop)); EXPECT_EQ(atoi(prop.c_str()), 0); - EXPECT_TRUE( - db[i]->GetProperty("rocksdb.num-files-at-level" + ToString(1), &prop)); + EXPECT_TRUE(db[i]->GetProperty( + "rocksdb.num-files-at-level" + std::to_string(1), &prop)); 
EXPECT_EQ(atoi(prop.c_str()), 1); } @@ -1720,11 +1720,11 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) { if (i == 1) { ASSERT_OK(static_cast(db[i])->TEST_WaitForCompact(true)); } - EXPECT_TRUE( - db[i]->GetProperty("rocksdb.num-files-at-level" + ToString(0), &prop)); + EXPECT_TRUE(db[i]->GetProperty( + "rocksdb.num-files-at-level" + std::to_string(0), &prop)); EXPECT_EQ(atoi(prop.c_str()), 0); - EXPECT_TRUE( - db[i]->GetProperty("rocksdb.num-files-at-level" + ToString(1), &prop)); + EXPECT_TRUE(db[i]->GetProperty( + "rocksdb.num-files-at-level" + std::to_string(1), &prop)); EXPECT_EQ(atoi(prop.c_str()), 1); } diff --git a/db/external_sst_file_basic_test.cc b/db/external_sst_file_basic_test.cc index 8aa4b6cf5..1ef3fe784 100644 --- a/db/external_sst_file_basic_test.cc +++ b/db/external_sst_file_basic_test.cc @@ -91,7 +91,7 @@ class ExternalSSTFileBasicTest bool write_global_seqno, bool verify_checksums_before_ingest, std::map* true_data) { assert(value_types.size() == 1 || keys.size() == value_types.size()); - std::string file_path = sst_files_dir_ + ToString(file_id); + std::string file_path = sst_files_dir_ + std::to_string(file_id); SstFileWriter sst_file_writer(EnvOptions(), options); Status s = sst_file_writer.Open(file_path); @@ -123,7 +123,7 @@ class ExternalSSTFileBasicTest } for (size_t i = 0; i < keys.size(); i++) { std::string key = Key(keys[i]); - std::string value = Key(keys[i]) + ToString(file_id); + std::string value = Key(keys[i]) + std::to_string(file_id); ValueType value_type = (value_types.size() == 1 ? value_types[0] : value_types[i]); switch (value_type) { @@ -190,10 +190,10 @@ class ExternalSSTFileBasicTest #ifndef ROCKSDB_LITE uint64_t GetSstSizeHelper(Temperature temperature) { std::string prop; - EXPECT_TRUE( - dbfull()->GetProperty(DB::Properties::kLiveSstFilesSizeAtTemperature + - ToString(static_cast(temperature)), - &prop)); + EXPECT_TRUE(dbfull()->GetProperty( + DB::Properties::kLiveSstFilesSizeAtTemperature + + std::to_string(static_cast(temperature)), + &prop)); return static_cast(std::atoi(prop.c_str())); } #endif // ROCKSDB_LITE @@ -1184,7 +1184,7 @@ TEST_F(ExternalSSTFileBasicTest, SyncFailure) { std::unique_ptr sst_file_writer( new SstFileWriter(EnvOptions(), sst_file_writer_options)); std::string file_name = - sst_files_dir_ + "sync_failure_test_" + ToString(i) + ".sst"; + sst_files_dir_ + "sync_failure_test_" + std::to_string(i) + ".sst"; ASSERT_OK(sst_file_writer->Open(file_name)); ASSERT_OK(sst_file_writer->Put("bar", "v2")); ASSERT_OK(sst_file_writer->Finish()); @@ -1514,13 +1514,13 @@ TEST_P(ExternalSSTFileBasicTest, IngestFileWithFirstByteTampered) { EnvOptions env_options; do { Options options = CurrentOptions(); - std::string file_path = sst_files_dir_ + ToString(file_id++); + std::string file_path = sst_files_dir_ + std::to_string(file_id++); SstFileWriter sst_file_writer(env_options, options); Status s = sst_file_writer.Open(file_path); ASSERT_OK(s); for (int i = 0; i != 100; ++i) { std::string key = Key(i); - std::string value = Key(i) + ToString(0); + std::string value = Key(i) + std::to_string(0); ASSERT_OK(sst_file_writer.Put(key, value)); } ASSERT_OK(sst_file_writer.Finish()); @@ -1585,14 +1585,14 @@ TEST_P(ExternalSSTFileBasicTest, IngestExternalFileWithCorruptedPropsBlock) { int file_id = 0; Random64 rand(time(nullptr)); do { - std::string file_path = sst_files_dir_ + ToString(file_id++); + std::string file_path = sst_files_dir_ + std::to_string(file_id++); Options options = CurrentOptions(); SstFileWriter 
sst_file_writer(EnvOptions(), options); Status s = sst_file_writer.Open(file_path); ASSERT_OK(s); for (int i = 0; i != 100; ++i) { std::string key = Key(i); - std::string value = Key(i) + ToString(0); + std::string value = Key(i) + std::to_string(0); ASSERT_OK(sst_file_writer.Put(key, value)); } ASSERT_OK(sst_file_writer.Finish()); @@ -1799,7 +1799,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestWithTemperature) { TEST_F(ExternalSSTFileBasicTest, FailIfNotBottommostLevel) { Options options = GetDefaultOptions(); - std::string file_path = sst_files_dir_ + ToString(1); + std::string file_path = sst_files_dir_ + std::to_string(1); SstFileWriter sfw(EnvOptions(), options); ASSERT_OK(sfw.Open(file_path)); diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc index 0341bdcc3..559f9957d 100644 --- a/db/external_sst_file_test.cc +++ b/db/external_sst_file_test.cc @@ -118,7 +118,7 @@ class ExternalSSTFileTest }); data.resize(uniq_iter - data.begin()); } - std::string file_path = sst_files_dir_ + ToString(file_id); + std::string file_path = sst_files_dir_ + std::to_string(file_id); SstFileWriter sst_file_writer(EnvOptions(), options, cfh); Status s = sst_file_writer.Open(file_path); if (!s.ok()) { @@ -172,7 +172,7 @@ class ExternalSSTFileTest }); data.resize(uniq_iter - data.begin()); } - std::string file_path = sst_files_dir_ + ToString(file_id); + std::string file_path = sst_files_dir_ + std::to_string(file_id); SstFileWriter sst_file_writer(EnvOptions(), options, cfh); Status s = sst_file_writer.Open(file_path); @@ -270,7 +270,7 @@ class ExternalSSTFileTest ColumnFamilyHandle* cfh = nullptr) { std::vector> file_data; for (auto& k : keys) { - file_data.emplace_back(Key(k), Key(k) + ToString(file_id)); + file_data.emplace_back(Key(k), Key(k) + std::to_string(file_id)); } return GenerateAndAddExternalFile(options, file_data, file_id, allow_global_seqno, write_global_seqno, @@ -966,7 +966,7 @@ TEST_F(ExternalSSTFileTest, MultiThreaded) { // Generate file names std::vector file_names; for (int i = 0; i < num_files; i++) { - std::string file_name = "file_" + ToString(i) + ".sst"; + std::string file_name = "file_" + std::to_string(i) + ".sst"; file_names.push_back(sst_files_dir_ + file_name); } @@ -1116,7 +1116,7 @@ TEST_F(ExternalSSTFileTest, OverlappingRanges) { int range_end = key_ranges[i].second; Status s; - std::string range_val = "range_" + ToString(i); + std::string range_val = "range_" + std::to_string(i); // For 20% of ranges we use DB::Put, for 80% we use DB::AddFile if (i && i % 5 == 0) { @@ -1456,7 +1456,7 @@ TEST_F(ExternalSSTFileTest, CompactDuringAddFileRandom) { ASSERT_EQ(Get(Key(range_start)), Key(range_start)) << rid; ASSERT_EQ(Get(Key(range_end)), Key(range_end)) << rid; for (int k = range_start + 1; k < range_end; k++) { - std::string v = Key(k) + ToString(rid); + std::string v = Key(k) + std::to_string(rid); ASSERT_EQ(Get(Key(k)), v) << rid; } } diff --git a/db/flush_job.cc b/db/flush_job.cc index 3ccce2af1..66e198f74 100644 --- a/db/flush_job.cc +++ b/db/flush_job.cc @@ -931,9 +931,9 @@ Status FlushJob::WriteLevel0Table() { assert(!s.ok() || io_s.ok()); io_s.PermitUncheckedError(); if (num_input_entries != total_num_entries && s.ok()) { - std::string msg = "Expected " + ToString(total_num_entries) + + std::string msg = "Expected " + std::to_string(total_num_entries) + " entries in memtables, but read " + - ToString(num_input_entries); + std::to_string(num_input_entries); ROCKS_LOG_WARN(db_options_.info_log, "[%s] [JOB %d] Level-0 flush %s", cfd_->GetName().c_str(), 
job_context_->job_id, msg.c_str()); diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc index e276ba836..e326db9e0 100644 --- a/db/flush_job_test.cc +++ b/db/flush_job_test.cc @@ -191,7 +191,7 @@ TEST_F(FlushJobTest, NonEmpty) { // range-delete "9995" -> "9999" at seqno 10000 // blob references with seqnos 10001..10006 for (int i = 1; i < 10000; ++i) { - std::string key(ToString((i + 1000) % 10000)); + std::string key(std::to_string((i + 1000) % 10000)); std::string value("value" + key); ASSERT_OK(new_mem->Add(SequenceNumber(i), kTypeValue, key, value, nullptr /* kv_prot_info */)); @@ -214,7 +214,7 @@ TEST_F(FlushJobTest, NonEmpty) { constexpr std::array blob_file_numbers{{ kInvalidBlobFileNumber, 5, 103, 17, 102, 101}}; for (size_t i = 0; i < blob_file_numbers.size(); ++i) { - std::string key(ToString(i + 10001)); + std::string key(std::to_string(i + 10001)); std::string blob_index; if (i == 0) { BlobIndex::EncodeInlinedTTL(&blob_index, /* expiration */ 1234567890ULL, @@ -265,7 +265,7 @@ TEST_F(FlushJobTest, NonEmpty) { db_options_.statistics->histogramData(FLUSH_TIME, &hist); ASSERT_GT(hist.average, 0.0); - ASSERT_EQ(ToString(0), file_meta.smallest.user_key().ToString()); + ASSERT_EQ(std::to_string(0), file_meta.smallest.user_key().ToString()); ASSERT_EQ("9999a", file_meta.largest.user_key().ToString()); ASSERT_EQ(1, file_meta.fd.smallest_seqno); ASSERT_EQ(10006, file_meta.fd.largest_seqno); @@ -291,7 +291,7 @@ TEST_F(FlushJobTest, FlushMemTablesSingleColumnFamily) { memtable_ids.push_back(mem->GetID()); for (size_t j = 0; j < num_keys_per_table; ++j) { - std::string key(ToString(j + i * num_keys_per_table)); + std::string key(std::to_string(j + i * num_keys_per_table)); std::string value("value" + key); ASSERT_OK(mem->Add(SequenceNumber(j + i * num_keys_per_table), kTypeValue, key, value, nullptr /* kv_prot_info */)); @@ -326,7 +326,7 @@ TEST_F(FlushJobTest, FlushMemTablesSingleColumnFamily) { db_options_.statistics->histogramData(FLUSH_TIME, &hist); ASSERT_GT(hist.average, 0.0); - ASSERT_EQ(ToString(0), file_meta.smallest.user_key().ToString()); + ASSERT_EQ(std::to_string(0), file_meta.smallest.user_key().ToString()); ASSERT_EQ("99", file_meta.largest.user_key().ToString()); ASSERT_EQ(0, file_meta.fd.smallest_seqno); ASSERT_EQ(SequenceNumber(num_mems_to_flush * num_keys_per_table - 1), @@ -364,7 +364,7 @@ TEST_F(FlushJobTest, FlushMemtablesMultipleColumnFamilies) { mem->Ref(); for (size_t j = 0; j != num_keys_per_memtable; ++j) { - std::string key(ToString(j + i * num_keys_per_memtable)); + std::string key(std::to_string(j + i * num_keys_per_memtable)); std::string value("value" + key); ASSERT_OK(mem->Add(curr_seqno++, kTypeValue, key, value, nullptr /* kv_prot_info */)); @@ -439,7 +439,7 @@ TEST_F(FlushJobTest, FlushMemtablesMultipleColumnFamilies) { ASSERT_GT(hist.average, 0.0); k = 0; for (const auto& file_meta : file_metas) { - ASSERT_EQ(ToString(0), file_meta.smallest.user_key().ToString()); + ASSERT_EQ(std::to_string(0), file_meta.smallest.user_key().ToString()); ASSERT_EQ("999", file_meta.largest.user_key() .ToString()); // max key by bytewise comparator ASSERT_EQ(smallest_seqs[k], file_meta.fd.smallest_seqno); @@ -480,7 +480,7 @@ TEST_F(FlushJobTest, Snapshots) { SequenceNumber current_seqno = 0; auto inserted_keys = mock::MakeMockFile(); for (int i = 1; i < keys; ++i) { - std::string key(ToString(i)); + std::string key(std::to_string(i)); int insertions = rnd.Uniform(max_inserts_per_keys); for (int j = 0; j < insertions; ++j) { std::string 
value(rnd.HumanReadableString(10)); diff --git a/db/forward_iterator.cc b/db/forward_iterator.cc index a38e1e4bb..683a15164 100644 --- a/db/forward_iterator.cc +++ b/db/forward_iterator.cc @@ -604,7 +604,7 @@ bool ForwardIterator::PrepareValue() { Status ForwardIterator::GetProperty(std::string prop_name, std::string* prop) { assert(prop != nullptr); if (prop_name == "rocksdb.iterator.super-version-number") { - *prop = ToString(sv_->version_number); + *prop = std::to_string(sv_->version_number); return Status::OK(); } return Status::InvalidArgument(); diff --git a/db/internal_stats.cc b/db/internal_stats.cc index 929b6f82d..1bdccf7cc 100644 --- a/db/internal_stats.cc +++ b/db/internal_stats.cc @@ -704,20 +704,19 @@ void InternalStats::CacheEntryRoleStats::ToMap( auto& v = *values; v[BlockCacheEntryStatsMapKeys::CacheId()] = cache_id; v[BlockCacheEntryStatsMapKeys::CacheCapacityBytes()] = - ROCKSDB_NAMESPACE::ToString(cache_capacity); + std::to_string(cache_capacity); v[BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds()] = - ROCKSDB_NAMESPACE::ToString(GetLastDurationMicros() / 1000000.0); + std::to_string(GetLastDurationMicros() / 1000000.0); v[BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds()] = - ROCKSDB_NAMESPACE::ToString((clock->NowMicros() - last_end_time_micros_) / - 1000000U); + std::to_string((clock->NowMicros() - last_end_time_micros_) / 1000000U); for (size_t i = 0; i < kNumCacheEntryRoles; ++i) { auto role = static_cast(i); v[BlockCacheEntryStatsMapKeys::EntryCount(role)] = - ROCKSDB_NAMESPACE::ToString(entry_counts[i]); + std::to_string(entry_counts[i]); v[BlockCacheEntryStatsMapKeys::UsedBytes(role)] = - ROCKSDB_NAMESPACE::ToString(total_charges[i]); + std::to_string(total_charges[i]); v[BlockCacheEntryStatsMapKeys::UsedPercent(role)] = - ROCKSDB_NAMESPACE::ToString(100.0 * total_charges[i] / cache_capacity); + std::to_string(100.0 * total_charges[i] / cache_capacity); } } @@ -763,7 +762,7 @@ bool InternalStats::HandleLiveSstFilesSizeAtTemperature(std::string* value, } } - *value = ToString(size); + *value = std::to_string(size); return true; } @@ -919,7 +918,7 @@ bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value, if (!ok || level >= static_cast(number_levels_)) { return false; } - *value = ToString( + *value = std::to_string( vstorage->GetEstimatedCompressionRatioAtLevel(static_cast(level))); return true; } @@ -1006,7 +1005,7 @@ static std::map MapUint64ValuesToString( const std::map& from) { std::map to; for (const auto& e : from) { - to[e.first] = ToString(e.second); + to[e.first] = std::to_string(e.second); } return to; } @@ -1500,7 +1499,7 @@ void InternalStats::DumpCFMapStats( DumpCFMapStats(vstorage, &levels_stats, &compaction_stats_sum); for (auto const& level_ent : levels_stats) { auto level_str = - level_ent.first == -1 ? "Sum" : "L" + ToString(level_ent.first); + level_ent.first == -1 ? 
"Sum" : "L" + std::to_string(level_ent.first); for (auto const& stat_ent : level_ent.second) { auto stat_type = stat_ent.first; auto key_str = @@ -1651,7 +1650,8 @@ void InternalStats::DumpCFStatsNoFileHistogram(std::string* value) { DumpCFMapStats(vstorage, &levels_stats, &compaction_stats_sum); for (int l = 0; l < number_levels_; ++l) { if (levels_stats.find(l) != levels_stats.end()) { - PrintLevelStats(buf, sizeof(buf), "L" + ToString(l), levels_stats[l]); + PrintLevelStats(buf, sizeof(buf), "L" + std::to_string(l), + levels_stats[l]); value->append(buf); } } diff --git a/db/listener_test.cc b/db/listener_test.cc index 036762a44..2c0732750 100644 --- a/db/listener_test.cc +++ b/db/listener_test.cc @@ -436,10 +436,10 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) { std::vector> vec_handles; for (int d = 0; d < kNumDBs; ++d) { - ASSERT_OK(DestroyDB(dbname_ + ToString(d), options)); + ASSERT_OK(DestroyDB(dbname_ + std::to_string(d), options)); DB* db; std::vector handles; - ASSERT_OK(DB::Open(options, dbname_ + ToString(d), &db)); + ASSERT_OK(DB::Open(options, dbname_ + std::to_string(d), &db)); for (size_t c = 0; c < cf_names.size(); ++c) { ColumnFamilyHandle* handle; ASSERT_OK(db->CreateColumnFamily(cf_opts, cf_names[c], &handle)); @@ -527,7 +527,8 @@ TEST_F(EventListenerTest, DisableBGCompaction) { // keep writing until writes are forced to stop. for (int i = 0; static_cast(cf_meta.file_count) < kSlowdownTrigger * 10; ++i) { - ASSERT_OK(Put(1, ToString(i), std::string(10000, 'x'), WriteOptions())); + ASSERT_OK( + Put(1, std::to_string(i), std::string(10000, 'x'), WriteOptions())); FlushOptions fo; fo.allow_write_stall = true; ASSERT_OK(db_->Flush(fo, handles_[1])); diff --git a/db/memtable_list_test.cc b/db/memtable_list_test.cc index 29de3b662..06cfdb062 100644 --- a/db/memtable_list_test.cc +++ b/db/memtable_list_test.cc @@ -578,15 +578,15 @@ TEST_F(MemTableListTest, FlushPendingTest) { std::string value; MergeContext merge_context; - ASSERT_OK(mem->Add(++seq, kTypeValue, "key1", ToString(i), + ASSERT_OK(mem->Add(++seq, kTypeValue, "key1", std::to_string(i), nullptr /* kv_prot_info */)); - ASSERT_OK(mem->Add(++seq, kTypeValue, "keyN" + ToString(i), "valueN", + ASSERT_OK(mem->Add(++seq, kTypeValue, "keyN" + std::to_string(i), "valueN", nullptr /* kv_prot_info */)); - ASSERT_OK(mem->Add(++seq, kTypeValue, "keyX" + ToString(i), "value", + ASSERT_OK(mem->Add(++seq, kTypeValue, "keyX" + std::to_string(i), "value", nullptr /* kv_prot_info */)); - ASSERT_OK(mem->Add(++seq, kTypeValue, "keyM" + ToString(i), "valueM", + ASSERT_OK(mem->Add(++seq, kTypeValue, "keyM" + std::to_string(i), "valueM", nullptr /* kv_prot_info */)); - ASSERT_OK(mem->Add(++seq, kTypeDeletion, "keyX" + ToString(i), "", + ASSERT_OK(mem->Add(++seq, kTypeDeletion, "keyX" + std::to_string(i), "", nullptr /* kv_prot_info */)); tables.push_back(mem); @@ -860,15 +860,15 @@ TEST_F(MemTableListTest, AtomicFlusTest) { std::string value; - ASSERT_OK(mem->Add(++seq, kTypeValue, "key1", ToString(i), + ASSERT_OK(mem->Add(++seq, kTypeValue, "key1", std::to_string(i), nullptr /* kv_prot_info */)); - ASSERT_OK(mem->Add(++seq, kTypeValue, "keyN" + ToString(i), "valueN", + ASSERT_OK(mem->Add(++seq, kTypeValue, "keyN" + std::to_string(i), + "valueN", nullptr /* kv_prot_info */)); + ASSERT_OK(mem->Add(++seq, kTypeValue, "keyX" + std::to_string(i), "value", nullptr /* kv_prot_info */)); - ASSERT_OK(mem->Add(++seq, kTypeValue, "keyX" + ToString(i), "value", - nullptr /* kv_prot_info */)); - ASSERT_OK(mem->Add(++seq, kTypeValue, "keyM" + 
ToString(i), "valueM", - nullptr /* kv_prot_info */)); - ASSERT_OK(mem->Add(++seq, kTypeDeletion, "keyX" + ToString(i), "", + ASSERT_OK(mem->Add(++seq, kTypeValue, "keyM" + std::to_string(i), + "valueM", nullptr /* kv_prot_info */)); + ASSERT_OK(mem->Add(++seq, kTypeDeletion, "keyX" + std::to_string(i), "", nullptr /* kv_prot_info */)); elem.push_back(mem); diff --git a/db/obsolete_files_test.cc b/db/obsolete_files_test.cc index efd6a4316..8fc47e3f5 100644 --- a/db/obsolete_files_test.cc +++ b/db/obsolete_files_test.cc @@ -41,7 +41,7 @@ class ObsoleteFilesTest : public DBTestBase { WriteOptions options; options.sync = false; for (int i = startkey; i < (numkeys + startkey) ; i++) { - std::string temp = ToString(i); + std::string temp = std::to_string(i); Slice key(temp); Slice value(temp); ASSERT_OK(db_->Put(options, key, value)); diff --git a/db/perf_context_test.cc b/db/perf_context_test.cc index 908e684f7..3bb8dd53c 100644 --- a/db/perf_context_test.cc +++ b/db/perf_context_test.cc @@ -75,21 +75,21 @@ TEST_F(PerfContextTest, SeekIntoDeletion) { ReadOptions read_options; for (int i = 0; i < FLAGS_total_keys; ++i) { - std::string key = "k" + ToString(i); - std::string value = "v" + ToString(i); + std::string key = "k" + std::to_string(i); + std::string value = "v" + std::to_string(i); ASSERT_OK(db->Put(write_options, key, value)); } for (int i = 0; i < FLAGS_total_keys -1 ; ++i) { - std::string key = "k" + ToString(i); + std::string key = "k" + std::to_string(i); ASSERT_OK(db->Delete(write_options, key)); } HistogramImpl hist_get; HistogramImpl hist_get_time; for (int i = 0; i < FLAGS_total_keys - 1; ++i) { - std::string key = "k" + ToString(i); + std::string key = "k" + std::to_string(i); std::string value; get_perf_context()->Reset(); @@ -130,7 +130,7 @@ TEST_F(PerfContextTest, SeekIntoDeletion) { HistogramImpl hist_seek; for (int i = 0; i < FLAGS_total_keys; ++i) { std::unique_ptr iter(db->NewIterator(read_options)); - std::string key = "k" + ToString(i); + std::string key = "k" + std::to_string(i); get_perf_context()->Reset(); StopWatchNano timer(SystemClock::Default().get(), true); @@ -265,8 +265,8 @@ void ProfileQueries(bool enabled_time = false) { continue; } - std::string key = "k" + ToString(i); - std::string value = "v" + ToString(i); + std::string key = "k" + std::to_string(i); + std::string value = "v" + std::to_string(i); std::vector values; @@ -297,8 +297,8 @@ void ProfileQueries(bool enabled_time = false) { if (i == kFlushFlag) { continue; } - std::string key = "k" + ToString(i); - std::string expected_value = "v" + ToString(i); + std::string key = "k" + std::to_string(i); + std::string expected_value = "v" + std::to_string(i); std::string value; std::vector multiget_keys = {Slice(key)}; @@ -415,8 +415,8 @@ void ProfileQueries(bool enabled_time = false) { if (i == kFlushFlag) { continue; } - std::string key = "k" + ToString(i); - std::string expected_value = "v" + ToString(i); + std::string key = "k" + std::to_string(i); + std::string expected_value = "v" + std::to_string(i); std::string value; std::vector multiget_keys = {Slice(key)}; @@ -543,8 +543,8 @@ TEST_F(PerfContextTest, SeekKeyComparison) { SetPerfLevel(kEnableTime); StopWatchNano timer(SystemClock::Default().get()); for (const int i : keys) { - std::string key = "k" + ToString(i); - std::string value = "v" + ToString(i); + std::string key = "k" + std::to_string(i); + std::string value = "v" + std::to_string(i); get_perf_context()->Reset(); timer.Start(); @@ -565,8 +565,8 @@ TEST_F(PerfContextTest, 
SeekKeyComparison) { HistogramImpl hist_next; for (int i = 0; i < FLAGS_total_keys; ++i) { - std::string key = "k" + ToString(i); - std::string value = "v" + ToString(i); + std::string key = "k" + std::to_string(i); + std::string value = "v" + std::to_string(i); std::unique_ptr iter(db->NewIterator(read_options)); get_perf_context()->Reset(); @@ -841,7 +841,7 @@ TEST_F(PerfContextTest, CPUTimer) { std::string max_str = "0"; for (int i = 0; i < FLAGS_total_keys; ++i) { - std::string i_str = ToString(i); + std::string i_str = std::to_string(i); std::string key = "k" + i_str; std::string value = "v" + i_str; max_str = max_str > i_str ? max_str : i_str; @@ -935,9 +935,9 @@ TEST_F(PerfContextTest, CPUTimer) { get_perf_context()->Reset(); auto count = get_perf_context()->iter_seek_cpu_nanos; for (int i = 0; i < FLAGS_total_keys; ++i) { - iter->Seek("k" + ToString(i)); + iter->Seek("k" + std::to_string(i)); ASSERT_TRUE(iter->Valid()); - ASSERT_EQ("v" + ToString(i), iter->value().ToString()); + ASSERT_EQ("v" + std::to_string(i), iter->value().ToString()); auto next_count = get_perf_context()->iter_seek_cpu_nanos; ASSERT_GT(next_count, count); count = next_count; diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc index 9ac55e0fe..b18864595 100644 --- a/db/plain_table_db_test.cc +++ b/db/plain_table_db_test.cc @@ -220,8 +220,8 @@ class PlainTableDBTest : public testing::Test, int NumTableFilesAtLevel(int level) { std::string property; - EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level), - &property)); + EXPECT_TRUE(db_->GetProperty( + "rocksdb.num-files-at-level" + std::to_string(level), &property)); return atoi(property.c_str()); } @@ -889,7 +889,7 @@ TEST_P(PlainTableDBTest, IteratorLargeKeys) { }; for (size_t i = 0; i < 7; i++) { - ASSERT_OK(Put(key_list[i], ToString(i))); + ASSERT_OK(Put(key_list[i], std::to_string(i))); } ASSERT_OK(dbfull()->TEST_FlushMemTable()); @@ -900,7 +900,7 @@ TEST_P(PlainTableDBTest, IteratorLargeKeys) { for (size_t i = 0; i < 7; i++) { ASSERT_TRUE(iter->Valid()); ASSERT_EQ(key_list[i], iter->key().ToString()); - ASSERT_EQ(ToString(i), iter->value().ToString()); + ASSERT_EQ(std::to_string(i), iter->value().ToString()); iter->Next(); } @@ -937,7 +937,7 @@ TEST_P(PlainTableDBTest, IteratorLargeKeysWithPrefix) { MakeLongKeyWithPrefix(26, '6')}; for (size_t i = 0; i < 7; i++) { - ASSERT_OK(Put(key_list[i], ToString(i))); + ASSERT_OK(Put(key_list[i], std::to_string(i))); } ASSERT_OK(dbfull()->TEST_FlushMemTable()); @@ -948,7 +948,7 @@ TEST_P(PlainTableDBTest, IteratorLargeKeysWithPrefix) { for (size_t i = 0; i < 7; i++) { ASSERT_TRUE(iter->Valid()); ASSERT_EQ(key_list[i], iter->key().ToString()); - ASSERT_EQ(ToString(i), iter->value().ToString()); + ASSERT_EQ(std::to_string(i), iter->value().ToString()); iter->Next(); } diff --git a/db/prefix_test.cc b/db/prefix_test.cc index 37673eb8c..57845d9cc 100644 --- a/db/prefix_test.cc +++ b/db/prefix_test.cc @@ -628,7 +628,7 @@ TEST_F(PrefixTest, DynamicPrefixIterator) { TestKey test_key(prefix, FLAGS_items_per_prefix / 2); std::string s; Slice key = TestKeyToSlice(s, test_key); - std::string value = "v" + ToString(0); + std::string value = "v" + std::to_string(0); get_perf_context()->Reset(); StopWatchNano timer(SystemClock::Default().get(), true); diff --git a/db/repair.cc b/db/repair.cc index bef5ac72c..ac237001c 100644 --- a/db/repair.cc +++ b/db/repair.cc @@ -147,7 +147,7 @@ class Repairer { const auto* cf_opts = GetColumnFamilyOptions(cf_name); if (cf_opts == nullptr) { return 
Status::Corruption("Encountered unknown column family with name=" + - cf_name + ", id=" + ToString(cf_id)); + cf_name + ", id=" + std::to_string(cf_id)); } Options opts(db_options_, *cf_opts); MutableCFOptions mut_cf_opts(opts); diff --git a/db/repair_test.cc b/db/repair_test.cc index 2b9750052..eb0fa0446 100644 --- a/db/repair_test.cc +++ b/db/repair_test.cc @@ -289,7 +289,7 @@ TEST_F(RepairTest, RepairMultipleColumnFamilies) { CreateAndReopenWithCF({"pikachu1", "pikachu2"}, CurrentOptions()); for (int i = 0; i < kNumCfs; ++i) { for (int j = 0; j < kEntriesPerCf; ++j) { - ASSERT_OK(Put(i, "key" + ToString(j), "val" + ToString(j))); + ASSERT_OK(Put(i, "key" + std::to_string(j), "val" + std::to_string(j))); if (j == kEntriesPerCf - 1 && i == kNumCfs - 1) { // Leave one unflushed so we can verify WAL entries are properly // associated with column families. @@ -313,7 +313,7 @@ TEST_F(RepairTest, RepairMultipleColumnFamilies) { CurrentOptions()); for (int i = 0; i < kNumCfs; ++i) { for (int j = 0; j < kEntriesPerCf; ++j) { - ASSERT_EQ(Get(i, "key" + ToString(j)), "val" + ToString(j)); + ASSERT_EQ(Get(i, "key" + std::to_string(j)), "val" + std::to_string(j)); } } } @@ -334,7 +334,7 @@ TEST_F(RepairTest, RepairColumnFamilyOptions) { std::vector{opts, rev_opts}); for (int i = 0; i < kNumCfs; ++i) { for (int j = 0; j < kEntriesPerCf; ++j) { - ASSERT_OK(Put(i, "key" + ToString(j), "val" + ToString(j))); + ASSERT_OK(Put(i, "key" + std::to_string(j), "val" + std::to_string(j))); if (i == kNumCfs - 1 && j == kEntriesPerCf - 1) { // Leave one unflushed so we can verify RepairDB's flush logic continue; @@ -352,7 +352,7 @@ TEST_F(RepairTest, RepairColumnFamilyOptions) { std::vector{opts, rev_opts})); for (int i = 0; i < kNumCfs; ++i) { for (int j = 0; j < kEntriesPerCf; ++j) { - ASSERT_EQ(Get(i, "key" + ToString(j)), "val" + ToString(j)); + ASSERT_EQ(Get(i, "key" + std::to_string(j)), "val" + std::to_string(j)); } } @@ -377,7 +377,7 @@ TEST_F(RepairTest, RepairColumnFamilyOptions) { std::vector{opts, rev_opts})); for (int i = 0; i < kNumCfs; ++i) { for (int j = 0; j < kEntriesPerCf; ++j) { - ASSERT_EQ(Get(i, "key" + ToString(j)), "val" + ToString(j)); + ASSERT_EQ(Get(i, "key" + std::to_string(j)), "val" + std::to_string(j)); } } } diff --git a/db/version_builder_test.cc b/db/version_builder_test.cc index 82eb25684..6476a3150 100644 --- a/db/version_builder_test.cc +++ b/db/version_builder_test.cc @@ -1702,11 +1702,9 @@ TEST_F(VersionBuilderTest, EstimatedActiveKeys) { const uint32_t kDeletionsPerFile = 100; for (uint32_t i = 0; i < kNumFiles; ++i) { Add(static_cast(i / kFilesPerLevel), i + 1, - ToString((i + 100) * 1000).c_str(), - ToString((i + 100) * 1000 + 999).c_str(), - 100U, 0, 100, 100, - kEntriesPerFile, kDeletionsPerFile, - (i < kTotalSamples)); + std::to_string((i + 100) * 1000).c_str(), + std::to_string((i + 100) * 1000 + 999).c_str(), 100U, 0, 100, 100, + kEntriesPerFile, kDeletionsPerFile, (i < kTotalSamples)); } // minus 2X for the number of deletion entries because: // 1x for deletion entry does not count as a data entry. 
diff --git a/db/version_edit.cc b/db/version_edit.cc index c5627eff5..b9bf9d685 100644 --- a/db/version_edit.cc +++ b/db/version_edit.cc @@ -817,7 +817,7 @@ std::string VersionEdit::DebugString(bool hex_key) const { r.append(" temperature: "); // Maybe change to human readable format whenthe feature becomes // permanent - r.append(ToString(static_cast(f.temperature))); + r.append(std::to_string(static_cast(f.temperature))); } } @@ -928,7 +928,7 @@ std::string VersionEdit::DebugJSON(int edit_num, bool hex_key) const { jw << "FileChecksum" << Slice(f.file_checksum).ToString(true); jw << "FileChecksumFuncName" << f.file_checksum_func_name; if (f.temperature != Temperature::kUnknown) { - jw << "temperature" << ToString(static_cast(f.temperature)); + jw << "temperature" << std::to_string(static_cast(f.temperature)); } if (f.oldest_blob_file_number != kInvalidBlobFileNumber) { jw << "OldestBlobFile" << f.oldest_blob_file_number; diff --git a/db/version_set.cc b/db/version_set.cc index b0e7080bd..b21761c5b 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -3986,7 +3986,7 @@ std::string Version::DebugString(bool hex, bool print_stats) const { } if (print_stats) { r.append("("); - r.append(ToString( + r.append(std::to_string( files[i]->stats.num_reads_sampled.load(std::memory_order_relaxed))); r.append(")"); } diff --git a/db/wal_manager.cc b/db/wal_manager.cc index a5d59422c..83a3636fb 100644 --- a/db/wal_manager.cc +++ b/db/wal_manager.cc @@ -378,9 +378,8 @@ Status WalManager::ReadFirstRecord(const WalFileType type, *sequence = 0; if (type != kAliveLogFile && type != kArchivedLogFile) { ROCKS_LOG_ERROR(db_options_.info_log, "[WalManger] Unknown file type %s", - ToString(type).c_str()); - return Status::NotSupported( - "File Type Not Known " + ToString(type)); + std::to_string(type).c_str()); + return Status::NotSupported("File Type Not Known " + std::to_string(type)); } { MutexLock l(&read_first_record_cache_mutex_); diff --git a/db/wal_manager_test.cc b/db/wal_manager_test.cc index a579c4dad..d1e79ba09 100644 --- a/db/wal_manager_test.cc +++ b/db/wal_manager_test.cc @@ -94,7 +94,7 @@ class WalManagerTest : public testing::Test { for (int i = 1; i <= num_logs; ++i) { RollTheLog(true); for (int k = 0; k < entries_per_log; ++k) { - Put(ToString(k), std::string(1024, 'a')); + Put(std::to_string(k), std::string(1024, 'a')); } } } diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index 6e6ad5578..d8779435b 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -118,7 +118,7 @@ static std::string PrintContents(WriteBatch* b, break; } state.append("@"); - state.append(ToString(ikey.sequence)); + state.append(std::to_string(ikey.sequence)); } EXPECT_OK(iter->status()); } @@ -253,7 +253,7 @@ namespace { if (column_family_id == 0) { seen += "Put(" + key.ToString() + ", " + value.ToString() + ")"; } else { - seen += "PutCF(" + ToString(column_family_id) + ", " + + seen += "PutCF(" + std::to_string(column_family_id) + ", " + key.ToString() + ", " + value.ToString() + ")"; } return Status::OK(); @@ -262,7 +262,7 @@ namespace { if (column_family_id == 0) { seen += "Delete(" + key.ToString() + ")"; } else { - seen += "DeleteCF(" + ToString(column_family_id) + ", " + + seen += "DeleteCF(" + std::to_string(column_family_id) + ", " + key.ToString() + ")"; } return Status::OK(); @@ -272,7 +272,7 @@ namespace { if (column_family_id == 0) { seen += "SingleDelete(" + key.ToString() + ")"; } else { - seen += "SingleDeleteCF(" + ToString(column_family_id) + ", " + + seen += 
"SingleDeleteCF(" + std::to_string(column_family_id) + ", " + key.ToString() + ")"; } return Status::OK(); @@ -283,7 +283,7 @@ namespace { seen += "DeleteRange(" + begin_key.ToString() + ", " + end_key.ToString() + ")"; } else { - seen += "DeleteRangeCF(" + ToString(column_family_id) + ", " + + seen += "DeleteRangeCF(" + std::to_string(column_family_id) + ", " + begin_key.ToString() + ", " + end_key.ToString() + ")"; } return Status::OK(); @@ -293,7 +293,7 @@ namespace { if (column_family_id == 0) { seen += "Merge(" + key.ToString() + ", " + value.ToString() + ")"; } else { - seen += "MergeCF(" + ToString(column_family_id) + ", " + + seen += "MergeCF(" + std::to_string(column_family_id) + ", " + key.ToString() + ", " + value.ToString() + ")"; } return Status::OK(); diff --git a/db_stress_tool/db_stress_test_base.cc b/db_stress_tool/db_stress_test_base.cc index edc869002..3fd497901 100644 --- a/db_stress_tool/db_stress_test_base.cc +++ b/db_stress_tool/db_stress_test_base.cc @@ -185,69 +185,69 @@ bool StressTest::BuildOptionsTable() { std::unordered_map> options_tbl = { {"write_buffer_size", - {ToString(options_.write_buffer_size), - ToString(options_.write_buffer_size * 2), - ToString(options_.write_buffer_size * 4)}}, + {std::to_string(options_.write_buffer_size), + std::to_string(options_.write_buffer_size * 2), + std::to_string(options_.write_buffer_size * 4)}}, {"max_write_buffer_number", - {ToString(options_.max_write_buffer_number), - ToString(options_.max_write_buffer_number * 2), - ToString(options_.max_write_buffer_number * 4)}}, + {std::to_string(options_.max_write_buffer_number), + std::to_string(options_.max_write_buffer_number * 2), + std::to_string(options_.max_write_buffer_number * 4)}}, {"arena_block_size", { - ToString(options_.arena_block_size), - ToString(options_.write_buffer_size / 4), - ToString(options_.write_buffer_size / 8), + std::to_string(options_.arena_block_size), + std::to_string(options_.write_buffer_size / 4), + std::to_string(options_.write_buffer_size / 8), }}, - {"memtable_huge_page_size", {"0", ToString(2 * 1024 * 1024)}}, + {"memtable_huge_page_size", {"0", std::to_string(2 * 1024 * 1024)}}, {"max_successive_merges", {"0", "2", "4"}}, {"inplace_update_num_locks", {"100", "200", "300"}}, // TODO(ljin): enable test for this option // {"disable_auto_compactions", {"100", "200", "300"}}, {"level0_file_num_compaction_trigger", { - ToString(options_.level0_file_num_compaction_trigger), - ToString(options_.level0_file_num_compaction_trigger + 2), - ToString(options_.level0_file_num_compaction_trigger + 4), + std::to_string(options_.level0_file_num_compaction_trigger), + std::to_string(options_.level0_file_num_compaction_trigger + 2), + std::to_string(options_.level0_file_num_compaction_trigger + 4), }}, {"level0_slowdown_writes_trigger", { - ToString(options_.level0_slowdown_writes_trigger), - ToString(options_.level0_slowdown_writes_trigger + 2), - ToString(options_.level0_slowdown_writes_trigger + 4), + std::to_string(options_.level0_slowdown_writes_trigger), + std::to_string(options_.level0_slowdown_writes_trigger + 2), + std::to_string(options_.level0_slowdown_writes_trigger + 4), }}, {"level0_stop_writes_trigger", { - ToString(options_.level0_stop_writes_trigger), - ToString(options_.level0_stop_writes_trigger + 2), - ToString(options_.level0_stop_writes_trigger + 4), + std::to_string(options_.level0_stop_writes_trigger), + std::to_string(options_.level0_stop_writes_trigger + 2), + std::to_string(options_.level0_stop_writes_trigger + 4), }}, 
{"max_compaction_bytes", { - ToString(options_.target_file_size_base * 5), - ToString(options_.target_file_size_base * 15), - ToString(options_.target_file_size_base * 100), + std::to_string(options_.target_file_size_base * 5), + std::to_string(options_.target_file_size_base * 15), + std::to_string(options_.target_file_size_base * 100), }}, {"target_file_size_base", { - ToString(options_.target_file_size_base), - ToString(options_.target_file_size_base * 2), - ToString(options_.target_file_size_base * 4), + std::to_string(options_.target_file_size_base), + std::to_string(options_.target_file_size_base * 2), + std::to_string(options_.target_file_size_base * 4), }}, {"target_file_size_multiplier", { - ToString(options_.target_file_size_multiplier), + std::to_string(options_.target_file_size_multiplier), "1", "2", }}, {"max_bytes_for_level_base", { - ToString(options_.max_bytes_for_level_base / 2), - ToString(options_.max_bytes_for_level_base), - ToString(options_.max_bytes_for_level_base * 2), + std::to_string(options_.max_bytes_for_level_base / 2), + std::to_string(options_.max_bytes_for_level_base), + std::to_string(options_.max_bytes_for_level_base * 2), }}, {"max_bytes_for_level_multiplier", { - ToString(options_.max_bytes_for_level_multiplier), + std::to_string(options_.max_bytes_for_level_multiplier), "1", "2", }}, @@ -418,7 +418,7 @@ Status StressTest::AssertSame(DB* db, ColumnFamilyHandle* cf, if (snap_state.status != s) { return Status::Corruption( "The snapshot gave inconsistent results for key " + - ToString(Hash(snap_state.key.c_str(), snap_state.key.size(), 0)) + + std::to_string(Hash(snap_state.key.c_str(), snap_state.key.size(), 0)) + " in cf " + cf->GetName() + ": (" + snap_state.status.ToString() + ") vs. (" + s.ToString() + ")"); } @@ -1424,8 +1424,9 @@ void StressTest::TestCompactFiles(ThreadState* /* thread */, Status StressTest::TestBackupRestore( ThreadState* thread, const std::vector& rand_column_families, const std::vector& rand_keys) { - std::string backup_dir = FLAGS_db + "/.backup" + ToString(thread->tid); - std::string restore_dir = FLAGS_db + "/.restore" + ToString(thread->tid); + std::string backup_dir = FLAGS_db + "/.backup" + std::to_string(thread->tid); + std::string restore_dir = + FLAGS_db + "/.restore" + std::to_string(thread->tid); BackupEngineOptions backup_opts(backup_dir); // For debugging, get info_log from live options backup_opts.info_log = db_->GetDBOptions().info_log.get(); @@ -1717,7 +1718,7 @@ Status StressTest::TestCheckpoint(ThreadState* thread, const std::vector& rand_column_families, const std::vector& rand_keys) { std::string checkpoint_dir = - FLAGS_db + "/.checkpoint" + ToString(thread->tid); + FLAGS_db + "/.checkpoint" + std::to_string(thread->tid); Options tmp_opts(options_); tmp_opts.listeners.clear(); tmp_opts.env = db_stress_env; @@ -2204,7 +2205,7 @@ void StressTest::PrintEnv() const { (unsigned long)FLAGS_ops_per_thread); std::string ttl_state("unused"); if (FLAGS_ttl > 0) { - ttl_state = ToString(FLAGS_ttl); + ttl_state = std::to_string(FLAGS_ttl); } fprintf(stdout, "Time to live(sec) : %s\n", ttl_state.c_str()); fprintf(stdout, "Read percentage : %d%%\n", FLAGS_readpercent); @@ -2608,7 +2609,7 @@ void StressTest::Open(SharedState* shared) { cf_descriptors.emplace_back(name, ColumnFamilyOptions(options_)); } while (cf_descriptors.size() < (size_t)FLAGS_column_families) { - std::string name = ToString(new_column_family_name_.load()); + std::string name = std::to_string(new_column_family_name_.load()); new_column_family_name_++; 
cf_descriptors.emplace_back(name, ColumnFamilyOptions(options_)); column_family_names_.push_back(name); diff --git a/db_stress_tool/expected_state.cc b/db_stress_tool/expected_state.cc index 24474de81..f014b2e73 100644 --- a/db_stress_tool/expected_state.cc +++ b/db_stress_tool/expected_state.cc @@ -187,8 +187,8 @@ Status FileExpectedStateManager::Open() { // Check if crash happened after creating state file but before creating // trace file. if (saved_seqno_ != kMaxSequenceNumber) { - std::string saved_seqno_trace_path = - GetPathForFilename(ToString(saved_seqno_) + kTraceFilenameSuffix); + std::string saved_seqno_trace_path = GetPathForFilename( + std::to_string(saved_seqno_) + kTraceFilenameSuffix); Status exists_status = Env::Default()->FileExists(saved_seqno_trace_path); if (exists_status.ok()) { found_trace = true; @@ -205,7 +205,7 @@ Status FileExpectedStateManager::Open() { std::unique_ptr wfile; const EnvOptions soptions; std::string saved_seqno_trace_path = - GetPathForFilename(ToString(saved_seqno_) + kTraceFilenameSuffix); + GetPathForFilename(std::to_string(saved_seqno_) + kTraceFilenameSuffix); s = Env::Default()->NewWritableFile(saved_seqno_trace_path, &wfile, soptions); } @@ -257,14 +257,14 @@ Status FileExpectedStateManager::Open() { Status FileExpectedStateManager::SaveAtAndAfter(DB* db) { SequenceNumber seqno = db->GetLatestSequenceNumber(); - std::string state_filename = ToString(seqno) + kStateFilenameSuffix; + std::string state_filename = std::to_string(seqno) + kStateFilenameSuffix; std::string state_file_temp_path = GetTempPathForFilename(state_filename); std::string state_file_path = GetPathForFilename(state_filename); std::string latest_file_path = GetPathForFilename(kLatestBasename + kStateFilenameSuffix); - std::string trace_filename = ToString(seqno) + kTraceFilenameSuffix; + std::string trace_filename = std::to_string(seqno) + kTraceFilenameSuffix; std::string trace_file_path = GetPathForFilename(trace_filename); // Populate a tempfile and then rename it to atomically create ".state" @@ -311,13 +311,13 @@ Status FileExpectedStateManager::SaveAtAndAfter(DB* db) { // again, even if we crash. 
if (s.ok() && old_saved_seqno != kMaxSequenceNumber && old_saved_seqno != saved_seqno_) { - s = Env::Default()->DeleteFile( - GetPathForFilename(ToString(old_saved_seqno) + kStateFilenameSuffix)); + s = Env::Default()->DeleteFile(GetPathForFilename( + std::to_string(old_saved_seqno) + kStateFilenameSuffix)); } if (s.ok() && old_saved_seqno != kMaxSequenceNumber && old_saved_seqno != saved_seqno_) { - s = Env::Default()->DeleteFile( - GetPathForFilename(ToString(old_saved_seqno) + kTraceFilenameSuffix)); + s = Env::Default()->DeleteFile(GetPathForFilename( + std::to_string(old_saved_seqno) + kTraceFilenameSuffix)); } return s; } @@ -461,7 +461,8 @@ Status FileExpectedStateManager::Restore(DB* db) { return Status::Corruption("DB is older than any restorable expected state"); } - std::string state_filename = ToString(saved_seqno_) + kStateFilenameSuffix; + std::string state_filename = + std::to_string(saved_seqno_) + kStateFilenameSuffix; std::string state_file_path = GetPathForFilename(state_filename); std::string latest_file_temp_path = @@ -469,7 +470,8 @@ Status FileExpectedStateManager::Restore(DB* db) { std::string latest_file_path = GetPathForFilename(kLatestBasename + kStateFilenameSuffix); - std::string trace_filename = ToString(saved_seqno_) + kTraceFilenameSuffix; + std::string trace_filename = + std::to_string(saved_seqno_) + kTraceFilenameSuffix; std::string trace_file_path = GetPathForFilename(trace_filename); std::unique_ptr trace_reader; diff --git a/db_stress_tool/no_batched_ops_stress.cc b/db_stress_tool/no_batched_ops_stress.cc index 0a5001a4b..fe7c3a4b2 100644 --- a/db_stress_tool/no_batched_ops_stress.cc +++ b/db_stress_tool/no_batched_ops_stress.cc @@ -189,7 +189,8 @@ class NonBatchedOpsStressTest : public StressTest { if (thread->rand.OneInOpt(FLAGS_clear_column_family_one_in)) { // drop column family and then create it again (can't drop default) int cf = thread->rand.Next() % (FLAGS_column_families - 1) + 1; - std::string new_name = ToString(new_column_family_name_.fetch_add(1)); + std::string new_name = + std::to_string(new_column_family_name_.fetch_add(1)); { MutexLock l(thread->shared->GetMutex()); fprintf( @@ -789,7 +790,7 @@ class NonBatchedOpsStressTest : public StressTest { const std::vector& rand_keys, std::unique_ptr& lock) override { const std::string sst_filename = - FLAGS_db + "/." + ToString(thread->tid) + ".sst"; + FLAGS_db + "/." + std::to_string(thread->tid) + ".sst"; Status s; if (db_stress_env->FileExists(sst_filename).ok()) { // Maybe we terminated abnormally before, so cleanup to give this file diff --git a/env/env_test.cc b/env/env_test.cc index 1b30f093a..5826fcc35 100644 --- a/env/env_test.cc +++ b/env/env_test.cc @@ -1153,7 +1153,7 @@ TEST_P(EnvPosixTestWithParam, RandomAccessUniqueIDConcurrent) { IoctlFriendlyTmpdir ift; std::vector fnames; for (int i = 0; i < 1000; ++i) { - fnames.push_back(ift.name() + "/" + "testfile" + ToString(i)); + fnames.push_back(ift.name() + "/" + "testfile" + std::to_string(i)); // Create file. 
std::unique_ptr wfile; diff --git a/env/fs_posix.cc b/env/fs_posix.cc index 057058d24..8678e4151 100644 --- a/env/fs_posix.cc +++ b/env/fs_posix.cc @@ -606,8 +606,7 @@ class PosixFileSystem : public FileSystem { return IOStatus::NotFound(); default: assert(err == EIO || err == ENOMEM); - return IOStatus::IOError("Unexpected error(" + - ROCKSDB_NAMESPACE::ToString(err) + + return IOStatus::IOError("Unexpected error(" + std::to_string(err) + ") accessing file `" + fname + "' "); } } @@ -810,12 +809,11 @@ class PosixFileSystem : public FileSystem { errno = ENOLCK; // Note that the thread ID printed is the same one as the one in // posix logger, but posix logger prints it hex format. - return IOError( - "lock hold by current process, acquire time " + - ROCKSDB_NAMESPACE::ToString(prev_info.acquire_time) + - " acquiring thread " + - ROCKSDB_NAMESPACE::ToString(prev_info.acquiring_thread), - fname, errno); + return IOError("lock hold by current process, acquire time " + + std::to_string(prev_info.acquire_time) + + " acquiring thread " + + std::to_string(prev_info.acquiring_thread), + fname, errno); } IOStatus result = IOStatus::OK(); diff --git a/env/io_posix.cc b/env/io_posix.cc index 012026cbc..de941b5d7 100644 --- a/env/io_posix.cc +++ b/env/io_posix.cc @@ -284,9 +284,9 @@ IOStatus PosixSequentialFile::PositionedRead(uint64_t offset, size_t n, } if (r < 0) { // An error: return a non-ok status - s = IOError( - "While pread " + ToString(n) + " bytes from offset " + ToString(offset), - filename_, errno); + s = IOError("While pread " + std::to_string(n) + " bytes from offset " + + std::to_string(offset), + filename_, errno); } *result = Slice(scratch, (r < 0) ? 0 : n - left); return s; @@ -294,8 +294,8 @@ IOStatus PosixSequentialFile::PositionedRead(uint64_t offset, size_t n, IOStatus PosixSequentialFile::Skip(uint64_t n) { if (fseek(file_, static_cast(n), SEEK_CUR)) { - return IOError("While fseek to skip " + ToString(n) + " bytes", filename_, - errno); + return IOError("While fseek to skip " + std::to_string(n) + " bytes", + filename_, errno); } return IOStatus::OK(); } @@ -310,8 +310,9 @@ IOStatus PosixSequentialFile::InvalidateCache(size_t offset, size_t length) { // free OS pages int ret = Fadvise(fd_, offset, length, POSIX_FADV_DONTNEED); if (ret != 0) { - return IOError("While fadvise NotNeeded offset " + ToString(offset) + - " len " + ToString(length), + return IOError("While fadvise NotNeeded offset " + + std::to_string(offset) + " len " + + std::to_string(length), filename_, errno); } } @@ -596,9 +597,9 @@ IOStatus PosixRandomAccessFile::Read(uint64_t offset, size_t n, } if (r < 0) { // An error: return a non-ok status - s = IOError( - "While pread offset " + ToString(offset) + " len " + ToString(n), - filename_, errno); + s = IOError("While pread offset " + std::to_string(offset) + " len " + + std::to_string(n), + filename_, errno); } *result = Slice(scratch, (r < 0) ? 
0 : n - left); return s; @@ -704,8 +705,8 @@ IOStatus PosixRandomAccessFile::MultiRead(FSReadRequest* reqs, } } return IOStatus::IOError("io_uring_submit_and_wait() requested " + - ToString(this_reqs) + " but returned " + - ToString(ret)); + std::to_string(this_reqs) + " but returned " + + std::to_string(ret)); } for (size_t i = 0; i < this_reqs; i++) { @@ -718,7 +719,8 @@ IOStatus PosixRandomAccessFile::MultiRead(FSReadRequest* reqs, TEST_SYNC_POINT_CALLBACK( "PosixRandomAccessFile::MultiRead:io_uring_wait_cqe:return", &ret); if (ret) { - ios = IOStatus::IOError("io_uring_wait_cqe() returns " + ToString(ret)); + ios = IOStatus::IOError("io_uring_wait_cqe() returns " + + std::to_string(ret)); if (cqe != nullptr) { io_uring_cqe_seen(iu, cqe); @@ -738,7 +740,7 @@ IOStatus PosixRandomAccessFile::MultiRead(FSReadRequest* reqs, req_wrap); port::PrintStack(); ios = IOStatus::IOError("io_uring_cqe_get_data() returned " + - ToString((uint64_t)req_wrap)); + std::to_string((uint64_t)req_wrap)); continue; } wrap_cache.erase(wrap_check); @@ -801,8 +803,8 @@ IOStatus PosixRandomAccessFile::Prefetch(uint64_t offset, size_t n, r = fcntl(fd_, F_RDADVISE, &advice); #endif if (r == -1) { - s = IOError("While prefetching offset " + ToString(offset) + " len " + - ToString(n), + s = IOError("While prefetching offset " + std::to_string(offset) + + " len " + std::to_string(n), filename_, errno); } } @@ -855,8 +857,8 @@ IOStatus PosixRandomAccessFile::InvalidateCache(size_t offset, size_t length) { if (ret == 0) { return IOStatus::OK(); } - return IOError("While fadvise NotNeeded offset " + ToString(offset) + - " len " + ToString(length), + return IOError("While fadvise NotNeeded offset " + std::to_string(offset) + + " len " + std::to_string(length), filename_, errno); #endif } @@ -922,7 +924,7 @@ IOStatus PosixRandomAccessFile::ReadAsync( if (ret < 0) { fprintf(stderr, "io_uring_submit error: %ld\n", long(ret)); return IOStatus::IOError("io_uring_submit() requested but returned " + - ToString(ret)); + std::to_string(ret)); } return IOStatus::OK(); #else @@ -970,8 +972,8 @@ IOStatus PosixMmapReadableFile::Read(uint64_t offset, size_t n, IOStatus s; if (offset > length_) { *result = Slice(); - return IOError("While mmap read offset " + ToString(offset) + - " larger than file length " + ToString(length_), + return IOError("While mmap read offset " + std::to_string(offset) + + " larger than file length " + std::to_string(length_), filename_, EINVAL); } else if (offset + n > length_) { n = static_cast(length_ - offset); @@ -991,8 +993,8 @@ IOStatus PosixMmapReadableFile::InvalidateCache(size_t offset, size_t length) { if (ret == 0) { return IOStatus::OK(); } - return IOError("While fadvise not needed. Offset " + ToString(offset) + - " len" + ToString(length), + return IOError("While fadvise not needed. 
Offset " + std::to_string(offset) + + " len" + std::to_string(length), filename_, errno); #endif } @@ -1244,9 +1246,9 @@ IOStatus PosixMmapFile::Allocate(uint64_t offset, uint64_t len, if (alloc_status == 0) { return IOStatus::OK(); } else { - return IOError( - "While fallocate offset " + ToString(offset) + " len " + ToString(len), - filename_, errno); + return IOError("While fallocate offset " + std::to_string(offset) + + " len " + std::to_string(len), + filename_, errno); } } #endif @@ -1311,7 +1313,7 @@ IOStatus PosixWritableFile::PositionedAppend(const Slice& data, uint64_t offset, const char* src = data.data(); size_t nbytes = data.size(); if (!PosixPositionedWrite(fd_, src, nbytes, static_cast(offset))) { - return IOError("While pwrite to file at offset " + ToString(offset), + return IOError("While pwrite to file at offset " + std::to_string(offset), filename_, errno); } filesize_ = offset + nbytes; @@ -1323,8 +1325,8 @@ IOStatus PosixWritableFile::Truncate(uint64_t size, const IOOptions& /*opts*/, IOStatus s; int r = ftruncate(fd_, size); if (r < 0) { - s = IOError("While ftruncate file to size " + ToString(size), filename_, - errno); + s = IOError("While ftruncate file to size " + std::to_string(size), + filename_, errno); } else { filesize_ = size; } @@ -1481,9 +1483,9 @@ IOStatus PosixWritableFile::Allocate(uint64_t offset, uint64_t len, if (alloc_status == 0) { return IOStatus::OK(); } else { - return IOError( - "While fallocate offset " + ToString(offset) + " len " + ToString(len), - filename_, errno); + return IOError("While fallocate offset " + std::to_string(offset) + + " len " + std::to_string(len), + filename_, errno); } } #endif @@ -1508,7 +1510,7 @@ IOStatus PosixWritableFile::RangeSync(uint64_t offset, uint64_t nbytes, static_cast(nbytes), SYNC_FILE_RANGE_WRITE); } if (ret != 0) { - return IOError("While sync_file_range returned " + ToString(ret), + return IOError("While sync_file_range returned " + std::to_string(ret), filename_, errno); } return IOStatus::OK(); @@ -1544,9 +1546,9 @@ IOStatus PosixRandomRWFile::Write(uint64_t offset, const Slice& data, const char* src = data.data(); size_t nbytes = data.size(); if (!PosixPositionedWrite(fd_, src, nbytes, static_cast(offset))) { - return IOError( - "While write random read/write file at offset " + ToString(offset), - filename_, errno); + return IOError("While write random read/write file at offset " + + std::to_string(offset), + filename_, errno); } return IOStatus::OK(); @@ -1566,7 +1568,7 @@ IOStatus PosixRandomRWFile::Read(uint64_t offset, size_t n, continue; } return IOError("While reading random read/write file offset " + - ToString(offset) + " len " + ToString(n), + std::to_string(offset) + " len " + std::to_string(n), filename_, errno); } else if (done == 0) { // Nothing more to read diff --git a/file/delete_scheduler_test.cc b/file/delete_scheduler_test.cc index 96d2de496..4da0623c7 100644 --- a/file/delete_scheduler_test.cc +++ b/file/delete_scheduler_test.cc @@ -30,7 +30,7 @@ class DeleteSchedulerTest : public testing::Test { for (size_t i = 0; i < kNumDataDirs; ++i) { dummy_files_dirs_.emplace_back( test::PerThreadDBPath(env_, "delete_scheduler_dummy_data_dir") + - ToString(i)); + std::to_string(i)); DestroyAndCreateDir(dummy_files_dirs_.back()); } stats_ = ROCKSDB_NAMESPACE::CreateDBStatistics(); @@ -153,7 +153,7 @@ TEST_F(DeleteSchedulerTest, BasicRateLimiting) { // Create 100 dummy files, every file is 1 Kb std::vector generated_files; for (int i = 0; i < num_files; i++) { - std::string file_name = "file" 
+ ToString(i) + ".data"; + std::string file_name = "file" + std::to_string(i) + ".data"; generated_files.push_back(NewDummyFile(file_name, file_size)); } @@ -265,7 +265,7 @@ TEST_F(DeleteSchedulerTest, RateLimitingMultiThreaded) { // Create 100 dummy files, every file is 1 Kb std::vector generated_files; for (int i = 0; i < num_files * thread_cnt; i++) { - std::string file_name = "file" + ToString(i) + ".data"; + std::string file_name = "file" + std::to_string(i) + ".data"; generated_files.push_back(NewDummyFile(file_name, file_size)); } @@ -405,7 +405,7 @@ TEST_F(DeleteSchedulerTest, BackgroundError) { // Generate 10 dummy files and move them to trash for (int i = 0; i < 10; i++) { - std::string file_name = "data_" + ToString(i) + ".data"; + std::string file_name = "data_" + std::to_string(i) + ".data"; ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name), "")); } ASSERT_EQ(CountNormalFiles(), 0); @@ -415,7 +415,7 @@ TEST_F(DeleteSchedulerTest, BackgroundError) { // BackgroundEmptyTrash since we already deleted the files it was // goind to delete for (int i = 0; i < 10; i++) { - std::string file_name = "data_" + ToString(i) + ".data.trash"; + std::string file_name = "data_" + std::to_string(i) + ".data.trash"; ASSERT_OK(env_->DeleteFile(dummy_files_dirs_[0] + "/" + file_name)); } @@ -455,7 +455,7 @@ TEST_F(DeleteSchedulerTest, StartBGEmptyTrashMultipleTimes) { for (int run = 1; run <= 5; run++) { // Generate kTestFileNum dummy files and move them to trash for (int i = 0; i < kTestFileNum; i++) { - std::string file_name = "data_" + ToString(i) + ".data"; + std::string file_name = "data_" + std::to_string(i) + ".data"; ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name), "")); } ASSERT_EQ(CountNormalFiles(), 0); @@ -555,7 +555,7 @@ TEST_F(DeleteSchedulerTest, DestructorWithNonEmptyQueue) { NewDeleteScheduler(); for (int i = 0; i < 100; i++) { - std::string file_name = "data_" + ToString(i) + ".data"; + std::string file_name = "data_" + std::to_string(i) + ".data"; ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name), "")); } @@ -610,7 +610,7 @@ TEST_F(DeleteSchedulerTest, DISABLED_DynamicRateLimiting1) { // Create 100 dummy files, every file is 1 Kb std::vector generated_files; for (int i = 0; i < num_files; i++) { - std::string file_name = "file" + ToString(i) + ".data"; + std::string file_name = "file" + std::to_string(i) + ".data"; generated_files.push_back(NewDummyFile(file_name, file_size)); } @@ -671,7 +671,7 @@ TEST_F(DeleteSchedulerTest, ImmediateDeleteOn25PercDBSize) { std::vector generated_files; for (int i = 0; i < num_files; i++) { - std::string file_name = "file" + ToString(i) + ".data"; + std::string file_name = "file" + std::to_string(i) + ".data"; generated_files.push_back(NewDummyFile(file_name, file_size)); } diff --git a/java/rocksjni/write_batch_test.cc b/java/rocksjni/write_batch_test.cc index 36bfdeb9c..940429417 100644 --- a/java/rocksjni/write_batch_test.cc +++ b/java/rocksjni/write_batch_test.cc @@ -119,7 +119,7 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(JNIEnv* env, break; } state.append("@"); - state.append(ROCKSDB_NAMESPACE::ToString(ikey.sequence)); + state.append(std::to_string(ikey.sequence)); } if (!s.ok()) { state.append(s.ToString()); diff --git a/memory/jemalloc_nodump_allocator.cc b/memory/jemalloc_nodump_allocator.cc index 821916aac..62ee661d2 100644 --- a/memory/jemalloc_nodump_allocator.cc +++ b/memory/jemalloc_nodump_allocator.cc @@ -114,19 +114,18 @@ Status JemallocNodumpAllocator::InitializeArenas() { 
mallctl("arenas.create", &arena_index_, &arena_index_size, nullptr, 0); if (ret != 0) { return Status::Incomplete("Failed to create jemalloc arena, error code: " + - ROCKSDB_NAMESPACE::ToString(ret)); + std::to_string(ret)); } assert(arena_index_ != 0); // Read existing hooks. - std::string key = - "arena." + ROCKSDB_NAMESPACE::ToString(arena_index_) + ".extent_hooks"; + std::string key = "arena." + std::to_string(arena_index_) + ".extent_hooks"; extent_hooks_t* hooks; size_t hooks_size = sizeof(hooks); ret = mallctl(key.c_str(), &hooks, &hooks_size, nullptr, 0); if (ret != 0) { return Status::Incomplete("Failed to read existing hooks, error code: " + - ROCKSDB_NAMESPACE::ToString(ret)); + std::to_string(ret)); } // Store existing alloc. @@ -146,7 +145,7 @@ Status JemallocNodumpAllocator::InitializeArenas() { ret = mallctl(key.c_str(), nullptr, nullptr, &hooks_ptr, sizeof(hooks_ptr)); if (ret != 0) { return Status::Incomplete("Failed to set custom hook, error code: " + - ROCKSDB_NAMESPACE::ToString(ret)); + std::to_string(ret)); } return Status::OK(); } @@ -226,12 +225,11 @@ void* JemallocNodumpAllocator::Alloc(extent_hooks_t* extent, void* new_addr, Status JemallocNodumpAllocator::DestroyArena(unsigned arena_index) { assert(arena_index != 0); - std::string key = - "arena." + ROCKSDB_NAMESPACE::ToString(arena_index) + ".destroy"; + std::string key = "arena." + std::to_string(arena_index) + ".destroy"; int ret = mallctl(key.c_str(), nullptr, 0, nullptr, 0); if (ret != 0) { return Status::Incomplete("Failed to destroy jemalloc arena, error code: " + - ROCKSDB_NAMESPACE::ToString(ret)); + std::to_string(ret)); } return Status::OK(); } diff --git a/memtable/skiplistrep.cc b/memtable/skiplistrep.cc index 016515c44..5b8577e87 100644 --- a/memtable/skiplistrep.cc +++ b/memtable/skiplistrep.cc @@ -353,7 +353,7 @@ SkipListFactory::SkipListFactory(size_t lookahead) : lookahead_(lookahead) { std::string SkipListFactory::GetId() const { std::string id = Name(); if (lookahead_ > 0) { - id.append(":").append(ROCKSDB_NAMESPACE::ToString(lookahead_)); + id.append(":").append(std::to_string(lookahead_)); } return id; } diff --git a/microbench/db_basic_bench.cc b/microbench/db_basic_bench.cc index 64bff08fc..fea11319e 100644 --- a/microbench/db_basic_bench.cc +++ b/microbench/db_basic_bench.cc @@ -1342,7 +1342,7 @@ static void RandomAccessFileReaderRead(benchmark::State& state) { auto statistics_share = CreateDBStatistics(); Statistics* statistics = enable_statistics ? 
statistics_share.get() : nullptr; for (int i = 0; i < kFileNum; i++) { - std::string fname = fname_base + ToString(i); + std::string fname = fname_base + std::to_string(i); std::string content = rand.RandomString(kDefaultPageSize); std::unique_ptr tgt_file; env->NewWritableFile(fname, &tgt_file, EnvOptions()); @@ -1375,7 +1375,7 @@ static void RandomAccessFileReaderRead(benchmark::State& state) { // clean up for (int i = 0; i < kFileNum; i++) { - std::string fname = fname_base + ToString(i); + std::string fname = fname_base + std::to_string(i); env->DeleteFile(fname); // ignore return, okay to fail cleanup } } diff --git a/options/configurable_test.cc b/options/configurable_test.cc index 6cc83b847..8643193d8 100644 --- a/options/configurable_test.cc +++ b/options/configurable_test.cc @@ -173,7 +173,7 @@ TEST_F(ConfigurableTest, GetOptionsTest) { int i = 11; for (auto opt : {"", "shared.", "unique.", "pointer."}) { std::string value; - std::string expected = ToString(i); + std::string expected = std::to_string(i); std::string opt_name = opt; ASSERT_OK( simple->ConfigureOption(config_options_, opt_name + "int", expected)); diff --git a/options/options_helper.cc b/options/options_helper.cc index 7004568a3..0a530d24d 100644 --- a/options/options_helper.cc +++ b/options/options_helper.cc @@ -463,43 +463,43 @@ bool SerializeSingleOptionHelper(const void* opt_address, *value = *(static_cast(opt_address)) ? "true" : "false"; break; case OptionType::kInt: - *value = ToString(*(static_cast(opt_address))); + *value = std::to_string(*(static_cast(opt_address))); break; case OptionType::kInt32T: - *value = ToString(*(static_cast(opt_address))); + *value = std::to_string(*(static_cast(opt_address))); break; case OptionType::kInt64T: { int64_t v; GetUnaligned(static_cast(opt_address), &v); - *value = ToString(v); + *value = std::to_string(v); } break; case OptionType::kUInt: - *value = ToString(*(static_cast(opt_address))); + *value = std::to_string(*(static_cast(opt_address))); break; case OptionType::kUInt8T: - *value = ToString(*(static_cast(opt_address))); + *value = std::to_string(*(static_cast(opt_address))); break; case OptionType::kUInt32T: - *value = ToString(*(static_cast(opt_address))); + *value = std::to_string(*(static_cast(opt_address))); break; case OptionType::kUInt64T: { uint64_t v; GetUnaligned(static_cast(opt_address), &v); - *value = ToString(v); + *value = std::to_string(v); } break; case OptionType::kSizeT: { size_t v; GetUnaligned(static_cast(opt_address), &v); - *value = ToString(v); + *value = std::to_string(v); } break; case OptionType::kDouble: - *value = ToString(*(static_cast(opt_address))); + *value = std::to_string(*(static_cast(opt_address))); break; case OptionType::kString: *value = diff --git a/options/options_parser.cc b/options/options_parser.cc index 426e30013..73ccbdc4e 100644 --- a/options/options_parser.cc +++ b/options/options_parser.cc @@ -79,16 +79,16 @@ Status PersistRocksDBOptions(const ConfigOptions& config_options_in, std::string options_file_content; - s = writable->Append(option_file_header + "[" + - opt_section_titles[kOptionSectionVersion] + - "]\n" - " rocksdb_version=" + - ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR) + - "." + ToString(ROCKSDB_PATCH) + "\n"); + s = writable->Append( + option_file_header + "[" + opt_section_titles[kOptionSectionVersion] + + "]\n" + " rocksdb_version=" + + std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR) + + "." 
+ std::to_string(ROCKSDB_PATCH) + "\n"); if (s.ok()) { s = writable->Append( - " options_file_version=" + ToString(ROCKSDB_OPTION_FILE_MAJOR) + "." + - ToString(ROCKSDB_OPTION_FILE_MINOR) + "\n"); + " options_file_version=" + std::to_string(ROCKSDB_OPTION_FILE_MAJOR) + + "." + std::to_string(ROCKSDB_OPTION_FILE_MINOR) + "\n"); } if (s.ok()) { s = writable->Append("\n[" + opt_section_titles[kOptionSectionDBOptions] + @@ -216,7 +216,7 @@ Status RocksDBOptionsParser::InvalidArgument(const int line_num, const std::string& message) { return Status::InvalidArgument( "[RocksDBOptionsParser Error] ", - message + " (at line " + ToString(line_num) + ")"); + message + " (at line " + std::to_string(line_num) + ")"); } Status RocksDBOptionsParser::ParseStatement(std::string* name, @@ -590,7 +590,7 @@ Status RocksDBOptionsParser::VerifyRocksDBOptionsFromFile( return Status::InvalidArgument( "[RocksDBOptionParser Error] The persisted options and the db" "instance does not have the same name for column family ", - ToString(i)); + std::to_string(i)); } } diff --git a/options/options_test.cc b/options/options_test.cc index 3ff230eff..e01c3db66 100644 --- a/options/options_test.cc +++ b/options/options_test.cc @@ -3346,31 +3346,31 @@ TEST_F(OptionsParserTest, IgnoreUnknownOptions) { if (case_id == 0) { // same version should_ignore = false; - version_string = - ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR) + ".0"; + version_string = std::to_string(ROCKSDB_MAJOR) + "." + + std::to_string(ROCKSDB_MINOR) + ".0"; } else if (case_id == 1) { // higher minor version should_ignore = true; - version_string = - ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR + 1) + ".0"; + version_string = std::to_string(ROCKSDB_MAJOR) + "." + + std::to_string(ROCKSDB_MINOR + 1) + ".0"; } else if (case_id == 2) { // higher major version. should_ignore = true; - version_string = ToString(ROCKSDB_MAJOR + 1) + ".0.0"; + version_string = std::to_string(ROCKSDB_MAJOR + 1) + ".0.0"; } else if (case_id == 3) { // lower minor version #if ROCKSDB_MINOR == 0 continue; #else - version_string = - ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR - 1) + ".0"; + version_string = std::to_string(ROCKSDB_MAJOR) + "." + + std::to_string(ROCKSDB_MINOR - 1) + ".0"; should_ignore = false; #endif } else { // lower major version should_ignore = false; - version_string = - ToString(ROCKSDB_MAJOR - 1) + "." + ToString(ROCKSDB_MINOR) + ".0"; + version_string = std::to_string(ROCKSDB_MAJOR - 1) + "." 
+ + std::to_string(ROCKSDB_MINOR) + ".0"; } std::string options_file_content = diff --git a/table/block_based/block_based_filter_block.cc b/table/block_based/block_based_filter_block.cc index c56936474..09e30f74b 100644 --- a/table/block_based/block_based_filter_block.cc +++ b/table/block_based/block_based_filter_block.cc @@ -51,7 +51,7 @@ void AppendItem(std::string* props, const std::string& key, template void AppendItem(std::string* props, const TKey& key, const std::string& value) { - std::string key_str = ROCKSDB_NAMESPACE::ToString(key); + std::string key_str = std::to_string(key); AppendItem(props, key_str, value); } } // namespace @@ -337,7 +337,7 @@ std::string BlockBasedFilterBlockReader::ToString() const { result.reserve(1024); std::string s_bo("Block offset"), s_hd("Hex dump"), s_fb("# filter blocks"); - AppendItem(&result, s_fb, ROCKSDB_NAMESPACE::ToString(num)); + AppendItem(&result, s_fb, std::to_string(num)); AppendItem(&result, s_bo, s_hd); for (size_t index = 0; index < num; index++) { @@ -345,8 +345,7 @@ std::string BlockBasedFilterBlockReader::ToString() const { uint32_t limit = DecodeFixed32(offset + index * 4 + 4); if (start != limit) { - result.append(" filter block # " + - ROCKSDB_NAMESPACE::ToString(index + 1) + "\n"); + result.append(" filter block # " + std::to_string(index + 1) + "\n"); Slice filter = Slice(data + start, limit - start); AppendItem(&result, start, filter.ToString(true)); } diff --git a/table/block_based/block_based_table_factory.cc b/table/block_based/block_based_table_factory.cc index db2858b19..f737a2f34 100644 --- a/table/block_based/block_based_table_factory.cc +++ b/table/block_based/block_based_table_factory.cc @@ -686,8 +686,7 @@ Status BlockBasedTableFactory::ValidateOptions( table_options_.checksum, &garbage)) { return Status::InvalidArgument( "Unrecognized ChecksumType for checksum: " + - ROCKSDB_NAMESPACE::ToString( - static_cast(table_options_.checksum))); + std::to_string(static_cast(table_options_.checksum))); } return TableFactory::ValidateOptions(db_opts, cf_opts); } diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc index 20538c38e..baef3bf89 100644 --- a/table/block_based/block_based_table_reader.cc +++ b/table/block_based/block_based_table_reader.cc @@ -1811,10 +1811,11 @@ void BlockBasedTable::RetrieveMultipleBlocks( if (s.ok()) { if ((req.result.size() != req.len) || (req_offset + BlockSizeWithTrailer(handle) > req.result.size())) { - s = Status::Corruption( - "truncated block read from " + rep_->file->file_name() + - " offset " + ToString(handle.offset()) + ", expected " + - ToString(req.len) + " bytes, got " + ToString(req.result.size())); + s = Status::Corruption("truncated block read from " + + rep_->file->file_name() + " offset " + + std::to_string(handle.offset()) + ", expected " + + std::to_string(req.len) + " bytes, got " + + std::to_string(req.result.size())); } } @@ -3236,7 +3237,7 @@ Status BlockBasedTable::CreateIndexReader( } default: { std::string error_message = - "Unrecognized index type: " + ToString(rep_->index_type); + "Unrecognized index type: " + std::to_string(rep_->index_type); return Status::InvalidArgument(error_message.c_str()); } } @@ -3659,8 +3660,8 @@ Status BlockBasedTable::DumpDataBlocks(std::ostream& out_stream) { out_stream << " # data blocks: " << num_datablocks << "\n"; out_stream << " min data block size: " << datablock_size_min << "\n"; out_stream << " max data block size: " << datablock_size_max << "\n"; - out_stream << " avg data 
block size: " << ToString(datablock_size_avg) - << "\n"; + out_stream << " avg data block size: " + << std::to_string(datablock_size_avg) << "\n"; } return Status::OK(); diff --git a/table/block_based/filter_policy.cc b/table/block_based/filter_policy.cc index 6ac4b9142..6005d11e7 100644 --- a/table/block_based/filter_policy.cc +++ b/table/block_based/filter_policy.cc @@ -1542,7 +1542,7 @@ BloomLikeFilterPolicy::GetStandard128RibbonBuilderWithContext( } std::string BloomLikeFilterPolicy::GetBitsPerKeySuffix() const { - std::string rv = ":" + ROCKSDB_NAMESPACE::ToString(millibits_per_key_ / 1000); + std::string rv = ":" + std::to_string(millibits_per_key_ / 1000); int frac = millibits_per_key_ % 1000; if (frac > 0) { rv.push_back('.'); @@ -1837,7 +1837,7 @@ const char* RibbonFilterPolicy::kNickName() { return "rocksdb.RibbonFilter"; } std::string RibbonFilterPolicy::GetId() const { return BloomLikeFilterPolicy::GetId() + ":" + - ROCKSDB_NAMESPACE::ToString(bloom_before_level_); + std::to_string(bloom_before_level_); } const FilterPolicy* NewRibbonFilterPolicy(double bloom_equivalent_bits_per_key, diff --git a/table/block_based/index_builder.h b/table/block_based/index_builder.h index 23e4a76d5..522b67361 100644 --- a/table/block_based/index_builder.h +++ b/table/block_based/index_builder.h @@ -285,8 +285,8 @@ class HashIndexBuilder : public IndexBuilder { } // need a hard copy otherwise the underlying data changes all the time. - // TODO(kailiu) ToString() is expensive. We may speed up can avoid data - // copy. + // TODO(kailiu) std::to_string() is expensive. We may speed up can avoid + // data copy. pending_entry_prefix_ = key_prefix.ToString(); pending_block_num_ = 1; pending_entry_index_ = static_cast(current_restart_index_); diff --git a/table/block_based/reader_common.cc b/table/block_based/reader_common.cc index d0f47c779..0ff43e9b4 100644 --- a/table/block_based/reader_common.cc +++ b/table/block_based/reader_common.cc @@ -43,10 +43,10 @@ Status VerifyBlockChecksum(ChecksumType type, const char* data, computed = crc32c::Unmask(computed); } return Status::Corruption( - "block checksum mismatch: stored = " + ToString(stored) + - ", computed = " + ToString(computed) + ", type = " + ToString(type) + - " in " + file_name + " offset " + ToString(offset) + " size " + - ToString(block_size)); + "block checksum mismatch: stored = " + std::to_string(stored) + + ", computed = " + std::to_string(computed) + + ", type = " + std::to_string(type) + " in " + file_name + " offset " + + std::to_string(offset) + " size " + std::to_string(block_size)); } } } // namespace ROCKSDB_NAMESPACE diff --git a/table/block_fetcher.cc b/table/block_fetcher.cc index 110c4bfe8..d11b25c44 100644 --- a/table/block_fetcher.cc +++ b/table/block_fetcher.cc @@ -305,11 +305,11 @@ IOStatus BlockFetcher::ReadBlockContents() { } if (slice_.size() != block_size_with_trailer_) { - return IOStatus::Corruption("truncated block read from " + - file_->file_name() + " offset " + - ToString(handle_.offset()) + ", expected " + - ToString(block_size_with_trailer_) + - " bytes, got " + ToString(slice_.size())); + return IOStatus::Corruption( + "truncated block read from " + file_->file_name() + " offset " + + std::to_string(handle_.offset()) + ", expected " + + std::to_string(block_size_with_trailer_) + " bytes, got " + + std::to_string(slice_.size())); } ProcessTrailerIfPresent(); diff --git a/table/cuckoo/cuckoo_table_builder.cc b/table/cuckoo/cuckoo_table_builder.cc index 0068770b2..ea56fdae8 100644 --- 
a/table/cuckoo/cuckoo_table_builder.cc +++ b/table/cuckoo/cuckoo_table_builder.cc @@ -103,7 +103,7 @@ void CuckooTableBuilder::Add(const Slice& key, const Slice& value) { } if (ikey.type != kTypeDeletion && ikey.type != kTypeValue) { status_ = Status::NotSupported("Unsupported key type " + - ToString(ikey.type)); + std::to_string(ikey.type)); return; } diff --git a/table/cuckoo/cuckoo_table_reader_test.cc b/table/cuckoo/cuckoo_table_reader_test.cc index 5547e6d27..ed8f642a7 100644 --- a/table/cuckoo/cuckoo_table_reader_test.cc +++ b/table/cuckoo/cuckoo_table_reader_test.cc @@ -400,7 +400,7 @@ std::string GetFileName(uint64_t num) { FLAGS_file_dir = test::TmpDir(); } return test::PerThreadDBPath(FLAGS_file_dir, "cuckoo_read_benchmark") + - ToString(num / 1000000) + "Mkeys"; + std::to_string(num / 1000000) + "Mkeys"; } // Create last level file as we are interested in measuring performance of diff --git a/table/format.cc b/table/format.cc index 4db3367ea..e7720e901 100644 --- a/table/format.cc +++ b/table/format.cc @@ -295,7 +295,7 @@ Status Footer::DecodeFrom(Slice input, uint64_t input_offset) { format_version_ = DecodeFixed32(part3_ptr); if (!IsSupportedFormatVersion(format_version_)) { return Status::Corruption("Corrupt or unsupported format_version: " + - ROCKSDB_NAMESPACE::ToString(format_version_)); + std::to_string(format_version_)); } // All known format versions >= 1 occupy exactly this many bytes. if (input.size() < kNewVersionsEncodedLength) { @@ -308,9 +308,8 @@ Status Footer::DecodeFrom(Slice input, uint64_t input_offset) { char chksum = input.data()[0]; checksum_type_ = lossless_cast(chksum); if (!IsSupportedChecksumType(checksum_type())) { - return Status::Corruption( - "Corrupt or unsupported checksum type: " + - ROCKSDB_NAMESPACE::ToString(lossless_cast(chksum))); + return Status::Corruption("Corrupt or unsupported checksum type: " + + std::to_string(lossless_cast(chksum))); } // Consume checksum type field input.remove_prefix(1); @@ -333,15 +332,15 @@ std::string Footer::ToString() const { if (legacy) { result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n "); result.append("index handle: " + index_handle_.ToString() + "\n "); - result.append("table_magic_number: " + - ROCKSDB_NAMESPACE::ToString(table_magic_number_) + "\n "); + result.append("table_magic_number: " + std::to_string(table_magic_number_) + + "\n "); } else { result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n "); result.append("index handle: " + index_handle_.ToString() + "\n "); - result.append("table_magic_number: " + - ROCKSDB_NAMESPACE::ToString(table_magic_number_) + "\n "); - result.append("format version: " + - ROCKSDB_NAMESPACE::ToString(format_version_) + "\n "); + result.append("table_magic_number: " + std::to_string(table_magic_number_) + + "\n "); + result.append("format version: " + std::to_string(format_version_) + + "\n "); } return result; } @@ -351,7 +350,8 @@ Status ReadFooterFromFile(const IOOptions& opts, RandomAccessFileReader* file, uint64_t file_size, Footer* footer, uint64_t enforce_table_magic_number) { if (file_size < Footer::kMinEncodedLength) { - return Status::Corruption("file is too short (" + ToString(file_size) + + return Status::Corruption("file is too short (" + + std::to_string(file_size) + " bytes) to be an " "sstable: " + file->file_name()); @@ -390,7 +390,8 @@ Status ReadFooterFromFile(const IOOptions& opts, RandomAccessFileReader* file, // Check that we actually read the whole footer from the file. 
It may be // that size isn't correct. if (footer_input.size() < Footer::kMinEncodedLength) { - return Status::Corruption("file is too short (" + ToString(file_size) + + return Status::Corruption("file is too short (" + + std::to_string(file_size) + " bytes) to be an " "sstable" + file->file_name()); @@ -402,10 +403,11 @@ Status ReadFooterFromFile(const IOOptions& opts, RandomAccessFileReader* file, } if (enforce_table_magic_number != 0 && enforce_table_magic_number != footer->table_magic_number()) { - return Status::Corruption( - "Bad table magic number: expected " + - ToString(enforce_table_magic_number) + ", found " + - ToString(footer->table_magic_number()) + " in " + file->file_name()); + return Status::Corruption("Bad table magic number: expected " + + std::to_string(enforce_table_magic_number) + + ", found " + + std::to_string(footer->table_magic_number()) + + " in " + file->file_name()); } return Status::OK(); } diff --git a/table/plain/plain_table_reader.cc b/table/plain/plain_table_reader.cc index 25f2ba95b..5e04f3799 100644 --- a/table/plain/plain_table_reader.cc +++ b/table/plain/plain_table_reader.cc @@ -416,14 +416,14 @@ Status PlainTableReader::PopulateIndex(TableProperties* props, // Fill two table properties. if (!index_in_file) { props->user_collected_properties["plain_table_hash_table_size"] = - ToString(index_.GetIndexSize() * PlainTableIndex::kOffsetLen); + std::to_string(index_.GetIndexSize() * PlainTableIndex::kOffsetLen); props->user_collected_properties["plain_table_sub_index_size"] = - ToString(index_.GetSubIndexSize()); + std::to_string(index_.GetSubIndexSize()); } else { props->user_collected_properties["plain_table_hash_table_size"] = - ToString(0); + std::to_string(0); props->user_collected_properties["plain_table_sub_index_size"] = - ToString(0); + std::to_string(0); } return Status::OK(); diff --git a/table/sst_file_writer_collectors.h b/table/sst_file_writer_collectors.h index 54cdb1ea7..7610af573 100644 --- a/table/sst_file_writer_collectors.h +++ b/table/sst_file_writer_collectors.h @@ -63,7 +63,7 @@ class SstFileWriterPropertiesCollector : public IntTblPropCollector { } virtual UserCollectedProperties GetReadableProperties() const override { - return {{ExternalSstFilePropertyNames::kVersion, ToString(version_)}}; + return {{ExternalSstFilePropertyNames::kVersion, std::to_string(version_)}}; } private: diff --git a/table/table_properties.cc b/table/table_properties.cc index 49b474758..e15da6a66 100644 --- a/table/table_properties.cc +++ b/table/table_properties.cc @@ -39,9 +39,7 @@ namespace { const TValue& value, const std::string& prop_delim, const std::string& kv_delim) { - AppendProperty( - props, key, ToString(value), prop_delim, kv_delim - ); + AppendProperty(props, key, std::to_string(value), prop_delim, kv_delim); } } @@ -107,7 +105,7 @@ std::string TableProperties::ToString( ROCKSDB_NAMESPACE::TablePropertiesCollectorFactory:: Context::kUnknownColumnFamily ? std::string("N/A") - : ROCKSDB_NAMESPACE::ToString(column_family_id), + : std::to_string(column_family_id), prop_delim, kv_delim); AppendProperty( result, "column family name", diff --git a/table/table_test.cc b/table/table_test.cc index 52a062286..8a3f4f7d8 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -1377,9 +1377,9 @@ TEST_F(TablePropertyTest, PrefixScanTest) { pos->first.compare(0, prefix.size(), prefix) == 0; ++pos) { ++num; - auto key = prefix + "." + ToString(num); + auto key = prefix + "." 
+ std::to_string(num); ASSERT_EQ(key, pos->first); - ASSERT_EQ(ToString(num), pos->second); + ASSERT_EQ(std::to_string(num), pos->second); } ASSERT_EQ(3, num); } @@ -1705,7 +1705,7 @@ uint64_t BlockBasedTableTest::IndexUncompressedHelper(bool compressed) { constexpr size_t kNumKeys = 10000; for (size_t k = 0; k < kNumKeys; ++k) { - c.Add("key" + ToString(k), "val" + ToString(k)); + c.Add("key" + std::to_string(k), "val" + std::to_string(k)); } std::vector keys; diff --git a/test_util/testutil.cc b/test_util/testutil.cc index cc95d8956..b9869c348 100644 --- a/test_util/testutil.cc +++ b/test_util/testutil.cc @@ -656,7 +656,7 @@ class SpecialSkipListFactory : public MemTableRepFactory { std::string GetId() const override { std::string id = Name(); if (num_entries_flush_ > 0) { - id.append(":").append(ROCKSDB_NAMESPACE::ToString(num_entries_flush_)); + id.append(":").append(std::to_string(num_entries_flush_)); } return id; } diff --git a/test_util/transaction_test_util.cc b/test_util/transaction_test_util.cc index 3eaa7fd6f..b90534341 100644 --- a/test_util/transaction_test_util.cc +++ b/test_util/transaction_test_util.cc @@ -96,7 +96,7 @@ Status RandomTransactionInserter::DBGet( assert(set_i + 1 <= 9999); snprintf(prefix_buf, sizeof(prefix_buf), "%.4u", set_i + 1); // key format: [SET#][random#] - std::string skey = ToString(ikey); + std::string skey = std::to_string(ikey); Slice base_key(skey); *full_key = std::string(prefix_buf) + base_key.ToString(); Slice key(*full_key); @@ -163,7 +163,7 @@ bool RandomTransactionInserter::DoInsert(DB* db, Transaction* txn, if (s.ok()) { // Increment key - std::string sum = ToString(int_value + incr); + std::string sum = std::to_string(int_value + incr); if (txn != nullptr) { if ((set_i % 4) != 0) { s = txn->SingleDelete(key); diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index f04eff4f1..24970cf8c 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -1952,9 +1952,9 @@ class ReporterAgent { auto secs_elapsed = (clock->NowMicros() - time_started + kMicrosInSecond / 2) / kMicrosInSecond; - std::string report = ToString(secs_elapsed) + "," + - ToString(total_ops_done_snapshot - last_report_) + - "\n"; + std::string report = + std::to_string(secs_elapsed) + "," + + std::to_string(total_ops_done_snapshot - last_report_) + "\n"; auto s = report_file_->Append(report); if (s.ok()) { s = report_file_->Flush(); @@ -2208,7 +2208,7 @@ class Stats { if (db->GetProperty( db_with_cfh->cfh[i], "rocksdb.aggregated-table-properties-at-level" + - ToString(level), + std::to_string(level), &stats)) { if (stats.find("# entries=0") == std::string::npos) { fprintf(stderr, "Level[%d]: %s\n", level, @@ -2232,7 +2232,7 @@ class Stats { for (int level = 0; level < FLAGS_num_levels; ++level) { if (db->GetProperty( "rocksdb.aggregated-table-properties-at-level" + - ToString(level), + std::to_string(level), &stats)) { if (stats.find("# entries=0") == std::string::npos) { fprintf(stderr, "Level[%d]: %s\n", level, stats.c_str()); @@ -3142,7 +3142,7 @@ class Benchmark { } #endif } - return base_name + ToString(id); + return base_name + std::to_string(id); } void VerifyDBFromDB(std::string& truth_db_name) { @@ -3791,7 +3791,7 @@ class Benchmark { static inline void ChecksumBenchmark(FnType fn, ThreadState* thread, Args... 
args) { const int size = FLAGS_block_size; // use --block_size option for db_bench - std::string labels = "(" + ToString(FLAGS_block_size) + " per op)"; + std::string labels = "(" + std::to_string(FLAGS_block_size) + " per op)"; const char* label = labels.c_str(); std::string data(size, 'x'); @@ -4429,7 +4429,7 @@ class Benchmark { Status s = FilterPolicy::CreateFromString( ConfigOptions(), "rocksdb.internal.DeprecatedBlockBasedBloomFilter:" + - ROCKSDB_NAMESPACE::ToString(FLAGS_bloom_bits), + std::to_string(FLAGS_bloom_bits), &table_options->filter_policy); if (!s.ok()) { fprintf(stderr, diff --git a/tools/db_sanity_test.cc b/tools/db_sanity_test.cc index b483ee84f..21475a7e9 100644 --- a/tools/db_sanity_test.cc +++ b/tools/db_sanity_test.cc @@ -45,8 +45,8 @@ class SanityTest { return s; } for (int i = 0; i < 1000000; ++i) { - std::string k = "key" + ToString(i); - std::string v = "value" + ToString(i); + std::string k = "key" + std::to_string(i); + std::string v = "value" + std::to_string(i); s = db->Put(WriteOptions(), Slice(k), Slice(v)); if (!s.ok()) { return s; @@ -63,8 +63,8 @@ class SanityTest { return s; } for (int i = 0; i < 1000000; ++i) { - std::string k = "key" + ToString(i); - std::string v = "value" + ToString(i); + std::string k = "key" + std::to_string(i); + std::string v = "value" + std::to_string(i); std::string result; s = db->Get(ReadOptions(), Slice(k), &result); if (!s.ok()) { diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc index 91a0b2775..a7ba92b73 100644 --- a/tools/ldb_cmd.cc +++ b/tools/ldb_cmd.cc @@ -2198,8 +2198,7 @@ std::vector ReduceDBLevelsCommand::PrepareArgs( std::vector ret; ret.push_back("reduce_levels"); ret.push_back("--" + ARG_DB + "=" + db_path); - ret.push_back("--" + ARG_NEW_LEVELS + "=" + - ROCKSDB_NAMESPACE::ToString(new_levels)); + ret.push_back("--" + ARG_NEW_LEVELS + "=" + std::to_string(new_levels)); if(print_old_level) { ret.push_back("--" + ARG_PRINT_OLD_LEVELS); } @@ -2393,7 +2392,8 @@ void ChangeCompactionStyleCommand::DoCommand() { std::string property; std::string files_per_level; for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) { - db_->GetProperty(GetCfHandle(), "rocksdb.num-files-at-level" + ToString(i), + db_->GetProperty(GetCfHandle(), + "rocksdb.num-files-at-level" + std::to_string(i), &property); // format print string @@ -2421,7 +2421,8 @@ void ChangeCompactionStyleCommand::DoCommand() { files_per_level = ""; int num_files = 0; for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) { - db_->GetProperty(GetCfHandle(), "rocksdb.num-files-at-level" + ToString(i), + db_->GetProperty(GetCfHandle(), + "rocksdb.num-files-at-level" + std::to_string(i), &property); // format print string @@ -2436,7 +2437,7 @@ void ChangeCompactionStyleCommand::DoCommand() { exec_state_ = LDBCommandExecuteResult::Failed( "Number of db files at " "level 0 after compaction is " + - ToString(num_files) + ", not 1.\n"); + std::to_string(num_files) + ", not 1.\n"); return; } // other levels should have no file @@ -2444,8 +2445,8 @@ void ChangeCompactionStyleCommand::DoCommand() { exec_state_ = LDBCommandExecuteResult::Failed( "Number of db files at " "level " + - ToString(i) + " after compaction is " + ToString(num_files) + - ", not 0.\n"); + std::to_string(i) + " after compaction is " + + std::to_string(num_files) + ", not 0.\n"); return; } } diff --git a/tools/ldb_cmd_test.cc b/tools/ldb_cmd_test.cc index e831461ed..a46df317b 100644 --- a/tools/ldb_cmd_test.cc +++ b/tools/ldb_cmd_test.cc @@ -244,7 +244,7 @@ class FileChecksumTestHelper { 
live_files[i].file_checksum_func_name != stored_func_name) { return Status::Corruption( "Checksum does not match! The file: " + - ToString(live_files[i].file_number) + + std::to_string(live_files[i].file_number) + ". In Manifest, checksum name: " + stored_func_name + " and checksum " + stored_checksum + ". However, expected checksum name: " + @@ -937,7 +937,7 @@ TEST_F(LdbCmdTest, UnsafeRemoveSstFile) { // Create three SST files for (size_t i = 0; i < 3; ++i) { - ASSERT_OK(db->Put(WriteOptions(), ToString(i), ToString(i))); + ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), std::to_string(i))); ASSERT_OK(db->Flush(FlushOptions())); } @@ -985,7 +985,8 @@ TEST_F(LdbCmdTest, UnsafeRemoveSstFile) { ColumnFamilyOptions cf_opts; ASSERT_OK(db->CreateColumnFamily(cf_opts, "cf1", &cf_handle)); for (size_t i = 3; i < 5; ++i) { - ASSERT_OK(db->Put(WriteOptions(), cf_handle, ToString(i), ToString(i))); + ASSERT_OK(db->Put(WriteOptions(), cf_handle, std::to_string(i), + std::to_string(i))); ASSERT_OK(db->Flush(FlushOptions(), cf_handle)); } @@ -1048,7 +1049,7 @@ TEST_F(LdbCmdTest, FileTemperatureUpdateManifest) { Temperature::kWarm, Temperature::kCold}; std::map number_to_temp; for (size_t i = 0; i < kTestTemps.size(); ++i) { - ASSERT_OK(db->Put(WriteOptions(), ToString(i), ToString(i))); + ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), std::to_string(i))); ASSERT_OK(db->Flush(FlushOptions())); std::map current_temps; @@ -1069,8 +1070,8 @@ TEST_F(LdbCmdTest, FileTemperatureUpdateManifest) { for (size_t i = 0; i < kTestTemps.size(); ++i) { std::string val; - ASSERT_OK(db->Get(ReadOptions(), ToString(i), &val)); - ASSERT_EQ(val, ToString(i)); + ASSERT_OK(db->Get(ReadOptions(), std::to_string(i), &val)); + ASSERT_EQ(val, std::to_string(i)); } // Still all unknown @@ -1101,8 +1102,8 @@ TEST_F(LdbCmdTest, FileTemperatureUpdateManifest) { for (size_t i = 0; i < kTestTemps.size(); ++i) { std::string val; - ASSERT_OK(db->Get(ReadOptions(), ToString(i), &val)); - ASSERT_EQ(val, ToString(i)); + ASSERT_OK(db->Get(ReadOptions(), std::to_string(i), &val)); + ASSERT_EQ(val, std::to_string(i)); } requests.clear(); diff --git a/tools/reduce_levels_test.cc b/tools/reduce_levels_test.cc index 2f7256051..c809c6423 100644 --- a/tools/reduce_levels_test.cc +++ b/tools/reduce_levels_test.cc @@ -70,8 +70,8 @@ public: int FilesOnLevel(int level) { std::string property; - EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level), - &property)); + EXPECT_TRUE(db_->GetProperty( + "rocksdb.num-files-at-level" + std::to_string(level), &property)); return atoi(property.c_str()); } diff --git a/util/autovector_test.cc b/util/autovector_test.cc index d73b1ee6a..bc7fbc3f1 100644 --- a/util/autovector_test.cc +++ b/util/autovector_test.cc @@ -68,7 +68,7 @@ TEST_F(AutoVectorTest, EmplaceBack) { autovector vec; for (size_t i = 0; i < 1000 * kSize; ++i) { - vec.emplace_back(i, ToString(i + 123)); + vec.emplace_back(i, std::to_string(i + 123)); ASSERT_TRUE(!vec.empty()); if (i < kSize) { AssertAutoVectorOnlyInStack(&vec, true); @@ -78,7 +78,7 @@ TEST_F(AutoVectorTest, EmplaceBack) { ASSERT_EQ(i + 1, vec.size()); ASSERT_EQ(i, vec[i].first); - ASSERT_EQ(ToString(i + 123), vec[i].second); + ASSERT_EQ(std::to_string(i + 123), vec[i].second); } vec.clear(); @@ -146,7 +146,7 @@ TEST_F(AutoVectorTest, CopyAndAssignment) { TEST_F(AutoVectorTest, Iterators) { autovector vec; for (size_t i = 0; i < kSize * 1000; ++i) { - vec.push_back(ToString(i)); + vec.push_back(std::to_string(i)); } // basic operator test @@ -208,7 
+208,7 @@ std::vector GetTestKeys(size_t size) { int index = 0; for (auto& key : keys) { - key = "item-" + ROCKSDB_NAMESPACE::ToString(index++); + key = "item-" + std::to_string(index++); } return keys; } diff --git a/util/bloom_test.cc b/util/bloom_test.cc index e20d3d403..18cd801c8 100644 --- a/util/bloom_test.cc +++ b/util/bloom_test.cc @@ -1257,7 +1257,7 @@ TEST_P(FullBloomTest, CorruptFilters) { ASSERT_TRUE(Matches("hello")); ASSERT_TRUE(Matches("world")); // Need many queries to find a "true negative" - for (int i = 0; Matches(ToString(i)); ++i) { + for (int i = 0; Matches(std::to_string(i)); ++i) { ASSERT_LT(i, 1000); } diff --git a/util/build_version.cc.in b/util/build_version.cc.in index 64c86a563..c1706dc1f 100644 --- a/util/build_version.cc.in +++ b/util/build_version.cc.in @@ -27,7 +27,7 @@ extern "C" { } // extern "C" std::unordered_map ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = { - @ROCKSDB_PLUGIN_BUILTINS@ + @ROCKSDB_PLUGIN_BUILTINS@ }; #endif //ROCKSDB_LITE @@ -43,7 +43,7 @@ static void AddProperty(std::unordered_map *props, con } } } - + static std::unordered_map* LoadPropertiesSet() { auto * properties = new std::unordered_map(); AddProperty(properties, rocksdb_build_git_sha); @@ -58,14 +58,14 @@ const std::unordered_map& GetRocksBuildProperties() { } std::string GetRocksVersionAsString(bool with_patch) { - std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR); + std::string version = std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR); if (with_patch) { - return version + "." + ToString(ROCKSDB_PATCH); + return version + "." + std::to_string(ROCKSDB_PATCH); } else { return version; } } - + std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) { std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true); if (verbose) { @@ -79,4 +79,3 @@ std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) return info; } } // namespace ROCKSDB_NAMESPACE - diff --git a/util/compression.h b/util/compression.h index 1a0488240..6cb9e670f 100644 --- a/util/compression.h +++ b/util/compression.h @@ -633,25 +633,25 @@ inline std::string CompressionOptionsToString( std::string result; result.reserve(512); result.append("window_bits=") - .append(ToString(compression_options.window_bits)) + .append(std::to_string(compression_options.window_bits)) .append("; "); result.append("level=") - .append(ToString(compression_options.level)) + .append(std::to_string(compression_options.level)) .append("; "); result.append("strategy=") - .append(ToString(compression_options.strategy)) + .append(std::to_string(compression_options.strategy)) .append("; "); result.append("max_dict_bytes=") - .append(ToString(compression_options.max_dict_bytes)) + .append(std::to_string(compression_options.max_dict_bytes)) .append("; "); result.append("zstd_max_train_bytes=") - .append(ToString(compression_options.zstd_max_train_bytes)) + .append(std::to_string(compression_options.zstd_max_train_bytes)) .append("; "); result.append("enabled=") - .append(ToString(compression_options.enabled)) + .append(std::to_string(compression_options.enabled)) .append("; "); result.append("max_dict_buffer_bytes=") - .append(ToString(compression_options.max_dict_buffer_bytes)) + .append(std::to_string(compression_options.max_dict_buffer_bytes)) .append("; "); return result; } diff --git a/util/filelock_test.cc b/util/filelock_test.cc index fe2f7e0d8..2cf21d6cd 100644 --- a/util/filelock_test.cc +++ b/util/filelock_test.cc @@ -129,8 
+129,8 @@ TEST_F(LockTest, LockBySameThread) { ASSERT_TRUE(s.IsIOError()); #ifndef OS_WIN // Validate that error message contains current thread ID. - ASSERT_TRUE(s.ToString().find(ToString(Env::Default()->GetThreadID())) != - std::string::npos); + ASSERT_TRUE(s.ToString().find(std::to_string( + Env::Default()->GetThreadID())) != std::string::npos); #endif // check the file is locked diff --git a/util/ribbon_test.cc b/util/ribbon_test.cc index e69e62673..c8fd436aa 100644 --- a/util/ribbon_test.cc +++ b/util/ribbon_test.cc @@ -1128,8 +1128,7 @@ TYPED_TEST(RibbonTypeParamTest, FindOccupancy) { return; } - KeyGen cur(ROCKSDB_NAMESPACE::ToString( - testing::UnitTest::GetInstance()->random_seed()), + KeyGen cur(std::to_string(testing::UnitTest::GetInstance()->random_seed()), 0); Banding banding; @@ -1247,8 +1246,7 @@ TYPED_TEST(RibbonTypeParamTest, OptimizeHomogAtScale) { return; } - KeyGen cur(ROCKSDB_NAMESPACE::ToString( - testing::UnitTest::GetInstance()->random_seed()), + KeyGen cur(std::to_string(testing::UnitTest::GetInstance()->random_seed()), 0); Banding banding; diff --git a/util/slice.cc b/util/slice.cc index 27263ad3c..f9f4ddd59 100644 --- a/util/slice.cc +++ b/util/slice.cc @@ -30,8 +30,7 @@ class FixedPrefixTransform : public SliceTransform { public: explicit FixedPrefixTransform(size_t prefix_len) : prefix_len_(prefix_len) { - id_ = std::string(kClassName()) + "." + - ROCKSDB_NAMESPACE::ToString(prefix_len_); + id_ = std::string(kClassName()) + "." + std::to_string(prefix_len_); } static const char* kClassName() { return "rocksdb.FixedPrefix"; } @@ -43,8 +42,8 @@ class FixedPrefixTransform : public SliceTransform { if (name == id_) { return true; } else if (StartsWith(name, kNickName())) { - std::string alt_id = std::string(kNickName()) + ":" + - ROCKSDB_NAMESPACE::ToString(prefix_len_); + std::string alt_id = + std::string(kNickName()) + ":" + std::to_string(prefix_len_); if (name == alt_id) { return true; } @@ -84,8 +83,7 @@ class CappedPrefixTransform : public SliceTransform { public: explicit CappedPrefixTransform(size_t cap_len) : cap_len_(cap_len) { - id_ = - std::string(kClassName()) + "." + ROCKSDB_NAMESPACE::ToString(cap_len_); + id_ = std::string(kClassName()) + "." + std::to_string(cap_len_); } static const char* kClassName() { return "rocksdb.CappedPrefix"; } @@ -98,8 +96,8 @@ class CappedPrefixTransform : public SliceTransform { if (name == id_) { return true; } else if (StartsWith(name, kNickName())) { - std::string alt_id = std::string(kNickName()) + ":" + - ROCKSDB_NAMESPACE::ToString(cap_len_); + std::string alt_id = + std::string(kNickName()) + ":" + std::to_string(cap_len_); if (name == alt_id) { return true; } @@ -291,7 +289,8 @@ std::string SliceTransform::AsString() const { // 2 small internal utility functions, for efficient hex conversions // and no need for snprintf, toupper etc... 
-// Originally from wdt/util/EncryptionUtils.cpp - for ToString(true)/DecodeHex: +// Originally from wdt/util/EncryptionUtils.cpp - for +// std::to_string(true)/DecodeHex: char toHex(unsigned char v) { if (v <= 9) { return '0' + v; diff --git a/util/string_util.cc b/util/string_util.cc index 24b70ba10..0a74c6966 100644 --- a/util/string_util.cc +++ b/util/string_util.cc @@ -432,7 +432,7 @@ bool SerializeIntVector(const std::vector& vec, std::string* value) { if (i > 0) { *value += ":"; } - *value += ToString(vec[i]); + *value += std::to_string(vec[i]); } return true; } diff --git a/util/string_util.h b/util/string_util.h index 7794dbb06..55d106fff 100644 --- a/util/string_util.h +++ b/util/string_util.h @@ -19,19 +19,6 @@ class Slice; extern std::vector StringSplit(const std::string& arg, char delim); -template -inline std::string ToString(T value) { -#if !(defined OS_ANDROID) && !(defined CYGWIN) && !(defined OS_FREEBSD) - return std::to_string(value); -#else - // Andorid or cygwin doesn't support all of C++11, std::to_string() being - // one of the not supported features. - std::ostringstream os; - os << value; - return os.str(); -#endif -} - // Append a human-readable printout of "num" to *str extern void AppendNumberTo(std::string* str, uint64_t num); diff --git a/utilities/backup/backup_engine.cc b/utilities/backup/backup_engine.cc index 1c6a2cb0c..e2759e8b2 100644 --- a/utilities/backup/backup_engine.cc +++ b/utilities/backup/backup_engine.cc @@ -496,8 +496,8 @@ class BackupEngineImpl { bool tmp = false, const std::string& file = "") const { assert(file.size() == 0 || file[0] != '/'); - return kPrivateDirSlash + ROCKSDB_NAMESPACE::ToString(backup_id) + - (tmp ? ".tmp" : "") + "/" + file; + return kPrivateDirSlash + std::to_string(backup_id) + (tmp ? ".tmp" : "") + + "/" + file; } inline std::string GetSharedFileRel(const std::string& file = "", bool tmp = false) const { @@ -524,13 +524,13 @@ class BackupEngineImpl { if (UseLegacyNaming(db_session_id)) { assert(!checksum_hex.empty()); file_copy.insert(file_copy.find_last_of('.'), - "_" + ToString(ChecksumHexToInt32(checksum_hex)) + "_" + - ToString(file_size)); + "_" + std::to_string(ChecksumHexToInt32(checksum_hex)) + + "_" + std::to_string(file_size)); } else { file_copy.insert(file_copy.find_last_of('.'), "_s" + db_session_id); if (GetNamingFlags() & BackupEngineOptions::kFlagIncludeFileSize) { file_copy.insert(file_copy.find_last_of('.'), - "_" + ToString(file_size)); + "_" + std::to_string(file_size)); } } return file_copy; @@ -544,7 +544,7 @@ class BackupEngineImpl { } inline std::string GetBackupMetaFile(BackupID backup_id, bool tmp) const { return GetAbsolutePath(kMetaDirName) + "/" + (tmp ? "." : "") + - ROCKSDB_NAMESPACE::ToString(backup_id) + (tmp ? ".tmp" : ""); + std::to_string(backup_id) + (tmp ? ".tmp" : ""); } // If size_limit == 0, there is no size limit, copy everything. @@ -1067,7 +1067,7 @@ IOStatus BackupEngineImpl::Initialize() { ROCKS_LOG_INFO(options_.info_log, "Detected backup %s", file.c_str()); BackupID backup_id = 0; sscanf(file.c_str(), "%u", &backup_id); - if (backup_id == 0 || file != ROCKSDB_NAMESPACE::ToString(backup_id)) { + if (backup_id == 0 || file != std::to_string(backup_id)) { // Invalid file name, will be deleted with auto-GC when user // initiates an append or write operation. (Behave as read-only until // then.) 
@@ -1666,8 +1666,8 @@ void BackupEngineImpl::SetBackupInfoFromBackupMeta( bool include_file_details) const { *backup_info = BackupInfo(id, meta.GetTimestamp(), meta.GetSize(), meta.GetNumberFiles(), meta.GetAppMetadata()); - std::string dir = options_.backup_dir + "/" + kPrivateDirSlash + - ROCKSDB_NAMESPACE::ToString(id); + std::string dir = + options_.backup_dir + "/" + kPrivateDirSlash + std::to_string(id); if (include_file_details) { auto& file_details = backup_info->file_details; file_details.reserve(meta.GetFiles().size()); @@ -1962,9 +1962,9 @@ IOStatus BackupEngineImpl::VerifyBackup(BackupID backup_id, // verify file size if (file_info->size != curr_abs_path_to_size[abs_path]) { std::string size_info("Expected file size is " + - ToString(file_info->size) + + std::to_string(file_info->size) + " while found file size is " + - ToString(curr_abs_path_to_size[abs_path])); + std::to_string(curr_abs_path_to_size[abs_path])); return IOStatus::Corruption("File corrupted: File size mismatch for " + abs_path + ": " + size_info); } @@ -2645,8 +2645,8 @@ IOStatus BackupEngineImpl::BackupMeta::AddFile( if (itr->second->size != file_info->size) { std::string msg = "Size mismatch for existing backup file: "; msg.append(file_info->filename); - msg.append(" Size in backup is " + ToString(itr->second->size) + - " while size in DB is " + ToString(file_info->size)); + msg.append(" Size in backup is " + std::to_string(itr->second->size) + + " while size in DB is " + std::to_string(file_info->size)); msg.append( " If this DB file checks as not corrupt, try deleting old" " backups or backing up to a different backup directory."); @@ -2939,7 +2939,7 @@ IOStatus BackupEngineImpl::BackupMeta::LoadFromFile( if (field_name == kFileCrc32cFieldName) { uint32_t checksum_value = static_cast(strtoul(field_data.c_str(), nullptr, 10)); - if (field_data != ROCKSDB_NAMESPACE::ToString(checksum_value)) { + if (field_data != std::to_string(checksum_value)) { return IOStatus::Corruption("Invalid checksum value for " + filename + " in " + meta_filename_); } @@ -2949,8 +2949,9 @@ IOStatus BackupEngineImpl::BackupMeta::LoadFromFile( std::strtoull(field_data.c_str(), nullptr, /*base*/ 10); if (ex_size != actual_size) { return IOStatus::Corruption( - "For file " + filename + " expected size " + ToString(ex_size) + - " but found size" + ToString(actual_size)); + "For file " + filename + " expected size " + + std::to_string(ex_size) + " but found size" + + std::to_string(actual_size)); } } else if (field_name == kTemperatureFieldName) { auto iter = temperature_string_map.find(field_data); @@ -3048,7 +3049,7 @@ IOStatus BackupEngineImpl::BackupMeta::StoreToFile( if (schema_version > static_cast(minor_version_strings.size() - 1)) { return IOStatus::NotSupported( "Only BackupEngineOptions::schema_version <= " + - ToString(minor_version_strings.size() - 1) + " is supported"); + std::to_string(minor_version_strings.size() - 1) + " is supported"); } std::string ver = minor_version_strings[schema_version]; @@ -3103,7 +3104,7 @@ IOStatus BackupEngineImpl::BackupMeta::StoreToFile( << temperature_to_string[file->temp]; } if (schema_test_options && schema_test_options->file_sizes) { - buf << " " << kFileSizeFieldName << " " << ToString(file->size); + buf << " " << kFileSizeFieldName << " " << std::to_string(file->size); } if (schema_test_options) { for (auto& e : schema_test_options->file_fields) { diff --git a/utilities/backup/backup_engine_test.cc b/utilities/backup/backup_engine_test.cc index 00b71cfa0..7a2c17ad2 100644 --- 
a/utilities/backup/backup_engine_test.cc +++ b/utilities/backup/backup_engine_test.cc @@ -564,8 +564,8 @@ size_t FillDB(DB* db, int from, int to, FillDBFlushAction flush_action = kFlushMost) { size_t bytes_written = 0; for (int i = from; i < to; ++i) { - std::string key = "testkey" + ToString(i); - std::string value = "testvalue" + ToString(i); + std::string key = "testkey" + std::to_string(i); + std::string value = "testvalue" + std::to_string(i); bytes_written += key.size() + value.size(); EXPECT_OK(db->Put(WriteOptions(), Slice(key), Slice(value))); @@ -582,17 +582,17 @@ size_t FillDB(DB* db, int from, int to, void AssertExists(DB* db, int from, int to) { for (int i = from; i < to; ++i) { - std::string key = "testkey" + ToString(i); + std::string key = "testkey" + std::to_string(i); std::string value; Status s = db->Get(ReadOptions(), Slice(key), &value); - ASSERT_EQ(value, "testvalue" + ToString(i)); + ASSERT_EQ(value, "testvalue" + std::to_string(i)); } } void AssertEmpty(DB* db, int from, int to) { for (int i = from; i < to; ++i) { - std::string key = "testkey" + ToString(i); - std::string value = "testvalue" + ToString(i); + std::string key = "testkey" + std::to_string(i); + std::string value = "testvalue" + std::to_string(i); Status s = db->Get(ReadOptions(), Slice(key), &value); ASSERT_TRUE(s.IsNotFound()); @@ -955,7 +955,7 @@ class BackupEngineTest : public testing::Test { ASSERT_LT(last_underscore, last_dot); std::string s = child.name.substr(last_underscore + 1, last_dot - (last_underscore + 1)); - ASSERT_EQ(s, ToString(child.size_bytes)); + ASSERT_EQ(s, std::to_string(child.size_bytes)); ++found_count; } ASSERT_GE(found_count, minimum_count); @@ -3285,7 +3285,7 @@ TEST_F(BackupEngineTest, MetaSchemaVersion2_SizeCorruption) { for (int id = 1; id <= 3; ++id) { ASSERT_OK(file_manager_->WriteToFile( - private_dir + "/" + ToString(id) + "/CURRENT", "x")); + private_dir + "/" + std::to_string(id) + "/CURRENT", "x")); } // Except corrupt Backup 4 with same size CURRENT file { @@ -3518,7 +3518,7 @@ TEST_F(BackupEngineTest, Concurrency) { ASSERT_EQ(ids.size(), 0U); // (Eventually, see below) Restore one of the backups, or "latest" - std::string restore_db_dir = dbname_ + "/restore" + ToString(i); + std::string restore_db_dir = dbname_ + "/restore" + std::to_string(i); DestroyDir(test_db_env_.get(), restore_db_dir).PermitUncheckedError(); BackupID to_restore; if (latest) { @@ -4111,7 +4111,7 @@ TEST_F(BackupEngineTest, FileTemperatures) { } // Restore backup to another virtual (tiered) dir - const std::string restore_dir = "/restore" + ToString(i); + const std::string restore_dir = "/restore" + std::to_string(i); ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup( RestoreOptions(), restore_dir, restore_dir)); diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 28ff67fa4..623286668 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -306,11 +306,11 @@ class BlobDBTest : public testing::Test { Random rnd(301); for (size_t i = 0; i < 100000; i++) { uint64_t ttl = rnd.Next() % 86400; - PutRandomWithTTL("key" + ToString(i % 500), ttl, &rnd, nullptr); + PutRandomWithTTL("key" + std::to_string(i % 500), ttl, &rnd, nullptr); } for (size_t i = 0; i < 10; i++) { - Delete("key" + ToString(i % 500)); + Delete("key" + std::to_string(i % 500)); } } @@ -329,7 +329,7 @@ TEST_F(BlobDBTest, Put) { Open(bdb_options); std::map data; for (size_t i = 0; i < 100; i++) { - PutRandom("key" + ToString(i), &rnd, &data); + PutRandom("key" 
+ std::to_string(i), &rnd, &data); } VerifyDB(data); } @@ -348,7 +348,7 @@ TEST_F(BlobDBTest, PutWithTTL) { mock_clock_->SetCurrentTime(50); for (size_t i = 0; i < 100; i++) { uint64_t ttl = rnd.Next() % 100; - PutRandomWithTTL("key" + ToString(i), ttl, &rnd, + PutRandomWithTTL("key" + std::to_string(i), ttl, &rnd, (ttl <= 50 ? nullptr : &data)); } mock_clock_->SetCurrentTime(100); @@ -374,7 +374,7 @@ TEST_F(BlobDBTest, PutUntil) { mock_clock_->SetCurrentTime(50); for (size_t i = 0; i < 100; i++) { uint64_t expiration = rnd.Next() % 100 + 50; - PutRandomUntil("key" + ToString(i), expiration, &rnd, + PutRandomUntil("key" + std::to_string(i), expiration, &rnd, (expiration <= 100 ? nullptr : &data)); } mock_clock_->SetCurrentTime(100); @@ -394,12 +394,12 @@ TEST_F(BlobDBTest, StackableDBGet) { Open(bdb_options); std::map data; for (size_t i = 0; i < 100; i++) { - PutRandom("key" + ToString(i), &rnd, &data); + PutRandom("key" + std::to_string(i), &rnd, &data); } for (size_t i = 0; i < 100; i++) { StackableDB *db = blob_db_; ColumnFamilyHandle *column_family = db->DefaultColumnFamily(); - std::string key = "key" + ToString(i); + std::string key = "key" + std::to_string(i); PinnableSlice pinnable_value; ASSERT_OK(db->Get(ReadOptions(), column_family, key, &pinnable_value)); std::string string_value; @@ -468,7 +468,8 @@ TEST_F(BlobDBTest, WriteBatch) { for (size_t i = 0; i < 100; i++) { WriteBatch batch; for (size_t j = 0; j < 10; j++) { - PutRandomToWriteBatch("key" + ToString(j * 100 + i), &rnd, &batch, &data); + PutRandomToWriteBatch("key" + std::to_string(j * 100 + i), &rnd, &batch, + &data); } ASSERT_OK(blob_db_->Write(WriteOptions(), &batch)); @@ -484,10 +485,10 @@ TEST_F(BlobDBTest, Delete) { Open(bdb_options); std::map data; for (size_t i = 0; i < 100; i++) { - PutRandom("key" + ToString(i), &rnd, &data); + PutRandom("key" + std::to_string(i), &rnd, &data); } for (size_t i = 0; i < 100; i += 5) { - Delete("key" + ToString(i), &data); + Delete("key" + std::to_string(i), &data); } VerifyDB(data); } @@ -499,11 +500,11 @@ TEST_F(BlobDBTest, DeleteBatch) { bdb_options.disable_background_tasks = true; Open(bdb_options); for (size_t i = 0; i < 100; i++) { - PutRandom("key" + ToString(i), &rnd); + PutRandom("key" + std::to_string(i), &rnd); } WriteBatch batch; for (size_t i = 0; i < 100; i++) { - ASSERT_OK(batch.Delete("key" + ToString(i))); + ASSERT_OK(batch.Delete("key" + std::to_string(i))); } ASSERT_OK(blob_db_->Write(WriteOptions(), &batch)); // DB should be empty. 
@@ -518,11 +519,11 @@ TEST_F(BlobDBTest, Override) { Open(bdb_options); std::map data; for (int i = 0; i < 10000; i++) { - PutRandom("key" + ToString(i), &rnd, nullptr); + PutRandom("key" + std::to_string(i), &rnd, nullptr); } // override all the keys for (int i = 0; i < 10000; i++) { - PutRandom("key" + ToString(i), &rnd, &data); + PutRandom("key" + std::to_string(i), &rnd, &data); } VerifyDB(data); } @@ -537,13 +538,13 @@ TEST_F(BlobDBTest, Compression) { Open(bdb_options); std::map data; for (size_t i = 0; i < 100; i++) { - PutRandom("put-key" + ToString(i), &rnd, &data); + PutRandom("put-key" + std::to_string(i), &rnd, &data); } for (int i = 0; i < 100; i++) { WriteBatch batch; for (size_t j = 0; j < 10; j++) { - PutRandomToWriteBatch("write-batch-key" + ToString(j * 100 + i), &rnd, - &batch, &data); + PutRandomToWriteBatch("write-batch-key" + std::to_string(j * 100 + i), + &rnd, &batch, &data); } ASSERT_OK(blob_db_->Write(WriteOptions(), &batch)); } @@ -559,7 +560,7 @@ TEST_F(BlobDBTest, DecompressAfterReopen) { Open(bdb_options); std::map data; for (size_t i = 0; i < 100; i++) { - PutRandom("put-key" + ToString(i), &rnd, &data); + PutRandom("put-key" + std::to_string(i), &rnd, &data); } VerifyDB(data); bdb_options.compression = CompressionType::kNoCompression; @@ -578,7 +579,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) { std::map data; size_t data_idx = 0; for (; data_idx < 100; data_idx++) { - PutRandom("put-key" + ToString(data_idx), &rnd, &data); + PutRandom("put-key" + std::to_string(data_idx), &rnd, &data); } VerifyDB(data); auto blob_files = blob_db_impl()->TEST_GetBlobFiles(); @@ -591,7 +592,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) { // Add more data with new compression type for (; data_idx < 200; data_idx++) { - PutRandom("put-key" + ToString(data_idx), &rnd, &data); + PutRandom("put-key" + std::to_string(data_idx), &rnd, &data); } VerifyDB(data); @@ -620,7 +621,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) { // Add more data with new compression type for (; data_idx < 300; data_idx++) { - PutRandom("put-key" + ToString(data_idx), &rnd, &data); + PutRandom("put-key" + std::to_string(data_idx), &rnd, &data); } VerifyDB(data); @@ -649,7 +650,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) { std::map data; size_t data_idx = 0; for (; data_idx < 100; data_idx++) { - PutRandom("put-key" + ToString(data_idx), &rnd, &data); + PutRandom("put-key" + std::to_string(data_idx), &rnd, &data); } VerifyDB(data); auto blob_files = blob_db_impl()->TEST_GetBlobFiles(); @@ -662,7 +663,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) { // Add more data with Snappy compression type for (; data_idx < 200; data_idx++) { - PutRandom("put-key" + ToString(data_idx), &rnd, &data); + PutRandom("put-key" + std::to_string(data_idx), &rnd, &data); } VerifyDB(data); @@ -689,7 +690,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) { bdb_options.compression = kNoCompression; Reopen(bdb_options); for (; data_idx < 300; data_idx++) { - PutRandom("put-key" + ToString(data_idx), &rnd, &data); + PutRandom("put-key" + std::to_string(data_idx), &rnd, &data); } VerifyDB(data); @@ -706,14 +707,14 @@ TEST_F(BlobDBTest, ChangeCompressionGC) { bdb_options.compression = kSnappyCompression; Reopen(bdb_options); for (; data_idx < 400; data_idx++) { - PutRandom("put-key" + ToString(data_idx), &rnd, &data); + PutRandom("put-key" + std::to_string(data_idx), &rnd, &data); } VerifyDB(data); bdb_options.compression = kLZ4Compression; Reopen(bdb_options); for (; data_idx < 500; data_idx++) { - PutRandom("put-key" + 
ToString(data_idx), &rnd, &data); + PutRandom("put-key" + std::to_string(data_idx), &rnd, &data); } VerifyDB(data); @@ -739,7 +740,8 @@ TEST_F(BlobDBTest, MultipleWriters) { [&](uint32_t id) { Random rnd(301 + id); for (int j = 0; j < 100; j++) { - std::string key = "key" + ToString(id) + "_" + ToString(j); + std::string key = + "key" + std::to_string(id) + "_" + std::to_string(j); if (id < 5) { PutRandom(key, &rnd, &data_set[id]); } else { @@ -986,7 +988,7 @@ TEST_F(BlobDBTest, GetLiveFilesMetaData) { std::map data; for (size_t i = 0; i < 100; i++) { - PutRandom("key" + ToString(i), &rnd, &data); + PutRandom("key" + std::to_string(i), &rnd, &data); } constexpr uint64_t expiration = 1000ULL; @@ -1032,7 +1034,7 @@ TEST_F(BlobDBTest, MigrateFromPlainRocksDB) { ASSERT_OK(DB::Open(options, dbname_, &db)); for (size_t i = 0; i < kNumIteration; i++) { auto key_index = rnd.Next() % kNumKey; - std::string key = "key" + ToString(key_index); + std::string key = "key" + std::to_string(key_index); PutRandom(db, key, &rnd, &data); } VerifyDB(db, data); @@ -1044,7 +1046,7 @@ TEST_F(BlobDBTest, MigrateFromPlainRocksDB) { VerifyDB(blob_db_, data); for (size_t i = 0; i < kNumIteration; i++) { auto key_index = rnd.Next() % kNumKey; - std::string key = "key" + ToString(key_index); + std::string key = "key" + std::to_string(key_index); is_blob[key_index] = true; PutRandom(blob_db_, key, &rnd, &data); } @@ -1056,7 +1058,7 @@ TEST_F(BlobDBTest, MigrateFromPlainRocksDB) { ASSERT_OK(DB::Open(options, dbname_, &db)); std::string value; for (size_t i = 0; i < kNumKey; i++) { - std::string key = "key" + ToString(i); + std::string key = "key" + std::to_string(i); Status s = db->Get(ReadOptions(), key, &value); if (data.count(key) == 0) { ASSERT_TRUE(s.IsNotFound()); @@ -1193,7 +1195,7 @@ TEST_F(BlobDBTest, FIFOEviction_NoEnoughBlobFilesToEvict) { // Insert some data into LSM tree to make sure FIFO eviction take SST // file size into account. for (int i = 0; i < 1000; i++) { - ASSERT_OK(Put("key" + ToString(i), small_value, &data)); + ASSERT_OK(Put("key" + std::to_string(i), small_value, &data)); } ASSERT_OK(blob_db_->Flush(FlushOptions())); uint64_t live_sst_size = 0; @@ -1250,7 +1252,7 @@ TEST_F(BlobDBTest, FIFOEviction_TriggerOnSSTSizeChange) { // Insert some small keys and flush to bring DB out of space. std::map data; for (int i = 0; i < 10; i++) { - ASSERT_OK(Put("key" + ToString(i), "v", &data)); + ASSERT_OK(Put("key" + std::to_string(i), "v", &data)); } ASSERT_OK(blob_db_->Flush(FlushOptions())); @@ -1280,7 +1282,7 @@ TEST_F(BlobDBTest, InlineSmallValues) { bool has_ttl = rnd.Next() % 2; uint64_t expiration = rnd.Next() % kMaxExpiration; int len = is_small_value ? 50 : 200; - std::string key = "key" + ToString(i); + std::string key = "key" + std::to_string(i); std::string value = rnd.HumanReadableString(len); std::string blob_index; data[key] = value; @@ -1520,7 +1522,7 @@ TEST_F(BlobDBTest, FilterExpiredBlobIndex) { bool has_ttl = rnd.Next() % 2; uint64_t expiration = rnd.Next() % kMaxExpiration; int len = is_small_value ? 10 : 200; - std::string key = "key" + ToString(rnd.Next() % kNumKeys); + std::string key = "key" + std::to_string(rnd.Next() % kNumKeys); std::string value = rnd.HumanReadableString(len); if (!has_ttl) { if (is_small_value) { @@ -1641,7 +1643,7 @@ TEST_F(BlobDBTest, FilterForFIFOEviction) { std::map data_after_compact; // Insert some small values that will be inlined. 
for (int i = 0; i < 1000; i++) { - std::string key = "key" + ToString(i); + std::string key = "key" + std::to_string(i); std::string value = rnd.HumanReadableString(50); uint64_t ttl = rnd.Next() % 120 + 1; ASSERT_OK(PutWithTTL(key, value, ttl, &data)); diff --git a/utilities/blob_db/blob_dump_tool.cc b/utilities/blob_db/blob_dump_tool.cc index 3ea627553..47ab16218 100644 --- a/utilities/blob_db/blob_dump_tool.cc +++ b/utilities/blob_db/blob_dump_tool.cc @@ -134,7 +134,7 @@ Status BlobDumpTool::DumpBlobLogHeader(uint64_t* offset, if (!GetStringFromCompressionType(&compression_str, header.compression) .ok()) { compression_str = "Unrecongnized compression type (" + - ToString((int)header.compression) + ")"; + std::to_string((int)header.compression) + ")"; } fprintf(stdout, " Compression : %s\n", compression_str.c_str()); fprintf(stdout, " Expiration range : %s\n", @@ -271,7 +271,7 @@ std::string BlobDumpTool::GetString(std::pair p) { if (p.first == 0 && p.second == 0) { return "nil"; } - return "(" + ToString(p.first) + ", " + ToString(p.second) + ")"; + return "(" + std::to_string(p.first) + ", " + std::to_string(p.second) + ")"; } } // namespace blob_db diff --git a/utilities/memory/memory_test.cc b/utilities/memory/memory_test.cc index bef22fc2f..33d0666e8 100644 --- a/utilities/memory/memory_test.cc +++ b/utilities/memory/memory_test.cc @@ -24,7 +24,7 @@ class MemoryTest : public testing::Test { assert(Env::Default()->CreateDirIfMissing(kDbDir).ok()); } - std::string GetDBName(int id) { return kDbDir + "db_" + ToString(id); } + std::string GetDBName(int id) { return kDbDir + "db_" + std::to_string(id); } void UpdateUsagesHistory(const std::vector& dbs) { std::map usage_by_type; diff --git a/utilities/options/options_util_test.cc b/utilities/options/options_util_test.cc index 2522072fc..f88e3a56c 100644 --- a/utilities/options/options_util_test.cc +++ b/utilities/options/options_util_test.cc @@ -505,7 +505,7 @@ static void WriteOptionsFile(Env* env, const std::string& path, "\n" "[Version]\n" " rocksdb_version=" + - ToString(major) + "." + ToString(minor) + + std::to_string(major) + "." + std::to_string(minor) + ".0\n" " options_file_version=1\n"; diff --git a/utilities/simulator_cache/sim_cache_test.cc b/utilities/simulator_cache/sim_cache_test.cc index 83d4c6fa9..ceb91c154 100644 --- a/utilities/simulator_cache/sim_cache_test.cc +++ b/utilities/simulator_cache/sim_cache_test.cc @@ -44,7 +44,7 @@ class SimCacheTest : public DBTestBase { void InitTable(const Options& /*options*/) { std::string value(kValueSize, 'a'); for (size_t i = 0; i < kNumBlocks * 2; i++) { - ASSERT_OK(Put(ToString(i), value.c_str())); + ASSERT_OK(Put(std::to_string(i), value.c_str())); } } @@ -98,7 +98,7 @@ TEST_F(SimCacheTest, SimCache) { // Load blocks into cache. for (size_t i = 0; i < kNumBlocks; i++) { iter = db_->NewIterator(read_options); - iter->Seek(ToString(i)); + iter->Seek(std::to_string(i)); ASSERT_OK(iter->status()); CheckCacheCounters(options, 1, 0, 1, 0); iterators[i].reset(iter); @@ -115,7 +115,7 @@ TEST_F(SimCacheTest, SimCache) { // Test with strict capacity limit. 
simCache->SetStrictCapacityLimit(true); iter = db_->NewIterator(read_options); - iter->Seek(ToString(kNumBlocks * 2 - 1)); + iter->Seek(std::to_string(kNumBlocks * 2 - 1)); ASSERT_TRUE(iter->status().IsIncomplete()); CheckCacheCounters(options, 1, 0, 0, 1); delete iter; @@ -129,14 +129,14 @@ TEST_F(SimCacheTest, SimCache) { // Add kNumBlocks again for (size_t i = 0; i < kNumBlocks; i++) { std::unique_ptr it(db_->NewIterator(read_options)); - it->Seek(ToString(i)); + it->Seek(std::to_string(i)); ASSERT_OK(it->status()); CheckCacheCounters(options, 0, 1, 0, 0); } ASSERT_EQ(5, simCache->get_hit_counter()); for (size_t i = kNumBlocks; i < kNumBlocks * 2; i++) { std::unique_ptr it(db_->NewIterator(read_options)); - it->Seek(ToString(i)); + it->Seek(std::to_string(i)); ASSERT_OK(it->status()); CheckCacheCounters(options, 1, 0, 1, 0); } diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector.cc b/utilities/table_properties_collectors/compact_on_deletion_collector.cc index 0c0d0751f..16f33934d 100644 --- a/utilities/table_properties_collectors/compact_on_deletion_collector.cc +++ b/utilities/table_properties_collectors/compact_on_deletion_collector.cc @@ -115,7 +115,7 @@ static std::unordered_map std::string* value) { const auto* factory = static_cast(addr); - *value = ToString(factory->GetWindowSize()); + *value = std::to_string(factory->GetWindowSize()); return Status::OK(); }, nullptr}}, @@ -133,7 +133,7 @@ static std::unordered_map std::string* value) { const auto* factory = static_cast(addr); - *value = ToString(factory->GetDeletionTrigger()); + *value = std::to_string(factory->GetDeletionTrigger()); return Status::OK(); }, nullptr}}, @@ -151,7 +151,7 @@ static std::unordered_map std::string* value) { const auto* factory = static_cast(addr); - *value = ToString(factory->GetDeletionRatio()); + *value = std::to_string(factory->GetDeletionRatio()); return Status::OK(); }, nullptr}}, diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index 20ee73e27..b0c34a649 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -574,8 +574,8 @@ TEST_P(TransactionTest, DeadlockCycleShared) { for (uint32_t i = 0; i < 31; i++) { txns[i] = db->BeginTransaction(write_options, txn_options); ASSERT_TRUE(txns[i]); - auto s = txns[i]->GetForUpdate(read_options, ToString((i + 1) / 2), nullptr, - false /* exclusive */); + auto s = txns[i]->GetForUpdate(read_options, std::to_string((i + 1) / 2), + nullptr, false /* exclusive */); ASSERT_OK(s); } @@ -589,8 +589,8 @@ TEST_P(TransactionTest, DeadlockCycleShared) { std::vector threads; for (uint32_t i = 0; i < 15; i++) { std::function blocking_thread = [&, i] { - auto s = txns[i]->GetForUpdate(read_options, ToString(i + 1), nullptr, - true /* exclusive */); + auto s = txns[i]->GetForUpdate(read_options, std::to_string(i + 1), + nullptr, true /* exclusive */); ASSERT_OK(s); ASSERT_OK(txns[i]->Rollback()); delete txns[i]; @@ -641,7 +641,7 @@ TEST_P(TransactionTest, DeadlockCycleShared) { auto dl_node = *it; ASSERT_EQ(dl_node.m_txn_id, offset_root + leaf_id); ASSERT_EQ(dl_node.m_cf_id, 0U); - ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key)); + ASSERT_EQ(dl_node.m_waiting_key, std::to_string(curr_waiting_key)); ASSERT_EQ(dl_node.m_exclusive, true); if (curr_waiting_key == 0) { @@ -708,7 +708,8 @@ TEST_P(TransactionTest, DeadlockCycleShared) { for (uint32_t i = 0; i < 2; i++) { txns_shared[i] = db->BeginTransaction(write_options, txn_options); 
ASSERT_TRUE(txns_shared[i]); - auto s = txns_shared[i]->GetForUpdate(read_options, ToString(i), nullptr); + auto s = + txns_shared[i]->GetForUpdate(read_options, std::to_string(i), nullptr); ASSERT_OK(s); } @@ -721,8 +722,8 @@ TEST_P(TransactionTest, DeadlockCycleShared) { std::vector threads_shared; for (uint32_t i = 0; i < 1; i++) { std::function blocking_thread = [&, i] { - auto s = - txns_shared[i]->GetForUpdate(read_options, ToString(i + 1), nullptr); + auto s = txns_shared[i]->GetForUpdate(read_options, std::to_string(i + 1), + nullptr); ASSERT_OK(s); ASSERT_OK(txns_shared[i]->Rollback()); delete txns_shared[i]; @@ -781,7 +782,7 @@ TEST_P(TransactionStressTest, DeadlockCycle) { for (uint32_t i = 0; i < len; i++) { txns[i] = db->BeginTransaction(write_options, txn_options); ASSERT_TRUE(txns[i]); - auto s = txns[i]->GetForUpdate(read_options, ToString(i), nullptr); + auto s = txns[i]->GetForUpdate(read_options, std::to_string(i), nullptr); ASSERT_OK(s); } @@ -796,7 +797,8 @@ TEST_P(TransactionStressTest, DeadlockCycle) { std::vector threads; for (uint32_t i = 0; i + 1 < len; i++) { std::function blocking_thread = [&, i] { - auto s = txns[i]->GetForUpdate(read_options, ToString(i + 1), nullptr); + auto s = + txns[i]->GetForUpdate(read_options, std::to_string(i + 1), nullptr); ASSERT_OK(s); ASSERT_OK(txns[i]->Rollback()); delete txns[i]; @@ -848,7 +850,7 @@ TEST_P(TransactionStressTest, DeadlockCycle) { auto dl_node = *it; ASSERT_EQ(dl_node.m_txn_id, len + curr_txn_id - 1); ASSERT_EQ(dl_node.m_cf_id, 0u); - ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key)); + ASSERT_EQ(dl_node.m_waiting_key, std::to_string(curr_waiting_key)); ASSERT_EQ(dl_node.m_exclusive, true); curr_txn_id--; @@ -882,8 +884,8 @@ TEST_P(TransactionStressTest, DeadlockStress) { std::vector keys; for (uint32_t i = 0; i < NUM_KEYS; i++) { - ASSERT_OK(db->Put(write_options, Slice(ToString(i)), Slice(""))); - keys.push_back(ToString(i)); + ASSERT_OK(db->Put(write_options, Slice(std::to_string(i)), Slice(""))); + keys.push_back(std::to_string(i)); } size_t tid = std::hash()(std::this_thread::get_id()); @@ -959,8 +961,8 @@ TEST_P(TransactionTest, LogMarkLeakTest) { ASSERT_EQ(db_impl->TEST_FindMinLogContainingOutstandingPrep(), 0); for (size_t i = 0; i < 100; i++) { Transaction* txn = db->BeginTransaction(write_options, txn_options); - ASSERT_OK(txn->SetName("xid" + ToString(i))); - ASSERT_OK(txn->Put(Slice("foo" + ToString(i)), Slice("bar"))); + ASSERT_OK(txn->SetName("xid" + std::to_string(i))); + ASSERT_OK(txn->Put(Slice("foo" + std::to_string(i)), Slice("bar"))); ASSERT_OK(txn->Prepare()); ASSERT_GT(db_impl->TEST_FindMinLogContainingOutstandingPrep(), 0); if (rnd.OneIn(5)) { diff --git a/utilities/transactions/transaction_util.cc b/utilities/transactions/transaction_util.cc index f1d72ec07..360edc8ec 100644 --- a/utilities/transactions/transaction_util.cc +++ b/utilities/transactions/transaction_util.cc @@ -79,7 +79,7 @@ Status TransactionUtil::CheckKey(DBImpl* db_impl, SuperVersion* sv, result = Status::TryAgain( "Transaction could not check for conflicts as the MemTable does not " "contain a long enough history to check write at SequenceNumber: ", - ToString(snap_seq)); + std::to_string(snap_seq)); } } else if (snap_seq < earliest_seq || min_uncommitted <= earliest_seq) { // Use <= for min_uncommitted since earliest_seq is actually the largest sec @@ -164,7 +164,7 @@ Status TransactionUtil::CheckKeysForConflicts(DBImpl* db_impl, SuperVersion* sv = db_impl->GetAndRefSuperVersion(cf); if (sv == nullptr) { result 
= Status::InvalidArgument("Could not access column family " +
-                              ToString(cf));
+                              std::to_string(cf));
       break;
     }
diff --git a/utilities/transactions/write_prepared_transaction_test.cc b/utilities/transactions/write_prepared_transaction_test.cc
index 299b8b332..3a8de5115 100644
--- a/utilities/transactions/write_prepared_transaction_test.cc
+++ b/utilities/transactions/write_prepared_transaction_test.cc
@@ -1596,9 +1596,9 @@ TEST_P(WritePreparedTransactionTest, SmallestUnCommittedSeq) {
   const int cnt = 100;
   for (int i = 0; i < cnt; i++) {
     Transaction* txn = db->BeginTransaction(write_options, txn_options);
-    ASSERT_OK(txn->SetName("xid" + ToString(i)));
-    auto key = "key1" + ToString(i);
-    auto value = "value1" + ToString(i);
+    ASSERT_OK(txn->SetName("xid" + std::to_string(i)));
+    auto key = "key1" + std::to_string(i);
+    auto value = "value1" + std::to_string(i);
     ASSERT_OK(txn->Put(Slice(key), Slice(value)));
     ASSERT_OK(txn->Prepare());
     txns.push_back(txn);
@@ -2229,7 +2229,7 @@ TEST_P(WritePreparedTransactionTest, Rollback) {
     for (bool crash : {false, true}) {
       ASSERT_OK(ReOpen());
       WritePreparedTxnDB* wp_db = dynamic_cast<WritePreparedTxnDB*>(db);
-      std::string key_str = "key" + ToString(ikey);
+      std::string key_str = "key" + std::to_string(ikey);
       switch (ivalue) {
         case 0:
           break;
@@ -2335,7 +2335,7 @@ TEST_P(WritePreparedTransactionTest, DisableGCDuringRecovery) {
   std::vector versions;
   uint64_t seq = 0;
   for (uint64_t i = 1; i <= 1024; i++) {
-    std::string v = "bar" + ToString(i);
+    std::string v = "bar" + std::to_string(i);
     ASSERT_OK(db->Put(WriteOptions(), "foo", v));
     VerifyKeys({{"foo", v}});
     seq++;  // one for the key/value
@@ -3292,7 +3292,7 @@ TEST_P(WritePreparedTransactionTest,
   ASSERT_OK(ReOpen());
   for (size_t i = 0; i < kNumTransactions; i++) {
-    std::string key = "key" + ToString(i);
+    std::string key = "key" + std::to_string(i);
     std::string value = "value0";
     ASSERT_OK(db->Put(WriteOptions(), key, value));
     current_data[key] = value;
@@ -3302,16 +3302,16 @@ TEST_P(WritePreparedTransactionTest,
   for (size_t iter = 0; iter < kNumIterations; iter++) {
     auto r = rnd.Next() % (kNumTransactions + 1);
     if (r < kNumTransactions) {
-      std::string key = "key" + ToString(r);
+      std::string key = "key" + std::to_string(r);
       if (transactions[r] == nullptr) {
-        std::string value = "value" + ToString(versions[r] + 1);
+        std::string value = "value" + std::to_string(versions[r] + 1);
         auto* txn = db->BeginTransaction(WriteOptions());
-        ASSERT_OK(txn->SetName("txn" + ToString(r)));
+        ASSERT_OK(txn->SetName("txn" + std::to_string(r)));
         ASSERT_OK(txn->Put(key, value));
         ASSERT_OK(txn->Prepare());
         transactions[r] = txn;
       } else {
-        std::string value = "value" + ToString(++versions[r]);
+        std::string value = "value" + std::to_string(++versions[r]);
         ASSERT_OK(transactions[r]->Commit());
         delete transactions[r];
         transactions[r] = nullptr;
@@ -3888,7 +3888,7 @@ TEST_P(WritePreparedTransactionTest, CommitOfDelayedPrepared) {
       Transaction* txn =
           db->BeginTransaction(WriteOptions(), TransactionOptions());
       ASSERT_OK(txn->SetName("xid"));
-      std::string val_str = "value" + ToString(i);
+      std::string val_str = "value" + std::to_string(i);
       for (size_t b = 0; b < sub_batch_cnt; b++) {
         ASSERT_OK(txn->Put(Slice("key2"), val_str));
       }
diff --git a/utilities/transactions/write_prepared_txn_db.cc b/utilities/transactions/write_prepared_txn_db.cc
index 139afc37a..d70d9591d 100644
--- a/utilities/transactions/write_prepared_txn_db.cc
+++ b/utilities/transactions/write_prepared_txn_db.cc
@@ -726,9 +726,10 @@ SnapshotImpl* WritePreparedTxnDB::GetSnapshotInternal(
     assert(snap_impl->GetSequenceNumber() > max);
     if (snap_impl->GetSequenceNumber() <= max) {
       throw std::runtime_error(
-          "Snapshot seq " + ToString(snap_impl->GetSequenceNumber()) +
-          " after " + ToString(retry) +
-          " retries is still less than futre_max_evicted_seq_" + ToString(max));
+          "Snapshot seq " + std::to_string(snap_impl->GetSequenceNumber()) +
+          " after " + std::to_string(retry) +
+          " retries is still less than futre_max_evicted_seq_" +
+          std::to_string(max));
     }
   }
   EnhanceSnapshot(snap_impl, min_uncommitted);
diff --git a/utilities/transactions/write_prepared_txn_db.h b/utilities/transactions/write_prepared_txn_db.h
index 105dfe09f..25a382473 100644
--- a/utilities/transactions/write_prepared_txn_db.h
+++ b/utilities/transactions/write_prepared_txn_db.h
@@ -397,8 +397,8 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
     if (delta >= format.DELTA_UPPERBOUND) {
       throw std::runtime_error(
           "commit_seq >> prepare_seq. The allowed distance is " +
-          ToString(format.DELTA_UPPERBOUND) + " commit_seq is " +
-          ToString(cs) + " prepare_seq is " + ToString(ps));
+          std::to_string(format.DELTA_UPPERBOUND) + " commit_seq is " +
+          std::to_string(cs) + " prepare_seq is " + std::to_string(ps));
     }
     rep_ = (ps << format.PAD_BITS) & ~format.COMMIT_FILTER;
     rep_ = rep_ | delta;
diff --git a/utilities/transactions/write_unprepared_transaction_test.cc b/utilities/transactions/write_unprepared_transaction_test.cc
index 4940487ea..b0c4bae52 100644
--- a/utilities/transactions/write_unprepared_transaction_test.cc
+++ b/utilities/transactions/write_unprepared_transaction_test.cc
@@ -87,7 +87,7 @@ TEST_P(WriteUnpreparedTransactionTest, ReadYourOwnWrite) {
     txn->SetSnapshot();
     for (int i = 0; i < 5; i++) {
-      std::string stored_value = "v" + ToString(i);
+      std::string stored_value = "v" + std::to_string(i);
       ASSERT_OK(txn->Put("a", stored_value));
       ASSERT_OK(txn->Put("b", stored_value));
       ASSERT_OK(wup_txn->FlushWriteBatchToDB(false));
@@ -159,7 +159,7 @@ TEST_P(WriteUnpreparedStressTest, ReadYourOwnWriteStress) {
   std::vector<std::string> keys;
   for (uint32_t k = 0; k < kNumKeys * kNumThreads; k++) {
-    keys.push_back("k" + ToString(k));
+    keys.push_back("k" + std::to_string(k));
   }
   RandomShuffle(keys.begin(), keys.end());
@@ -188,7 +188,7 @@ TEST_P(WriteUnpreparedStressTest, ReadYourOwnWriteStress) {
       }
       txn = db->BeginTransaction(write_options, txn_options);
-      ASSERT_OK(txn->SetName(ToString(id)));
+      ASSERT_OK(txn->SetName(std::to_string(id)));
       txn->SetSnapshot();
       if (a >= RO_SNAPSHOT) {
         read_options.snapshot = txn->GetSnapshot();
@@ -342,8 +342,8 @@ TEST_P(WriteUnpreparedTransactionTest, RecoveryTest) {
       wup_db = dynamic_cast<WriteUnpreparedTxnDB*>(db);
       if (!empty) {
         for (int i = 0; i < num_batches; i++) {
-          ASSERT_OK(db->Put(WriteOptions(), "k" + ToString(i),
-                            "before value" + ToString(i)));
+          ASSERT_OK(db->Put(WriteOptions(), "k" + std::to_string(i),
+                            "before value" + std::to_string(i)));
         }
       }
@@ -352,7 +352,8 @@ TEST_P(WriteUnpreparedTransactionTest, RecoveryTest) {
       WriteUnpreparedTxn* wup_txn = dynamic_cast<WriteUnpreparedTxn*>(txn);
       ASSERT_OK(txn->SetName("xid"));
       for (int i = 0; i < num_batches; i++) {
-        ASSERT_OK(txn->Put("k" + ToString(i), "value" + ToString(i)));
+        ASSERT_OK(
+            txn->Put("k" + std::to_string(i), "value" + std::to_string(i)));
         if (txn_options.write_batch_flush_threshold == 1) {
           // WriteUnprepared will check write_batch_flush_threshold and
           // possibly flush before appending to the write batch. No flush will
@@ -396,12 +397,13 @@ TEST_P(WriteUnpreparedTransactionTest, RecoveryTest) {
       if (!empty || a == COMMIT) {
         for (int i = 0; i < num_batches; i++) {
           ASSERT_TRUE(iter->Valid());
-          ASSERT_EQ(iter->key().ToString(), "k" + ToString(i));
+          ASSERT_EQ(iter->key().ToString(), "k" + std::to_string(i));
           if (a == COMMIT) {
-            ASSERT_EQ(iter->value().ToString(), "value" + ToString(i));
+            ASSERT_EQ(iter->value().ToString(),
+                      "value" + std::to_string(i));
           } else {
             ASSERT_EQ(iter->value().ToString(),
-                      "before value" + ToString(i));
+                      "before value" + std::to_string(i));
           }
           iter->Next();
         }
@@ -434,7 +436,7 @@ TEST_P(WriteUnpreparedTransactionTest, UnpreparedBatch) {
     ASSERT_OK(txn->SetName("xid"));
     for (int i = 0; i < kNumKeys; i++) {
-      ASSERT_OK(txn->Put("k" + ToString(i), "v" + ToString(i)));
+      ASSERT_OK(txn->Put("k" + std::to_string(i), "v" + std::to_string(i)));
       if (txn_options.write_batch_flush_threshold == 1) {
         // WriteUnprepared will check write_batch_flush_threshold and
         // possibly flush before appending to the write batch. No flush will
@@ -471,8 +473,8 @@ TEST_P(WriteUnpreparedTransactionTest, UnpreparedBatch) {
     for (int i = 0; i < (commit ? kNumKeys : 0); i++) {
       ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ(iter->key().ToString(), "k" + ToString(i));
-      ASSERT_EQ(iter->value().ToString(), "v" + ToString(i));
+      ASSERT_EQ(iter->key().ToString(), "k" + std::to_string(i));
+      ASSERT_EQ(iter->value().ToString(), "v" + std::to_string(i));
       iter->Next();
     }
     ASSERT_FALSE(iter->Valid());
@@ -512,9 +514,10 @@ TEST_P(WriteUnpreparedTransactionTest, MarkLogWithPrepSection) {
     // Spread this transaction across multiple log files.
     for (int i = 0; i < kNumKeys; i++) {
-      ASSERT_OK(txn1->Put("k1" + ToString(i), "v" + ToString(i)));
+      ASSERT_OK(txn1->Put("k1" + std::to_string(i), "v" + std::to_string(i)));
       if (i >= kNumKeys / 2) {
-        ASSERT_OK(txn2->Put("k2" + ToString(i), "v" + ToString(i)));
+        ASSERT_OK(
+            txn2->Put("k2" + std::to_string(i), "v" + std::to_string(i)));
       }
       if (i > 0) {
@@ -601,7 +604,7 @@ TEST_P(WriteUnpreparedTransactionTest, IterateAndWrite) {
   for (Action a : {DO_DELETE, DO_UPDATE}) {
     for (int i = 0; i < 100; i++) {
-      ASSERT_OK(db->Put(woptions, ToString(i), ToString(i)));
+      ASSERT_OK(db->Put(woptions, std::to_string(i), std::to_string(i)));
     }
     Transaction* txn = db->BeginTransaction(woptions, txn_options);
@@ -662,7 +665,7 @@ TEST_P(WriteUnpreparedTransactionTest, IterateAfterClear) {
   for (Action a : {kCommit, kRollback}) {
     for (int i = 0; i < 100; i++) {
-      ASSERT_OK(db->Put(woptions, ToString(i), ToString(i)));
+      ASSERT_OK(db->Put(woptions, std::to_string(i), std::to_string(i)));
     }
     Transaction* txn = db->BeginTransaction(woptions, txn_options);
diff --git a/utilities/transactions/write_unprepared_txn.cc b/utilities/transactions/write_unprepared_txn.cc
index 7623c6d73..2e375d54e 100644
--- a/utilities/transactions/write_unprepared_txn.cc
+++ b/utilities/transactions/write_unprepared_txn.cc
@@ -281,8 +281,8 @@ Status WriteUnpreparedTxn::FlushWriteBatchToDBInternal(bool prepared) {
   static std::atomic_ullong autogen_id{0};
   // To avoid changing all tests to call SetName, just autogenerate one.
   if (wupt_db_->txn_db_options_.autogenerate_name) {
-    auto s =
-        SetName(std::string("autoxid") + ToString(autogen_id.fetch_add(1)));
+    auto s = SetName(std::string("autoxid") +
+                     std::to_string(autogen_id.fetch_add(1)));
     assert(s.ok());
   } else
 #endif
diff --git a/utilities/write_batch_with_index/write_batch_with_index.cc b/utilities/write_batch_with_index/write_batch_with_index.cc
index 3c22f41a8..af13d901a 100644
--- a/utilities/write_batch_with_index/write_batch_with_index.cc
+++ b/utilities/write_batch_with_index/write_batch_with_index.cc
@@ -247,8 +247,9 @@ Status WriteBatchWithIndex::Rep::ReBuildIndex() {
       case kTypeNoop:
         break;
       default:
-        return Status::Corruption("unknown WriteBatch tag in ReBuildIndex",
-                                  ToString(static_cast(tag)));
+        return Status::Corruption(
+            "unknown WriteBatch tag in ReBuildIndex",
+            std::to_string(static_cast(tag)));
     }
   }
diff --git a/utilities/write_batch_with_index/write_batch_with_index_internal.cc b/utilities/write_batch_with_index/write_batch_with_index_internal.cc
index 297d0e706..7ff6fbfaf 100644
--- a/utilities/write_batch_with_index/write_batch_with_index_internal.cc
+++ b/utilities/write_batch_with_index/write_batch_with_index_internal.cc
@@ -514,7 +514,7 @@ Status ReadableWriteBatch::GetEntryFromDataOffset(size_t data_offset,
       break;
     default:
       return Status::Corruption("unknown WriteBatch tag ",
-                                ToString(static_cast(tag)));
+                                std::to_string(static_cast(tag)));
   }
   return Status::OK();
 }
@@ -700,7 +700,7 @@ WBWIIteratorImpl::Result WriteBatchWithIndexInternal::GetFromBatch(
   auto result = iter->FindLatestUpdate(key, context);
   if (result == WBWIIteratorImpl::kError) {
     (*s) = Status::Corruption("Unexpected entry in WriteBatchWithIndex:",
-                              ToString(iter->Entry().type));
+                              std::to_string(iter->Entry().type));
     return result;
   } else if (result == WBWIIteratorImpl::kNotFound) {
     return result;
diff --git a/utilities/write_batch_with_index/write_batch_with_index_test.cc b/utilities/write_batch_with_index/write_batch_with_index_test.cc
index f03933823..87ef859ca 100644
--- a/utilities/write_batch_with_index/write_batch_with_index_test.cc
+++ b/utilities/write_batch_with_index/write_batch_with_index_test.cc
@@ -265,10 +265,10 @@ class WBWIBaseTest : public testing::Test {
         batch_->Delete(cf, key);
         result = "";
       } else if (key[i] == 'p') {
-        result = key + ToString(i);
+        result = key + std::to_string(i);
         batch_->Put(cf, key, result);
       } else if (key[i] == 'm') {
-        std::string value = key + ToString(i);
+        std::string value = key + std::to_string(i);
         batch_->Merge(cf, key, value);
         if (result.empty()) {
           result = value;
@@ -1192,11 +1192,11 @@ TEST_P(WriteBatchWithIndexTest, TestGetFromBatchMerge) {
   std::string expected = "X";
   for (int i = 0; i < 5; i++) {
-    ASSERT_OK(batch_->Merge("x", ToString(i)));
-    expected = expected + "," + ToString(i);
+    ASSERT_OK(batch_->Merge("x", std::to_string(i)));
+    expected = expected + "," + std::to_string(i);
     if (i % 2 == 0) {
-      ASSERT_OK(batch_->Put("y", ToString(i / 2)));
+      ASSERT_OK(batch_->Put("y", std::to_string(i / 2)));
     }
     ASSERT_OK(batch_->Merge("z", "z"));
@@ -1207,7 +1207,7 @@ TEST_P(WriteBatchWithIndexTest, TestGetFromBatchMerge) {
     s = batch_->GetFromBatch(column_family, options_, "y", &value);
     ASSERT_OK(s);
-    ASSERT_EQ(ToString(i / 2), value);
+    ASSERT_EQ(std::to_string(i / 2), value);
     s = batch_->GetFromBatch(column_family, options_, "z", &value);
     ASSERT_TRUE(s.IsMergeInProgress());
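For reference, a minimal, self-contained sketch of the substitution applied mechanically throughout the hunks above (not part of the patch; the MakeKey helper below is hypothetical and only std::to_string from <string> is assumed):

// Sketch only: drop the project-local ToString() numeric helper and call the
// C++11 standard std::to_string() directly.
#include <cassert>
#include <string>

// Hypothetical call site showing the rewritten pattern.
std::string MakeKey(int i) {
  // Before this patch: return "key" + ToString(i);
  return "key" + std::to_string(i);
}

int main() {
  assert(MakeKey(42) == "key42");
  return 0;
}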