diff --git a/CMakeLists.txt b/CMakeLists.txt
index ac9f3a6ab..e16e46434 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -40,6 +40,8 @@ include(GoogleTest)
 get_rocksdb_version(rocksdb_VERSION)
 project(rocksdb
   VERSION ${rocksdb_VERSION}
+  DESCRIPTION "An embeddable persistent key-value store for fast storage"
+  HOMEPAGE_URL https://rocksdb.org/
   LANGUAGES CXX C ASM)
 
 if(POLICY CMP0042)
@@ -1121,6 +1123,12 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
     COMPATIBILITY SameMajorVersion
   )
 
+  configure_file(
+    ${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}.pc.in
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc
+    @ONLY
+  )
+
   install(DIRECTORY include/rocksdb COMPONENT devel DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
 
   install(DIRECTORY "${PROJECT_SOURCE_DIR}/cmake/modules" COMPONENT devel DESTINATION ${package_config_destination})
@@ -1159,6 +1167,13 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
     COMPONENT devel
     DESTINATION ${package_config_destination}
   )
+
+  install(
+    FILES
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc
+    COMPONENT devel
+    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig
+  )
 endif()
 
 option(WITH_ALL_TESTS "Build all tests, rather than a small subset" ON)
diff --git a/HISTORY.md b/HISTORY.md
index baf61150a..12548a7d8 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -14,6 +14,7 @@
 * Add rollback_deletion_type_callback to TransactionDBOptions so that write-prepared transactions know whether to issue a Delete or SingleDelete to cancel a previous key written during the prior prepare phase. The PR aims to prevent mixing SingleDeletes and Deletes for the same key, which can lead to undefined behavior for write-prepared transactions.
 * EXPERIMENTAL: Add new API AbortIO in file_system to abort read requests submitted asynchronously.
 * CompactionFilter::Decision has a new value: kRemoveWithSingleDelete. If CompactionFilter returns this decision, then CompactionIterator will use `SingleDelete` to mark a key as removed.
+* Renamed CompactionFilter::Decision::kRemoveWithSingleDelete to kPurge, since the latter sounds more general and hides the implementation details of how the compaction iterator handles keys.
 
 ### Bug Fixes
 * RocksDB calls the FileSystem::Poll API during FilePrefetchBuffer destruction, which impacts performance as it waits for the completion of read requests that are no longer needed. Calling FileSystem::AbortIO to abort those requests instead fixes that performance issue.
@@ -21,6 +22,7 @@
 
 ### Behavior changes
 * Enforce the existing contract of SingleDelete so that SingleDelete cannot be mixed with Delete, because doing so leads to undefined behavior. Fix a number of unit tests that violate the contract but happen to pass.
+* ldb `--try_load_options` now defaults to true if `--db` is specified and a new DB is not being created; the user can still explicitly disable that with `--try_load_options=false` (or explicitly enable it with `--try_load_options`).
 
 ## 7.2.0 (04/15/2022)
 ### Bug Fixes
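For readers of the changelog entry above, a minimal sketch of a compaction filter opting into the renamed decision. The class and key prefix are illustrative (modeled on the `DropKeyWithSingleDelete` test updated later in this patch); only `Decision::kPurge` and the `FilterV2` signature come from the public RocksDB API:

```cpp
#include <string>

#include <rocksdb/compaction_filter.h>
#include <rocksdb/slice.h>

// Sketch: purge keys that honor the SingleDelete contract (written at most
// once, never overwritten). Returning Decision::kPurge makes the compaction
// iterator convert the entry into a SingleDelete rather than a Delete.
class PurgingFilter : public rocksdb::CompactionFilter {
 public:
  Decision FilterV2(int /*level*/, const rocksdb::Slice& key,
                    ValueType /*value_type*/,
                    const rocksdb::Slice& /*existing_value*/,
                    std::string* /*new_value*/,
                    std::string* /*skip_until*/) const override {
    // "single_del/" is a hypothetical prefix marking SingleDelete-only keys.
    if (key.starts_with("single_del/")) {
      return Decision::kPurge;
    }
    return Decision::kKeep;
  }

  const char* Name() const override { return "PurgingFilter"; }
};
```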
diff --git a/build_tools/fbcode_config_platform010.sh b/build_tools/fbcode_config_platform010.sh
index 588dd88c2..d978b0aac 100644
--- a/build_tools/fbcode_config_platform010.sh
+++ b/build_tools/fbcode_config_platform010.sh
@@ -118,10 +118,19 @@ if [ -z "$USE_CLANG" ]; then
   CXX="$GCC_BASE/bin/g++"
   AR="$GCC_BASE/bin/gcc-ar"
-
-  CFLAGS+=" -B$BINUTILS"
+  CFLAGS+=" -B$BINUTILS -nostdinc -nostdlib"
+  CFLAGS+=" -I$GCC_BASE/include"
+  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include"
+  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/install-tools/include"
+  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include-fixed/"
   CFLAGS+=" -isystem $LIBGCC_INCLUDE"
   CFLAGS+=" -isystem $GLIBC_INCLUDE"
+  CFLAGS+=" -I$GLIBC_INCLUDE"
+  CFLAGS+=" -I$LIBGCC_BASE/include"
+  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/"
+  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/x86_64-facebook-linux/"
+  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/backward"
+  CFLAGS+=" -isystem $GLIBC_INCLUDE -I$GLIBC_INCLUDE"
   JEMALLOC=1
 else
   # clang
diff --git a/db/builder.cc b/db/builder.cc
index 5d2eaaa0e..00d78de77 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -115,6 +115,7 @@ Status BuildTable(
   assert(fs);
 
   TableProperties tp;
+  bool table_file_created = false;
   if (iter->Valid() || !range_del_agg->IsEmpty()) {
     std::unique_ptr<CompactionFilter> compaction_filter;
     if (ioptions.compaction_filter_factory != nullptr &&
@@ -158,6 +159,8 @@ Status BuildTable(
           file_checksum_func_name);
       return s;
     }
+
+    table_file_created = true;
     FileTypeSet tmp_set = ioptions.checksum_handoff_file_types;
     file->SetIOPriority(io_priority);
     file->SetWriteLifeTimeHint(write_hint);
@@ -371,15 +374,17 @@ Status BuildTable(
 
     constexpr IODebugContext* dbg = nullptr;
 
-    Status ignored = fs->DeleteFile(fname, IOOptions(), dbg);
-    ignored.PermitUncheckedError();
+    if (table_file_created) {
+      Status ignored = fs->DeleteFile(fname, IOOptions(), dbg);
+      ignored.PermitUncheckedError();
+    }
 
     assert(blob_file_additions || blob_file_paths.empty());
 
     if (blob_file_additions) {
       for (const std::string& blob_file_path : blob_file_paths) {
-        ignored = DeleteDBFile(&db_options, blob_file_path, dbname,
-                               /*force_bg=*/false, /*force_fg=*/false);
+        Status ignored = DeleteDBFile(&db_options, blob_file_path, dbname,
+                                      /*force_bg=*/false, /*force_fg=*/false);
         ignored.PermitUncheckedError();
         TEST_SYNC_POINT("BuildTable::AfterDeleteFile");
       }
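Beginning with db/column_family.cc below, most of the remaining hunks repeat one mechanical substitution across the tree; a minimal before/after sketch of the pattern (the variable is illustrative, and the including file must pull in `<limits>`):

```cpp
#include <cstdint>
#include <limits>

// Before: project-local constant from port/port_posix.h or port/win/port_win.h
//   uint64_t sentinel = port::kMaxUint64;
// After: the standard-library equivalent, constexpr on all supported
// toolchains, which lets the port/ aliases be deleted (see the port/ hunks
// near the end of this patch).
uint64_t sentinel = std::numeric_limits<uint64_t>::max();
```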
diff --git a/db/column_family.cc b/db/column_family.cc
index 4c38546eb..3eb4aab8e 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -501,7 +501,8 @@ std::vector<DbPath> ColumnFamilyData::GetDbPaths() const {
   return paths;
 }
 
-const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId = port::kMaxUint32;
+const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId =
+    std::numeric_limits<uint32_t>::max();
 
 ColumnFamilyData::ColumnFamilyData(
     uint32_t id, const std::string& name, Version* _dummy_versions,
@@ -826,8 +827,8 @@ int GetL0ThresholdSpeedupCompaction(int level0_file_num_compaction_trigger,
   // condition.
   // Or twice as compaction trigger, if it is smaller.
   int64_t res = std::min(twice_level0_trigger, one_fourth_trigger_slowdown);
-  if (res >= port::kMaxInt32) {
-    return port::kMaxInt32;
+  if (res >= std::numeric_limits<int32_t>::max()) {
+    return std::numeric_limits<int32_t>::max();
   } else {
     // res fits in int
     return static_cast<int>(res);
diff --git a/db/compaction/compaction.cc b/db/compaction/compaction.cc
index edda2fe71..4d5245443 100644
--- a/db/compaction/compaction.cc
+++ b/db/compaction/compaction.cc
@@ -518,7 +518,7 @@ uint64_t Compaction::OutputFilePreallocationSize() const {
     }
   }
 
-  if (max_output_file_size_ != port::kMaxUint64 &&
+  if (max_output_file_size_ != std::numeric_limits<uint64_t>::max() &&
       (immutable_options_.compaction_style == kCompactionStyleLevel ||
        output_level() > 0)) {
     preallocation_size = std::min(max_output_file_size_, preallocation_size);
@@ -616,7 +616,7 @@ bool Compaction::DoesInputReferenceBlobFiles() const {
 
 uint64_t Compaction::MinInputFileOldestAncesterTime(
     const InternalKey* start, const InternalKey* end) const {
-  uint64_t min_oldest_ancester_time = port::kMaxUint64;
+  uint64_t min_oldest_ancester_time = std::numeric_limits<uint64_t>::max();
   const InternalKeyComparator& icmp =
       column_family_data()->internal_comparator();
   for (const auto& level_files : inputs_) {
diff --git a/db/compaction/compaction_iterator.cc b/db/compaction/compaction_iterator.cc
index be7b94997..574c02ae6 100644
--- a/db/compaction/compaction_iterator.cc
+++ b/db/compaction/compaction_iterator.cc
@@ -307,7 +307,7 @@ bool CompactionIterator::InvokeFilterIfNeeded(bool* need_skip,
       // no value associated with delete
       value_.clear();
       iter_stats_.num_record_drop_user++;
-    } else if (filter == CompactionFilter::Decision::kRemoveWithSingleDelete) {
+    } else if (filter == CompactionFilter::Decision::kPurge) {
       // convert the current key to a single delete; key_ is pointing into
       // current_key_ at this point, so updating current_key_ updates key()
       ikey_.type = kTypeSingleDeletion;
diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc
index 4cd8ee802..e83914647 100644
--- a/db/compaction/compaction_job.cc
+++ b/db/compaction/compaction_job.cc
@@ -1974,7 +1974,8 @@ Status CompactionJob::FinishCompactionOutputFile(
       refined_oldest_ancester_time =
          sub_compact->compaction->MinInputFileOldestAncesterTime(
              &(meta->smallest), &(meta->largest));
-      if (refined_oldest_ancester_time != port::kMaxUint64) {
+      if (refined_oldest_ancester_time !=
+          std::numeric_limits<uint64_t>::max()) {
        meta->oldest_ancester_time = refined_oldest_ancester_time;
      }
    }
@@ -2264,7 +2265,7 @@ Status CompactionJob::OpenCompactionOutputFile(
        sub_compact->compaction->MinInputFileOldestAncesterTime(
            (sub_compact->start != nullptr) ? &tmp_start : nullptr,
            (sub_compact->end != nullptr) ? &tmp_end : nullptr);
-    if (oldest_ancester_time == port::kMaxUint64) {
+    if (oldest_ancester_time == std::numeric_limits<uint64_t>::max()) {
      oldest_ancester_time = current_time;
    }
diff --git a/db/compaction/compaction_picker.cc b/db/compaction/compaction_picker.cc
index ae86d7894..e7e7e125b 100644
--- a/db/compaction/compaction_picker.cc
+++ b/db/compaction/compaction_picker.cc
@@ -65,7 +65,7 @@ bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
   size_t compact_bytes = static_cast<size_t>(level_files[start]->fd.file_size);
   uint64_t compensated_compact_bytes =
       level_files[start]->compensated_file_size;
-  size_t compact_bytes_per_del_file = port::kMaxSizet;
+  size_t compact_bytes_per_del_file = std::numeric_limits<size_t>::max();
   // Compaction range will be [start, limit).
   size_t limit;
   // Pull in files until the amount of compaction work per deleted file begins
@@ -717,7 +717,7 @@ Compaction* CompactionPicker::CompactRange(
     // files that are created during the current compaction.
     if (compact_range_options.bottommost_level_compaction ==
             BottommostLevelCompaction::kForceOptimized &&
-        max_file_num_to_ignore != port::kMaxUint64) {
+        max_file_num_to_ignore != std::numeric_limits<uint64_t>::max()) {
       assert(input_level == output_level);
       // inputs_shrunk holds a continuous subset of input files which were all
       // created before the current manual compaction
diff --git a/db/compaction/compaction_picker_level.cc b/db/compaction/compaction_picker_level.cc
index 31b76fb69..87d1e8e63 100644
--- a/db/compaction/compaction_picker_level.cc
+++ b/db/compaction/compaction_picker_level.cc
@@ -504,7 +504,7 @@ bool LevelCompactionBuilder::PickIntraL0Compaction() {
     return false;
   }
   return FindIntraL0Compaction(level_files, kMinFilesForIntraL0Compaction,
-                               port::kMaxUint64,
+                               std::numeric_limits<uint64_t>::max(),
                                mutable_cf_options_.max_compaction_bytes,
                                &start_level_inputs_, earliest_mem_seqno_);
 }
diff --git a/db/compaction/compaction_picker_test.cc b/db/compaction/compaction_picker_test.cc
index 695d730fb..03eccca8e 100644
--- a/db/compaction/compaction_picker_test.cc
+++ b/db/compaction/compaction_picker_test.cc
@@ -2653,8 +2653,8 @@ TEST_F(CompactionPickerTest, UniversalMarkedManualCompaction) {
       universal_compaction_picker.CompactRange(
           cf_name_, mutable_cf_options_, mutable_db_options_, vstorage_.get(),
           ColumnFamilyData::kCompactAllLevels, 6, CompactRangeOptions(),
-          nullptr, nullptr, &manual_end, &manual_conflict, port::kMaxUint64,
-          ""));
+          nullptr, nullptr, &manual_end, &manual_conflict,
+          std::numeric_limits<uint64_t>::max(), ""));
 
   ASSERT_TRUE(compaction);
diff --git a/db/compaction/compaction_picker_universal.cc b/db/compaction/compaction_picker_universal.cc
index 5ca2c41ea..c5c043c0f 100644
--- a/db/compaction/compaction_picker_universal.cc
+++ b/db/compaction/compaction_picker_universal.cc
@@ -1371,7 +1371,7 @@ Compaction* UniversalCompactionBuilder::PickPeriodicCompaction() {
 
 uint64_t UniversalCompactionBuilder::GetMaxOverlappingBytes() const {
   if (!mutable_cf_options_.compaction_options_universal.incremental) {
-    return port::kMaxUint64;
+    return std::numeric_limits<uint64_t>::max();
   } else {
     // Try to align the cutting boundary with files at the next level if the
     // file doesn't end up with 1/2 of the target size, or it would overlap
diff --git a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc
index 5a3f27d12..195615191 100644
--- a/db/db_compaction_filter_test.cc
+++ b/db/db_compaction_filter_test.cc
@@ -998,7 +998,7 @@ TEST_F(DBTestCompactionFilter, DropKeyWithSingleDelete) {
                       std::string* /*new_value*/,
                       std::string* /*skip_until*/) const override {
       if (key.starts_with("b")) {
-        return Decision::kRemoveWithSingleDelete;
+        return Decision::kPurge;
       }
       return Decision::kRemove;
     }
diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
index 0d99e8779..29180f224 100644
--- a/db/db_compaction_test.cc
+++ b/db/db_compaction_test.cc
@@ -4404,7 +4404,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithCompactionFilters) {
   for (CompactionFilterType comp_filter_type :
        {kUseCompactionFilter, kUseCompactionFilterFactory}) {
     // Assert that periodic compactions are not enabled.
-    ASSERT_EQ(port::kMaxUint64 - 1, options.periodic_compaction_seconds);
+    ASSERT_EQ(std::numeric_limits<uint64_t>::max() - 1,
+              options.periodic_compaction_seconds);
 
     if (comp_filter_type == kUseCompactionFilter) {
       options.compaction_filter = &test_compaction_filter;
diff --git a/db/db_filesnapshot.cc b/db/db_filesnapshot.cc
index a3ec9fb8f..e30071341 100644
--- a/db/db_filesnapshot.cc
+++ b/db/db_filesnapshot.cc
@@ -177,7 +177,7 @@ Status DBImpl::GetLiveFilesStorageInfo(
   VectorLogPtr live_wal_files;
   bool flush_memtable = true;
   if (!immutable_db_options_.allow_2pc) {
-    if (opts.wal_size_for_flush == port::kMaxUint64) {
+    if (opts.wal_size_for_flush == std::numeric_limits<uint64_t>::max()) {
       flush_memtable = false;
     } else if (opts.wal_size_for_flush > 0) {
       // If the outstanding log files are small, we skip the flush.
diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc
index 76442086d..e661d74ea 100644
--- a/db/db_flush_test.cc
+++ b/db/db_flush_test.cc
@@ -2356,7 +2356,7 @@ TEST_P(DBAtomicFlushTest, PrecomputeMinLogNumberToKeepNon2PC) {
     ASSERT_OK(Flush(cf_ids));
     uint64_t log_num_after_flush = dbfull()->TEST_GetCurrentLogNumber();
 
-    uint64_t min_log_number_to_keep = port::kMaxUint64;
+    uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
     autovector<ColumnFamilyData*> flushed_cfds;
     autovector<autovector<VersionEdit*>> flush_edits;
     for (size_t i = 0; i != num_cfs; ++i) {
diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc
index c44612100..aa2cdac51 100644
--- a/db/db_impl/db_impl.cc
+++ b/db/db_impl/db_impl.cc
@@ -5338,7 +5338,7 @@ Status DBImpl::ReserveFileNumbersBeforeIngestion(
 
 Status DBImpl::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
   if (mutable_db_options_.max_open_files == -1) {
-    uint64_t oldest_time = port::kMaxUint64;
+    uint64_t oldest_time = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *versions_->GetColumnFamilySet()) {
       if (!cfd->IsDropped()) {
         uint64_t ctime;
diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h
index 97e3d1b8a..339399bbb 100644
--- a/db/db_impl/db_impl.h
+++ b/db/db_impl/db_impl.h
@@ -2299,7 +2299,7 @@ class DBImpl : public DB {
   static const int KEEP_LOG_FILE_NUM = 1000;
   // MSVC version 1800 still does not have constexpr for ::max()
-  static const uint64_t kNoTimeOut = port::kMaxUint64;
+  static const uint64_t kNoTimeOut = std::numeric_limits<uint64_t>::max();
 
   std::string db_absolute_path_;
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 5b508c00d..92043350f 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -188,7 +188,7 @@ Status DBImpl::FlushMemTableToOutputFile(
   // a memtable without knowing such snapshot(s).
   uint64_t max_memtable_id = needs_to_sync_closed_wals
                                  ? cfd->imm()->GetLatestMemTableID()
-                                 : port::kMaxUint64;
+                                 : std::numeric_limits<uint64_t>::max();
 
   // If needs_to_sync_closed_wals is false, then the flush job will pick ALL
   // existing memtables of the column family when PickMemTable() is called
@@ -1041,7 +1041,8 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     }
     s = RunManualCompaction(cfd, ColumnFamilyData::kCompactAllLevels,
                             final_output_level, options, begin, end, exclusive,
-                            false, port::kMaxUint64, trim_ts);
+                            false, std::numeric_limits<uint64_t>::max(),
+                            trim_ts);
   } else {
     int first_overlapped_level = kInvalidLevel;
     int max_overlapped_level = kInvalidLevel;
@@ -1078,7 +1079,7 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     if (s.ok() && first_overlapped_level != kInvalidLevel) {
       // max_file_num_to_ignore can be used to filter out newly created SST
       // files, useful for bottom level compaction in a manual compaction
-      uint64_t max_file_num_to_ignore = port::kMaxUint64;
+      uint64_t max_file_num_to_ignore = std::numeric_limits<uint64_t>::max();
       uint64_t next_file_number = versions_->current_next_file_number();
       final_output_level = max_overlapped_level;
       int output_level;
@@ -2015,7 +2016,7 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
       // be created and scheduled, status::OK() will be returned.
       s = SwitchMemtable(cfd, &context);
     }
-    const uint64_t flush_memtable_id = port::kMaxUint64;
+    const uint64_t flush_memtable_id = std::numeric_limits<uint64_t>::max();
     if (s.ok()) {
       if (cfd->imm()->NumNotFlushed() != 0 || !cfd->mem()->IsEmpty() ||
           !cached_recoverable_state_empty_.load()) {
diff --git a/db/db_impl/db_impl_debug.cc b/db/db_impl/db_impl_debug.cc
index 7bbd207d9..5647aa26f 100644
--- a/db/db_impl/db_impl_debug.cc
+++ b/db/db_impl/db_impl_debug.cc
@@ -118,10 +118,11 @@ Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
            cfd->ioptions()->compaction_style == kCompactionStyleFIFO)
               ? level
               : level + 1;
-  return RunManualCompaction(cfd, level, output_level, CompactRangeOptions(),
-                             begin, end, true, disallow_trivial_move,
-                             port::kMaxUint64 /*max_file_num_to_ignore*/,
-                             "" /*trim_ts*/);
+  return RunManualCompaction(
+      cfd, level, output_level, CompactRangeOptions(), begin, end, true,
+      disallow_trivial_move,
+      std::numeric_limits<uint64_t>::max() /*max_file_num_to_ignore*/,
+      "" /*trim_ts*/);
 }
 
 Status DBImpl::TEST_SwitchMemtable(ColumnFamilyData* cfd) {
diff --git a/db/db_impl/db_impl_files.cc b/db/db_impl/db_impl_files.cc
index 1790ed836..86a7808b2 100644
--- a/db/db_impl/db_impl_files.cc
+++ b/db/db_impl/db_impl_files.cc
@@ -761,7 +761,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
   assert(!cfds_to_flush.empty());
   assert(cfds_to_flush.size() == edit_lists.size());
 
-  uint64_t min_log_number_to_keep = port::kMaxUint64;
+  uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
   for (const auto& edit_list : edit_lists) {
     uint64_t log = 0;
     for (const auto& e : edit_list) {
@@ -773,7 +773,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
       min_log_number_to_keep = std::min(min_log_number_to_keep, log);
     }
   }
-  if (min_log_number_to_keep == port::kMaxUint64) {
+  if (min_log_number_to_keep == std::numeric_limits<uint64_t>::max()) {
     min_log_number_to_keep = cfds_to_flush[0]->GetLogNumber();
     for (size_t i = 1; i < cfds_to_flush.size(); i++) {
       min_log_number_to_keep =
diff --git a/db/db_impl/db_impl_secondary.cc b/db/db_impl/db_impl_secondary.cc
index 1e3c9f2ac..fb93a4408 100644
--- a/db/db_impl/db_impl_secondary.cc
+++ b/db/db_impl/db_impl_secondary.cc
@@ -247,15 +247,16 @@ Status DBImplSecondary::RecoverLogFiles(
         if (seq_of_batch <= seq) {
           continue;
         }
-        auto curr_log_num = port::kMaxUint64;
+        auto curr_log_num = std::numeric_limits<uint64_t>::max();
         if (cfd_to_current_log_.count(cfd) > 0) {
           curr_log_num = cfd_to_current_log_[cfd];
         }
         // If the active memtable contains records added by replaying an
         // earlier WAL, then we need to seal the memtable, add it to the
         // immutable memtable list and create a new active memtable.
-        if (!cfd->mem()->IsEmpty() && (curr_log_num == port::kMaxUint64 ||
-                                       curr_log_num != log_number)) {
+        if (!cfd->mem()->IsEmpty() &&
+            (curr_log_num == std::numeric_limits<uint64_t>::max() ||
+             curr_log_num != log_number)) {
           const MutableCFOptions mutable_cf_options =
               *cfd->GetLatestMutableCFOptions();
           MemTable* new_mem =
diff --git a/db/db_info_dumper.cc b/db/db_info_dumper.cc
index efdb39c66..df17a5c96 100644
--- a/db/db_info_dumper.cc
+++ b/db/db_info_dumper.cc
@@ -35,10 +35,12 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
 
   Header(options.info_log, "DB SUMMARY\n");
   Header(options.info_log, "DB Session ID:  %s\n", session_id.c_str());
+  Status s;
   // Get files in dbname dir
-  if (!env->GetChildren(dbname, &files).ok()) {
-    Error(options.info_log,
-          "Error when reading %s dir\n", dbname.c_str());
+  s = env->GetChildren(dbname, &files);
+  if (!s.ok()) {
+    Error(options.info_log, "Error when reading %s dir %s\n", dbname.c_str(),
+          s.ToString().c_str());
   }
   std::sort(files.begin(), files.end());
   for (const std::string& file : files) {
@@ -53,24 +55,27 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
         Header(options.info_log, "IDENTITY file:  %s\n", file.c_str());
         break;
       case kDescriptorFile:
-        if (env->GetFileSize(dbname + "/" + file, &file_size).ok()) {
+        s = env->GetFileSize(dbname + "/" + file, &file_size);
+        if (s.ok()) {
           Header(options.info_log,
                  "MANIFEST file:  %s size: %" PRIu64 " Bytes\n", file.c_str(),
                  file_size);
         } else {
-          Error(options.info_log, "Error when reading MANIFEST file: %s/%s\n",
-                dbname.c_str(), file.c_str());
+          Error(options.info_log,
+                "Error when reading MANIFEST file: %s/%s %s\n", dbname.c_str(),
+                file.c_str(), s.ToString().c_str());
         }
         break;
       case kWalFile:
-        if (env->GetFileSize(dbname + "/" + file, &file_size).ok()) {
+        s = env->GetFileSize(dbname + "/" + file, &file_size);
+        if (s.ok()) {
           wal_info.append(file)
               .append(" size: ")
              .append(std::to_string(file_size))
              .append(" ; ");
         } else {
-          Error(options.info_log, "Error when reading LOG file: %s/%s\n",
-                dbname.c_str(), file.c_str());
+          Error(options.info_log, "Error when reading LOG file: %s/%s %s\n",
+                dbname.c_str(), file.c_str(), s.ToString().c_str());
         }
         break;
       case kTableFile:
@@ -86,10 +91,10 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
   // Get sst files in db_path dir
   for (auto& db_path : options.db_paths) {
     if (dbname.compare(db_path.path) != 0) {
-      if (!env->GetChildren(db_path.path, &files).ok()) {
-        Error(options.info_log,
-              "Error when reading %s dir\n",
-              db_path.path.c_str());
+      s = env->GetChildren(db_path.path, &files);
+      if (!s.ok()) {
+        Error(options.info_log, "Error when reading %s dir %s\n",
+              db_path.path.c_str(), s.ToString().c_str());
         continue;
       }
       std::sort(files.begin(), files.end());
@@ -111,22 +116,25 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
   // Get wal file in wal_dir
   const auto& wal_dir = options.GetWalDir(dbname);
   if (!options.IsWalDirSameAsDBPath(dbname)) {
-    if (!env->GetChildren(wal_dir, &files).ok()) {
-      Error(options.info_log, "Error when reading %s dir\n", wal_dir.c_str());
+    s = env->GetChildren(wal_dir, &files);
+    if (!s.ok()) {
+      Error(options.info_log, "Error when reading %s dir %s\n",
+            wal_dir.c_str(), s.ToString().c_str());
       return;
     }
     wal_info.clear();
     for (const std::string& file : files) {
       if (ParseFileName(file, &number, &type)) {
         if (type == kWalFile) {
-          if (env->GetFileSize(wal_dir + "/" + file, &file_size).ok()) {
+          s = env->GetFileSize(wal_dir + "/" + file, &file_size);
+          if (s.ok()) {
            wal_info.append(file)
size: ") .append(std::to_string(file_size)) .append(" ; "); } else { - Error(options.info_log, "Error when reading LOG file %s/%s\n", - wal_dir.c_str(), file.c_str()); + Error(options.info_log, "Error when reading LOG file %s/%s %s\n", + wal_dir.c_str(), file.c_str(), s.ToString().c_str()); } } } diff --git a/db/db_kv_checksum_test.cc b/db/db_kv_checksum_test.cc index b50681e5d..44ee56786 100644 --- a/db/db_kv_checksum_test.cc +++ b/db/db_kv_checksum_test.cc @@ -79,7 +79,7 @@ class DbKvChecksumTest void CorruptNextByteCallBack(void* arg) { Slice encoded = *static_cast(arg); - if (entry_len_ == port::kMaxSizet) { + if (entry_len_ == std::numeric_limits::max()) { // We learn the entry size on the first attempt entry_len_ = encoded.size(); } @@ -96,7 +96,7 @@ class DbKvChecksumTest WriteBatchOpType op_type_; char corrupt_byte_addend_; size_t corrupt_byte_offset_ = 0; - size_t entry_len_ = port::kMaxSizet; + size_t entry_len_ = std::numeric_limits::max(); }; std::string GetTestNameSuffix( diff --git a/db/db_memtable_test.cc b/db/db_memtable_test.cc index 62b50b60a..13736daac 100644 --- a/db/db_memtable_test.cc +++ b/db/db_memtable_test.cc @@ -97,7 +97,7 @@ class MockMemTableRepFactory : public MemTableRepFactory { private: MockMemTableRep* mock_rep_; - // workaround since there's no port::kMaxUint32 yet. + // workaround since there's no std::numeric_limits::max() yet. uint32_t last_column_family_id_ = static_cast(-1); }; diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc index 845114339..2b4fa3ba2 100644 --- a/db/db_range_del_test.cc +++ b/db/db_range_del_test.cc @@ -500,7 +500,8 @@ TEST_F(DBRangeDelTest, ValidUniversalSubcompactionBoundaries) { 1 /* input_level */, 2 /* output_level */, CompactRangeOptions(), nullptr /* begin */, nullptr /* end */, true /* exclusive */, true /* disallow_trivial_move */, - port::kMaxUint64 /* max_file_num_to_ignore */, "" /*trim_ts*/)); + std::numeric_limits::max() /* max_file_num_to_ignore */, + "" /*trim_ts*/)); } #endif // ROCKSDB_LITE diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc index fac924d31..9248814c8 100644 --- a/db/db_sst_test.cc +++ b/db/db_sst_test.cc @@ -280,6 +280,58 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) { listener->VerifyMatchedCount(1); } +// Test that producing an empty .sst file does not write it out to +// disk, and that the DeleteFile() env method is not called for +// removing the non-existing file later. +TEST_F(DBSSTTest, DeleteFileNotCalledForNotCreatedSSTFile) { + Options options = CurrentOptions(); + options.env = env_; + + OnFileDeletionListener* listener = new OnFileDeletionListener(); + options.listeners.emplace_back(listener); + + Reopen(options); + + // Flush the empty database. + ASSERT_OK(Flush()); + ASSERT_EQ("", FilesPerLevel(0)); + + // We expect no .sst files. + std::vector metadata; + db_->GetLiveFilesMetaData(&metadata); + ASSERT_EQ(metadata.size(), 0U); + + // We expect no file deletions. + listener->VerifyMatchedCount(0); +} + +// Test that producing a non-empty .sst file does write it out to +// disk, and that the DeleteFile() env method is not called for removing +// the file later. +TEST_F(DBSSTTest, DeleteFileNotCalledForCreatedSSTFile) { + Options options = CurrentOptions(); + options.env = env_; + + OnFileDeletionListener* listener = new OnFileDeletionListener(); + options.listeners.emplace_back(listener); + + Reopen(options); + + ASSERT_OK(Put("pika", "choo")); + + // Flush the non-empty database. 
diff --git a/db/db_wal_test.cc b/db/db_wal_test.cc
index 9a953a178..5483fcad7 100644
--- a/db/db_wal_test.cc
+++ b/db/db_wal_test.cc
@@ -1009,7 +1009,7 @@ TEST_F(DBWALTest, RecoveryWithLogDataForSomeCFs) {
       if (log_files.size() > 0) {
         earliest_log_nums[i] = log_files[0]->LogNumber();
       } else {
-        earliest_log_nums[i] = port::kMaxUint64;
+        earliest_log_nums[i] = std::numeric_limits<uint64_t>::max();
       }
     }
     // Check at least the first WAL was cleaned up during the recovery.
diff --git a/db/dbformat.h b/db/dbformat.h
index ee9c27e76..670c188c7 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -90,7 +90,8 @@ inline bool IsExtendedValueType(ValueType t) {
 // can be packed together into 64-bits.
 static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
 
-static const SequenceNumber kDisableGlobalSequenceNumber = port::kMaxUint64;
+static const SequenceNumber kDisableGlobalSequenceNumber =
+    std::numeric_limits<uint64_t>::max();
 
 constexpr uint64_t kNumInternalBytes = 8;
diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc
index 1f47d2ab6..0341bdcc3 100644
--- a/db/external_sst_file_test.cc
+++ b/db/external_sst_file_test.cc
@@ -2405,7 +2405,7 @@ TEST_P(ExternalSSTBlockChecksumTest, DISABLED_HugeBlockChecksum) {
   SstFileWriter sst_file_writer(EnvOptions(), options);
 
   // 2^32 - 1, will lead to data block with more than 2^32 bytes
-  size_t huge_size = port::kMaxUint32;
+  size_t huge_size = std::numeric_limits<uint32_t>::max();
 
   std::string f = sst_files_dir_ + "f.sst";
   ASSERT_OK(sst_file_writer.Open(f));
diff --git a/db/file_indexer.h b/db/file_indexer.h
index ad7553f2c..fd889b031 100644
--- a/db/file_indexer.h
+++ b/db/file_indexer.h
@@ -58,10 +58,7 @@ class FileIndexer {
   void UpdateIndex(Arena* arena, const size_t num_levels,
                    std::vector<FileMetaData*>* const files);
 
-  enum {
-    // MSVC version 1800 still does not have constexpr for ::max()
-    kLevelMaxIndex = ROCKSDB_NAMESPACE::port::kMaxInt32
-  };
+  enum { kLevelMaxIndex = std::numeric_limits<int32_t>::max() };
 
  private:
   size_t num_levels_;
diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc
index b8ef21fc6..e276ba836 100644
--- a/db/flush_job_test.cc
+++ b/db/flush_job_test.cc
@@ -164,12 +164,12 @@ TEST_F(FlushJobTest, Empty) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relevant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, {},
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, nullptr, &event_logger, false,
-      true /* sync_output_directory */, true /* write_manifest */,
-      Env::Priority::USER, nullptr /*IOTracer*/);
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      nullptr, &event_logger, false, true /* sync_output_directory */,
+      true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/);
   {
     InstrumentedMutexLock l(&mutex_);
     flush_job.PickMemTable();
@@ -248,11 +248,12 @@ TEST_F(FlushJobTest, NonEmpty) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relevant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, {},
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
-      true, true /* sync_output_directory */, true /* write_manifest */,
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      db_options_.statistics.get(), &event_logger, true,
+      true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/);
 
   HistogramData hist;
@@ -509,11 +510,12 @@ TEST_F(FlushJobTest, Snapshots) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relevant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, snapshots,
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
-      true, true /* sync_output_directory */, true /* write_manifest */,
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      db_options_.statistics.get(), &event_logger, true,
+      true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/);
   mutex_.Lock();
   flush_job.PickMemTable();
@@ -577,9 +579,9 @@ TEST_F(FlushJobTimestampTest, AllKeysExpired) {
   PutFixed64(&full_history_ts_low, std::numeric_limits<uint64_t>::max());
   FlushJob flush_job(
       dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
-      port::kMaxUint64 /* memtable_id */, env_options_, versions_.get(),
-      &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker,
-      &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
       db_options_.statistics.get(), &event_logger, true,
       true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/, /*db_id=*/"",
@@ -628,9 +630,9 @@ TEST_F(FlushJobTimestampTest, NoKeyExpired) {
   PutFixed64(&full_history_ts_low, 0);
   FlushJob flush_job(
       dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
-      port::kMaxUint64 /* memtable_id */, env_options_, versions_.get(),
-      &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker,
-      &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
       db_options_.statistics.get(), &event_logger, true,
       true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/, /*db_id=*/"",
diff --git a/db/memtable.cc b/db/memtable.cc
index 3ce44ea1d..6a4d2e127 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -140,8 +140,8 @@ size_t MemTable::ApproximateMemoryUsage() {
   for (size_t usage : usages) {
     // If usage + total_usage >= kMaxSizet, return kMaxSizet.
     // the following variation is to avoid numeric overflow.
-    if (usage >= port::kMaxSizet - total_usage) {
-      return port::kMaxSizet;
+    if (usage >= std::numeric_limits<size_t>::max() - total_usage) {
+      return std::numeric_limits<size_t>::max();
     }
     total_usage += usage;
   }
diff --git a/db/memtable_list_test.cc b/db/memtable_list_test.cc
index df1694c21..29de3b662 100644
--- a/db/memtable_list_test.cc
+++ b/db/memtable_list_test.cc
@@ -209,7 +209,8 @@ TEST_F(MemTableListTest, Empty) {
   ASSERT_FALSE(list.IsFlushPending());
 
   autovector<MemTable*> mems;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &mems);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &mems);
   ASSERT_EQ(0, mems.size());
 
   autovector<MemTable*> to_delete;
@@ -418,7 +419,8 @@ TEST_F(MemTableListTest, GetFromHistoryTest) {
   // Flush this memtable from the list.
   // (It will then be a part of the memtable history).
   autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(1, to_flush.size());
 
   MutableCFOptions mutable_cf_options(options);
@@ -472,7 +474,8 @@ TEST_F(MemTableListTest, GetFromHistoryTest) {
   ASSERT_EQ(0, to_delete.size());
 
   to_flush.clear();
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(1, to_flush.size());
 
   // Flush second memtable
@@ -593,7 +596,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_FALSE(list.IsFlushPending());
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
   autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(0, to_flush.size());
 
   // Request a flush even though there is nothing to flush
@@ -602,7 +606,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
 
   // Attempt to 'flush' to clear request for flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(0, to_flush.size());
   ASSERT_FALSE(list.IsFlushPending());
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
@@ -626,7 +631,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
 
   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(2, to_flush.size());
   ASSERT_EQ(2, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -647,7 +653,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_EQ(0, to_delete.size());
 
   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(3, to_flush.size());
   ASSERT_EQ(3, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -655,7 +662,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
 
   // Pick tables to flush again
   autovector<MemTable*> to_flush2;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush2);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush2);
   ASSERT_EQ(0, to_flush2.size());
   ASSERT_EQ(3, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -673,7 +681,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
 
   // Pick tables to flush again
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush2);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush2);
   ASSERT_EQ(1, to_flush2.size());
   ASSERT_EQ(4, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -694,7 +703,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_EQ(0, to_delete.size());
 
   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   // Should pick 4 of 5 since 1 table has been picked in to_flush2
   ASSERT_EQ(4, to_flush.size());
   ASSERT_EQ(5, list.NumNotFlushed());
@@ -703,7 +713,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
 
   // Pick tables to flush again
   autovector<MemTable*> to_flush3;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush3);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush3);
   ASSERT_EQ(0, to_flush3.size());  // nothing not in progress of being flushed
   ASSERT_EQ(5, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -872,8 +883,9 @@ TEST_F(MemTableListTest, AtomicFlusTest) {
     auto* list = lists[i];
     ASSERT_FALSE(list->IsFlushPending());
     ASSERT_FALSE(list->imm_flush_needed.load(std::memory_order_acquire));
-    list->PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */,
-                               &flush_candidates[i]);
+    list->PickMemtablesToFlush(
+        std::numeric_limits<uint64_t>::max() /* memtable_id */,
+        &flush_candidates[i]);
     ASSERT_EQ(0, flush_candidates[i].size());
   }
   // Request flush even though there is nothing to flush
diff --git a/db/version_builder.cc b/db/version_builder.cc
index e76985687..b785adfdd 100644
--- a/db/version_builder.cc
+++ b/db/version_builder.cc
@@ -1144,7 +1144,7 @@ class VersionBuilder::Rep {
     size_t table_cache_capacity = table_cache_->get_cache()->GetCapacity();
     bool always_load = (table_cache_capacity == TableCache::kInfiniteCapacity);
-    size_t max_load = port::kMaxSizet;
+    size_t max_load = std::numeric_limits<size_t>::max();
 
     if (!always_load) {
       // If it is initial loading and not set to always loading all the
diff --git a/db/version_set.cc b/db/version_set.cc
index 81d254f2b..b0e7080bd 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -1517,7 +1517,7 @@ uint64_t Version::GetSstFilesSize() {
 }
 
 void Version::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
-  uint64_t oldest_time = port::kMaxUint64;
+  uint64_t oldest_time = std::numeric_limits<uint64_t>::max();
   for (int level = 0; level < storage_info_.num_non_empty_levels_; level++) {
     for (FileMetaData* meta : storage_info_.LevelFiles(level)) {
       assert(meta->fd.table_reader != nullptr);
diff --git a/db/version_set.h b/db/version_set.h
index abb4046c7..5afd1202f 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -1213,7 +1213,7 @@ class VersionSet {
   // new_log_number_for_empty_cf.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       uint64_t new_log_number_for_empty_cf) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       // It's safe to ignore dropped column families here:
       // cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
@@ -1229,7 +1229,7 @@ class VersionSet {
   // file, except data from `cfd_to_skip`.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       const ColumnFamilyData* cfd_to_skip) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       if (cfd == cfd_to_skip) {
         continue;
@@ -1246,7 +1246,7 @@ class VersionSet {
   // file, except data from `cfds_to_skip`.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       const std::unordered_set<const ColumnFamilyData*>& cfds_to_skip) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       if (cfds_to_skip.count(cfd)) {
         continue;
diff --git a/db/wal_edit.h b/db/wal_edit.h
index 7e1f9a576..23dc58905 100644
--- a/db/wal_edit.h
+++ b/db/wal_edit.h
@@ -44,7 +44,8 @@ class WalMetadata {
  private:
   // The size of WAL is unknown, used when the WAL is not synced yet or is
   // empty.
-  constexpr static uint64_t kUnknownWalSize = port::kMaxUint64;
+  constexpr static uint64_t kUnknownWalSize =
+      std::numeric_limits<uint64_t>::max();
 
   // Size of the most recently synced WAL in bytes.
   uint64_t synced_size_bytes_ = kUnknownWalSize;
diff --git a/db/write_batch.cc b/db/write_batch.cc
index 77e91504e..788b9bae4 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -745,10 +745,10 @@ Status CheckColumnFamilyTimestampSize(ColumnFamilyHandle* column_family,
 
 Status WriteBatchInternal::Put(WriteBatch* b, uint32_t column_family_id,
                                const Slice& key, const Slice& value) {
-  if (key.size() > size_t{port::kMaxUint32}) {
+  if (key.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
-  if (value.size() > size_t{port::kMaxUint32}) {
+  if (value.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }
 
@@ -825,7 +825,7 @@ Status WriteBatchInternal::CheckSlicePartsLength(const SliceParts& key,
   for (int i = 0; i < key.num_parts; ++i) {
     total_key_bytes += key.parts[i].size();
   }
-  if (total_key_bytes >= size_t{port::kMaxUint32}) {
+  if (total_key_bytes >= size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
 
@@ -833,7 +833,7 @@ Status WriteBatchInternal::CheckSlicePartsLength(const SliceParts& key,
   for (int i = 0; i < value.num_parts; ++i) {
     total_value_bytes += value.parts[i].size();
   }
-  if (total_value_bytes >= size_t{port::kMaxUint32}) {
+  if (total_value_bytes >= size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }
   return Status::OK();
@@ -1292,10 +1292,10 @@ Status WriteBatch::DeleteRange(ColumnFamilyHandle* column_family,
 
 Status WriteBatchInternal::Merge(WriteBatch* b, uint32_t column_family_id,
                                  const Slice& key, const Slice& value) {
-  if (key.size() > size_t{port::kMaxUint32}) {
+  if (key.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
-  if (value.size() > size_t{port::kMaxUint32}) {
+  if (value.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }
 
diff --git a/db_stress_tool/db_stress_common.h b/db_stress_tool/db_stress_common.h
index 65f1c3295..2f79a22ce 100644
--- a/db_stress_tool/db_stress_common.h
+++ b/db_stress_tool/db_stress_common.h
@@ -167,6 +167,8 @@ DECLARE_bool(mock_direct_io);
 DECLARE_bool(statistics);
 DECLARE_bool(sync);
 DECLARE_bool(use_fsync);
+DECLARE_uint64(bytes_per_sync);
+DECLARE_uint64(wal_bytes_per_sync);
 DECLARE_int32(kill_random_test);
 DECLARE_string(kill_exclude_prefixes);
 DECLARE_bool(disable_wal);
diff --git a/db_stress_tool/db_stress_compaction_filter.h b/db_stress_tool/db_stress_compaction_filter.h
index c967622db..d79ba4780 100644
--- a/db_stress_tool/db_stress_compaction_filter.h
+++ b/db_stress_tool/db_stress_compaction_filter.h
@@ -51,8 +51,7 @@ class DbStressCompactionFilter : public CompactionFilter {
     key_mutex->Unlock();
 
     if (!key_exists) {
-      return allow_overwrite ? Decision::kRemove
-                             : Decision::kRemoveWithSingleDelete;
+      return allow_overwrite ? Decision::kRemove : Decision::kPurge;
     }
     return Decision::kKeep;
   }
diff --git a/db_stress_tool/db_stress_gflags.cc b/db_stress_tool/db_stress_gflags.cc
index 841a3cc94..4afee088b 100644
--- a/db_stress_tool/db_stress_gflags.cc
+++ b/db_stress_tool/db_stress_gflags.cc
@@ -522,6 +522,15 @@ DEFINE_bool(sync, false, "Sync all writes to disk");
 
 DEFINE_bool(use_fsync, false, "If true, issue fsync instead of fdatasync");
 
+DEFINE_uint64(bytes_per_sync, ROCKSDB_NAMESPACE::Options().bytes_per_sync,
+              "If nonzero, sync SST file data incrementally after every "
+              "`bytes_per_sync` bytes are written");
+
+DEFINE_uint64(wal_bytes_per_sync,
+              ROCKSDB_NAMESPACE::Options().wal_bytes_per_sync,
+              "If nonzero, sync WAL file data incrementally after every "
+              "`wal_bytes_per_sync` bytes are written");
+
 DEFINE_int32(kill_random_test, 0,
              "If non-zero, kill at various points in source code with "
             "probability 1/this");
diff --git a/db_stress_tool/db_stress_test_base.cc b/db_stress_tool/db_stress_test_base.cc
index 8f5bc8127..5e8d0436a 100644
--- a/db_stress_tool/db_stress_test_base.cc
+++ b/db_stress_tool/db_stress_test_base.cc
@@ -2030,11 +2030,11 @@ void StressTest::TestAcquireSnapshot(ThreadState* thread,
   if (FLAGS_long_running_snapshots) {
     // Hold 10% of snapshots for 10x more
     if (thread->rand.OneIn(10)) {
-      assert(hold_for < port::kMaxInt64 / 10);
+      assert(hold_for < std::numeric_limits<int64_t>::max() / 10);
       hold_for *= 10;
       // Hold 1% of snapshots for 100x more
       if (thread->rand.OneIn(10)) {
-        assert(hold_for < port::kMaxInt64 / 10);
+        assert(hold_for < std::numeric_limits<int64_t>::max() / 10);
         hold_for *= 10;
       }
     }
@@ -2066,8 +2066,9 @@ void StressTest::TestCompactRange(ThreadState* thread, int64_t rand_key,
                                   const Slice& start_key,
                                   ColumnFamilyHandle* column_family) {
   int64_t end_key_num;
-  if (port::kMaxInt64 - rand_key < FLAGS_compact_range_width) {
-    end_key_num = port::kMaxInt64;
+  if (std::numeric_limits<int64_t>::max() - rand_key <
+      FLAGS_compact_range_width) {
+    end_key_num = std::numeric_limits<int64_t>::max();
   } else {
     end_key_num = FLAGS_compact_range_width + rand_key;
   }
diff --git a/file/file_prefetch_buffer.h b/file/file_prefetch_buffer.h
index 94d09bba4..88b350ceb 100644
--- a/file/file_prefetch_buffer.h
+++ b/file/file_prefetch_buffer.h
@@ -71,7 +71,7 @@ class FilePrefetchBuffer {
         readahead_size_(readahead_size),
         initial_auto_readahead_size_(readahead_size),
         max_readahead_size_(max_readahead_size),
-        min_offset_read_(port::kMaxSizet),
+        min_offset_read_(std::numeric_limits<size_t>::max()),
         enable_(enable),
         track_min_offset_(track_min_offset),
         implicit_auto_readahead_(implicit_auto_readahead),
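The wiring that copies these two flags into the stress test's DB options is not part of this excerpt; a minimal sketch of what it presumably looks like (the helper function is illustrative, while `bytes_per_sync` and `wal_bytes_per_sync` are real `rocksdb::Options` fields):

```cpp
#include <cstdint>

#include <rocksdb/options.h>

// Sketch: forward the new flags into the DB options, mirroring how other
// sync-related stress-test flags are applied in db_stress_test_base.cc.
void ApplySyncFlags(uint64_t bytes_per_sync, uint64_t wal_bytes_per_sync,
                    rocksdb::Options* options) {
  options->bytes_per_sync = bytes_per_sync;          // SST data sync interval
  options->wal_bytes_per_sync = wal_bytes_per_sync;  // WAL data sync interval
}
```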
diff --git a/include/rocksdb/compaction_filter.h b/include/rocksdb/compaction_filter.h
index 29f7fae8f..57668a24e 100644
--- a/include/rocksdb/compaction_filter.h
+++ b/include/rocksdb/compaction_filter.h
@@ -39,11 +39,11 @@ class CompactionFilter : public Customizable {
   enum class Decision {
     kKeep,
     kRemove,
-    kRemoveWithSingleDelete,
     kChangeValue,
     kRemoveAndSkipUntil,
     kChangeBlobIndex,  // used internally by BlobDB.
     kIOError,          // used internally by BlobDB.
+    kPurge,            // used for keys that can only be SingleDelete'ed
     kUndetermined,
   };
 
diff --git a/include/rocksdb/utilities/ldb_cmd.h b/include/rocksdb/utilities/ldb_cmd.h
index 71fedc5d4..e75d21857 100644
--- a/include/rocksdb/utilities/ldb_cmd.h
+++ b/include/rocksdb/utilities/ldb_cmd.h
@@ -288,6 +288,9 @@ class LDBCommand {
   bool IsValueHex(const std::map<std::string, std::string>& options,
                   const std::vector<std::string>& flags);
 
+  bool IsTryLoadOptions(const std::map<std::string, std::string>& options,
+                        const std::vector<std::string>& flags);
+
   /**
    * Converts val to a boolean.
    * val must be either true or false (case insensitive).
diff --git a/monitoring/histogram.cc b/monitoring/histogram.cc
index c6ef856b0..323a08efb 100644
--- a/monitoring/histogram.cc
+++ b/monitoring/histogram.cc
@@ -26,7 +26,8 @@ HistogramBucketMapper::HistogramBucketMapper() {
   // size of array buckets_ in HistogramImpl
   bucketValues_ = {1, 2};
   double bucket_val = static_cast<double>(bucketValues_.back());
-  while ((bucket_val = 1.5 * bucket_val) <= static_cast<double>(port::kMaxUint64)) {
+  while ((bucket_val = 1.5 * bucket_val) <=
+         static_cast<double>(std::numeric_limits<uint64_t>::max())) {
     bucketValues_.push_back(static_cast<uint64_t>(bucket_val));
     // Extracts two most significant digits to make histogram buckets more
     // human-readable. E.g., 172 becomes 170.
diff --git a/monitoring/persistent_stats_history.cc b/monitoring/persistent_stats_history.cc
index 86fe98f1f..9bde38b3a 100644
--- a/monitoring/persistent_stats_history.cc
+++ b/monitoring/persistent_stats_history.cc
@@ -98,13 +98,13 @@ std::pair<uint64_t, std::string> parseKey(const Slice& key,
   std::string::size_type pos = key_str.find("#");
   // TODO(Zhongyi): add counters to track parse failures?
   if (pos == std::string::npos) {
-    result.first = port::kMaxUint64;
+    result.first = std::numeric_limits<uint64_t>::max();
     result.second.clear();
   } else {
     uint64_t parsed_time = ParseUint64(key_str.substr(0, pos));
     // skip entries with timestamp smaller than start_time
     if (parsed_time < start_time) {
-      result.first = port::kMaxUint64;
+      result.first = std::numeric_limits<uint64_t>::max();
       result.second = "";
     } else {
       result.first = parsed_time;
diff --git a/options/cf_options.cc b/options/cf_options.cc
index d1e6f13d5..8c927fff5 100644
--- a/options/cf_options.cc
+++ b/options/cf_options.cc
@@ -886,7 +886,7 @@ uint64_t MultiplyCheckOverflow(uint64_t op1, double op2) {
   if (op1 == 0 || op2 <= 0) {
     return 0;
   }
-  if (port::kMaxUint64 / op1 < op2) {
+  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
     return op1;
   }
   return static_cast<uint64_t>(op1 * op2);
@@ -915,8 +915,9 @@ size_t MaxFileSizeForL0MetaPin(const MutableCFOptions& cf_options) {
   // or a former larger `write_buffer_size` value to avoid surprising users with
   // pinned memory usage. We use a factor of 1.5 to account for overhead
   // introduced during flush in most cases.
-  if (port::kMaxSizet / 3 < cf_options.write_buffer_size / 2) {
-    return port::kMaxSizet;
+  if (std::numeric_limits<size_t>::max() / 3 <
+      cf_options.write_buffer_size / 2) {
+    return std::numeric_limits<size_t>::max();
   }
   return cf_options.write_buffer_size / 2 * 3;
 }
diff --git a/options/options_test.cc b/options/options_test.cc
index 58070b3ff..3ff230eff 100644
--- a/options/options_test.cc
+++ b/options/options_test.cc
@@ -4082,9 +4082,10 @@ TEST_F(OptionsParserTest, IntegerParsing) {
   ASSERT_EQ(ParseUint32("4294967295"), 4294967295U);
   ASSERT_EQ(ParseSizeT("18446744073709551615"), 18446744073709551615U);
   ASSERT_EQ(ParseInt64("9223372036854775807"), 9223372036854775807);
-  ASSERT_EQ(ParseInt64("-9223372036854775808"), port::kMinInt64);
+  ASSERT_EQ(ParseInt64("-9223372036854775808"),
+            std::numeric_limits<int64_t>::min());
   ASSERT_EQ(ParseInt32("2147483647"), 2147483647);
-  ASSERT_EQ(ParseInt32("-2147483648"), port::kMinInt32);
+  ASSERT_EQ(ParseInt32("-2147483648"), std::numeric_limits<int32_t>::min());
   ASSERT_EQ(ParseInt("-32767"), -32767);
   ASSERT_EQ(ParseDouble("-1.234567"), -1.234567);
 }
diff --git a/port/port_posix.h b/port/port_posix.h
index 1bb0841fd..e23b82823 100644
--- a/port/port_posix.h
+++ b/port/port_posix.h
@@ -95,16 +95,6 @@ namespace ROCKSDB_NAMESPACE {
 extern const bool kDefaultToAdaptiveMutex;
 
 namespace port {
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = std::numeric_limits<uint32_t>::max();
-const int kMaxInt32 = std::numeric_limits<int32_t>::max();
-const int kMinInt32 = std::numeric_limits<int32_t>::min();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-const int64_t kMinInt64 = std::numeric_limits<int64_t>::min();
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
 constexpr bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
 #undef PLATFORM_IS_LITTLE_ENDIAN
 
diff --git a/port/win/port_win.h b/port/win/port_win.h
index a1d8e02bf..6894758d3 100644
--- a/port/win/port_win.h
+++ b/port/win/port_win.h
@@ -82,37 +82,11 @@ namespace port {
 #define snprintf _snprintf
 
 #define ROCKSDB_NOEXCEPT
-// std::numeric_limits<size_t>::max() is not constexpr just yet
-// therefore, use the same limits
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = UINT32_MAX;
-const int kMaxInt32 = INT32_MAX;
-const int kMinInt32 = INT32_MIN;
-const int64_t kMaxInt64 = INT64_MAX;
-const int64_t kMinInt64 = INT64_MIN;
-const uint64_t kMaxUint64 = UINT64_MAX;
-
-#ifdef _WIN64
-const size_t kMaxSizet = UINT64_MAX;
-#else
-const size_t kMaxSizet = UINT_MAX;
-#endif
 
 #else  // VS >= 2015 or MinGW
 
 #define ROCKSDB_NOEXCEPT noexcept
 
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = std::numeric_limits<uint32_t>::max();
-const int kMaxInt32 = std::numeric_limits<int32_t>::max();
-const int kMinInt32 = std::numeric_limits<int32_t>::min();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-const int64_t kMinInt64 = std::numeric_limits<int64_t>::min();
-
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
 #endif  //_MSC_VER
 
 // "Windows is designed to run on little-endian computer architectures."
diff --git a/rocksdb.pc.in b/rocksdb.pc.in
new file mode 100644
index 000000000..0bbb625fe
--- /dev/null
+++ b/rocksdb.pc.in
@@ -0,0 +1,11 @@
+prefix="@CMAKE_INSTALL_PREFIX@"
+exec_prefix="${prefix}"
+libdir="${prefix}/lib"
+includedir="${prefix}/include"
+
+Name: @PROJECT_NAME@
+Description: @CMAKE_PROJECT_DESCRIPTION@
+URL: @CMAKE_PROJECT_HOMEPAGE_URL@
+Version: @PROJECT_VERSION@
+Cflags: -I"${includedir}"
+Libs: -L"${libdir}" -lrocksdb
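With the template above installed as `rocksdb.pc`, downstream builds can resolve compile and link flags via pkg-config. A minimal consumer sketch (the scratch path and build line are assumptions, not part of this patch):

```cpp
// Compile with flags resolved from the installed rocksdb.pc, e.g.:
//   g++ -std=c++17 use_rocksdb.cc $(pkg-config --cflags --libs rocksdb)
#include <rocksdb/db.h>

int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options options;
  options.create_if_missing = true;
  // Hypothetical scratch path for the demo.
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb_pc_demo", &db);
  if (!s.ok()) {
    return 1;
  }
  delete db;
  return 0;
}
```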
diff --git a/table/block_based/block.cc b/table/block_based/block.cc
index ba1489f93..ef02bc869 100644
--- a/table/block_based/block.cc
+++ b/table/block_based/block.cc
@@ -721,7 +721,7 @@ void BlockIter<TValue>::FindKeyAfterBinarySeek(const Slice& target,
   } else {
     // We are in the last restart interval. The while-loop will terminate by
     // `Valid()` returning false upon advancing past the block's last key.
-    max_offset = port::kMaxUint32;
+    max_offset = std::numeric_limits<uint32_t>::max();
   }
   while (true) {
     NextImpl();
diff --git a/table/block_based/block_based_table_factory.cc b/table/block_based/block_based_table_factory.cc
index 2a2258a40..db2858b19 100644
--- a/table/block_based/block_based_table_factory.cc
+++ b/table/block_based/block_based_table_factory.cc
@@ -658,7 +658,7 @@ Status BlockBasedTableFactory::ValidateOptions(
     return Status::InvalidArgument(
         "Block alignment requested but block size is not a power of 2");
   }
-  if (table_options_.block_size > port::kMaxUint32) {
+  if (table_options_.block_size > std::numeric_limits<uint32_t>::max()) {
     return Status::InvalidArgument(
         "block size exceeds maximum number (4GiB) allowed");
   }
diff --git a/table/cuckoo/cuckoo_table_builder.h b/table/cuckoo/cuckoo_table_builder.h
index a72d5183a..20ed71bfc 100644
--- a/table/cuckoo/cuckoo_table_builder.h
+++ b/table/cuckoo/cuckoo_table_builder.h
@@ -85,7 +85,7 @@ class CuckooTableBuilder : public TableBuilder {
     // We assume number of items is <= 2^32.
     uint32_t make_space_for_key_call_id;
   };
-  static const uint32_t kMaxVectorIdx = port::kMaxInt32;
+  static const uint32_t kMaxVectorIdx = std::numeric_limits<int32_t>::max();
 
   bool MakeSpaceForKey(const autovector<uint64_t>& hash_vals,
                        const uint32_t call_id,
diff --git a/table/meta_blocks.cc b/table/meta_blocks.cc
index 6ffa4a14f..13ecf8714 100644
--- a/table/meta_blocks.cc
+++ b/table/meta_blocks.cc
@@ -53,8 +53,8 @@ Slice MetaIndexBuilder::Finish() {
 // object, so there's no need for restart points. Thus we set the restart
 // interval to infinity to save space.
 PropertyBlockBuilder::PropertyBlockBuilder()
-    : properties_block_(
-          new BlockBuilder(port::kMaxInt32 /* restart interval */)) {}
+    : properties_block_(new BlockBuilder(
+          std::numeric_limits<int32_t>::max() /* restart interval */)) {}
 
 void PropertyBlockBuilder::Add(const std::string& name,
                                const std::string& val) {
diff --git a/table/table_properties.cc b/table/table_properties.cc
index 8af0315ba..49b474758 100644
--- a/table/table_properties.cc
+++ b/table/table_properties.cc
@@ -17,7 +17,7 @@ namespace ROCKSDB_NAMESPACE {
 
 const uint32_t TablePropertiesCollectorFactory::Context::kUnknownColumnFamily =
-    port::kMaxInt32;
+    std::numeric_limits<int32_t>::max();
 
 namespace {
 void AppendProperty(
diff --git a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
index ff618f4b5..59ad7004b 100644
--- a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
+++ b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
@@ -412,7 +412,7 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
   }
   std::map<uint64_t, std::map<std::string, std::map<uint64_t, double>>>
       cs_name_timeline;
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   const std::map<uint64_t, uint64_t>& trace_num_misses =
       adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
@@ -427,7 +427,8 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
     auto it = trace_num_accesses.find(time);
     assert(it != trace_num_accesses.end());
     uint64_t access = it->second;
-    cs_name_timeline[port::kMaxUint64]["trace"][time] = percent(miss, access);
+    cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
+        percent(miss, access);
   }
   for (auto const& config_caches : cache_simulator_->sim_caches()) {
     const CacheConfiguration& config = config_caches.first;
@@ -492,7 +493,7 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
   }
   std::map<uint64_t, std::map<std::string, std::map<uint64_t, uint64_t>>>
       cs_name_timeline;
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   const std::map<uint64_t, uint64_t>& trace_num_misses =
       adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
@@ -501,7 +502,8 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
     start_time = std::min(start_time, time);
     end_time = std::max(end_time, time);
     uint64_t miss = num_miss.second;
-    cs_name_timeline[port::kMaxUint64]["trace"][time] = miss;
+    cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
+        miss;
   }
   for (auto const& config_caches : cache_simulator_->sim_caches()) {
     const CacheConfiguration& config = config_caches.first;
@@ -589,7 +591,7 @@ void BlockCacheTraceAnalyzer::WriteSkewness(
   for (auto const& percent : percent_buckets) {
     label_bucket_naccesses[label_str][percent] = 0;
     size_t end_index = 0;
-    if (percent == port::kMaxUint64) {
+    if (percent == std::numeric_limits<uint64_t>::max()) {
       end_index = label_naccesses.size();
     } else {
       end_index = percent * label_naccesses.size() / 100;
@@ -856,7 +858,7 @@ void BlockCacheTraceAnalyzer::WriteAccessTimeline(const std::string& label_str,
                                                   uint64_t time_unit,
                                                   bool user_access_only) const {
   std::set<std::string> labels = ParseLabelStr(label_str);
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   std::map<std::string, std::map<uint64_t, uint64_t>> label_access_timeline;
   std::map<uint64_t, std::vector<std::string>> access_count_block_id_map;
@@ -1091,7 +1093,7 @@ void BlockCacheTraceAnalyzer::WriteReuseInterval(
                              kMicrosInSecond) /
                             block.num_accesses;
       } else {
-        avg_reuse_interval = port::kMaxUint64 - 1;
diff --git a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
index ff618f4b5..59ad7004b 100644
--- a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
+++ b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
@@ -412,7 +412,7 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
   }
   std::map<uint64_t, std::map<std::string, std::map<uint64_t, double>>>
       cs_name_timeline;
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   const std::map<uint64_t, uint64_t>& trace_num_misses =
       adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
@@ -427,7 +427,8 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
     auto it = trace_num_accesses.find(time);
     assert(it != trace_num_accesses.end());
     uint64_t access = it->second;
-    cs_name_timeline[port::kMaxUint64]["trace"][time] = percent(miss, access);
+    cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
+        percent(miss, access);
   }
   for (auto const& config_caches : cache_simulator_->sim_caches()) {
     const CacheConfiguration& config = config_caches.first;
@@ -492,7 +493,7 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
   }
   std::map<uint64_t, std::map<std::string, std::map<uint64_t, uint64_t>>>
       cs_name_timeline;
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   const std::map<uint64_t, uint64_t>& trace_num_misses =
       adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
@@ -501,7 +502,8 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
     start_time = std::min(start_time, time);
     end_time = std::max(end_time, time);
     uint64_t miss = num_miss.second;
-    cs_name_timeline[port::kMaxUint64]["trace"][time] = miss;
+    cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
+        miss;
   }
   for (auto const& config_caches : cache_simulator_->sim_caches()) {
     const CacheConfiguration& config = config_caches.first;
@@ -589,7 +591,7 @@ void BlockCacheTraceAnalyzer::WriteSkewness(
   for (auto const& percent : percent_buckets) {
     label_bucket_naccesses[label_str][percent] = 0;
     size_t end_index = 0;
-    if (percent == port::kMaxUint64) {
+    if (percent == std::numeric_limits<uint64_t>::max()) {
       end_index = label_naccesses.size();
     } else {
       end_index = percent * label_naccesses.size() / 100;
@@ -856,7 +858,7 @@ void BlockCacheTraceAnalyzer::WriteAccessTimeline(const std::string& label_str,
                                                   uint64_t time_unit,
                                                   bool user_access_only) const {
   std::set<std::string> labels = ParseLabelStr(label_str);
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   std::map<std::string, std::map<uint64_t, uint64_t>> label_access_timeline;
   std::map<uint64_t, std::vector<std::string>> access_count_block_id_map;
@@ -1091,7 +1093,7 @@ void BlockCacheTraceAnalyzer::WriteReuseInterval(
                              kMicrosInSecond) /
                             block.num_accesses;
       } else {
-        avg_reuse_interval = port::kMaxUint64 - 1;
+        avg_reuse_interval = std::numeric_limits<uint64_t>::max() - 1;
       }
       if (labels.find(kGroupbyCaller) != labels.end()) {
         for (auto const& timeline : block.caller_num_accesses_timeline) {
@@ -1152,7 +1154,7 @@ void BlockCacheTraceAnalyzer::WriteReuseLifetime(
         lifetime = (block.last_access_time - block.first_access_time) /
                    kMicrosInSecond;
       } else {
-        lifetime = port::kMaxUint64 - 1;
+        lifetime = std::numeric_limits<uint64_t>::max() - 1;
       }
       const std::string label = BuildLabel(
           labels, cf_name, fd, level, type,
@@ -2103,7 +2105,7 @@ std::vector<uint64_t> parse_buckets(const std::string& bucket_str) {
     getline(ss, bucket, ',');
     buckets.push_back(ParseUint64(bucket));
   }
-  buckets.push_back(port::kMaxUint64);
+  buckets.push_back(std::numeric_limits<uint64_t>::max());
   return buckets;
 }
diff --git a/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc b/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc
index 896a6ced1..5b8300a81 100644
--- a/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc
+++ b/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc
@@ -277,7 +277,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
     ASSERT_OK(env_->DeleteFile(mrc_path));
 
     const std::vector<std::string> time_units{"1", "60", "3600"};
-    expected_capacities.push_back(port::kMaxUint64);
+    expected_capacities.push_back(std::numeric_limits<uint64_t>::max());
     for (auto const& expected_capacity : expected_capacities) {
       for (auto const& time_unit : time_units) {
         const std::string miss_ratio_timeline_path =
@@ -293,7 +293,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
           std::string substr;
           getline(ss, substr, ',');
           if (!read_header) {
-            if (expected_capacity == port::kMaxUint64) {
+            if (expected_capacity == std::numeric_limits<uint64_t>::max()) {
               ASSERT_EQ("trace", substr);
             } else {
               ASSERT_EQ("lru-1-0", substr);
@@ -321,7 +321,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
           std::string substr;
           getline(ss, substr, ',');
           if (num_misses == 0) {
-            if (expected_capacity == port::kMaxUint64) {
+            if (expected_capacity == std::numeric_limits<uint64_t>::max()) {
               ASSERT_EQ("trace", substr);
             } else {
               ASSERT_EQ("lru-1-0", substr);
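The analyzer reserves the all-ones value as a sentinel: it keys the "trace" pseudo-cache in the timelines and terminates the percentile bucket list so the last bucket is open-ended. A simplified re-implementation of parse_buckets(), with std::stoull standing in for RocksDB's ParseUint64:

```cpp
#include <cstdint>
#include <limits>
#include <sstream>
#include <string>
#include <vector>

// Simplified sketch of parse_buckets() from this diff: the trailing max()
// sentinel guarantees the final bucket catches everything above the last
// user-supplied boundary.
std::vector<uint64_t> ParseBuckets(const std::string& bucket_str) {
  std::vector<uint64_t> buckets;
  std::stringstream ss(bucket_str);
  std::string bucket;
  while (getline(ss, bucket, ',')) {
    buckets.push_back(std::stoull(bucket));
  }
  buckets.push_back(std::numeric_limits<uint64_t>::max());
  return buckets;
}
```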
"" : " "), extra.c_str()); if (FLAGS_histogram) { for (auto it = hist_.begin(); it != hist_.end(); ++it) { fprintf(stdout, "Microseconds per %s:\n%s\n", @@ -8075,7 +8073,8 @@ class Benchmark { } std::unique_ptr shi; - Status s = db->GetStatsHistory(0, port::kMaxUint64, &shi); + Status s = + db->GetStatsHistory(0, std::numeric_limits::max(), &shi); if (!s.ok()) { fprintf(stdout, "%s\n", s.ToString().c_str()); return; diff --git a/tools/db_crashtest.py b/tools/db_crashtest.py index 53aa95ee0..a7c050f37 100644 --- a/tools/db_crashtest.py +++ b/tools/db_crashtest.py @@ -132,6 +132,8 @@ default_params = { # Sync mode might make test runs slower so running it in a smaller chance "sync" : lambda : random.choice( [1 if t == 0 else 0 for t in range(0, 20)]), + "bytes_per_sync": lambda: random.choice([0, 262144]), + "wal_bytes_per_sync": lambda: random.choice([0, 524288]), # Disable compaction_readahead_size because the test is not passing. #"compaction_readahead_size" : lambda : random.choice( # [0, 0, 1024 * 1024]), @@ -153,7 +155,7 @@ default_params = { "open_metadata_write_fault_one_in": lambda: random.choice([0, 0, 8]), "open_write_fault_one_in": lambda: random.choice([0, 0, 16]), "open_read_fault_one_in": lambda: random.choice([0, 0, 32]), - "sync_fault_injection": False, + "sync_fault_injection": lambda: random.randint(0, 1), "get_property_one_in": 1000000, "paranoid_file_checks": lambda: random.choice([0, 1, 1, 1]), "max_write_buffer_size_to_maintain": lambda: random.choice( diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc index 2228ea47e..91a0b2775 100644 --- a/tools/ldb_cmd.cc +++ b/tools/ldb_cmd.cc @@ -408,7 +408,7 @@ LDBCommand::LDBCommand(const std::map& options, is_value_hex_ = IsValueHex(options, flags); is_db_ttl_ = IsFlagPresent(flags, ARG_TTL); timestamp_ = IsFlagPresent(flags, ARG_TIMESTAMP); - try_load_options_ = IsFlagPresent(flags, ARG_TRY_LOAD_OPTIONS); + try_load_options_ = IsTryLoadOptions(options, flags); force_consistency_checks_ = !IsFlagPresent(flags, ARG_DISABLE_CONSISTENCY_CHECKS); enable_blob_files_ = IsFlagPresent(flags, ARG_ENABLE_BLOB_FILES); @@ -1064,6 +1064,24 @@ bool LDBCommand::IsValueHex(const std::map& options, ParseBooleanOption(options, ARG_VALUE_HEX, false)); } +bool LDBCommand::IsTryLoadOptions( + const std::map& options, + const std::vector& flags) { + if (IsFlagPresent(flags, ARG_TRY_LOAD_OPTIONS)) { + return true; + } + // if `DB` is specified and not explicitly to create a new db, default + // `try_load_options` to true. The user could still disable that by set + // `try_load_options=false`. + // Note: Opening as TTL DB doesn't support `try_load_options`, so it's default + // to false. TODO: TTL_DB may need to fix that, otherwise it's unable to open + // DB which has incompatible setting with default options. 
diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc
index 2228ea47e..91a0b2775 100644
--- a/tools/ldb_cmd.cc
+++ b/tools/ldb_cmd.cc
@@ -408,7 +408,7 @@ LDBCommand::LDBCommand(const std::map<std::string, std::string>& options,
   is_value_hex_ = IsValueHex(options, flags);
   is_db_ttl_ = IsFlagPresent(flags, ARG_TTL);
   timestamp_ = IsFlagPresent(flags, ARG_TIMESTAMP);
-  try_load_options_ = IsFlagPresent(flags, ARG_TRY_LOAD_OPTIONS);
+  try_load_options_ = IsTryLoadOptions(options, flags);
   force_consistency_checks_ =
       !IsFlagPresent(flags, ARG_DISABLE_CONSISTENCY_CHECKS);
   enable_blob_files_ = IsFlagPresent(flags, ARG_ENABLE_BLOB_FILES);
@@ -1064,6 +1064,24 @@ bool LDBCommand::IsValueHex(const std::map<std::string, std::string>& options,
           ParseBooleanOption(options, ARG_VALUE_HEX, false));
 }
 
+bool LDBCommand::IsTryLoadOptions(
+    const std::map<std::string, std::string>& options,
+    const std::vector<std::string>& flags) {
+  if (IsFlagPresent(flags, ARG_TRY_LOAD_OPTIONS)) {
+    return true;
+  }
+  // If `--db` is specified and the command is not explicitly creating a new
+  // DB, default `try_load_options` to true. The user can still disable that
+  // with `--try_load_options=false`.
+  // Note: opening as a TTL DB doesn't support `try_load_options`, so it
+  // defaults to false. TODO: TTL DB may need to fix that; otherwise it cannot
+  // open a DB whose settings are incompatible with the default options.
+  bool default_val = (options.find(ARG_DB) != options.end()) &&
+                     !IsFlagPresent(flags, ARG_CREATE_IF_MISSING) &&
+                     !IsFlagPresent(flags, ARG_TTL);
+  return ParseBooleanOption(options, ARG_TRY_LOAD_OPTIONS, default_val);
+}
+
 bool LDBCommand::ParseBooleanOption(
     const std::map<std::string, std::string>& options,
     const std::string& option, bool default_val) {
diff --git a/tools/ldb_tool.cc b/tools/ldb_tool.cc
index e3c684b66..402516419 100644
--- a/tools/ldb_tool.cc
+++ b/tools/ldb_tool.cc
@@ -50,7 +50,10 @@ void LDBCommandRunner::PrintHelp(const LDBOptions& ldb_options,
       " with 'put','get','scan','dump','query','batchput'"
       " : DB supports ttl and value is internally timestamp-suffixed\n");
   ret.append(" --" + LDBCommand::ARG_TRY_LOAD_OPTIONS +
-             " : Try to load option file from DB.\n");
+             " : Try to load option file from DB. Defaults to true if " +
+             LDBCommand::ARG_DB +
+             " is specified, the command is not creating a new DB, and the "
+             "DB is not opened as a TTL DB. Can be set to false explicitly.\n");
   ret.append(" --" + LDBCommand::ARG_DISABLE_CONSISTENCY_CHECKS +
              " : Set options.force_consistency_checks = false.\n");
   ret.append(" --" + LDBCommand::ARG_IGNORE_UNKNOWN_OPTIONS +
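The new default can be read off in isolation; a standalone sketch of the decision, with literal flag names standing in for the ARG_* constants and a lambda standing in for IsFlagPresent:

```cpp
#include <algorithm>
#include <map>
#include <string>
#include <vector>

// Sketch: --try_load_options is implied when --db is given and the command
// neither creates a new DB nor opens a TTL DB. The explicit option, when
// present, still wins (handled by ParseBooleanOption in the real code).
bool TryLoadOptionsDefault(const std::map<std::string, std::string>& options,
                           const std::vector<std::string>& flags) {
  auto has_flag = [&](const std::string& f) {
    return std::find(flags.begin(), flags.end(), f) != flags.end();
  };
  return options.count("db") > 0 && !has_flag("create_if_missing") &&
         !has_flag("ttl");
}
```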
diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc
index 622344e88..1b27cc33c 100644
--- a/tools/sst_dump_tool.cc
+++ b/tools/sst_dump_tool.cc
@@ -282,7 +282,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
     } else if (ParseIntArg(argv[i], "--compression_max_dict_bytes=",
                            "compression_max_dict_bytes must be numeric",
                            &tmp_val)) {
-      if (tmp_val < 0 || tmp_val > port::kMaxUint32) {
+      if (tmp_val < 0 || tmp_val > std::numeric_limits<uint32_t>::max()) {
         fprintf(stderr, "compression_max_dict_bytes must be a uint32_t: '%s'\n",
                 argv[i]);
         print_help(/*to_stderr*/ true);
@@ -292,7 +292,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
     } else if (ParseIntArg(argv[i], "--compression_zstd_max_train_bytes=",
                            "compression_zstd_max_train_bytes must be numeric",
                            &tmp_val)) {
-      if (tmp_val < 0 || tmp_val > port::kMaxUint32) {
+      if (tmp_val < 0 || tmp_val > std::numeric_limits<uint32_t>::max()) {
         fprintf(stderr,
                 "compression_zstd_max_train_bytes must be a uint32_t: '%s'\n",
                 argv[i]);
diff --git a/tools/trace_analyzer_tool.cc b/tools/trace_analyzer_tool.cc
index 972eff863..6423352cd 100644
--- a/tools/trace_analyzer_tool.cc
+++ b/tools/trace_analyzer_tool.cc
@@ -190,7 +190,7 @@ uint64_t MultiplyCheckOverflow(uint64_t op1, uint64_t op2) {
   if (op1 == 0 || op2 == 0) {
     return 0;
   }
-  if (port::kMaxUint64 / op1 < op2) {
+  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
     return op1;
   }
   return (op1 * op2);
diff --git a/trace_replay/block_cache_tracer.h b/trace_replay/block_cache_tracer.h
index 23672e1df..feea5ad51 100644
--- a/trace_replay/block_cache_tracer.h
+++ b/trace_replay/block_cache_tracer.h
@@ -281,7 +281,7 @@ class BlockCacheTracer {
                          const Slice& block_key, const Slice& cf_name,
                          const Slice& referenced_key);
 
-  // GetId cycles from 1 to port::kMaxUint64.
+  // GetId cycles from 1 to std::numeric_limits<uint64_t>::max().
   uint64_t NextGetId();
 
  private:
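MultiplyCheckOverflow() is small enough to restate on its own: if the product would exceed uint64_t, it returns op1 instead of letting the multiplication silently wrap. An extracted, compilable version:

```cpp
#include <cstdint>
#include <limits>

// The overflow guard from trace_analyzer_tool.cc, isolated: dividing the
// type's max by op1 bounds the largest op2 that multiplies safely.
uint64_t MultiplyCheckOverflow(uint64_t op1, uint64_t op2) {
  if (op1 == 0 || op2 == 0) {
    return 0;
  }
  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
    return op1;  // fall back to op1 rather than wrapping around
  }
  return op1 * op2;
}
```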
diff --git a/util/heap.h b/util/heap.h
index e0737581e..3f4cddeb9 100644
--- a/util/heap.h
+++ b/util/heap.h
@@ -101,7 +101,9 @@ class BinaryHeap {
 
   size_t size() const { return data_.size(); }
 
-  void reset_root_cmp_cache() { root_cmp_cache_ = port::kMaxSizet; }
+  void reset_root_cmp_cache() {
+    root_cmp_cache_ = std::numeric_limits<size_t>::max();
+  }
 
  private:
   static inline size_t get_root() { return 0; }
@@ -126,7 +128,7 @@ class BinaryHeap {
   void downheap(size_t index) {
     T v = std::move(data_[index]);
-    size_t picked_child = port::kMaxSizet;
+    size_t picked_child = std::numeric_limits<size_t>::max();
     while (1) {
       const size_t left_child = get_left(index);
       if (get_left(index) >= data_.size()) {
@@ -165,7 +167,7 @@ class BinaryHeap {
   Compare cmp_;
   autovector<T> data_;
   // Used to reduce number of cmp_ calls in downheap()
-  size_t root_cmp_cache_ = port::kMaxSizet;
+  size_t root_cmp_cache_ = std::numeric_limits<size_t>::max();
 };
 
 }  // namespace ROCKSDB_NAMESPACE
diff --git a/util/rate_limiter.cc b/util/rate_limiter.cc
index 17e573ef7..f369e3220 100644
--- a/util/rate_limiter.cc
+++ b/util/rate_limiter.cc
@@ -31,8 +31,8 @@ size_t RateLimiter::RequestToken(size_t bytes, size_t alignment,
   if (alignment > 0) {
     // Here we may actually require more than burst and block
-    // but we can not write less than one page at a time on direct I/O
-    // thus we may want not to use ratelimiter
+    // as we can not write/read less than one page at a time on direct I/O
+    // thus we do not want to be strictly constrained by burst
     bytes = std::max(alignment, TruncateToPageBoundary(alignment, bytes));
   }
   Request(bytes, io_priority, stats, op_type);
@@ -347,10 +347,11 @@ void GenericRateLimiter::RefillBytesAndGrantRequests() {
 
 int64_t GenericRateLimiter::CalculateRefillBytesPerPeriod(
     int64_t rate_bytes_per_sec) {
-  if (port::kMaxInt64 / rate_bytes_per_sec < options_.refill_period_us) {
+  if (std::numeric_limits<int64_t>::max() / rate_bytes_per_sec <
+      options_.refill_period_us) {
     // Avoid unexpected result in the overflow case. The result now is still
     // inaccurate but is a number that is large enough.
-    return port::kMaxInt64 / 1000000;
+    return std::numeric_limits<int64_t>::max() / 1000000;
   } else {
     return rate_bytes_per_sec * options_.refill_period_us / 1000000;
   }
@@ -374,7 +375,7 @@ Status GenericRateLimiter::Tune() {
       std::chrono::microseconds(options_.refill_period_us);
   // We tune every kRefillsPerTune intervals, so the overflow and division-by-
   // zero conditions should never happen.
-  assert(num_drains_ <= port::kMaxInt64 / 100);
+  assert(num_drains_ <= std::numeric_limits<int64_t>::max() / 100);
   assert(elapsed_intervals > 0);
   int64_t drained_pct = num_drains_ * 100 / elapsed_intervals;
 
@@ -385,14 +386,15 @@ Status GenericRateLimiter::Tune() {
   } else if (drained_pct < kLowWatermarkPct) {
     // sanitize to prevent overflow
     int64_t sanitized_prev_bytes_per_sec =
-        std::min(prev_bytes_per_sec, port::kMaxInt64 / 100);
+        std::min(prev_bytes_per_sec, std::numeric_limits<int64_t>::max() / 100);
     new_bytes_per_sec =
         std::max(options_.max_bytes_per_sec / kAllowedRangeFactor,
                  sanitized_prev_bytes_per_sec * 100 / (100 + kAdjustFactorPct));
   } else if (drained_pct > kHighWatermarkPct) {
     // sanitize to prevent overflow
-    int64_t sanitized_prev_bytes_per_sec = std::min(
-        prev_bytes_per_sec, port::kMaxInt64 / (100 + kAdjustFactorPct));
+    int64_t sanitized_prev_bytes_per_sec =
+        std::min(prev_bytes_per_sec, std::numeric_limits<int64_t>::max() /
+                                         (100 + kAdjustFactorPct));
     new_bytes_per_sec =
         std::min(options_.max_bytes_per_sec,
                  sanitized_prev_bytes_per_sec * (100 + kAdjustFactorPct) / 100);
@@ -433,7 +435,8 @@ static int RegisterBuiltinRateLimiters(ObjectLibrary& library,
       GenericRateLimiter::kClassName(),
       [](const std::string& /*uri*/, std::unique_ptr<RateLimiter>* guard,
          std::string* /*errmsg*/) {
-        guard->reset(new GenericRateLimiter(port::kMaxInt64));
+        guard->reset(
+            new GenericRateLimiter(std::numeric_limits<int64_t>::max()));
        return guard->get();
       });
   size_t num_types;
diff --git a/util/rate_limiter_test.cc b/util/rate_limiter_test.cc
index ad44d5736..cd809d183 100644
--- a/util/rate_limiter_test.cc
+++ b/util/rate_limiter_test.cc
@@ -36,7 +36,7 @@ class RateLimiterTest : public testing::Test {
 };
 
 TEST_F(RateLimiterTest, OverflowRate) {
-  GenericRateLimiter limiter(port::kMaxInt64, 1000, 10,
+  GenericRateLimiter limiter(std::numeric_limits<int64_t>::max(), 1000, 10,
                              RateLimiter::Mode::kWritesOnly,
                              SystemClock::Default(), false /* auto_tuned */);
   ASSERT_GT(limiter.GetSingleBurstBytes(), 1000000000ll);
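The Tune() edits all follow one sanitize-then-scale pattern: clamp the operand first so the later multiplication cannot overflow int64_t. A sketch, with an assumed kAdjustFactorPct value for illustration (the real constant lives in rate_limiter.cc and may differ):

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>

constexpr int64_t kAdjustFactorPct = 100;  // assumed value, not from the patch

// Clamp so that sanitized * (100 + kAdjustFactorPct) stays within int64_t,
// then apply the percentage increase. Mirrors the high-watermark branch.
int64_t ScaleUpWithoutOverflow(int64_t prev_bytes_per_sec) {
  int64_t sanitized = std::min(
      prev_bytes_per_sec,
      std::numeric_limits<int64_t>::max() / (100 + kAdjustFactorPct));
  return sanitized * (100 + kAdjustFactorPct) / 100;
}
```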
diff --git a/util/string_util.cc b/util/string_util.cc
index 03bf5a40c..24b70ba10 100644
--- a/util/string_util.cc
+++ b/util/string_util.cc
@@ -315,7 +315,8 @@ uint32_t ParseUint32(const std::string& value) {
 
 int32_t ParseInt32(const std::string& value) {
   int64_t num = ParseInt64(value);
-  if (num <= port::kMaxInt32 && num >= port::kMinInt32) {
+  if (num <= std::numeric_limits<int32_t>::max() &&
+      num >= std::numeric_limits<int32_t>::min()) {
     return static_cast<int32_t>(num);
   } else {
     throw std::out_of_range(value);
diff --git a/utilities/backup/backup_engine.cc b/utilities/backup/backup_engine.cc
index af633bff4..1c6a2cb0c 100644
--- a/utilities/backup/backup_engine.cc
+++ b/utilities/backup/backup_engine.cc
@@ -1012,8 +1012,9 @@ IOStatus BackupEngineImpl::Initialize() {
     // we might need to clean up from previous crash or I/O errors
     might_need_garbage_collect_ = true;
 
-    if (options_.max_valid_backups_to_open != port::kMaxInt32) {
-      options_.max_valid_backups_to_open = port::kMaxInt32;
+    if (options_.max_valid_backups_to_open !=
+        std::numeric_limits<int32_t>::max()) {
+      options_.max_valid_backups_to_open = std::numeric_limits<int32_t>::max();
       ROCKS_LOG_WARN(
           options_.info_log,
           "`max_valid_backups_to_open` is not set to the default value. Ignoring "
@@ -1434,7 +1435,8 @@ IOStatus BackupEngineImpl::CreateNewBackupWithMetadata(
               contents.size(), db_options.statistics.get(), 0 /* size_limit */,
               false /* shared_checksum */, options.progress_callback, contents);
         } /* create_file_cb */,
-        &sequence_number, options.flush_before_backup ? 0 : port::kMaxUint64,
+        &sequence_number,
+        options.flush_before_backup ? 0 : std::numeric_limits<uint64_t>::max(),
         compare_checksum));
     if (io_s.ok()) {
       new_backup->SetSequenceNumber(sequence_number);
@@ -2171,7 +2173,7 @@ IOStatus BackupEngineImpl::AddBackupFileWorkItem(
       return io_s;
     }
   }
-  if (size_bytes == port::kMaxUint64) {
+  if (size_bytes == std::numeric_limits<uint64_t>::max()) {
     return IOStatus::NotFound("File missing: " + src_path);
   }
   // dst_relative depends on the following conditions:
diff --git a/utilities/backup/backup_engine_test.cc b/utilities/backup/backup_engine_test.cc
index 8585dbf12..00b71cfa0 100644
--- a/utilities/backup/backup_engine_test.cc
+++ b/utilities/backup/backup_engine_test.cc
@@ -3756,7 +3756,8 @@ TEST_F(BackupEngineTest, WriteOnlyEngineNoSharedFileDeletion) {
     }
     CloseDBAndBackupEngine();
 
-    engine_options_->max_valid_backups_to_open = port::kMaxInt32;
+    engine_options_->max_valid_backups_to_open =
+        std::numeric_limits<int32_t>::max();
     AssertBackupConsistency(i + 1, 0, (i + 1) * kNumKeys);
   }
 }
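ParseInt32() now spells the narrowing check out against std::numeric_limits<int32_t>: parse as int64_t first, then verify the value fits before casting down. A standalone version, with std::stoll standing in for RocksDB's ParseInt64:

```cpp
#include <cstdint>
#include <limits>
#include <stdexcept>
#include <string>

// Range-checked narrowing, mirroring the string_util.cc change.
int32_t ParseInt32Checked(const std::string& value) {
  int64_t num = std::stoll(value);
  if (num <= std::numeric_limits<int32_t>::max() &&
      num >= std::numeric_limits<int32_t>::min()) {
    return static_cast<int32_t>(num);
  }
  throw std::out_of_range(value);
}
```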
diff --git a/utilities/fault_injection_fs.cc b/utilities/fault_injection_fs.cc
index a07476bcd..161118672 100644
--- a/utilities/fault_injection_fs.cc
+++ b/utilities/fault_injection_fs.cc
@@ -16,6 +16,7 @@
 
 #include "utilities/fault_injection_fs.h"
 
+#include <algorithm>
 #include <functional>
 #include <utility>
 
@@ -290,6 +291,33 @@ IOStatus TestFSWritableFile::Sync(const IOOptions& options,
   return io_s;
 }
 
+IOStatus TestFSWritableFile::RangeSync(uint64_t offset, uint64_t nbytes,
+                                       const IOOptions& options,
+                                       IODebugContext* dbg) {
+  if (!fs_->IsFilesystemActive()) {
+    return fs_->GetError();
+  }
+  // Assumes caller passes consecutive byte ranges.
+  uint64_t sync_limit = offset + nbytes;
+  uint64_t buf_begin =
+      state_.pos_at_last_sync_ < 0 ? 0 : state_.pos_at_last_sync_;
+
+  IOStatus io_s;
+  if (sync_limit < buf_begin) {
+    return io_s;
+  }
+  uint64_t num_to_sync = std::min(static_cast<uint64_t>(state_.buffer_.size()),
+                                  sync_limit - buf_begin);
+  Slice buf_to_sync(state_.buffer_.data(), num_to_sync);
+  io_s = target_->Append(buf_to_sync, options, dbg);
+  state_.buffer_ = state_.buffer_.substr(num_to_sync);
+  // Ignore sync errors
+  target_->RangeSync(offset, nbytes, options, dbg).PermitUncheckedError();
+  state_.pos_at_last_sync_ = offset + num_to_sync;
+  fs_->WritableFileSynced(state_);
+  return io_s;
+}
+
 TestFSRandomRWFile::TestFSRandomRWFile(const std::string& /*fname*/,
                                        std::unique_ptr<FSWritableFile>&& f,
                                        FaultInjectionTestFS* fs)
diff --git a/utilities/fault_injection_fs.h b/utilities/fault_injection_fs.h
index b33964489..bca85ed07 100644
--- a/utilities/fault_injection_fs.h
+++ b/utilities/fault_injection_fs.h
@@ -76,6 +76,9 @@ class TestFSWritableFile : public FSWritableFile {
                          IODebugContext* dbg) override;
   virtual IOStatus Flush(const IOOptions&, IODebugContext*) override;
   virtual IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override;
+  virtual IOStatus RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/,
+                             const IOOptions& options,
+                             IODebugContext* dbg) override;
   virtual bool IsSyncThreadSafe() const override { return true; }
   virtual IOStatus PositionedAppend(const Slice& data, uint64_t offset,
                                     const IOOptions& options,
diff --git a/utilities/write_batch_with_index/write_batch_with_index_internal.h b/utilities/write_batch_with_index/write_batch_with_index_internal.h
index cf8c46e5c..edabc95bc 100644
--- a/utilities/write_batch_with_index/write_batch_with_index_internal.h
+++ b/utilities/write_batch_with_index/write_batch_with_index_internal.h
@@ -95,7 +95,7 @@ struct WriteBatchIndexEntry {
                        bool is_forward_direction, bool is_seek_to_first)
       // For SeekForPrev(), we need to make the dummy entry larger than any
       // entry who has the same search key. Otherwise, we'll miss those entries.
-      : offset(is_forward_direction ? 0 : port::kMaxSizet),
+      : offset(is_forward_direction ? 0 : std::numeric_limits<size_t>::max()),
         column_family(_column_family),
         key_offset(0),
         key_size(is_seek_to_first ? kFlagMinInCf : 0),
@@ -105,7 +105,7 @@ struct WriteBatchIndexEntry {
 
   // If this flag appears in the key_size, it indicates a
   // key that is smaller than any other entry for the same column family.
-  static const size_t kFlagMinInCf = port::kMaxSizet;
+  static const size_t kFlagMinInCf = std::numeric_limits<size_t>::max();
 
   bool is_min_in_cf() const {
     assert(key_size != kFlagMinInCf ||
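Why size_t max for the SeekForPrev() dummy entry: entries sharing a search key are tie-broken by offset, so building the probe with the largest possible offset sorts it after every real entry with that key. A minimal sketch of just that ordering trick (the struct name and reduced fields are illustrative):

```cpp
#include <cstddef>
#include <limits>

// Reduced stand-in for WriteBatchIndexEntry: only the offset tie-breaker.
struct DummyEntrySketch {
  size_t offset;
  // Forward seeks want the probe to sort before equal-keyed entries
  // (offset 0); SeekForPrev() wants it to sort after all of them.
  explicit DummyEntrySketch(bool is_forward_direction)
      : offset(is_forward_direction ? 0
                                    : std::numeric_limits<size_t>::max()) {}
};
```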