Merge remote-tracking branch 'upstream/main' into dbstress-opt-file
commit 093a70a2d3
@@ -40,6 +40,8 @@ include(GoogleTest)
 get_rocksdb_version(rocksdb_VERSION)
 project(rocksdb
   VERSION ${rocksdb_VERSION}
+  DESCRIPTION "An embeddable persistent key-value store for fast storage"
+  HOMEPAGE_URL https://rocksdb.org/
   LANGUAGES CXX C ASM)

 if(POLICY CMP0042)
@@ -1121,6 +1123,12 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
     COMPATIBILITY SameMajorVersion
   )

+  configure_file(
+    ${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}.pc.in
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc
+    @ONLY
+  )
+
   install(DIRECTORY include/rocksdb COMPONENT devel DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")

   install(DIRECTORY "${PROJECT_SOURCE_DIR}/cmake/modules" COMPONENT devel DESTINATION ${package_config_destination})
@@ -1159,6 +1167,13 @@ if(NOT WIN32 OR ROCKSDB_INSTALL_ON_WINDOWS)
     COMPONENT devel
     DESTINATION ${package_config_destination}
   )
+
+  install(
+    FILES
+    ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc
+    COMPONENT devel
+    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig
+  )
 endif()

 option(WITH_ALL_TESTS "Build all test, rather than a small subset" ON)
@@ -14,6 +14,7 @@
 * Add rollback_deletion_type_callback to TransactionDBOptions so that write-prepared transactions know whether to issue a Delete or SingleDelete to cancel a previous key written during the prior prepare phase. The PR aims to prevent mixing SingleDeletes and Deletes for the same key, which can lead to undefined behavior for write-prepared transactions.
 * EXPERIMENTAL: Add new API AbortIO in file_system to abort read requests submitted asynchronously.
 * CompactionFilter::Decision has a new value: kRemoveWithSingleDelete. If CompactionFilter returns this decision, then CompactionIterator will use `SingleDelete` to mark a key as removed.
+* Renamed CompactionFilter::Decision::kRemoveWithSingleDelete to kPurge, since the latter sounds more general and hides the implementation details of how the compaction iterator handles keys.

 ### Bug Fixes
 * RocksDB calls the FileSystem::Poll API during FilePrefetchBuffer destruction, which hurts performance because it waits for completion of read requests that are no longer needed. Calling FileSystem::AbortIO to abort those requests instead fixes that performance issue.
@@ -21,6 +22,7 @@

 ### Behavior changes
 * Enforce the existing contract of SingleDelete so that SingleDelete cannot be mixed with Delete, because doing so leads to undefined behavior. Fix a number of unit tests that violated the contract but happened to pass.
+* ldb `--try_load_options` now defaults to true if `--db` is specified and a new DB is not being created; the user can still explicitly disable it with `--try_load_options=false` (or explicitly enable it with `--try_load_options`).

 ## 7.2.0 (04/15/2022)
 ### Bug Fixes
@@ -118,10 +118,19 @@ if [ -z "$USE_CLANG" ]; then
   CXX="$GCC_BASE/bin/g++"
   AR="$GCC_BASE/bin/gcc-ar"


-  CFLAGS+=" -B$BINUTILS"
+  CFLAGS+=" -B$BINUTILS -nostdinc -nostdlib"
+  CFLAGS+=" -I$GCC_BASE/include"
+  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include"
+  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/install-tools/include"
+  CFLAGS+=" -isystem $GCC_BASE/lib/gcc/x86_64-redhat-linux-gnu/11.2.1/include-fixed/"
+  CFLAGS+=" -isystem $LIBGCC_INCLUDE"
+  CFLAGS+=" -isystem $GLIBC_INCLUDE"
   CFLAGS+=" -I$GLIBC_INCLUDE"
+  CFLAGS+=" -I$LIBGCC_BASE/include"
+  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/"
+  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/x86_64-facebook-linux/"
+  CFLAGS+=" -I$LIBGCC_BASE/include/c++/11.x/backward"
-  CFLAGS+=" -isystem $GLIBC_INCLUDE -I$GLIBC_INCLUDE"
   JEMALLOC=1
 else
   # clang
@@ -115,6 +115,7 @@ Status BuildTable(
   assert(fs);

   TableProperties tp;
+  bool table_file_created = false;
   if (iter->Valid() || !range_del_agg->IsEmpty()) {
     std::unique_ptr<CompactionFilter> compaction_filter;
     if (ioptions.compaction_filter_factory != nullptr &&
@@ -158,6 +159,8 @@ Status BuildTable(
           file_checksum_func_name);
       return s;
     }
+
+    table_file_created = true;
     FileTypeSet tmp_set = ioptions.checksum_handoff_file_types;
     file->SetIOPriority(io_priority);
     file->SetWriteLifeTimeHint(write_hint);
@@ -371,15 +374,17 @@ Status BuildTable(

     constexpr IODebugContext* dbg = nullptr;

-    Status ignored = fs->DeleteFile(fname, IOOptions(), dbg);
-    ignored.PermitUncheckedError();
+    if (table_file_created) {
+      Status ignored = fs->DeleteFile(fname, IOOptions(), dbg);
+      ignored.PermitUncheckedError();
+    }

     assert(blob_file_additions || blob_file_paths.empty());

     if (blob_file_additions) {
       for (const std::string& blob_file_path : blob_file_paths) {
-        ignored = DeleteDBFile(&db_options, blob_file_path, dbname,
-                               /*force_bg=*/false, /*force_fg=*/false);
+        Status ignored = DeleteDBFile(&db_options, blob_file_path, dbname,
+                                      /*force_bg=*/false, /*force_fg=*/false);
         ignored.PermitUncheckedError();
         TEST_SYNC_POINT("BuildTable::AfterDeleteFile");
       }
@@ -501,7 +501,8 @@ std::vector<std::string> ColumnFamilyData::GetDbPaths() const {
   return paths;
 }

-const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId = port::kMaxUint32;
+const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId =
+    std::numeric_limits<uint32_t>::max();

 ColumnFamilyData::ColumnFamilyData(
     uint32_t id, const std::string& name, Version* _dummy_versions,
@@ -826,8 +827,8 @@ int GetL0ThresholdSpeedupCompaction(int level0_file_num_compaction_trigger,
   // condition.
   // Or twice as compaction trigger, if it is smaller.
   int64_t res = std::min(twice_level0_trigger, one_fourth_trigger_slowdown);
-  if (res >= port::kMaxInt32) {
-    return port::kMaxInt32;
+  if (res >= std::numeric_limits<int32_t>::max()) {
+    return std::numeric_limits<int32_t>::max();
   } else {
     // res fits in int
     return static_cast<int>(res);
@@ -518,7 +518,7 @@ uint64_t Compaction::OutputFilePreallocationSize() const {
     }
   }

-  if (max_output_file_size_ != port::kMaxUint64 &&
+  if (max_output_file_size_ != std::numeric_limits<uint64_t>::max() &&
       (immutable_options_.compaction_style == kCompactionStyleLevel ||
        output_level() > 0)) {
     preallocation_size = std::min(max_output_file_size_, preallocation_size);
@@ -616,7 +616,7 @@ bool Compaction::DoesInputReferenceBlobFiles() const {

 uint64_t Compaction::MinInputFileOldestAncesterTime(
     const InternalKey* start, const InternalKey* end) const {
-  uint64_t min_oldest_ancester_time = port::kMaxUint64;
+  uint64_t min_oldest_ancester_time = std::numeric_limits<uint64_t>::max();
   const InternalKeyComparator& icmp =
       column_family_data()->internal_comparator();
   for (const auto& level_files : inputs_) {
@@ -307,7 +307,7 @@ bool CompactionIterator::InvokeFilterIfNeeded(bool* need_skip,
       // no value associated with delete
       value_.clear();
       iter_stats_.num_record_drop_user++;
-    } else if (filter == CompactionFilter::Decision::kRemoveWithSingleDelete) {
+    } else if (filter == CompactionFilter::Decision::kPurge) {
       // convert the current key to a single delete; key_ is pointing into
       // current_key_ at this point, so updating current_key_ updates key()
       ikey_.type = kTypeSingleDeletion;
@@ -1974,7 +1974,8 @@ Status CompactionJob::FinishCompactionOutputFile(
     refined_oldest_ancester_time =
         sub_compact->compaction->MinInputFileOldestAncesterTime(
             &(meta->smallest), &(meta->largest));
-    if (refined_oldest_ancester_time != port::kMaxUint64) {
+    if (refined_oldest_ancester_time !=
+        std::numeric_limits<uint64_t>::max()) {
       meta->oldest_ancester_time = refined_oldest_ancester_time;
     }
   }
@@ -2264,7 +2265,7 @@ Status CompactionJob::OpenCompactionOutputFile(
         sub_compact->compaction->MinInputFileOldestAncesterTime(
             (sub_compact->start != nullptr) ? &tmp_start : nullptr,
             (sub_compact->end != nullptr) ? &tmp_end : nullptr);
-    if (oldest_ancester_time == port::kMaxUint64) {
+    if (oldest_ancester_time == std::numeric_limits<uint64_t>::max()) {
       oldest_ancester_time = current_time;
     }

@@ -65,7 +65,7 @@ bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
   size_t compact_bytes = static_cast<size_t>(level_files[start]->fd.file_size);
   uint64_t compensated_compact_bytes =
       level_files[start]->compensated_file_size;
-  size_t compact_bytes_per_del_file = port::kMaxSizet;
+  size_t compact_bytes_per_del_file = std::numeric_limits<size_t>::max();
   // Compaction range will be [start, limit).
   size_t limit;
   // Pull in files until the amount of compaction work per deleted file begins
@@ -717,7 +717,7 @@ Compaction* CompactionPicker::CompactRange(
     // files that are created during the current compaction.
     if (compact_range_options.bottommost_level_compaction ==
             BottommostLevelCompaction::kForceOptimized &&
-        max_file_num_to_ignore != port::kMaxUint64) {
+        max_file_num_to_ignore != std::numeric_limits<uint64_t>::max()) {
       assert(input_level == output_level);
       // inputs_shrunk holds a continuous subset of input files which were all
       // created before the current manual compaction
@@ -504,7 +504,7 @@ bool LevelCompactionBuilder::PickIntraL0Compaction() {
     return false;
   }
   return FindIntraL0Compaction(level_files, kMinFilesForIntraL0Compaction,
-                               port::kMaxUint64,
+                               std::numeric_limits<uint64_t>::max(),
                                mutable_cf_options_.max_compaction_bytes,
                                &start_level_inputs_, earliest_mem_seqno_);
 }
@@ -2653,8 +2653,8 @@ TEST_F(CompactionPickerTest, UniversalMarkedManualCompaction) {
       universal_compaction_picker.CompactRange(
           cf_name_, mutable_cf_options_, mutable_db_options_, vstorage_.get(),
           ColumnFamilyData::kCompactAllLevels, 6, CompactRangeOptions(),
-          nullptr, nullptr, &manual_end, &manual_conflict, port::kMaxUint64,
-          ""));
+          nullptr, nullptr, &manual_end, &manual_conflict,
+          std::numeric_limits<uint64_t>::max(), ""));

   ASSERT_TRUE(compaction);

@@ -1371,7 +1371,7 @@ Compaction* UniversalCompactionBuilder::PickPeriodicCompaction() {

 uint64_t UniversalCompactionBuilder::GetMaxOverlappingBytes() const {
   if (!mutable_cf_options_.compaction_options_universal.incremental) {
-    return port::kMaxUint64;
+    return std::numeric_limits<uint64_t>::max();
   } else {
     // Try to align cutting boundary with files at the next level if the
     // file isn't end up with 1/2 of target size, or it would overlap
@@ -998,7 +998,7 @@ TEST_F(DBTestCompactionFilter, DropKeyWithSingleDelete) {
                       std::string* /*new_value*/,
                       std::string* /*skip_until*/) const override {
       if (key.starts_with("b")) {
-        return Decision::kRemoveWithSingleDelete;
+        return Decision::kPurge;
       }
       return Decision::kRemove;
     }
@@ -4404,7 +4404,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithCompactionFilters) {
   for (CompactionFilterType comp_filter_type :
        {kUseCompactionFilter, kUseCompactionFilterFactory}) {
     // Assert that periodic compactions are not enabled.
-    ASSERT_EQ(port::kMaxUint64 - 1, options.periodic_compaction_seconds);
+    ASSERT_EQ(std::numeric_limits<uint64_t>::max() - 1,
+              options.periodic_compaction_seconds);

     if (comp_filter_type == kUseCompactionFilter) {
       options.compaction_filter = &test_compaction_filter;
@@ -177,7 +177,7 @@ Status DBImpl::GetLiveFilesStorageInfo(
   VectorLogPtr live_wal_files;
   bool flush_memtable = true;
   if (!immutable_db_options_.allow_2pc) {
-    if (opts.wal_size_for_flush == port::kMaxUint64) {
+    if (opts.wal_size_for_flush == std::numeric_limits<uint64_t>::max()) {
       flush_memtable = false;
     } else if (opts.wal_size_for_flush > 0) {
       // If the outstanding log files are small, we skip the flush.
@@ -2356,7 +2356,7 @@ TEST_P(DBAtomicFlushTest, PrecomputeMinLogNumberToKeepNon2PC) {
   ASSERT_OK(Flush(cf_ids));
   uint64_t log_num_after_flush = dbfull()->TEST_GetCurrentLogNumber();

-  uint64_t min_log_number_to_keep = port::kMaxUint64;
+  uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
   autovector<ColumnFamilyData*> flushed_cfds;
   autovector<autovector<VersionEdit*>> flush_edits;
   for (size_t i = 0; i != num_cfs; ++i) {
@@ -5338,7 +5338,7 @@ Status DBImpl::ReserveFileNumbersBeforeIngestion(

 Status DBImpl::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
   if (mutable_db_options_.max_open_files == -1) {
-    uint64_t oldest_time = port::kMaxUint64;
+    uint64_t oldest_time = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *versions_->GetColumnFamilySet()) {
       if (!cfd->IsDropped()) {
         uint64_t ctime;
@@ -2299,7 +2299,7 @@ class DBImpl : public DB {

   static const int KEEP_LOG_FILE_NUM = 1000;
   // MSVC version 1800 still does not have constexpr for ::max()
-  static const uint64_t kNoTimeOut = port::kMaxUint64;
+  static const uint64_t kNoTimeOut = std::numeric_limits<uint64_t>::max();

   std::string db_absolute_path_;

@@ -188,7 +188,7 @@ Status DBImpl::FlushMemTableToOutputFile(
   // a memtable without knowing such snapshot(s).
   uint64_t max_memtable_id = needs_to_sync_closed_wals
                                  ? cfd->imm()->GetLatestMemTableID()
-                                 : port::kMaxUint64;
+                                 : std::numeric_limits<uint64_t>::max();

   // If needs_to_sync_closed_wals is false, then the flush job will pick ALL
   // existing memtables of the column family when PickMemTable() is called
@@ -1041,7 +1041,8 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     }
     s = RunManualCompaction(cfd, ColumnFamilyData::kCompactAllLevels,
                             final_output_level, options, begin, end, exclusive,
-                            false, port::kMaxUint64, trim_ts);
+                            false, std::numeric_limits<uint64_t>::max(),
+                            trim_ts);
   } else {
     int first_overlapped_level = kInvalidLevel;
     int max_overlapped_level = kInvalidLevel;
@@ -1078,7 +1079,7 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     if (s.ok() && first_overlapped_level != kInvalidLevel) {
       // max_file_num_to_ignore can be used to filter out newly created SST
       // files, useful for bottom level compaction in a manual compaction
-      uint64_t max_file_num_to_ignore = port::kMaxUint64;
+      uint64_t max_file_num_to_ignore = std::numeric_limits<uint64_t>::max();
       uint64_t next_file_number = versions_->current_next_file_number();
       final_output_level = max_overlapped_level;
       int output_level;
@@ -2015,7 +2016,7 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
     // be created and scheduled, status::OK() will be returned.
     s = SwitchMemtable(cfd, &context);
   }
-  const uint64_t flush_memtable_id = port::kMaxUint64;
+  const uint64_t flush_memtable_id = std::numeric_limits<uint64_t>::max();
   if (s.ok()) {
     if (cfd->imm()->NumNotFlushed() != 0 || !cfd->mem()->IsEmpty() ||
         !cached_recoverable_state_empty_.load()) {
@@ -118,10 +118,11 @@ Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
        cfd->ioptions()->compaction_style == kCompactionStyleFIFO)
           ? level
           : level + 1;
-  return RunManualCompaction(cfd, level, output_level, CompactRangeOptions(),
-                             begin, end, true, disallow_trivial_move,
-                             port::kMaxUint64 /*max_file_num_to_ignore*/,
-                             "" /*trim_ts*/);
+  return RunManualCompaction(
+      cfd, level, output_level, CompactRangeOptions(), begin, end, true,
+      disallow_trivial_move,
+      std::numeric_limits<uint64_t>::max() /*max_file_num_to_ignore*/,
+      "" /*trim_ts*/);
 }

 Status DBImpl::TEST_SwitchMemtable(ColumnFamilyData* cfd) {
@@ -761,7 +761,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
   assert(!cfds_to_flush.empty());
   assert(cfds_to_flush.size() == edit_lists.size());

-  uint64_t min_log_number_to_keep = port::kMaxUint64;
+  uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
   for (const auto& edit_list : edit_lists) {
     uint64_t log = 0;
     for (const auto& e : edit_list) {
@@ -773,7 +773,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
       min_log_number_to_keep = std::min(min_log_number_to_keep, log);
     }
   }
-  if (min_log_number_to_keep == port::kMaxUint64) {
+  if (min_log_number_to_keep == std::numeric_limits<uint64_t>::max()) {
     min_log_number_to_keep = cfds_to_flush[0]->GetLogNumber();
     for (size_t i = 1; i < cfds_to_flush.size(); i++) {
       min_log_number_to_keep =
@@ -247,15 +247,16 @@ Status DBImplSecondary::RecoverLogFiles(
           if (seq_of_batch <= seq) {
             continue;
           }
-          auto curr_log_num = port::kMaxUint64;
+          auto curr_log_num = std::numeric_limits<uint64_t>::max();
           if (cfd_to_current_log_.count(cfd) > 0) {
             curr_log_num = cfd_to_current_log_[cfd];
           }
           // If the active memtable contains records added by replaying an
           // earlier WAL, then we need to seal the memtable, add it to the
           // immutable memtable list and create a new active memtable.
-          if (!cfd->mem()->IsEmpty() && (curr_log_num == port::kMaxUint64 ||
-                                         curr_log_num != log_number)) {
+          if (!cfd->mem()->IsEmpty() &&
+              (curr_log_num == std::numeric_limits<uint64_t>::max() ||
+               curr_log_num != log_number)) {
             const MutableCFOptions mutable_cf_options =
                 *cfd->GetLatestMutableCFOptions();
             MemTable* new_mem =
@@ -35,10 +35,12 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
   Header(options.info_log, "DB SUMMARY\n");
   Header(options.info_log, "DB Session ID: %s\n", session_id.c_str());

+  Status s;
   // Get files in dbname dir
-  if (!env->GetChildren(dbname, &files).ok()) {
-    Error(options.info_log,
-          "Error when reading %s dir\n", dbname.c_str());
+  s = env->GetChildren(dbname, &files);
+  if (!s.ok()) {
+    Error(options.info_log, "Error when reading %s dir %s\n", dbname.c_str(),
+          s.ToString().c_str());
   }
   std::sort(files.begin(), files.end());
   for (const std::string& file : files) {
@@ -53,24 +55,27 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
         Header(options.info_log, "IDENTITY file: %s\n", file.c_str());
         break;
       case kDescriptorFile:
-        if (env->GetFileSize(dbname + "/" + file, &file_size).ok()) {
+        s = env->GetFileSize(dbname + "/" + file, &file_size);
+        if (s.ok()) {
          Header(options.info_log,
                 "MANIFEST file: %s size: %" PRIu64 " Bytes\n", file.c_str(),
                 file_size);
        } else {
-          Error(options.info_log, "Error when reading MANIFEST file: %s/%s\n",
-                dbname.c_str(), file.c_str());
+          Error(options.info_log,
+                "Error when reading MANIFEST file: %s/%s %s\n", dbname.c_str(),
+                file.c_str(), s.ToString().c_str());
        }
        break;
      case kWalFile:
-        if (env->GetFileSize(dbname + "/" + file, &file_size).ok()) {
+        s = env->GetFileSize(dbname + "/" + file, &file_size);
+        if (s.ok()) {
          wal_info.append(file)
              .append(" size: ")
              .append(std::to_string(file_size))
              .append(" ; ");
        } else {
-          Error(options.info_log, "Error when reading LOG file: %s/%s\n",
-                dbname.c_str(), file.c_str());
+          Error(options.info_log, "Error when reading LOG file: %s/%s %s\n",
+                dbname.c_str(), file.c_str(), s.ToString().c_str());
        }
        break;
      case kTableFile:
@@ -86,10 +91,10 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
   // Get sst files in db_path dir
   for (auto& db_path : options.db_paths) {
     if (dbname.compare(db_path.path) != 0) {
-      if (!env->GetChildren(db_path.path, &files).ok()) {
-        Error(options.info_log,
-              "Error when reading %s dir\n",
-              db_path.path.c_str());
+      s = env->GetChildren(db_path.path, &files);
+      if (!s.ok()) {
+        Error(options.info_log, "Error when reading %s dir %s\n",
+              db_path.path.c_str(), s.ToString().c_str());
         continue;
       }
       std::sort(files.begin(), files.end());
@@ -111,22 +116,25 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
   // Get wal file in wal_dir
   const auto& wal_dir = options.GetWalDir(dbname);
   if (!options.IsWalDirSameAsDBPath(dbname)) {
-    if (!env->GetChildren(wal_dir, &files).ok()) {
-      Error(options.info_log, "Error when reading %s dir\n", wal_dir.c_str());
+    s = env->GetChildren(wal_dir, &files);
+    if (!s.ok()) {
+      Error(options.info_log, "Error when reading %s dir %s\n", wal_dir.c_str(),
+            s.ToString().c_str());
       return;
     }
     wal_info.clear();
     for (const std::string& file : files) {
       if (ParseFileName(file, &number, &type)) {
         if (type == kWalFile) {
-          if (env->GetFileSize(wal_dir + "/" + file, &file_size).ok()) {
+          s = env->GetFileSize(wal_dir + "/" + file, &file_size);
+          if (s.ok()) {
             wal_info.append(file)
                 .append(" size: ")
                 .append(std::to_string(file_size))
                 .append(" ; ");
           } else {
-            Error(options.info_log, "Error when reading LOG file %s/%s\n",
-                  wal_dir.c_str(), file.c_str());
+            Error(options.info_log, "Error when reading LOG file %s/%s %s\n",
+                  wal_dir.c_str(), file.c_str(), s.ToString().c_str());
           }
         }
       }
@@ -79,7 +79,7 @@ class DbKvChecksumTest

   void CorruptNextByteCallBack(void* arg) {
     Slice encoded = *static_cast<Slice*>(arg);
-    if (entry_len_ == port::kMaxSizet) {
+    if (entry_len_ == std::numeric_limits<size_t>::max()) {
       // We learn the entry size on the first attempt
       entry_len_ = encoded.size();
     }
@@ -96,7 +96,7 @@ class DbKvChecksumTest
   WriteBatchOpType op_type_;
   char corrupt_byte_addend_;
   size_t corrupt_byte_offset_ = 0;
-  size_t entry_len_ = port::kMaxSizet;
+  size_t entry_len_ = std::numeric_limits<size_t>::max();
 };

 std::string GetTestNameSuffix(
@@ -97,7 +97,7 @@ class MockMemTableRepFactory : public MemTableRepFactory {

  private:
   MockMemTableRep* mock_rep_;
-  // workaround since there's no port::kMaxUint32 yet.
+  // workaround since there's no std::numeric_limits<uint32_t>::max() yet.
   uint32_t last_column_family_id_ = static_cast<uint32_t>(-1);
 };

@@ -500,7 +500,8 @@ TEST_F(DBRangeDelTest, ValidUniversalSubcompactionBoundaries) {
       1 /* input_level */, 2 /* output_level */, CompactRangeOptions(),
       nullptr /* begin */, nullptr /* end */, true /* exclusive */,
       true /* disallow_trivial_move */,
-      port::kMaxUint64 /* max_file_num_to_ignore */, "" /*trim_ts*/));
+      std::numeric_limits<uint64_t>::max() /* max_file_num_to_ignore */,
+      "" /*trim_ts*/));
 }
 #endif  // ROCKSDB_LITE

@@ -280,6 +280,58 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
   listener->VerifyMatchedCount(1);
 }

+// Test that producing an empty .sst file does not write it out to
+// disk, and that the DeleteFile() env method is not called for
+// removing the non-existing file later.
+TEST_F(DBSSTTest, DeleteFileNotCalledForNotCreatedSSTFile) {
+  Options options = CurrentOptions();
+  options.env = env_;
+
+  OnFileDeletionListener* listener = new OnFileDeletionListener();
+  options.listeners.emplace_back(listener);
+
+  Reopen(options);
+
+  // Flush the empty database.
+  ASSERT_OK(Flush());
+  ASSERT_EQ("", FilesPerLevel(0));
+
+  // We expect no .sst files.
+  std::vector<LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+  ASSERT_EQ(metadata.size(), 0U);
+
+  // We expect no file deletions.
+  listener->VerifyMatchedCount(0);
+}
+
+// Test that producing a non-empty .sst file does write it out to
+// disk, and that the DeleteFile() env method is not called for removing
+// the file later.
+TEST_F(DBSSTTest, DeleteFileNotCalledForCreatedSSTFile) {
+  Options options = CurrentOptions();
+  options.env = env_;
+
+  OnFileDeletionListener* listener = new OnFileDeletionListener();
+  options.listeners.emplace_back(listener);
+
+  Reopen(options);
+
+  ASSERT_OK(Put("pika", "choo"));
+
+  // Flush the non-empty database.
+  ASSERT_OK(Flush());
+  ASSERT_EQ("1", FilesPerLevel(0));
+
+  // We expect 1 .sst file.
+  std::vector<LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+  ASSERT_EQ(metadata.size(), 1U);
+
+  // We expect no file deletions.
+  listener->VerifyMatchedCount(0);
+}
+
 TEST_F(DBSSTTest, DBWithSstFileManager) {
   std::shared_ptr<SstFileManager> sst_file_manager(NewSstFileManager(env_));
   auto sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
@@ -1009,7 +1009,7 @@ TEST_F(DBWALTest, RecoveryWithLogDataForSomeCFs) {
       if (log_files.size() > 0) {
         earliest_log_nums[i] = log_files[0]->LogNumber();
       } else {
-        earliest_log_nums[i] = port::kMaxUint64;
+        earliest_log_nums[i] = std::numeric_limits<uint64_t>::max();
       }
     }
     // Check at least the first WAL was cleaned up during the recovery.
@@ -90,7 +90,8 @@ inline bool IsExtendedValueType(ValueType t) {
 // can be packed together into 64-bits.
 static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);

-static const SequenceNumber kDisableGlobalSequenceNumber = port::kMaxUint64;
+static const SequenceNumber kDisableGlobalSequenceNumber =
+    std::numeric_limits<uint64_t>::max();

 constexpr uint64_t kNumInternalBytes = 8;

@@ -2405,7 +2405,7 @@ TEST_P(ExternalSSTBlockChecksumTest, DISABLED_HugeBlockChecksum) {
   SstFileWriter sst_file_writer(EnvOptions(), options);

   // 2^32 - 1, will lead to data block with more than 2^32 bytes
-  size_t huge_size = port::kMaxUint32;
+  size_t huge_size = std::numeric_limits<uint32_t>::max();

   std::string f = sst_files_dir_ + "f.sst";
   ASSERT_OK(sst_file_writer.Open(f));
@@ -58,10 +58,7 @@ class FileIndexer {
   void UpdateIndex(Arena* arena, const size_t num_levels,
                    std::vector<FileMetaData*>* const files);

-  enum {
-    // MSVC version 1800 still does not have constexpr for ::max()
-    kLevelMaxIndex = ROCKSDB_NAMESPACE::port::kMaxInt32
-  };
+  enum { kLevelMaxIndex = std::numeric_limits<int32_t>::max() };

  private:
   size_t num_levels_;
@@ -164,12 +164,12 @@ TEST_F(FlushJobTest, Empty) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, {},
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, nullptr, &event_logger, false,
-      true /* sync_output_directory */, true /* write_manifest */,
-      Env::Priority::USER, nullptr /*IOTracer*/);
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      nullptr, &event_logger, false, true /* sync_output_directory */,
+      true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/);
   {
     InstrumentedMutexLock l(&mutex_);
     flush_job.PickMemTable();
@@ -248,11 +248,12 @@ TEST_F(FlushJobTest, NonEmpty) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, {},
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
-      true, true /* sync_output_directory */, true /* write_manifest */,
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      db_options_.statistics.get(), &event_logger, true,
+      true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/);

   HistogramData hist;
@@ -509,11 +510,12 @@ TEST_F(FlushJobTest, Snapshots) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, snapshots,
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
-      true, true /* sync_output_directory */, true /* write_manifest */,
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      db_options_.statistics.get(), &event_logger, true,
+      true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/);
   mutex_.Lock();
   flush_job.PickMemTable();
@@ -577,9 +579,9 @@ TEST_F(FlushJobTimestampTest, AllKeysExpired) {
   PutFixed64(&full_history_ts_low, std::numeric_limits<uint64_t>::max());
   FlushJob flush_job(
       dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
-      port::kMaxUint64 /* memtable_id */, env_options_, versions_.get(),
-      &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker,
-      &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
       db_options_.statistics.get(), &event_logger, true,
       true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/, /*db_id=*/"",
@@ -628,9 +630,9 @@ TEST_F(FlushJobTimestampTest, NoKeyExpired) {
   PutFixed64(&full_history_ts_low, 0);
   FlushJob flush_job(
       dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
-      port::kMaxUint64 /* memtable_id */, env_options_, versions_.get(),
-      &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker,
-      &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
       db_options_.statistics.get(), &event_logger, true,
       true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/, /*db_id=*/"",
@@ -140,8 +140,8 @@ size_t MemTable::ApproximateMemoryUsage() {
   for (size_t usage : usages) {
     // If usage + total_usage >= kMaxSizet, return kMaxSizet.
     // the following variation is to avoid numeric overflow.
-    if (usage >= port::kMaxSizet - total_usage) {
-      return port::kMaxSizet;
+    if (usage >= std::numeric_limits<size_t>::max() - total_usage) {
+      return std::numeric_limits<size_t>::max();
     }
     total_usage += usage;
   }
@@ -209,7 +209,8 @@ TEST_F(MemTableListTest, Empty) {
   ASSERT_FALSE(list.IsFlushPending());

   autovector<MemTable*> mems;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &mems);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &mems);
   ASSERT_EQ(0, mems.size());

   autovector<MemTable*> to_delete;
@@ -418,7 +419,8 @@ TEST_F(MemTableListTest, GetFromHistoryTest) {
   // Flush this memtable from the list.
   // (It will then be a part of the memtable history).
   autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(1, to_flush.size());

   MutableCFOptions mutable_cf_options(options);
@@ -472,7 +474,8 @@ TEST_F(MemTableListTest, GetFromHistoryTest) {
   ASSERT_EQ(0, to_delete.size());

   to_flush.clear();
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(1, to_flush.size());

   // Flush second memtable
@@ -593,7 +596,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_FALSE(list.IsFlushPending());
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
   autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(0, to_flush.size());

   // Request a flush even though there is nothing to flush
@@ -602,7 +606,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));

   // Attempt to 'flush' to clear request for flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(0, to_flush.size());
   ASSERT_FALSE(list.IsFlushPending());
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
@@ -626,7 +631,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));

   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(2, to_flush.size());
   ASSERT_EQ(2, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -647,7 +653,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_EQ(0, to_delete.size());

   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(3, to_flush.size());
   ASSERT_EQ(3, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -655,7 +662,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {

   // Pick tables to flush again
   autovector<MemTable*> to_flush2;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush2);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush2);
   ASSERT_EQ(0, to_flush2.size());
   ASSERT_EQ(3, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -673,7 +681,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));

   // Pick tables to flush again
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush2);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush2);
   ASSERT_EQ(1, to_flush2.size());
   ASSERT_EQ(4, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -694,7 +703,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_EQ(0, to_delete.size());

   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   // Should pick 4 of 5 since 1 table has been picked in to_flush2
   ASSERT_EQ(4, to_flush.size());
   ASSERT_EQ(5, list.NumNotFlushed());
@@ -703,7 +713,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {

   // Pick tables to flush again
   autovector<MemTable*> to_flush3;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush3);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush3);
   ASSERT_EQ(0, to_flush3.size());  // nothing not in progress of being flushed
   ASSERT_EQ(5, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -872,8 +883,9 @@ TEST_F(MemTableListTest, AtomicFlusTest) {
     auto* list = lists[i];
     ASSERT_FALSE(list->IsFlushPending());
     ASSERT_FALSE(list->imm_flush_needed.load(std::memory_order_acquire));
-    list->PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */,
-                               &flush_candidates[i]);
+    list->PickMemtablesToFlush(
+        std::numeric_limits<uint64_t>::max() /* memtable_id */,
+        &flush_candidates[i]);
     ASSERT_EQ(0, flush_candidates[i].size());
   }
   // Request flush even though there is nothing to flush
@@ -1144,7 +1144,7 @@ class VersionBuilder::Rep {

     size_t table_cache_capacity = table_cache_->get_cache()->GetCapacity();
     bool always_load = (table_cache_capacity == TableCache::kInfiniteCapacity);
-    size_t max_load = port::kMaxSizet;
+    size_t max_load = std::numeric_limits<size_t>::max();

     if (!always_load) {
       // If it is initial loading and not set to always loading all the
@@ -1517,7 +1517,7 @@ uint64_t Version::GetSstFilesSize() {
 }

 void Version::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
-  uint64_t oldest_time = port::kMaxUint64;
+  uint64_t oldest_time = std::numeric_limits<uint64_t>::max();
   for (int level = 0; level < storage_info_.num_non_empty_levels_; level++) {
     for (FileMetaData* meta : storage_info_.LevelFiles(level)) {
       assert(meta->fd.table_reader != nullptr);
@@ -1213,7 +1213,7 @@ class VersionSet {
   // new_log_number_for_empty_cf.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       uint64_t new_log_number_for_empty_cf) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       // It's safe to ignore dropped column families here:
       // cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
@@ -1229,7 +1229,7 @@ class VersionSet {
   // file, except data from `cfd_to_skip`.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       const ColumnFamilyData* cfd_to_skip) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       if (cfd == cfd_to_skip) {
         continue;
@@ -1246,7 +1246,7 @@ class VersionSet {
   // file, except data from `cfds_to_skip`.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       const std::unordered_set<const ColumnFamilyData*>& cfds_to_skip) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       if (cfds_to_skip.count(cfd)) {
         continue;
@@ -44,7 +44,8 @@ class WalMetadata {
  private:
   // The size of WAL is unknown, used when the WAL is not synced yet or is
   // empty.
-  constexpr static uint64_t kUnknownWalSize = port::kMaxUint64;
+  constexpr static uint64_t kUnknownWalSize =
+      std::numeric_limits<uint64_t>::max();

   // Size of the most recently synced WAL in bytes.
   uint64_t synced_size_bytes_ = kUnknownWalSize;
@@ -745,10 +745,10 @@ Status CheckColumnFamilyTimestampSize(ColumnFamilyHandle* column_family,

 Status WriteBatchInternal::Put(WriteBatch* b, uint32_t column_family_id,
                                const Slice& key, const Slice& value) {
-  if (key.size() > size_t{port::kMaxUint32}) {
+  if (key.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
-  if (value.size() > size_t{port::kMaxUint32}) {
+  if (value.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }

@@ -825,7 +825,7 @@ Status WriteBatchInternal::CheckSlicePartsLength(const SliceParts& key,
   for (int i = 0; i < key.num_parts; ++i) {
     total_key_bytes += key.parts[i].size();
   }
-  if (total_key_bytes >= size_t{port::kMaxUint32}) {
+  if (total_key_bytes >= size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }

@@ -833,7 +833,7 @@ Status WriteBatchInternal::CheckSlicePartsLength(const SliceParts& key,
   for (int i = 0; i < value.num_parts; ++i) {
     total_value_bytes += value.parts[i].size();
   }
-  if (total_value_bytes >= size_t{port::kMaxUint32}) {
+  if (total_value_bytes >= size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }
   return Status::OK();
@@ -1292,10 +1292,10 @@ Status WriteBatch::DeleteRange(ColumnFamilyHandle* column_family,

 Status WriteBatchInternal::Merge(WriteBatch* b, uint32_t column_family_id,
                                  const Slice& key, const Slice& value) {
-  if (key.size() > size_t{port::kMaxUint32}) {
+  if (key.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
-  if (value.size() > size_t{port::kMaxUint32}) {
+  if (value.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }

@@ -167,6 +167,8 @@ DECLARE_bool(mock_direct_io);
 DECLARE_bool(statistics);
 DECLARE_bool(sync);
 DECLARE_bool(use_fsync);
+DECLARE_uint64(bytes_per_sync);
+DECLARE_uint64(wal_bytes_per_sync);
 DECLARE_int32(kill_random_test);
 DECLARE_string(kill_exclude_prefixes);
 DECLARE_bool(disable_wal);
@@ -51,8 +51,7 @@ class DbStressCompactionFilter : public CompactionFilter {
     key_mutex->Unlock();

     if (!key_exists) {
-      return allow_overwrite ? Decision::kRemove
-                             : Decision::kRemoveWithSingleDelete;
+      return allow_overwrite ? Decision::kRemove : Decision::kPurge;
     }
     return Decision::kKeep;
   }
@@ -522,6 +522,15 @@ DEFINE_bool(sync, false, "Sync all writes to disk");

 DEFINE_bool(use_fsync, false, "If true, issue fsync instead of fdatasync");

+DEFINE_uint64(bytes_per_sync, ROCKSDB_NAMESPACE::Options().bytes_per_sync,
+              "If nonzero, sync SST file data incrementally after every "
+              "`bytes_per_sync` bytes are written");
+
+DEFINE_uint64(wal_bytes_per_sync,
+              ROCKSDB_NAMESPACE::Options().wal_bytes_per_sync,
+              "If nonzero, sync WAL file data incrementally after every "
+              "`bytes_per_sync` bytes are written");
+
 DEFINE_int32(kill_random_test, 0,
              "If non-zero, kill at various points in source code with "
              "probability 1/this");
@@ -2030,11 +2030,11 @@ void StressTest::TestAcquireSnapshot(ThreadState* thread,
   if (FLAGS_long_running_snapshots) {
     // Hold 10% of snapshots for 10x more
     if (thread->rand.OneIn(10)) {
-      assert(hold_for < port::kMaxInt64 / 10);
+      assert(hold_for < std::numeric_limits<uint64_t>::max() / 10);
       hold_for *= 10;
       // Hold 1% of snapshots for 100x more
       if (thread->rand.OneIn(10)) {
-        assert(hold_for < port::kMaxInt64 / 10);
+        assert(hold_for < std::numeric_limits<uint64_t>::max() / 10);
         hold_for *= 10;
       }
     }
@@ -2066,8 +2066,9 @@ void StressTest::TestCompactRange(ThreadState* thread, int64_t rand_key,
                                   const Slice& start_key,
                                   ColumnFamilyHandle* column_family) {
   int64_t end_key_num;
-  if (port::kMaxInt64 - rand_key < FLAGS_compact_range_width) {
-    end_key_num = port::kMaxInt64;
+  if (std::numeric_limits<int64_t>::max() - rand_key <
+      FLAGS_compact_range_width) {
+    end_key_num = std::numeric_limits<int64_t>::max();
   } else {
     end_key_num = FLAGS_compact_range_width + rand_key;
   }
@@ -71,7 +71,7 @@ class FilePrefetchBuffer {
         readahead_size_(readahead_size),
         initial_auto_readahead_size_(readahead_size),
         max_readahead_size_(max_readahead_size),
-        min_offset_read_(port::kMaxSizet),
+        min_offset_read_(std::numeric_limits<size_t>::max()),
         enable_(enable),
         track_min_offset_(track_min_offset),
         implicit_auto_readahead_(implicit_auto_readahead),
@@ -39,11 +39,11 @@ class CompactionFilter : public Customizable {
   enum class Decision {
     kKeep,
     kRemove,
-    kRemoveWithSingleDelete,
     kChangeValue,
     kRemoveAndSkipUntil,
     kChangeBlobIndex,  // used internally by BlobDB.
     kIOError,          // used internally by BlobDB.
+    kPurge,            // used for keys that can only be SingleDelete'ed
     kUndetermined,
   };

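For context, a filter that opts into the renamed decision might look like the sketch below. The class name and the `"b"` key prefix are illustrative (loosely mirroring the tests touched in this change), while `FilterV2` and `Decision::kPurge` come from the API shown above.

```cpp
#include <string>
#include "rocksdb/compaction_filter.h"

using ROCKSDB_NAMESPACE::CompactionFilter;
using ROCKSDB_NAMESPACE::Slice;

// Sketch: kPurge asks the compaction iterator to convert the entry into a
// SingleDelete (instead of a Delete), for keys that are written at most once.
class PurgeExampleFilter : public CompactionFilter {
 public:
  Decision FilterV2(int /*level*/, const Slice& key, ValueType /*value_type*/,
                    const Slice& /*existing_value*/,
                    std::string* /*new_value*/,
                    std::string* /*skip_until*/) const override {
    // Purge keys with the "b" prefix; keep everything else.
    return key.starts_with("b") ? Decision::kPurge : Decision::kKeep;
  }
  const char* Name() const override { return "PurgeExampleFilter"; }
};
```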
@@ -288,6 +288,9 @@ class LDBCommand {
   bool IsValueHex(const std::map<std::string, std::string>& options,
                   const std::vector<std::string>& flags);

+  bool IsTryLoadOptions(const std::map<std::string, std::string>& options,
+                        const std::vector<std::string>& flags);
+
   /**
    * Converts val to a boolean.
    * val must be either true or false (case insensitive).
@@ -26,7 +26,8 @@ HistogramBucketMapper::HistogramBucketMapper() {
   // size of array buckets_ in HistogramImpl
   bucketValues_ = {1, 2};
   double bucket_val = static_cast<double>(bucketValues_.back());
-  while ((bucket_val = 1.5 * bucket_val) <= static_cast<double>(port::kMaxUint64)) {
+  while ((bucket_val = 1.5 * bucket_val) <=
+         static_cast<double>(std::numeric_limits<uint64_t>::max())) {
     bucketValues_.push_back(static_cast<uint64_t>(bucket_val));
     // Extracts two most significant digits to make histogram buckets more
     // human-readable. E.g., 172 becomes 170.
@@ -98,13 +98,13 @@ std::pair<uint64_t, std::string> parseKey(const Slice& key,
   std::string::size_type pos = key_str.find("#");
   // TODO(Zhongyi): add counters to track parse failures?
   if (pos == std::string::npos) {
-    result.first = port::kMaxUint64;
+    result.first = std::numeric_limits<uint64_t>::max();
     result.second.clear();
   } else {
     uint64_t parsed_time = ParseUint64(key_str.substr(0, pos));
     // skip entries with timestamp smaller than start_time
     if (parsed_time < start_time) {
-      result.first = port::kMaxUint64;
+      result.first = std::numeric_limits<uint64_t>::max();
       result.second = "";
     } else {
       result.first = parsed_time;
@@ -886,7 +886,7 @@ uint64_t MultiplyCheckOverflow(uint64_t op1, double op2) {
   if (op1 == 0 || op2 <= 0) {
     return 0;
   }
-  if (port::kMaxUint64 / op1 < op2) {
+  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
     return op1;
   }
   return static_cast<uint64_t>(op1 * op2);
@@ -915,8 +915,9 @@ size_t MaxFileSizeForL0MetaPin(const MutableCFOptions& cf_options) {
   // or a former larger `write_buffer_size` value to avoid surprising users with
   // pinned memory usage. We use a factor of 1.5 to account for overhead
   // introduced during flush in most cases.
-  if (port::kMaxSizet / 3 < cf_options.write_buffer_size / 2) {
-    return port::kMaxSizet;
+  if (std::numeric_limits<size_t>::max() / 3 <
+      cf_options.write_buffer_size / 2) {
+    return std::numeric_limits<size_t>::max();
   }
   return cf_options.write_buffer_size / 2 * 3;
 }
@@ -4082,9 +4082,10 @@ TEST_F(OptionsParserTest, IntegerParsing) {
   ASSERT_EQ(ParseUint32("4294967295"), 4294967295U);
   ASSERT_EQ(ParseSizeT("18446744073709551615"), 18446744073709551615U);
   ASSERT_EQ(ParseInt64("9223372036854775807"), 9223372036854775807);
-  ASSERT_EQ(ParseInt64("-9223372036854775808"), port::kMinInt64);
+  ASSERT_EQ(ParseInt64("-9223372036854775808"),
+            std::numeric_limits<int64_t>::min());
   ASSERT_EQ(ParseInt32("2147483647"), 2147483647);
-  ASSERT_EQ(ParseInt32("-2147483648"), port::kMinInt32);
+  ASSERT_EQ(ParseInt32("-2147483648"), std::numeric_limits<int32_t>::min());
   ASSERT_EQ(ParseInt("-32767"), -32767);
   ASSERT_EQ(ParseDouble("-1.234567"), -1.234567);
 }
@@ -95,16 +95,6 @@ namespace ROCKSDB_NAMESPACE {
 extern const bool kDefaultToAdaptiveMutex;

 namespace port {
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = std::numeric_limits<uint32_t>::max();
-const int kMaxInt32 = std::numeric_limits<int32_t>::max();
-const int kMinInt32 = std::numeric_limits<int32_t>::min();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-const int64_t kMinInt64 = std::numeric_limits<int64_t>::min();
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
 constexpr bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
 #undef PLATFORM_IS_LITTLE_ENDIAN

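With the port:: limit constants removed, call sites include <limits> and spell the bound directly. A minimal before/after sketch of the pattern applied throughout this merge:

```cpp
#include <cstdint>
#include <limits>

// Before: uint64_t sentinel = port::kMaxUint64;
// After: the standard library equivalent, with no port:: indirection.
uint64_t sentinel = std::numeric_limits<uint64_t>::max();
size_t max_size = std::numeric_limits<size_t>::max();
```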
@@ -82,37 +82,11 @@ namespace port {
 #define snprintf _snprintf

 #define ROCKSDB_NOEXCEPT
-// std::numeric_limits<size_t>::max() is not constexpr just yet
-// therefore, use the same limits
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = UINT32_MAX;
-const int kMaxInt32 = INT32_MAX;
-const int kMinInt32 = INT32_MIN;
-const int64_t kMaxInt64 = INT64_MAX;
-const int64_t kMinInt64 = INT64_MIN;
-const uint64_t kMaxUint64 = UINT64_MAX;
-
-#ifdef _WIN64
-const size_t kMaxSizet = UINT64_MAX;
-#else
-const size_t kMaxSizet = UINT_MAX;
-#endif
-
 #else  // VS >= 2015 or MinGW

 #define ROCKSDB_NOEXCEPT noexcept

-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = std::numeric_limits<uint32_t>::max();
-const int kMaxInt32 = std::numeric_limits<int>::max();
-const int kMinInt32 = std::numeric_limits<int>::min();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-const int64_t kMinInt64 = std::numeric_limits<int64_t>::min();
-
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
 #endif  //_MSC_VER

 // "Windows is designed to run on little-endian computer architectures."
rocksdb.pc.in (new file, 11 lines)
@@ -0,0 +1,11 @@
+prefix="@CMAKE_INSTALL_PREFIX@"
+exec_prefix="${prefix}"
+libdir="${prefix}/lib"
+includedir="${prefix}/include"
+
+Name: @PROJECT_NAME@
+Description: @CMAKE_PROJECT_DESCRIPTION@
+URL: @CMAKE_PROJECT_HOMEPAGE_URL@
+Version: @PROJECT_VERSION@
+Cflags: -I"${includedir}"
+Libs: -L"${libdir}" -lrocksdb
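Once installed, a downstream build should be able to locate the library with pkg-config, e.g. `pkg-config --cflags --libs rocksdb` (assuming the chosen install prefix is on `PKG_CONFIG_PATH`), which expands to the Cflags and Libs lines above.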
@@ -721,7 +721,7 @@ void BlockIter<TValue>::FindKeyAfterBinarySeek(const Slice& target,
   } else {
     // We are in the last restart interval. The while-loop will terminate by
     // `Valid()` returning false upon advancing past the block's last key.
-    max_offset = port::kMaxUint32;
+    max_offset = std::numeric_limits<uint32_t>::max();
   }
   while (true) {
     NextImpl();
@@ -658,7 +658,7 @@ Status BlockBasedTableFactory::ValidateOptions(
     return Status::InvalidArgument(
         "Block alignment requested but block size is not a power of 2");
   }
-  if (table_options_.block_size > port::kMaxUint32) {
+  if (table_options_.block_size > std::numeric_limits<uint32_t>::max()) {
     return Status::InvalidArgument(
         "block size exceeds maximum number (4GiB) allowed");
   }
@@ -85,7 +85,7 @@ class CuckooTableBuilder: public TableBuilder {
     // We assume number of items is <= 2^32.
     uint32_t make_space_for_key_call_id;
   };
-  static const uint32_t kMaxVectorIdx = port::kMaxInt32;
+  static const uint32_t kMaxVectorIdx = std::numeric_limits<int32_t>::max();

   bool MakeSpaceForKey(const autovector<uint64_t>& hash_vals,
                        const uint32_t call_id,
@@ -53,8 +53,8 @@ Slice MetaIndexBuilder::Finish() {
 // object, so there's no need for restart points. Thus we set the restart
 // interval to infinity to save space.
 PropertyBlockBuilder::PropertyBlockBuilder()
-    : properties_block_(
-          new BlockBuilder(port::kMaxInt32 /* restart interval */)) {}
+    : properties_block_(new BlockBuilder(
+          std::numeric_limits<int32_t>::max() /* restart interval */)) {}

 void PropertyBlockBuilder::Add(const std::string& name,
                                const std::string& val) {
@@ -17,7 +17,7 @@
 namespace ROCKSDB_NAMESPACE {

 const uint32_t TablePropertiesCollectorFactory::Context::kUnknownColumnFamily =
-    port::kMaxInt32;
+    std::numeric_limits<int32_t>::max();

 namespace {
   void AppendProperty(
@@ -412,7 +412,7 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
   }
   std::map<uint64_t, std::map<std::string, std::map<uint64_t, double>>>
       cs_name_timeline;
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   const std::map<uint64_t, uint64_t>& trace_num_misses =
       adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
@@ -427,7 +427,8 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
     auto it = trace_num_accesses.find(time);
     assert(it != trace_num_accesses.end());
    uint64_t access = it->second;
-    cs_name_timeline[port::kMaxUint64]["trace"][time] = percent(miss, access);
+    cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
+        percent(miss, access);
   }
   for (auto const& config_caches : cache_simulator_->sim_caches()) {
     const CacheConfiguration& config = config_caches.first;
@ -492,7 +493,7 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
|
||||
}
|
||||
std::map<uint64_t, std::map<std::string, std::map<uint64_t, uint64_t>>>
|
||||
cs_name_timeline;
|
||||
uint64_t start_time = port::kMaxUint64;
|
||||
uint64_t start_time = std::numeric_limits<uint64_t>::max();
|
||||
uint64_t end_time = 0;
|
||||
const std::map<uint64_t, uint64_t>& trace_num_misses =
|
||||
adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
|
||||
@ -501,7 +502,8 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
|
||||
start_time = std::min(start_time, time);
|
||||
end_time = std::max(end_time, time);
|
||||
uint64_t miss = num_miss.second;
|
||||
cs_name_timeline[port::kMaxUint64]["trace"][time] = miss;
|
||||
cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
|
||||
miss;
|
||||
}
|
||||
for (auto const& config_caches : cache_simulator_->sim_caches()) {
|
||||
const CacheConfiguration& config = config_caches.first;
|
||||
@ -589,7 +591,7 @@ void BlockCacheTraceAnalyzer::WriteSkewness(
|
||||
for (auto const& percent : percent_buckets) {
|
||||
label_bucket_naccesses[label_str][percent] = 0;
|
||||
size_t end_index = 0;
|
||||
if (percent == port::kMaxUint64) {
|
||||
if (percent == std::numeric_limits<uint64_t>::max()) {
|
||||
end_index = label_naccesses.size();
|
||||
} else {
|
||||
end_index = percent * label_naccesses.size() / 100;
|
||||
@ -856,7 +858,7 @@ void BlockCacheTraceAnalyzer::WriteAccessTimeline(const std::string& label_str,
|
||||
uint64_t time_unit,
|
||||
bool user_access_only) const {
|
||||
std::set<std::string> labels = ParseLabelStr(label_str);
|
||||
uint64_t start_time = port::kMaxUint64;
|
||||
uint64_t start_time = std::numeric_limits<uint64_t>::max();
|
||||
uint64_t end_time = 0;
|
||||
std::map<std::string, std::map<uint64_t, uint64_t>> label_access_timeline;
|
||||
std::map<uint64_t, std::vector<std::string>> access_count_block_id_map;
|
||||
@ -1091,7 +1093,7 @@ void BlockCacheTraceAnalyzer::WriteReuseInterval(
|
||||
kMicrosInSecond) /
|
||||
block.num_accesses;
|
||||
} else {
|
||||
avg_reuse_interval = port::kMaxUint64 - 1;
|
||||
avg_reuse_interval = std::numeric_limits<uint64_t>::max() - 1;
|
||||
}
|
||||
if (labels.find(kGroupbyCaller) != labels.end()) {
|
||||
for (auto const& timeline : block.caller_num_accesses_timeline) {
|
||||
@ -1152,7 +1154,7 @@ void BlockCacheTraceAnalyzer::WriteReuseLifetime(
|
||||
lifetime =
|
||||
(block.last_access_time - block.first_access_time) / kMicrosInSecond;
|
||||
} else {
|
||||
lifetime = port::kMaxUint64 - 1;
|
||||
lifetime = std::numeric_limits<uint64_t>::max() - 1;
|
||||
}
|
||||
const std::string label = BuildLabel(
|
||||
labels, cf_name, fd, level, type,
|
||||
@ -2103,7 +2105,7 @@ std::vector<uint64_t> parse_buckets(const std::string& bucket_str) {
|
||||
getline(ss, bucket, ',');
|
||||
buckets.push_back(ParseUint64(bucket));
|
||||
}
|
||||
buckets.push_back(port::kMaxUint64);
|
||||
buckets.push_back(std::numeric_limits<uint64_t>::max());
|
||||
return buckets;
|
||||
}
|
||||
|
||||
|
@ -277,7 +277,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
|
||||
ASSERT_OK(env_->DeleteFile(mrc_path));
|
||||
|
||||
const std::vector<std::string> time_units{"1", "60", "3600"};
|
||||
expected_capacities.push_back(port::kMaxUint64);
|
||||
expected_capacities.push_back(std::numeric_limits<uint64_t>::max());
|
||||
for (auto const& expected_capacity : expected_capacities) {
|
||||
for (auto const& time_unit : time_units) {
|
||||
const std::string miss_ratio_timeline_path =
|
||||
@ -293,7 +293,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
|
||||
std::string substr;
|
||||
getline(ss, substr, ',');
|
||||
if (!read_header) {
|
||||
if (expected_capacity == port::kMaxUint64) {
|
||||
if (expected_capacity == std::numeric_limits<uint64_t>::max()) {
|
||||
ASSERT_EQ("trace", substr);
|
||||
} else {
|
||||
ASSERT_EQ("lru-1-0", substr);
|
||||
@ -321,7 +321,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
|
||||
std::string substr;
|
||||
getline(ss, substr, ',');
|
||||
if (num_misses == 0) {
|
||||
if (expected_capacity == port::kMaxUint64) {
|
||||
if (expected_capacity == std::numeric_limits<uint64_t>::max()) {
|
||||
ASSERT_EQ("trace", substr);
|
||||
} else {
|
||||
ASSERT_EQ("lru-1-0", substr);
|
||||
|
@ -2265,25 +2265,23 @@ class Stats {
    if (done_ < 1) done_ = 1;

    std::string extra;
    double elapsed = (finish_ - start_) * 1e-6;
    if (bytes_ > 0) {
      // Rate is computed on actual elapsed time, not the sum of per-thread
      // elapsed times.
      double elapsed = (finish_ - start_) * 1e-6;
      char rate[100];
      snprintf(rate, sizeof(rate), "%6.1f MB/s",
               (bytes_ / 1048576.0) / elapsed);
      extra = rate;
    }
    AppendWithSpace(&extra, message_);
    double elapsed = (finish_ - start_) * 1e-6;
    double throughput = (double)done_ / elapsed;

    fprintf(stdout, "%-12s : %11.3f micros/op %ld ops/sec;%s%s\n",
            name.ToString().c_str(),
            seconds_ * 1e6 / done_,
            (long)throughput,
            (extra.empty() ? "" : " "),
            extra.c_str());
    fprintf(stdout,
            "%-12s : %11.3f micros/op %ld ops/sec %.3f seconds %" PRIu64
            " operations;%s%s\n",
            name.ToString().c_str(), seconds_ * 1e6 / done_, (long)throughput,
            elapsed, done_, (extra.empty() ? "" : " "), extra.c_str());
    if (FLAGS_histogram) {
      for (auto it = hist_.begin(); it != hist_.end(); ++it) {
        fprintf(stdout, "Microseconds per %s:\n%s\n",
@ -8075,7 +8073,8 @@ class Benchmark {
    }

    std::unique_ptr<StatsHistoryIterator> shi;
    Status s = db->GetStatsHistory(0, port::kMaxUint64, &shi);
    Status s =
        db->GetStatsHistory(0, std::numeric_limits<uint64_t>::max(), &shi);
    if (!s.ok()) {
      fprintf(stdout, "%s\n", s.ToString().c_str());
      return;
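For context, the call above requests the entire stats-history window, from 0 to the maximum uint64_t. A minimal sketch of consuming the returned iterator (method names per the public StatsHistoryIterator interface; error handling elided):

    std::unique_ptr<StatsHistoryIterator> shi;
    Status s =
        db->GetStatsHistory(0, std::numeric_limits<uint64_t>::max(), &shi);
    for (; s.ok() && shi->Valid(); shi->Next()) {
      uint64_t bucket_time = shi->GetStatsTime();  // snapshot time
      const auto& stats = shi->GetStatsMap();      // stat name -> value
      // ... inspect or aggregate the snapshot ...
    }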
@ -132,6 +132,8 @@ default_params = {
    # Sync mode might make test runs slower so running it in a smaller chance
    "sync" : lambda : random.choice(
        [1 if t == 0 else 0 for t in range(0, 20)]),
    "bytes_per_sync": lambda: random.choice([0, 262144]),
    "wal_bytes_per_sync": lambda: random.choice([0, 524288]),
    # Disable compaction_readahead_size because the test is not passing.
    #"compaction_readahead_size" : lambda : random.choice(
    #    [0, 0, 1024 * 1024]),
@ -153,7 +155,7 @@ default_params = {
    "open_metadata_write_fault_one_in": lambda: random.choice([0, 0, 8]),
    "open_write_fault_one_in": lambda: random.choice([0, 0, 16]),
    "open_read_fault_one_in": lambda: random.choice([0, 0, 32]),
    "sync_fault_injection": False,
    "sync_fault_injection": lambda: random.randint(0, 1),
    "get_property_one_in": 1000000,
    "paranoid_file_checks": lambda: random.choice([0, 1, 1, 1]),
    "max_write_buffer_size_to_maintain": lambda: random.choice(
@ -408,7 +408,7 @@ LDBCommand::LDBCommand(const std::map<std::string, std::string>& options,
  is_value_hex_ = IsValueHex(options, flags);
  is_db_ttl_ = IsFlagPresent(flags, ARG_TTL);
  timestamp_ = IsFlagPresent(flags, ARG_TIMESTAMP);
  try_load_options_ = IsFlagPresent(flags, ARG_TRY_LOAD_OPTIONS);
  try_load_options_ = IsTryLoadOptions(options, flags);
  force_consistency_checks_ =
      !IsFlagPresent(flags, ARG_DISABLE_CONSISTENCY_CHECKS);
  enable_blob_files_ = IsFlagPresent(flags, ARG_ENABLE_BLOB_FILES);
@ -1064,6 +1064,24 @@ bool LDBCommand::IsValueHex(const std::map<std::string, std::string>& options,
                      ParseBooleanOption(options, ARG_VALUE_HEX, false));
}

bool LDBCommand::IsTryLoadOptions(
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags) {
  if (IsFlagPresent(flags, ARG_TRY_LOAD_OPTIONS)) {
    return true;
  }
  // If `DB` is specified and the command is not explicitly creating a new DB,
  // default `try_load_options` to true. The user can still disable that with
  // `try_load_options=false`.
  // Note: opening as a TTL DB doesn't support `try_load_options`, so it
  // defaults to false. TODO: TTL DB may need to fix that; otherwise it cannot
  // open a DB whose settings are incompatible with the default options.
  bool default_val = (options.find(ARG_DB) != options.end()) &&
                     !IsFlagPresent(flags, ARG_CREATE_IF_MISSING) &&
                     !IsFlagPresent(flags, ARG_TTL);
  return ParseBooleanOption(options, ARG_TRY_LOAD_OPTIONS, default_val);
}

bool LDBCommand::ParseBooleanOption(
    const std::map<std::string, std::string>& options,
    const std::string& option, bool default_val) {
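On the command line, the new default plays out roughly as follows (paths hypothetical):

    # an OPTIONS file under /path/to/db is now loaded by default
    ldb --db=/path/to/db get somekey

    # opt out of loading the options file explicitly
    ldb --db=/path/to/db --try_load_options=false get somekey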
@ -50,7 +50,10 @@ void LDBCommandRunner::PrintHelp(const LDBOptions& ldb_options,
          " with 'put','get','scan','dump','query','batchput'"
          " : DB supports ttl and value is internally timestamp-suffixed\n");
  ret.append(" --" + LDBCommand::ARG_TRY_LOAD_OPTIONS +
             " : Try to load option file from DB.\n");
             " : Try to load option file from DB. Defaults to true if " +
             LDBCommand::ARG_DB +
             " is specified, a new DB is not being created, and the DB is "
             "not opened as a TTL DB. Can be set to false explicitly.\n");
  ret.append(" --" + LDBCommand::ARG_DISABLE_CONSISTENCY_CHECKS +
             " : Set options.force_consistency_checks = false.\n");
  ret.append(" --" + LDBCommand::ARG_IGNORE_UNKNOWN_OPTIONS +
@ -282,7 +282,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
    } else if (ParseIntArg(argv[i], "--compression_max_dict_bytes=",
                           "compression_max_dict_bytes must be numeric",
                           &tmp_val)) {
      if (tmp_val < 0 || tmp_val > port::kMaxUint32) {
      if (tmp_val < 0 || tmp_val > std::numeric_limits<uint32_t>::max()) {
        fprintf(stderr, "compression_max_dict_bytes must be a uint32_t: '%s'\n",
                argv[i]);
        print_help(/*to_stderr*/ true);
@ -292,7 +292,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
    } else if (ParseIntArg(argv[i], "--compression_zstd_max_train_bytes=",
                           "compression_zstd_max_train_bytes must be numeric",
                           &tmp_val)) {
      if (tmp_val < 0 || tmp_val > port::kMaxUint32) {
      if (tmp_val < 0 || tmp_val > std::numeric_limits<uint32_t>::max()) {
        fprintf(stderr,
                "compression_zstd_max_train_bytes must be a uint32_t: '%s'\n",
                argv[i]);
@ -190,7 +190,7 @@ uint64_t MultiplyCheckOverflow(uint64_t op1, uint64_t op2) {
  if (op1 == 0 || op2 == 0) {
    return 0;
  }
  if (port::kMaxUint64 / op1 < op2) {
  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
    return op1;
  }
  return (op1 * op2);
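This is the usual divide-before-multiply overflow test: for op1 != 0, op1 * op2 overflows exactly when max / op1 < op2. The same idiom as a standalone sketch (illustrative, not part of this commit):

    #include <cstdint>
    #include <limits>

    // True iff a * b would overflow uint64_t.
    bool MulWouldOverflow(uint64_t a, uint64_t b) {
      return a != 0 && std::numeric_limits<uint64_t>::max() / a < b;
    }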
@ -281,7 +281,7 @@ class BlockCacheTracer {
                          const Slice& block_key, const Slice& cf_name,
                          const Slice& referenced_key);

  // GetId cycles from 1 to port::kMaxUint64.
  // GetId cycles from 1 to std::numeric_limits<uint64_t>::max().
  uint64_t NextGetId();

 private:
@ -101,7 +101,9 @@ class BinaryHeap {

  size_t size() const { return data_.size(); }

  void reset_root_cmp_cache() { root_cmp_cache_ = port::kMaxSizet; }
  void reset_root_cmp_cache() {
    root_cmp_cache_ = std::numeric_limits<size_t>::max();
  }

 private:
  static inline size_t get_root() { return 0; }
@ -126,7 +128,7 @@ class BinaryHeap {
  void downheap(size_t index) {
    T v = std::move(data_[index]);

    size_t picked_child = port::kMaxSizet;
    size_t picked_child = std::numeric_limits<size_t>::max();
    while (1) {
      const size_t left_child = get_left(index);
      if (get_left(index) >= data_.size()) {
@ -165,7 +167,7 @@ class BinaryHeap {
  Compare cmp_;
  autovector<T> data_;
  // Used to reduce number of cmp_ calls in downheap()
  size_t root_cmp_cache_ = port::kMaxSizet;
  size_t root_cmp_cache_ = std::numeric_limits<size_t>::max();
};

}  // namespace ROCKSDB_NAMESPACE
@ -31,8 +31,8 @@ size_t RateLimiter::RequestToken(size_t bytes, size_t alignment,

  if (alignment > 0) {
    // Here we may actually require more than burst and block
    // but we can not write less than one page at a time on direct I/O
    // thus we may want not to use ratelimiter
    // as we can not write/read less than one page at a time on direct I/O
    // thus we do not want to be strictly constrained by burst
    bytes = std::max(alignment, TruncateToPageBoundary(alignment, bytes));
  }
  Request(bytes, io_priority, stats, op_type);
@ -347,10 +347,11 @@ void GenericRateLimiter::RefillBytesAndGrantRequests() {

int64_t GenericRateLimiter::CalculateRefillBytesPerPeriod(
    int64_t rate_bytes_per_sec) {
  if (port::kMaxInt64 / rate_bytes_per_sec < options_.refill_period_us) {
  if (std::numeric_limits<int64_t>::max() / rate_bytes_per_sec <
      options_.refill_period_us) {
    // Avoid unexpected result in the overflow case. The result now is still
    // inaccurate but is a number that is large enough.
    return port::kMaxInt64 / 1000000;
    return std::numeric_limits<int64_t>::max() / 1000000;
  } else {
    return rate_bytes_per_sec * options_.refill_period_us / 1000000;
  }
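In the non-overflow branch the refill size is simply rate times period: for example, rate_bytes_per_sec = 10485760 (10 MiB/s) with refill_period_us = 100000 (100 ms) yields 10485760 * 100000 / 1000000 = 1048576 bytes, i.e. 1 MiB granted per refill period.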
@ -374,7 +375,7 @@ Status GenericRateLimiter::Tune() {
      std::chrono::microseconds(options_.refill_period_us);
  // We tune every kRefillsPerTune intervals, so the overflow and division-by-
  // zero conditions should never happen.
  assert(num_drains_ <= port::kMaxInt64 / 100);
  assert(num_drains_ <= std::numeric_limits<int64_t>::max() / 100);
  assert(elapsed_intervals > 0);
  int64_t drained_pct = num_drains_ * 100 / elapsed_intervals;

@ -385,14 +386,15 @@ Status GenericRateLimiter::Tune() {
  } else if (drained_pct < kLowWatermarkPct) {
    // sanitize to prevent overflow
    int64_t sanitized_prev_bytes_per_sec =
        std::min(prev_bytes_per_sec, port::kMaxInt64 / 100);
        std::min(prev_bytes_per_sec, std::numeric_limits<int64_t>::max() / 100);
    new_bytes_per_sec =
        std::max(options_.max_bytes_per_sec / kAllowedRangeFactor,
                 sanitized_prev_bytes_per_sec * 100 / (100 + kAdjustFactorPct));
  } else if (drained_pct > kHighWatermarkPct) {
    // sanitize to prevent overflow
    int64_t sanitized_prev_bytes_per_sec = std::min(
        prev_bytes_per_sec, port::kMaxInt64 / (100 + kAdjustFactorPct));
    int64_t sanitized_prev_bytes_per_sec =
        std::min(prev_bytes_per_sec, std::numeric_limits<int64_t>::max() /
                                         (100 + kAdjustFactorPct));
    new_bytes_per_sec =
        std::min(options_.max_bytes_per_sec,
                 sanitized_prev_bytes_per_sec * (100 + kAdjustFactorPct) / 100);
@ -433,7 +435,8 @@ static int RegisterBuiltinRateLimiters(ObjectLibrary& library,
      GenericRateLimiter::kClassName(),
      [](const std::string& /*uri*/, std::unique_ptr<RateLimiter>* guard,
         std::string* /*errmsg*/) {
        guard->reset(new GenericRateLimiter(port::kMaxInt64));
        guard->reset(
            new GenericRateLimiter(std::numeric_limits<int64_t>::max()));
        return guard->get();
      });
  size_t num_types;
@ -36,7 +36,7 @@ class RateLimiterTest : public testing::Test {
};

TEST_F(RateLimiterTest, OverflowRate) {
  GenericRateLimiter limiter(port::kMaxInt64, 1000, 10,
  GenericRateLimiter limiter(std::numeric_limits<int64_t>::max(), 1000, 10,
                             RateLimiter::Mode::kWritesOnly,
                             SystemClock::Default(), false /* auto_tuned */);
  ASSERT_GT(limiter.GetSingleBurstBytes(), 1000000000ll);
@ -315,7 +315,8 @@ uint32_t ParseUint32(const std::string& value) {

int32_t ParseInt32(const std::string& value) {
  int64_t num = ParseInt64(value);
  if (num <= port::kMaxInt32 && num >= port::kMinInt32) {
  if (num <= std::numeric_limits<int32_t>::max() &&
      num >= std::numeric_limits<int32_t>::min()) {
    return static_cast<int32_t>(num);
  } else {
    throw std::out_of_range(value);
@ -1012,8 +1012,9 @@ IOStatus BackupEngineImpl::Initialize() {
  // we might need to clean up from previous crash or I/O errors
  might_need_garbage_collect_ = true;

  if (options_.max_valid_backups_to_open != port::kMaxInt32) {
    options_.max_valid_backups_to_open = port::kMaxInt32;
  if (options_.max_valid_backups_to_open !=
      std::numeric_limits<int32_t>::max()) {
    options_.max_valid_backups_to_open = std::numeric_limits<int32_t>::max();
    ROCKS_LOG_WARN(
        options_.info_log,
        "`max_valid_backups_to_open` is not set to the default value. Ignoring "
@ -1434,7 +1435,8 @@ IOStatus BackupEngineImpl::CreateNewBackupWithMetadata(
            contents.size(), db_options.statistics.get(), 0 /* size_limit */,
            false /* shared_checksum */, options.progress_callback, contents);
      } /* create_file_cb */,
      &sequence_number, options.flush_before_backup ? 0 : port::kMaxUint64,
      &sequence_number,
      options.flush_before_backup ? 0 : std::numeric_limits<uint64_t>::max(),
      compare_checksum));
  if (io_s.ok()) {
    new_backup->SetSequenceNumber(sequence_number);
@ -2171,7 +2173,7 @@ IOStatus BackupEngineImpl::AddBackupFileWorkItem(
      return io_s;
    }
  }
  if (size_bytes == port::kMaxUint64) {
  if (size_bytes == std::numeric_limits<uint64_t>::max()) {
    return IOStatus::NotFound("File missing: " + src_path);
  }
  // dst_relative depends on the following conditions:
@ -3756,7 +3756,8 @@ TEST_F(BackupEngineTest, WriteOnlyEngineNoSharedFileDeletion) {
    }
    CloseDBAndBackupEngine();

    engine_options_->max_valid_backups_to_open = port::kMaxInt32;
    engine_options_->max_valid_backups_to_open =
        std::numeric_limits<int32_t>::max();
    AssertBackupConsistency(i + 1, 0, (i + 1) * kNumKeys);
  }
}
@ -16,6 +16,7 @@

#include "utilities/fault_injection_fs.h"

#include <algorithm>
#include <functional>
#include <utility>

@ -290,6 +291,33 @@ IOStatus TestFSWritableFile::Sync(const IOOptions& options,
  return io_s;
}

IOStatus TestFSWritableFile::RangeSync(uint64_t offset, uint64_t nbytes,
                                       const IOOptions& options,
                                       IODebugContext* dbg) {
  if (!fs_->IsFilesystemActive()) {
    return fs_->GetError();
  }
  // Assumes caller passes consecutive byte ranges.
  uint64_t sync_limit = offset + nbytes;
  uint64_t buf_begin =
      state_.pos_at_last_sync_ < 0 ? 0 : state_.pos_at_last_sync_;

  IOStatus io_s;
  if (sync_limit < buf_begin) {
    return io_s;
  }
  uint64_t num_to_sync = std::min(static_cast<uint64_t>(state_.buffer_.size()),
                                  sync_limit - buf_begin);
  Slice buf_to_sync(state_.buffer_.data(), num_to_sync);
  io_s = target_->Append(buf_to_sync, options, dbg);
  state_.buffer_ = state_.buffer_.substr(num_to_sync);
  // Ignore sync errors
  target_->RangeSync(offset, nbytes, options, dbg).PermitUncheckedError();
  state_.pos_at_last_sync_ = offset + num_to_sync;
  fs_->WritableFileSynced(state_);
  return io_s;
}

TestFSRandomRWFile::TestFSRandomRWFile(const std::string& /*fname*/,
                                       std::unique_ptr<FSRandomRWFile>&& f,
                                       FaultInjectionTestFS* fs)
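As the comment notes, the implementation assumes the caller syncs consecutive byte ranges after appending, matching how the write path issues range syncs. An illustrative call sequence under that assumption (sizes hypothetical):

    // append 8 KiB, then sync it as two consecutive 4 KiB ranges
    file->Append(data, opts, dbg);
    file->RangeSync(0 /* offset */, 4096 /* nbytes */, opts, dbg);
    file->RangeSync(4096, 4096, opts, dbg);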
@ -76,6 +76,9 @@ class TestFSWritableFile : public FSWritableFile {
                           IODebugContext* dbg) override;
  virtual IOStatus Flush(const IOOptions&, IODebugContext*) override;
  virtual IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override;
  virtual IOStatus RangeSync(uint64_t /*offset*/, uint64_t /*nbytes*/,
                             const IOOptions& options,
                             IODebugContext* dbg) override;
  virtual bool IsSyncThreadSafe() const override { return true; }
  virtual IOStatus PositionedAppend(const Slice& data, uint64_t offset,
                                    const IOOptions& options,
@ -95,7 +95,7 @@ struct WriteBatchIndexEntry {
                       bool is_forward_direction, bool is_seek_to_first)
      // For SeekForPrev(), we need to make the dummy entry larger than any
      // entry that has the same search key. Otherwise, we'll miss those entries.
      : offset(is_forward_direction ? 0 : port::kMaxSizet),
      : offset(is_forward_direction ? 0 : std::numeric_limits<size_t>::max()),
        column_family(_column_family),
        key_offset(0),
        key_size(is_seek_to_first ? kFlagMinInCf : 0),
@ -105,7 +105,7 @@ struct WriteBatchIndexEntry {

  // If this flag appears in the key_size, it indicates a
  // key that is smaller than any other entry for the same column family.
  static const size_t kFlagMinInCf = port::kMaxSizet;
  static const size_t kFlagMinInCf = std::numeric_limits<size_t>::max();

  bool is_min_in_cf() const {
    assert(key_size != kFlagMinInCf ||