Fix NewFileNumber()

Summary: I mistakenly changed NewFileNumber() to behave like ++next_file_number_ instead of next_file_number_++, as it should have been: 344edbb044/db/version_set.h (L539)

Test Plan: none. Not sure if this would break anything; it's just different behavior, so I'd rather not risk it.

Reviewers: ljin, rven, yhchiang, sdong

Reviewed By: sdong

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D28557
Igor Canadi 2014-11-11 06:58:47 -08:00
parent 625e162c69
commit 113796c493
3 changed files with 5 additions and 19 deletions

db/db_impl.cc

@@ -3003,8 +3003,6 @@ Status DBImpl::SetNewMemtableAndNewLogFile(ColumnFamilyData* cfd,
   if (!s.ok()) {
     // how do we fail if we're not creating new log?
     assert(creating_new_log);
-    // Avoid chewing through file number space in a tight loop.
-    versions_->ReuseLogFileNumber(new_log_number);
     assert(!new_mem);
     assert(!new_log);
     return s;
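
For context, the ReuseLogFileNumber() call dropped here (its definition is removed in the db/version_set.h hunk below) follows a common allocate-then-roll-back idiom: take a number with fetch_add, and if it turns out not to be needed, try to hand it back with a compare-and-swap that only succeeds if nothing else was allocated in the meantime. A minimal standalone sketch of that idiom, with illustrative names rather than RocksDB's:

    #include <atomic>
    #include <cstdint>

    // Illustrative counter, not VersionSet's member.
    std::atomic<uint64_t> next_number{1};

    // Allocate a number; fetch_add returns the value before the increment.
    uint64_t Allocate() { return next_number.fetch_add(1); }

    // Try to hand a number back. Succeeds only if nothing has been allocated
    // since `n` was handed out, i.e. the counter still reads n + 1.
    bool TryReuse(uint64_t n) {
      uint64_t expected = n + 1;
      return next_number.compare_exchange_strong(expected, n);
    }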

db/version_set.h

@@ -535,16 +535,7 @@ class VersionSet {
   uint64_t current_next_file_number() const { return next_file_number_.load(); }

   // Allocate and return a new file number
-  uint64_t NewFileNumber() { return next_file_number_.fetch_add(1) + 1; }
-
-  // Arrange to reuse "file_number" unless a newer file number has
-  // already been allocated.
-  // REQUIRES: "file_number" was returned by a call to NewFileNumber().
-  void ReuseLogFileNumber(uint64_t file_number) {
-    auto expected = file_number + 1;
-    std::atomic_compare_exchange_strong(&next_file_number_, &expected,
-                                        file_number);
-  }
+  uint64_t NewFileNumber() { return next_file_number_.fetch_add(1); }

   // Return the last sequence number.
   uint64_t LastSequence() const {
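
To make the one-line change above concrete: std::atomic's fetch_add returns the value the counter held before the addition, so next_file_number_.fetch_add(1) is the atomic form of next_file_number_++ (hand out the current number, then advance), while fetch_add(1) + 1 reports the already-advanced value, i.e. ++next_file_number_. A minimal standalone sketch of the difference, using a plain std::atomic rather than RocksDB code:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    int main() {
      std::atomic<uint64_t> n{5};

      // Post-increment semantics (what this commit restores):
      // fetch_add returns the old value; the counter becomes 6.
      uint64_t post = n.fetch_add(1);
      assert(post == 5 && n.load() == 6);

      // Pre-increment semantics (the mistaken behavior being reverted):
      // adding 1 to fetch_add's result reports the new value, 7.
      uint64_t pre = n.fetch_add(1) + 1;
      assert(pre == 7 && n.load() == 7);
      return 0;
    }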

include/rocksdb/options.h

@@ -625,14 +625,11 @@ struct DBOptions {
   // Default: false
   bool error_if_exists;

-  // If true, the implementation will do aggressive checking of the
-  // data it is processing and will stop early if it detects any
-  // errors. This may have unforeseen ramifications: for example, a
-  // corruption of one DB entry may cause a large number of entries to
-  // become unreadable or for the entire DB to become unopenable.
-  // If any of the writes to the database fails (Put, Delete, Merge, Write),
-  // the database will switch to read-only mode and fail all other
+  // If true, RocksDB will aggressively check consistency of the data.
+  // Also, if any of the writes to the database fails (Put, Delete, Merge,
+  // Write), the database will switch to read-only mode and fail all other
   // Write operations.
+  // In most cases you want this to be set to true.
   // Default: true
   bool paranoid_checks;
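
Since the reworded comment now describes two behaviors (aggressive consistency checking plus the read-only fallback after a failed write), a short usage sketch may help. It uses the public rocksdb::Options / rocksdb::DB API; the database path is a placeholder:

    #include <cassert>
    #include "rocksdb/db.h"

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;
      options.paranoid_checks = true;  // the default; aggressive consistency checks

      rocksdb::DB* db = nullptr;
      rocksdb::Status s =
          rocksdb::DB::Open(options, "/tmp/paranoid_checks_example", &db);
      assert(s.ok());

      // With paranoid_checks on, a failed Put/Delete/Merge/Write switches the
      // DB to read-only mode and later write operations return errors.
      s = db->Put(rocksdb::WriteOptions(), "key", "value");
      assert(s.ok());

      delete db;
      return 0;
    }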