Add more tests to ASSERT_STATUS_CHECKED (3), API change (#7715)

Summary: Third batch of adding more tests to ASSERT_STATUS_CHECKED:
* db_compaction_filter_test
* db_compaction_test
* db_dynamic_level_test
* db_inplace_update_test
* db_sst_test
* db_tailing_iter_test
* db_io_failure_test

Also update the GetApproximateSizes APIs to all return Status.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7715
Reviewed By: jay-zhuang
Differential Revision: D25806896
Pulled By: pdillinger
fbshipit-source-id: 6cb9d62ba5a756c645812754c596ad3995d7c262

parent 5792b73fdc
commit 6e0f62f2b6
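
For context, a sketch of the updated C++ API after this change: every `DB::GetApproximateSizes` overload now returns `Status` instead of `void` (see include/rocksdb/db.h for the authoritative declarations; the wrapper function below is illustrative only):

    #include "rocksdb/db.h"

    // Failure to compute the estimate is now visible to the caller.
    ROCKSDB_NAMESPACE::Status ApproximateRangeSize(
        ROCKSDB_NAMESPACE::DB* db, const ROCKSDB_NAMESPACE::Range& r,
        uint64_t* size) {
      return db->GetApproximateSizes(&r, 1, size);
    }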

HISTORY.md
@@ -7,6 +7,10 @@
### Behavior Changes

* Attempting to write a merge operand without explicitly configuring `merge_operator` now fails immediately, causing the DB to enter read-only mode. Previously, failure was deferred until the `merge_operator` was needed by a user read or a background operation.

### API Changes

* `rocksdb_approximate_sizes` and `rocksdb_approximate_sizes_cf` in the C API now require an error pointer (`char** errptr`) for receiving any error (see the usage sketch after this list).
* All overloads of `DB::GetApproximateSizes` now return `Status`, so that any failure to obtain the sizes is indicated to the caller.
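
The usage sketch referenced above: the trailing `char** errptr` follows the error convention used across the rest of the C API. The `db` handle is assumed to be an already-open `rocksdb_t*`; everything else is illustrative:

    #include <cstdio>
    #include <cstdlib>
    #include "rocksdb/c.h"

    void PrintApproximateSize(rocksdb_t* db) {
      const char* start_keys[1] = {"a"};
      const size_t start_lens[1] = {1};
      const char* limit_keys[1] = {"z"};
      const size_t limit_lens[1] = {1};
      uint64_t sizes[1] = {0};
      char* err = nullptr;
      rocksdb_approximate_sizes(db, 1, start_keys, start_lens, limit_keys,
                                limit_lens, sizes, &err);
      if (err != nullptr) {
        fprintf(stderr, "approximate sizes failed: %s\n", err);
        free(err);  // error strings from the C API are heap-allocated
        return;
      }
      printf("range [a, z): ~%llu bytes\n", (unsigned long long)sizes[0]);
    }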

### Bug Fixes

* Truncated WALs ending in incomplete records can no longer produce gaps in the recovered data when `WALRecoveryMode::kPointInTimeRecovery` is used. Gaps are still possible when WALs are truncated exactly on record boundaries; for complete protection, users should enable `track_and_verify_wals_in_manifest` (a configuration sketch follows this list).
* Fix a bug where compressed blocks read by MultiGet are not inserted into the compressed block cache when `use_direct_reads = true`.
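
The configuration sketch referenced above, with option names as they appear in the RocksDB options headers; the surrounding code is illustrative:

    #include "rocksdb/options.h"

    ROCKSDB_NAMESPACE::Options options;
    options.wal_recovery_mode =
        ROCKSDB_NAMESPACE::WALRecoveryMode::kPointInTimeRecovery;
    // Point-in-time recovery alone cannot detect a WAL truncated exactly on
    // a record boundary; MANIFEST-based WAL tracking closes that gap.
    options.track_and_verify_wals_in_manifest = true;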

6	Makefile
@@ -612,7 +612,12 @@ ifdef ASSERT_STATUS_CHECKED
   db_blob_basic_test \
   db_blob_index_test \
   db_block_cache_test \
+  db_compaction_test \
+  db_compaction_filter_test \
+  db_dynamic_level_test \
   db_flush_test \
+  db_inplace_update_test \
+  db_io_failure_test \
   db_iterator_test \
   db_logical_block_size_cache_test \
   db_memtable_test \
@@ -629,6 +634,7 @@ ifdef ASSERT_STATUS_CHECKED
   deletefile_test \
   external_sst_file_test \
   options_file_test \
+  db_sst_test \
   db_statistics_test \
   db_table_properties_test \
   db_tailing_iter_test \

33	db/c.cc
@@ -1388,34 +1388,39 @@ char* rocksdb_property_value_cf(
   }
 }

-void rocksdb_approximate_sizes(
-    rocksdb_t* db,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
-    uint64_t* sizes) {
+void rocksdb_approximate_sizes(rocksdb_t* db, int num_ranges,
+                               const char* const* range_start_key,
+                               const size_t* range_start_key_len,
+                               const char* const* range_limit_key,
+                               const size_t* range_limit_key_len,
+                               uint64_t* sizes, char** errptr) {
   Range* ranges = new Range[num_ranges];
   for (int i = 0; i < num_ranges; i++) {
     ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
     ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
   }
-  db->rep->GetApproximateSizes(ranges, num_ranges, sizes);
+  Status s = db->rep->GetApproximateSizes(ranges, num_ranges, sizes);
+  if (!s.ok()) {
+    SaveError(errptr, s);
+  }
   delete[] ranges;
 }

 void rocksdb_approximate_sizes_cf(
-    rocksdb_t* db,
-    rocksdb_column_family_handle_t* column_family,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
-    uint64_t* sizes) {
+    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
+    int num_ranges, const char* const* range_start_key,
+    const size_t* range_start_key_len, const char* const* range_limit_key,
+    const size_t* range_limit_key_len, uint64_t* sizes, char** errptr) {
   Range* ranges = new Range[num_ranges];
   for (int i = 0; i < num_ranges; i++) {
     ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
     ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
   }
-  db->rep->GetApproximateSizes(column_family->rep, ranges, num_ranges, sizes);
+  Status s = db->rep->GetApproximateSizes(column_family->rep, ranges,
+                                          num_ranges, sizes);
+  if (!s.ok()) {
+    SaveError(errptr, s);
+  }
   delete[] ranges;
 }

db/c_test.c
@@ -988,7 +988,9 @@ int main(int argc, char** argv) {
         &err);
     CheckNoError(err);
   }
-  rocksdb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes);
+  rocksdb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes,
+                            &err);
+  CheckNoError(err);
   CheckCondition(sizes[0] > 0);
   CheckCondition(sizes[1] > 0);
 }

db/compaction/compaction_job_stats_test.cc
@@ -297,15 +297,14 @@ class CompactionJobStatsTest : public testing::Test,
     return result;
   }

-  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0) {
+  Status Size(uint64_t* size, const Slice& start, const Slice& limit,
+              int cf = 0) {
     Range r(start, limit);
-    uint64_t size;
     if (cf == 0) {
-      db_->GetApproximateSizes(&r, 1, &size);
+      return db_->GetApproximateSizes(&r, 1, size);
     } else {
-      db_->GetApproximateSizes(handles_[1], &r, 1, &size);
+      return db_->GetApproximateSizes(handles_[1], &r, 1, size);
     }
-    return size;
   }

   void Compact(int cf, const Slice& start, const Slice& limit,
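
Callers of this helper switch from value-return to out-parameter style so the Status can be checked; a sketch of the new call pattern (key names illustrative):

    uint64_t size = 0;
    ASSERT_OK(Size(&size, "key00000", "key99999"));  // was: size = Size(...)
    ASSERT_GT(size, 0u);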

db/db_compaction_filter_test.cc
@@ -42,7 +42,7 @@ class DBTestCompactionFilterWithCompactParam
       option_config_ == kUniversalSubcompactions) {
     assert(options.max_subcompactions > 1);
   }
-  TryReopen(options);
+  Reopen(options);
 }
 };

@@ -276,7 +276,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   for (int i = 0; i < 100000; i++) {
     char key[100];
     snprintf(key, sizeof(key), "B%010d", i);
-    Put(1, key, value);
+    ASSERT_OK(Put(1, key, value));
   }
   ASSERT_OK(Flush(1));

@@ -284,10 +284,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   // the compaction is each level invokes the filter for
   // all the keys in that level.
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);

   ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
@@ -321,6 +321,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
       }
       iter->Next();
     }
+    ASSERT_OK(iter->status());
   }
   ASSERT_EQ(total, 100000);
   ASSERT_EQ(count, 0);
@@ -337,10 +338,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   // means that all keys should pass at least once
   // via the compaction filter
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);
   ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
@@ -369,10 +370,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   // verify that at the end of the compaction process,
   // nothing is left.
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 100000);
   cfilter_count = 0;
-  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   ASSERT_EQ(cfilter_count, 0);
   ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
@@ -387,6 +388,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
     count++;
     iter->Next();
   }
+  ASSERT_OK(iter->status());
   ASSERT_EQ(count, 0);
 }

@@ -427,9 +429,9 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {
   // put some data
   for (int table = 0; table < 4; ++table) {
     for (int i = 0; i < 10 + table; ++i) {
-      Put(ToString(table * 100 + i), "val");
+      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
     }
-    Flush();
+    ASSERT_OK(Flush());
   }

   // this will produce empty file (delete compaction filter)
@@ -440,6 +442,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {

   Iterator* itr = db_->NewIterator(ReadOptions());
   itr->SeekToFirst();
+  ASSERT_OK(itr->status());
   // empty db
   ASSERT_TRUE(!itr->Valid());

@@ -463,25 +466,25 @@ TEST_P(DBTestCompactionFilterWithCompactParam,
   for (int i = 0; i < 100001; i++) {
     char key[100];
     snprintf(key, sizeof(key), "B%010d", i);
-    Put(1, key, value);
+    ASSERT_OK(Put(1, key, value));
   }

   // push all files to lower levels
   ASSERT_OK(Flush(1));
   if (option_config_ != kUniversalCompactionMultiLevel &&
       option_config_ != kUniversalSubcompactions) {
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+    ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
+    ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   } else {
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
+    ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1],
+                                     nullptr, nullptr));
   }

   // re-write all data again
   for (int i = 0; i < 100001; i++) {
     char key[100];
     snprintf(key, sizeof(key), "B%010d", i);
-    Put(1, key, value);
+    ASSERT_OK(Put(1, key, value));
   }

   // push all files to lower levels. This should
@@ -489,11 +492,11 @@ TEST_P(DBTestCompactionFilterWithCompactParam,
   ASSERT_OK(Flush(1));
   if (option_config_ != kUniversalCompactionMultiLevel &&
       option_config_ != kUniversalSubcompactions) {
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
-    dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+    ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
+    ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]));
   } else {
-    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                           nullptr);
+    ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1],
+                                     nullptr, nullptr));
   }

   // verify that all keys now have the new value that
@@ -531,7 +534,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
   ASSERT_OK(Flush());
   std::string newvalue = Get("foo");
   ASSERT_EQ(newvalue, three);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("foo");
   ASSERT_EQ(newvalue, three);

@@ -539,12 +542,12 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
   // merge keys.
   ASSERT_OK(db_->Put(WriteOptions(), "bar", two));
   ASSERT_OK(Flush());
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("bar");
   ASSERT_EQ("NOT_FOUND", newvalue);
   ASSERT_OK(db_->Merge(WriteOptions(), "bar", two));
   ASSERT_OK(Flush());
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("bar");
   ASSERT_EQ(two, two);

@@ -555,7 +558,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
   ASSERT_OK(Flush());
   newvalue = Get("foobar");
   ASSERT_EQ(newvalue, three);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("foobar");
   ASSERT_EQ(newvalue, three);

@@ -568,7 +571,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
   ASSERT_OK(Flush());
   newvalue = Get("barfoo");
   ASSERT_EQ(newvalue, four);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   newvalue = Get("barfoo");
   ASSERT_EQ(newvalue, four);
 }
@@ -590,21 +593,21 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) {
     for (int i = 0; i < num_keys_per_file; i++) {
       char key[100];
       snprintf(key, sizeof(key), "B%08d%02d", i, j);
-      Put(key, value);
+      ASSERT_OK(Put(key, value));
     }
-    dbfull()->TEST_FlushMemTable();
+    ASSERT_OK(dbfull()->TEST_FlushMemTable());
     // Make sure next file is much smaller so automatic compaction will not
     // be triggered.
     num_keys_per_file /= 2;
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Force a manual compaction
   cfilter_count = 0;
   filter->expect_manual_compaction_.store(true);
   filter->expect_full_compaction_.store(true);
   filter->expect_cf_id_.store(0);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ(cfilter_count, 700);
   ASSERT_EQ(NumSortedRuns(0), 1);
   ASSERT_TRUE(filter->compaction_filter_created());
@@ -654,14 +657,14 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) {
     for (int i = 0; i < num_keys_per_file; i++) {
       char key[100];
       snprintf(key, sizeof(key), "B%08d%02d", i, j);
-      Put(1, key, value);
+      ASSERT_OK(Put(1, key, value));
     }
-    Flush(1);
+    ASSERT_OK(Flush(1));
     // Make sure next file is much smaller so automatic compaction will not
     // be triggered.
     num_keys_per_file /= 2;
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   ASSERT_TRUE(filter->compaction_filter_created());
 }
@@ -680,9 +683,9 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
   const Snapshot* snapshot = nullptr;
   for (int table = 0; table < 4; ++table) {
     for (int i = 0; i < 10; ++i) {
-      Put(ToString(table * 100 + i), "val");
+      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
     }
-    Flush();
+    ASSERT_OK(Flush());

     if (table == 0) {
       snapshot = db_->GetSnapshot();
@@ -702,6 +705,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
   read_options.snapshot = snapshot;
   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
   iter->SeekToFirst();
+  ASSERT_OK(iter->status());
   int count = 0;
   while (iter->Valid()) {
     count++;
@@ -710,6 +714,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
   ASSERT_EQ(count, 6);
   read_options.snapshot = nullptr;
   std::unique_ptr<Iterator> iter1(db_->NewIterator(read_options));
+  ASSERT_OK(iter1->status());
   iter1->SeekToFirst();
   count = 0;
   while (iter1->Valid()) {
@@ -740,9 +745,9 @@ TEST_F(DBTestCompactionFilter, SkipUntil) {
     for (int i = table * 6; i < 39 + table * 11; ++i) {
       char key[100];
       snprintf(key, sizeof(key), "%010d", table * 100 + i);
-      Put(key, std::to_string(table * 1000 + i));
+      ASSERT_OK(Put(key, std::to_string(table * 1000 + i)));
     }
-    Flush();
+    ASSERT_OK(Flush());
   }

   cfilter_skips = 0;
@@ -781,10 +786,10 @@ TEST_F(DBTestCompactionFilter, SkipUntilWithBloomFilter) {
   options.create_if_missing = true;
   DestroyAndReopen(options);

-  Put("0000000010", "v10");
-  Put("0000000020", "v20");  // skipped
-  Put("0000000050", "v50");
-  Flush();
+  ASSERT_OK(Put("0000000010", "v10"));
+  ASSERT_OK(Put("0000000020", "v20"));  // skipped
+  ASSERT_OK(Put("0000000050", "v50"));
+  ASSERT_OK(Flush());

   cfilter_skips = 0;
   EXPECT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@@ -822,13 +827,13 @@ TEST_F(DBTestCompactionFilter, IgnoreSnapshotsFalse) {
   options.compaction_filter = new TestNotSupportedFilter();
   DestroyAndReopen(options);

-  Put("a", "v10");
-  Put("z", "v20");
-  Flush();
+  ASSERT_OK(Put("a", "v10"));
+  ASSERT_OK(Put("z", "v20"));
+  ASSERT_OK(Flush());

-  Put("a", "v10");
-  Put("z", "v20");
-  Flush();
+  ASSERT_OK(Put("a", "v10"));
+  ASSERT_OK(Put("z", "v20"));
+  ASSERT_OK(Flush());

   // Compaction should fail because IgnoreSnapshots() = false
   EXPECT_TRUE(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)

db/db_compaction_test.cc: file diff suppressed because it is too large.

db/db_dynamic_level_test.cc
@@ -102,7 +102,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase) {
   }

   // Test compact range works
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(
+      dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   // All data should be in the last level.
   ColumnFamilyMetaData cf_meta;
   db_->GetColumnFamilyMetaData(&cf_meta);
@@ -166,8 +167,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
   }));
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(4U, int_prop);

@@ -184,8 +185,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
   }));
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(3U, int_prop);
   ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level1", &str_prop));
@@ -205,8 +206,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
   }));
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(3U, int_prop);

@@ -234,8 +235,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   }));

   TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:0");
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(2U, int_prop);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
@@ -264,7 +265,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   }
   TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:2");

-  Flush();
+  ASSERT_OK(Flush());

   thread.join();

@@ -302,7 +303,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
   DestroyAndReopen(options);

   // Compact against empty DB
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));

   uint64_t int_prop;
   std::string str_prop;
@@ -316,13 +317,13 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
     ASSERT_OK(
         Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))), rnd.RandomString(80)));
   }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   if (NumTableFilesAtLevel(0) == 0) {
     // Make sure level 0 is not empty
     ASSERT_OK(
         Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))), rnd.RandomString(80)));
-    Flush();
+    ASSERT_OK(Flush());
   }

   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
@@ -343,7 +344,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
   });
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ(output_levels.size(), 2);
   ASSERT_TRUE(output_levels.find(3) != output_levels.end());
   ASSERT_TRUE(output_levels.find(4) != output_levels.end());
@@ -389,8 +390,8 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) {
     PutFixed32(&value, static_cast<uint32_t>(i));
     ASSERT_OK(Put(Key(i), value));
   }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();

   ASSERT_EQ(non_trivial, 0);
@@ -449,7 +450,7 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
     ASSERT_OK(Delete(Key(i / 10)));
   }
   verify_func(total_keys, false);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   options.level_compaction_dynamic_level_bytes = true;
   options.disable_auto_compactions = true;
@@ -464,7 +465,7 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
     CompactRangeOptions compact_options;
     compact_options.change_level = true;
     compact_options.target_level = options.num_levels - 1;
-    dbfull()->CompactRange(compact_options, nullptr, nullptr);
+    ASSERT_OK(dbfull()->CompactRange(compact_options, nullptr, nullptr));
     compaction_finished.store(true);
   });
   do {
@@ -484,7 +485,7 @@ TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
   }

   verify_func(total_keys2, false);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   verify_func(total_keys2, false);

   // Base level is not level 1

db/db_impl/db_impl.cc
@@ -306,18 +306,21 @@ Status DBImpl::ResumeImpl(DBRecoverContext context) {
   mutex_.AssertHeld();
   WaitForBackgroundWork();

-  Status bg_error = error_handler_.GetBGError();
   Status s;
   if (shutdown_initiated_) {
     // Returning shutdown status to SFM during auto recovery will cause it
     // to abort the recovery and allow the shutdown to progress
     s = Status::ShutdownInProgress();
   }
-  if (s.ok() && bg_error.severity() > Status::Severity::kHardError) {
-    ROCKS_LOG_INFO(
-        immutable_db_options_.info_log,
-        "DB resume requested but failed due to Fatal/Unrecoverable error");
-    s = bg_error;
+  if (s.ok()) {
+    Status bg_error = error_handler_.GetBGError();
+    if (bg_error.severity() > Status::Severity::kHardError) {
+      ROCKS_LOG_INFO(
+          immutable_db_options_.info_log,
+          "DB resume requested but failed due to Fatal/Unrecoverable error");
+      s = bg_error;
+    }
   }

   // Make sure the IO Status stored in version set is set to OK.
@@ -392,6 +395,11 @@ Status DBImpl::ResumeImpl(DBRecoverContext context) {
   FindObsoleteFiles(&job_context, true);
   if (s.ok()) {
     s = error_handler_.ClearBGError();
+  } else {
+    // NOTE: this is needed to pass ASSERT_STATUS_CHECKED
+    // in the DBSSTTest.DBWithMaxSpaceAllowedRandomized test.
+    // See https://github.com/facebook/rocksdb/pull/7715#issuecomment-754947952
+    error_handler_.GetRecoveryError().PermitUncheckedError();
   }
   mutex_.Unlock();

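The PermitUncheckedError() call above is the intended escape hatch in ASSERT_STATUS_CHECKED builds, where a Status destroyed without ever being inspected triggers an assertion. A hedged sketch of the contract (the function names here are hypothetical):

    ROCKSDB_NAMESPACE::Status s = SomeOperation();  // hypothetical call
    if (!s.ok()) {
      // Reading ok()/code() marks the Status as checked.
    }

    ROCKSDB_NAMESPACE::Status ignored = BestEffortOperation();  // hypothetical
    // Deliberately ignoring a Status must be explicit, or an
    // ASSERT_STATUS_CHECKED build aborts when `ignored` is destroyed.
    ignored.PermitUncheckedError();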
@@ -408,6 +416,12 @@ Status DBImpl::ResumeImpl(DBRecoverContext context) {
     if (file_deletion_disabled) {
       // Always return ok
       s = EnableFileDeletions(/*force=*/true);
+      if (!s.ok()) {
+        ROCKS_LOG_INFO(
+            immutable_db_options_.info_log,
+            "DB resume requested but could not enable file deletions [%s]",
+            s.ToString().c_str());
+      }
     }
     ROCKS_LOG_INFO(immutable_db_options_.info_log, "Successfully resumed DB");
   }
@@ -3573,7 +3587,7 @@ Status DBImpl::DeleteFile(std::string name) {
 Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
                                    const RangePtr* ranges, size_t n,
                                    bool include_end) {
-  Status status;
+  Status status = Status::OK();
   auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
   ColumnFamilyData* cfd = cfh->cfd();
   VersionEdit edit;
@@ -3632,7 +3646,7 @@ Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
     }
     if (edit.GetDeletedFiles().empty()) {
       job_context.Clean();
-      return Status::OK();
+      return status;
     }
     input_version->Ref();
     status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),

db/db_impl/db_impl_compaction_flush.cc
@@ -35,8 +35,10 @@ bool DBImpl::EnoughRoomForCompaction(
     // Pass the current bg_error_ to SFM so it can decide what checks to
     // perform. If this DB instance hasn't seen any error yet, the SFM can be
     // optimistic and not do disk space checks
-    enough_room =
-        sfm->EnoughRoomForCompaction(cfd, inputs, error_handler_.GetBGError());
+    Status bg_error = error_handler_.GetBGError();
+    enough_room = sfm->EnoughRoomForCompaction(cfd, inputs, bg_error);
+    bg_error.PermitUncheckedError();  // bg_error is just a copy of the Status
+                                      // from the error_handler_
     if (enough_room) {
       *sfm_reserved_compact_space = true;
     }

db/db_io_failure_test.cc
@@ -43,11 +43,15 @@ TEST_F(DBIOFailureTest, DropWrites) {
         if (level > 0 && level == dbfull()->NumberLevels() - 1) {
           break;
         }
-        dbfull()->TEST_CompactRange(level, nullptr, nullptr, nullptr,
-                                    true /* disallow trivial move */);
+        Status s =
+            dbfull()->TEST_CompactRange(level, nullptr, nullptr, nullptr,
+                                        true /* disallow trivial move */);
+        ASSERT_TRUE(s.ok() || s.IsCorruption());
       }
     } else {
-      dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+      Status s =
+          dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+      ASSERT_TRUE(s.ok() || s.IsCorruption());
     }
   }

@@ -56,7 +60,8 @@ TEST_F(DBIOFailureTest, DropWrites) {
   ASSERT_EQ("5", property_value);

   env_->drop_writes_.store(false, std::memory_order_release);
-  ASSERT_LT(CountFiles(), num_files + 3);
+  const size_t count = CountFiles();
+  ASSERT_LT(count, num_files + 3);

   // Check that compaction attempts slept after errors
   // TODO @krad: Figure out why ASSERT_EQ 5 keeps failing in certain compiler
@@ -82,7 +87,8 @@ TEST_F(DBIOFailureTest, DropWritesFlush) {
   ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
   ASSERT_EQ("0", property_value);

-  dbfull()->TEST_FlushMemTable(true);
+  // ASSERT file is too short
+  ASSERT_TRUE(dbfull()->TEST_FlushMemTable(true).IsCorruption());

   ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
   ASSERT_EQ("1", property_value);
@@ -166,7 +172,7 @@ TEST_F(DBIOFailureTest, ManifestWriteError) {
   ASSERT_EQ("bar", Get("foo"));

   // Memtable compaction (will succeed)
-  Flush();
+  ASSERT_OK(Flush());
   ASSERT_EQ("bar", Get("foo"));
   const int last = 2;
   MoveFilesToLevel(2);
@@ -174,7 +180,8 @@ TEST_F(DBIOFailureTest, ManifestWriteError) {

   // Merging compaction (will fail)
   error_type->store(true, std::memory_order_release);
-  dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
+  ASSERT_NOK(
+      dbfull()->TEST_CompactRange(last, nullptr, nullptr));  // Should fail
   ASSERT_EQ("bar", Get("foo"));

   error_type->store(false, std::memory_order_release);
@@ -192,7 +199,13 @@ TEST_F(DBIOFailureTest, ManifestWriteError) {

   // Merging compaction (will fail)
   error_type->store(true, std::memory_order_release);
-  dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
+  Status s =
+      dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
+  if (iter == 0) {
+    ASSERT_OK(s);
+  } else {
+    ASSERT_TRUE(s.IsIOError());
+  }
   ASSERT_EQ("bar", Get("foo"));

   // Recovery: should not lose data
@@ -220,18 +233,15 @@ TEST_F(DBIOFailureTest, PutFailsParanoid) {
   options.paranoid_checks = true;
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo1", "bar1"));
   // simulate error
   env_->log_write_error_.store(true, std::memory_order_release);
-  s = Put(1, "foo2", "bar2");
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(Put(1, "foo2", "bar2"));
   env_->log_write_error_.store(false, std::memory_order_release);
-  s = Put(1, "foo3", "bar3");
   // the next put should fail, too
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(Put(1, "foo3", "bar3"));
   // but we're still able to read
   ASSERT_EQ("bar", Get(1, "foo"));

@@ -244,12 +254,10 @@ TEST_F(DBIOFailureTest, PutFailsParanoid) {
   ASSERT_OK(Put(1, "foo1", "bar1"));
   // simulate error
   env_->log_write_error_.store(true, std::memory_order_release);
-  s = Put(1, "foo2", "bar2");
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(Put(1, "foo2", "bar2"));
   env_->log_write_error_.store(false, std::memory_order_release);
-  s = Put(1, "foo3", "bar3");
   // the next put should NOT fail
-  ASSERT_TRUE(s.ok());
+  ASSERT_OK(Put(1, "foo3", "bar3"));
 }
 #if !(defined NDEBUG) || !defined(OS_WIN)
 TEST_F(DBIOFailureTest, FlushSstRangeSyncError) {
@@ -269,14 +277,14 @@ TEST_F(DBIOFailureTest, FlushSstRangeSyncError) {

   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

+  const char* io_error_msg = "range sync dummy error";
   std::atomic<int> range_sync_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::RangeSync", [&](void* arg) {
         if (range_sync_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("range sync dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
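
This error-injection pattern recurs throughout the file: a sync-point callback overwrites the Status that the test Env is about to return, and the test then asserts both the error type and its exact message. A condensed sketch of just the injection step (sync-point name taken from the diff above; header path per the RocksDB source tree):

    #include <atomic>
    #include "rocksdb/status.h"
    #include "test_util/sync_point.h"

    std::atomic<int> calls(0);
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
        "SpecialEnv::SStableFile::RangeSync", [&](void* arg) {
          if (calls.fetch_add(1) == 0) {
            // arg points at the Status the fake Env will return; overwrite
            // it once to simulate a transient range-sync failure.
            *static_cast<ROCKSDB_NAMESPACE::Status*>(arg) =
                ROCKSDB_NAMESPACE::Status::IOError("range sync dummy error");
          }
        });
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();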
@@ -298,7 +306,9 @@ TEST_F(DBIOFailureTest, FlushSstRangeSyncError) {
   ASSERT_OK(Put(1, "foo3_2", rnd_str));
   ASSERT_OK(Put(1, "foo3_3", rnd_str));
   ASSERT_OK(Put(1, "foo4", "bar"));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  Status s = dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as flush failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -328,7 +338,6 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) {
   options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

   Random rnd(301);
   std::string rnd_str =
@@ -342,21 +351,22 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) {
   ASSERT_OK(Put(1, "foo1_1", rnd_str));
   ASSERT_OK(Put(1, "foo1_2", rnd_str));
   ASSERT_OK(Put(1, "foo1_3", rnd_str));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo3_1", rnd_str));
   ASSERT_OK(Put(1, "foo3_2", rnd_str));
   ASSERT_OK(Put(1, "foo3_3", rnd_str));
   ASSERT_OK(Put(1, "foo4", "bar"));
-  Flush(1);
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  ASSERT_OK(Flush(1));
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[1]));

+  const char* io_error_msg = "range sync dummy error";
   std::atomic<int> range_sync_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::RangeSync", [&](void* arg) {
         if (range_sync_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("range sync dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
@@ -365,7 +375,9 @@ TEST_F(DBIOFailureTest, CompactSstRangeSyncError) {
       {
           {"disable_auto_compactions", "false"},
       }));
-  dbfull()->TEST_WaitForCompact();
+  Status s = dbfull()->TEST_WaitForCompact();
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as flush failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -389,13 +401,14 @@ TEST_F(DBIOFailureTest, FlushSstCloseError) {

   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

+  const char* io_error_msg = "close dummy error";
   std::atomic<int> close_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::Close", [&](void* arg) {
         if (close_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("close dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });

@@ -404,7 +417,9 @@ TEST_F(DBIOFailureTest, FlushSstCloseError) {
   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo1", "bar1"));
   ASSERT_OK(Put(1, "foo", "bar2"));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  Status s = dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as flush failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -429,25 +444,25 @@ TEST_F(DBIOFailureTest, CompactionSstCloseError) {

   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar2"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar3"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush(1));
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

+  const char* io_error_msg = "close dummy error";
   std::atomic<int> close_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::Close", [&](void* arg) {
         if (close_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("close dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });

@@ -456,7 +471,9 @@ TEST_F(DBIOFailureTest, CompactionSstCloseError) {
      {
          {"disable_auto_compactions", "false"},
      }));
-  dbfull()->TEST_WaitForCompact();
+  Status s = dbfull()->TEST_WaitForCompact();
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as compaction failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -480,13 +497,14 @@ TEST_F(DBIOFailureTest, FlushSstSyncError) {

   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

+  const char* io_error_msg = "sync dummy error";
   std::atomic<int> sync_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::Sync", [&](void* arg) {
         if (sync_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("sync dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });

@@ -495,7 +513,9 @@ TEST_F(DBIOFailureTest, FlushSstSyncError) {
   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo1", "bar1"));
   ASSERT_OK(Put(1, "foo", "bar2"));
-  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  Status s = dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as flush failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));
@@ -521,25 +541,25 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) {

   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Status s;

   ASSERT_OK(Put(1, "foo", "bar"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar2"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(Put(1, "foo", "bar3"));
   ASSERT_OK(Put(1, "foo2", "bar"));
-  Flush(1);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush(1));
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

+  const char* io_error_msg = "sync dummy error";
   std::atomic<int> sync_called(0);
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
       "SpecialEnv::SStableFile::Sync", [&](void* arg) {
         if (sync_called.fetch_add(1) == 0) {
           Status* st = static_cast<Status*>(arg);
-          *st = Status::IOError("close dummy error");
+          *st = Status::IOError(io_error_msg);
         }
       });

@@ -548,7 +568,9 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) {
      {
          {"disable_auto_compactions", "false"},
      }));
-  dbfull()->TEST_WaitForCompact();
+  Status s = dbfull()->TEST_WaitForCompact();
+  ASSERT_TRUE(s.IsIOError());
+  ASSERT_STREQ(s.getState(), io_error_msg);

   // Following writes should fail as compaction failed.
   ASSERT_NOK(Put(1, "foo2", "bar3"));

db/db_sst_test.cc
@@ -98,7 +98,7 @@ TEST_F(DBSSTTest, SSTsWithLdbSuffixHandling) {
   for (int i = 0; i < 10; ++i) {
     GenerateNewFile(&rnd, &key_id, false);
   }
-  Flush();
+  ASSERT_OK(Flush());
   Close();
   int const num_files = GetSstFileCount(dbname_);
   ASSERT_GT(num_files, 0);
@@ -393,7 +393,7 @@ TEST_F(DBSSTTest, RateLimitedDelete) {

   WriteOptions wo;
   wo.disableWAL = true;
-  ASSERT_OK(TryReopen(options));
+  Reopen(options);
   // Create 4 files in L0
   for (char v = 'a'; v <= 'd'; v++) {
     ASSERT_OK(Put("Key2", DummyString(1024, v), wo));
@@ -540,7 +540,7 @@ TEST_P(DBWALTestWithParam, WALTrashCleanupOnOpen) {
   auto sfm = static_cast<SstFileManagerImpl*>(options.sst_file_manager.get());
   sfm->delete_scheduler()->SetMaxTrashDBRatio(3.1);

-  ASSERT_OK(TryReopen(options));
+  Reopen(options);

   // Create 4 files in L0
   for (char v = 'a'; v <= 'd'; v++) {
@@ -567,11 +567,11 @@ TEST_P(DBWALTestWithParam, WALTrashCleanupOnOpen) {
   if (!wal_dir_same_as_dbname_) {
     // Forcibly create some trash log files
     std::unique_ptr<WritableFile> result;
-    env->NewWritableFile(options.wal_dir + "/1000.log.trash", &result,
-                         EnvOptions());
+    ASSERT_OK(env->NewWritableFile(options.wal_dir + "/1000.log.trash", &result,
+                                   EnvOptions()));
     result.reset();
   }
-  env->GetChildren(options.wal_dir, &filenames);
+  ASSERT_OK(env->GetChildren(options.wal_dir, &filenames));
   for (const std::string& fname : filenames) {
     if (fname.find(".log.trash") != std::string::npos) {
       trash_log_count++;
@@ -580,11 +580,11 @@ TEST_P(DBWALTestWithParam, WALTrashCleanupOnOpen) {
   ASSERT_GE(trash_log_count, 1);

   env->set_fake_log_delete(false);
-  ASSERT_OK(TryReopen(options));
+  Reopen(options);

   filenames.clear();
   trash_log_count = 0;
-  env->GetChildren(options.wal_dir, &filenames);
+  ASSERT_OK(env->GetChildren(options.wal_dir, &filenames));
   for (const std::string& fname : filenames) {
     if (fname.find(".log.trash") != std::string::npos) {
       trash_log_count++;
@@ -614,7 +614,7 @@ TEST_F(DBSSTTest, OpenDBWithExistingTrash) {
   ASSERT_OK(WriteStringToFile(env_, "abc", dbname_ + "/" + "003.sst.trash"));

   // Reopen the DB and verify that it deletes existing trash files
-  ASSERT_OK(TryReopen(options));
+  Reopen(options);
   sfm->WaitForEmptyTrash();
   ASSERT_NOK(env_->FileExists(dbname_ + "/" + "001.sst.trash"));
   ASSERT_NOK(env_->FileExists(dbname_ + "/" + "002.sst.trash"));
@@ -872,10 +872,12 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
   ASSERT_OK(Flush());

   // OK, now trigger a manual compaction
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_TRUE(dbfull()
+                  ->CompactRange(CompactRangeOptions(), nullptr, nullptr)
+                  .IsCompactionTooLarge());

   // Wait for manual compaction to get scheduled and finish
-  dbfull()->TEST_WaitForCompact(true);
+  ASSERT_OK(dbfull()->TEST_WaitForCompact(true));

   ASSERT_EQ(sfm->GetCompactionsReservedSize(), 0);
   // Make sure the stat is bumped
@@ -885,10 +887,13 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {

   // Now make sure CompactFiles also gets cancelled
   auto l0_files = collector->GetFlushedFiles();
-  dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), l0_files, 0);
+  ASSERT_TRUE(
+      dbfull()
+          ->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), l0_files, 0)
+          .IsCompactionTooLarge());

   // Wait for manual compaction to get scheduled and finish
-  dbfull()->TEST_WaitForCompact(true);
+  ASSERT_OK(dbfull()->TEST_WaitForCompact(true));

   ASSERT_EQ(dbfull()->immutable_db_options().statistics.get()->getTickerCount(
                 COMPACTION_CANCELLED),
@@ -903,8 +908,9 @@ TEST_F(DBSSTTest, CancellingManualCompactionsWorks) {
       "CompactFilesImpl:End", [&](void* /*arg*/) { completed_compactions++; });

   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
-  dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), l0_files, 0);
-  dbfull()->TEST_WaitForCompact(true);
+  ASSERT_OK(dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(),
+                                   l0_files, 0));
+  ASSERT_OK(dbfull()->TEST_WaitForCompact(true));

   ASSERT_EQ(sfm->GetCompactionsReservedSize(), 0);
   ASSERT_GT(completed_compactions, 0);
@@ -1008,7 +1014,7 @@ TEST_F(DBSSTTest, OpenDBWithInfiniteMaxOpenFiles) {
   CompactRangeOptions compact_options;
   compact_options.change_level = true;
   compact_options.target_level = 2;
-  db_->CompactRange(compact_options, nullptr, nullptr);
+  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));

   // Create 12 Files in L0
   for (int i = 0; i < 12; i++) {
@@ -1060,7 +1066,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
       std::string val = "val_file_" + ToString(i);
       ASSERT_OK(Put(Key(j), val));
     }
-    Flush();
+    ASSERT_OK(Flush());
   }
   ASSERT_EQ("5", FilesPerLevel(0));

@@ -1084,6 +1090,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {

   // hold current version
   std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
+  ASSERT_OK(iter1->status());

   // Compact 5 files into 1 file in L0
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@@ -1107,12 +1114,13 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {

   // hold current version
   std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
+  ASSERT_OK(iter2->status());

   // Delete all keys and compact, this will delete all live files
   for (int i = 0; i < 10; i++) {
     ASSERT_OK(Delete(Key(i)));
   }
-  Flush();
+  ASSERT_OK(Flush());
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ("", FilesPerLevel(0));

@@ -1126,6 +1134,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
   // Total SST files = 6 (5 original files + compacted file)
   ASSERT_EQ(total_sst_files_size, 6 * single_file_size);

+  ASSERT_OK(iter1->status());
   iter1.reset();
   ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
                                        &total_sst_files_size));
@@ -1133,6 +1142,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSize) {
   // Total SST files = 1 (compacted file)
   ASSERT_EQ(total_sst_files_size, 1 * single_file_size);

+  ASSERT_OK(iter2->status());
   iter2.reset();
   ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
                                        &total_sst_files_size));
@@ -1151,7 +1161,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {
   // Generate 5 files in L0
   for (int i = 0; i < 5; i++) {
     ASSERT_OK(Put(Key(i), "val"));
-    Flush();
+    ASSERT_OK(Flush());
   }
   ASSERT_EQ("5", FilesPerLevel(0));

@@ -1176,6 +1186,7 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {

   // hold current version
   std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
+  ASSERT_OK(iter1->status());

   // Compaction will do trivial move from L0 to L1
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@@ -1199,12 +1210,13 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {

   // hold current version
   std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
+  ASSERT_OK(iter2->status());

   // Delete all keys and compact, this will delete all live files
   for (int i = 0; i < 5; i++) {
     ASSERT_OK(Delete(Key(i)));
   }
-  Flush();
+  ASSERT_OK(Flush());
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ("", FilesPerLevel(0));

@@ -1218,7 +1230,9 @@ TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {
   // Total SST files = 5 (used in 2 version)
   ASSERT_EQ(total_sst_files_size, 5 * single_file_size);

+  ASSERT_OK(iter1->status());
   iter1.reset();
+  ASSERT_OK(iter2->status());
   iter2.reset();

   ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",

db/db_tailing_iter_test.cc
@@ -31,6 +31,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorSingle) {
   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
   iter->SeekToFirst();
   ASSERT_TRUE(!iter->Valid());
+  ASSERT_OK(iter->status());

   // add a record and check that iter can see it
   ASSERT_OK(db_->Put(WriteOptions(), "mirko", "fodor"));
@@ -48,6 +49,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorKeepAdding) {
   read_options.tailing = true;

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   std::string value(1024, 'a');

   const int num_records = 10000;
@@ -70,7 +72,9 @@ TEST_F(DBTestTailingIterator, TailingIteratorSeekToNext) {
   read_options.tailing = true;

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   std::unique_ptr<Iterator> itern(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(itern->status());
   std::string value(1024, 'a');

   const int num_records = 1000;
@@ -138,8 +142,11 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
   Slice keyu(bufe, 20);
   read_options.iterate_upper_bound = &keyu;
   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   std::unique_ptr<Iterator> itern(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(itern->status());
   std::unique_ptr<Iterator> iterh(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iterh->status());
   std::string value(1024, 'a');
   bool file_iters_deleted = false;
   bool file_iters_renewed_null = false;
@@ -225,6 +232,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
   read_options.read_tier = kBlockCacheTier;
   std::unique_ptr<Iterator> iteri(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iteri->status());
   char buf5[32];
   snprintf(buf5, sizeof(buf5), "00a0%016d", (num_records / 2) * 5 - 2);
   Slice target1(buf5, 20);
@@ -236,6 +244,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
   options.table_factory.reset(NewBlockBasedTableFactory());
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
   iter.reset(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   for (int i = 2 * num_records; i > 0; --i) {
     char buf1[32];
     char buf2[32];
@@ -262,6 +271,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorDeletes) {
   read_options.tailing = true;

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());

   // write a single record, read it using the iterator, then delete it
   ASSERT_OK(Put(1, "0test", "test"));
@@ -309,6 +319,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorPrefixSeek) {
   CreateAndReopenWithCF({"pikachu"}, options);

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(iter->status());
   ASSERT_OK(Put(1, "0101", "test"));

   ASSERT_OK(Flush(1));
@@ -339,6 +350,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorIncomplete) {
   ASSERT_OK(db_->Put(WriteOptions(), key, value));

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
+  ASSERT_OK(iter->status());
   iter->SeekToFirst();
   // we either see the entry or it's not in cache
   ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete());
@@ -369,6 +381,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorSeekToSame) {
   }

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
+  ASSERT_OK(iter->status());
   // Seek to 00001. We expect to find 00002.
   std::string start_key = "00001";
   iter->Seek(start_key);
@@ -404,6 +417,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) {
   ASSERT_OK(Put(1, "21", "21"));

   std::unique_ptr<Iterator> it(db_->NewIterator(read_options, handles_[1]));
+  ASSERT_OK(it->status());
   it->Seek("12");
   ASSERT_TRUE(it->Valid());
   ASSERT_EQ("12", it->key().ToString());
@@ -479,6 +493,8 @@ TEST_F(DBTestTailingIterator, TailingIteratorGap) {
   it->Next();
   ASSERT_TRUE(it->Valid());
   ASSERT_EQ("40", it->key().ToString());
+
+  ASSERT_OK(it->status());
 }

 TEST_F(DBTestTailingIterator, SeekWithUpperBoundBug) {
@@ -497,6 +513,7 @@ TEST_F(DBTestTailingIterator, SeekWithUpperBoundBug) {
   ASSERT_OK(Flush());

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
+  ASSERT_OK(iter->status());

   iter->Seek("aa");
   ASSERT_TRUE(iter->Valid());
@@ -519,6 +536,7 @@ TEST_F(DBTestTailingIterator, SeekToFirstWithUpperBoundBug) {
   ASSERT_OK(Flush());

   std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
+  ASSERT_OK(iter->status());

   iter->SeekToFirst();
   ASSERT_TRUE(iter->Valid());
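
The additions in this file are all one pattern: under ASSERT_STATUS_CHECKED, an iterator's status must be consumed even when the iterator is only created or found invalid. A minimal sketch (the open `db` handle is assumed):

    ROCKSDB_NAMESPACE::ReadOptions ro;
    ro.tailing = true;  // tailing iterators also see writes made after creation
    std::unique_ptr<ROCKSDB_NAMESPACE::Iterator> it(db->NewIterator(ro));
    ASSERT_OK(it->status());  // consume the creation status
    for (it->SeekToFirst(); it->Valid(); it->Next()) {
      // ... use it->key() / it->value() ...
    }
    ASSERT_OK(it->status());  // and the final status before destruction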

117	db/db_test.cc
@ -1337,17 +1337,19 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
  SizeApproximationOptions size_approx_options;
  size_approx_options.include_memtabtles = true;
  size_approx_options.include_files = true;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
  ASSERT_GT(size, 6000);
  ASSERT_LT(size, 204800);
  // Zero if not including mem table
  db_->GetApproximateSizes(&r, 1, &size);
  ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size));
  ASSERT_EQ(size, 0);

  start = Key(500);
  end = Key(600);
  r = Range(start, end);
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
  ASSERT_EQ(size, 0);

  for (int i = 0; i < N; i++) {
@ -1357,13 +1359,15 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
  start = Key(500);
  end = Key(600);
  r = Range(start, end);
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
  ASSERT_EQ(size, 0);

  start = Key(100);
  end = Key(1020);
  r = Range(start, end);
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
  ASSERT_GT(size, 6000);

  options.max_write_buffer_number = 8;
@ -1389,29 +1393,32 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
  start = Key(100);
  end = Key(300);
  r = Range(start, end);
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
  ASSERT_EQ(size, 0);

  start = Key(1050);
  end = Key(1080);
  r = Range(start, end);
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
  ASSERT_GT(size, 6000);

  start = Key(2100);
  end = Key(2300);
  r = Range(start, end);
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
  ASSERT_EQ(size, 0);

  start = Key(1050);
  end = Key(1080);
  r = Range(start, end);
  uint64_t size_with_mt, size_without_mt;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                           &size_with_mt);
  ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                                     &size_with_mt));
  ASSERT_GT(size_with_mt, 6000);
  db_->GetApproximateSizes(&r, 1, &size_without_mt);
  ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size_without_mt));
  ASSERT_EQ(size_without_mt, 0);

  Flush();
@ -1423,15 +1430,16 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
  start = Key(1050);
  end = Key(1080);
  r = Range(start, end);
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                           &size_with_mt);
  db_->GetApproximateSizes(&r, 1, &size_without_mt);
  ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                                     &size_with_mt));
  ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size_without_mt));
  ASSERT_GT(size_with_mt, size_without_mt);
  ASSERT_GT(size_without_mt, 6000);

  // Check that include_memtabtles flag works as expected
  size_approx_options.include_memtabtles = false;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
  ASSERT_EQ(size, size_without_mt);

  // Check that files_size_error_margin works as expected, when the heuristic
@ -1440,10 +1448,12 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
  end = Key(1000 + N - 2);
  r = Range(start, end);
  size_approx_options.files_size_error_margin = -1.0;  // disabled
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size));
  uint64_t size2;
  size_approx_options.files_size_error_margin = 0.5;  // enabled, but not used
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
  ASSERT_OK(
      db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2));
  ASSERT_EQ(size, size2);
}

@ -1494,14 +1504,16 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {

  // Get the precise size without any approximation heuristic
  uint64_t size;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size);
  ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                                     &size));
  ASSERT_NE(size, 0);

  // Get the size with an approximation heuristic
  uint64_t size2;
  const double error_margin = 0.2;
  size_approx_options.files_size_error_margin = error_margin;
  db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1, &size2);
  ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf, &r, 1,
                                     &size2));
  ASSERT_LT(size2, size * (1 + error_margin));
  ASSERT_GT(size2, size * (1 - error_margin));
}
@ -1517,7 +1529,7 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {
      const std::string end = Key(i + 11);  // overlap by 1 key
      const Range r(start, end);
      uint64_t size;
      db_->GetApproximateSizes(&r, 1, &size);
      ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size));
      ASSERT_LE(size, 11 * 100);
    }
}
@ -1585,9 +1597,12 @@ TEST_F(DBTest, ApproximateSizes) {
    DestroyAndReopen(options);
    CreateAndReopenWithCF({"pikachu"}, options);

    ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
    uint64_t size;
    ASSERT_OK(Size("", "xyz", 1, &size));
    ASSERT_TRUE(Between(size, 0, 0));
    ReopenWithColumnFamilies({"default", "pikachu"}, options);
    ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0));
    ASSERT_OK(Size("", "xyz", 1, &size));
    ASSERT_TRUE(Between(size, 0, 0));

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
@ -1600,7 +1615,8 @@ TEST_F(DBTest, ApproximateSizes) {
    }

    // 0 because GetApproximateSizes() does not account for memtable space
    ASSERT_TRUE(Between(Size("", Key(50), 1), 0, 0));
    ASSERT_OK(Size("", Key(50), 1, &size));
    ASSERT_TRUE(Between(size, 0, 0));

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
@ -1608,14 +1624,17 @@ TEST_F(DBTest, ApproximateSizes) {

      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        for (int i = 0; i < N; i += 10) {
          ASSERT_TRUE(Between(Size("", Key(i), 1), S1 * i, S2 * i));
          ASSERT_TRUE(Between(Size("", Key(i) + ".suffix", 1), S1 * (i + 1),
                              S2 * (i + 1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i + 10), 1), S1 * 10, S2 * 10));
          ASSERT_OK(Size("", Key(i), 1, &size));
          ASSERT_TRUE(Between(size, S1 * i, S2 * i));
          ASSERT_OK(Size("", Key(i) + ".suffix", 1, &size));
          ASSERT_TRUE(Between(size, S1 * (i + 1), S2 * (i + 1)));
          ASSERT_OK(Size(Key(i), Key(i + 10), 1, &size));
          ASSERT_TRUE(Between(size, S1 * 10, S2 * 10));
        }
        ASSERT_TRUE(Between(Size("", Key(50), 1), S1 * 50, S2 * 50));
        ASSERT_TRUE(
            Between(Size("", Key(50) + ".suffix", 1), S1 * 50, S2 * 50));
        ASSERT_OK(Size("", Key(50), 1, &size));
        ASSERT_TRUE(Between(size, S1 * 50, S2 * 50));
        ASSERT_OK(Size("", Key(50) + ".suffix", 1, &size));
        ASSERT_TRUE(Between(size, S1 * 50, S2 * 50));

        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
@ -1650,21 +1669,32 @@ TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
    ASSERT_OK(Put(1, Key(7), rnd.RandomString(10000)));

    // Check sizes across recovery by reopening a few times
    uint64_t size;
    for (int run = 0; run < 3; run++) {
      ReopenWithColumnFamilies({"default", "pikachu"}, options);

      ASSERT_TRUE(Between(Size("", Key(0), 1), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1), 1), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2), 1), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3), 1), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4), 1), 130000, 131000));
      ASSERT_TRUE(Between(Size("", Key(5), 1), 230000, 232000));
      ASSERT_TRUE(Between(Size("", Key(6), 1), 240000, 242000));
      ASSERT_OK(Size("", Key(0), 1, &size));
      ASSERT_TRUE(Between(size, 0, 0));
      ASSERT_OK(Size("", Key(1), 1, &size));
      ASSERT_TRUE(Between(size, 10000, 11000));
      ASSERT_OK(Size("", Key(2), 1, &size));
      ASSERT_TRUE(Between(size, 20000, 21000));
      ASSERT_OK(Size("", Key(3), 1, &size));
      ASSERT_TRUE(Between(size, 120000, 121000));
      ASSERT_OK(Size("", Key(4), 1, &size));
      ASSERT_TRUE(Between(size, 130000, 131000));
      ASSERT_OK(Size("", Key(5), 1, &size));
      ASSERT_TRUE(Between(size, 230000, 232000));
      ASSERT_OK(Size("", Key(6), 1, &size));
      ASSERT_TRUE(Between(size, 240000, 242000));
      // Ensure some overhead is accounted for, even without including all
      ASSERT_TRUE(Between(Size("", Key(7), 1), 540500, 545000));
      ASSERT_TRUE(Between(Size("", Key(8), 1), 550500, 555000));
      ASSERT_OK(Size("", Key(7), 1, &size));
      ASSERT_TRUE(Between(size, 540500, 545000));
      ASSERT_OK(Size("", Key(8), 1, &size));
      ASSERT_TRUE(Between(size, 550500, 555000));

      ASSERT_TRUE(Between(Size(Key(3), Key(5), 1), 110100, 111000));
      ASSERT_OK(Size(Key(3), Key(5), 1, &size));
      ASSERT_TRUE(Between(size, 110100, 111000));

      dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
    }
@ -1748,6 +1778,7 @@ TEST_F(DBTest, Snapshot) {
TEST_F(DBTest, HiddenValuesAreRemoved) {
  anon::OptionsOverride options_override;
  options_override.skip_policy = kSkipNoSnapshot;
  uint64_t size;
  do {
    Options options = CurrentOptions(options_override);
    CreateAndReopenWithCF({"pikachu"}, options);
@ -1765,7 +1796,8 @@ TEST_F(DBTest, HiddenValuesAreRemoved) {
    ASSERT_GT(NumTableFilesAtLevel(0, 1), 0);

    ASSERT_EQ(big, Get(1, "foo", snapshot));
    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 50000, 60000));
    ASSERT_OK(Size("", "pastfoo", 1, &size));
    ASSERT_TRUE(Between(size, 50000, 60000));
    db_->ReleaseSnapshot(snapshot);
    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny, " + big + " ]");
    Slice x("x");
@ -1776,7 +1808,8 @@ TEST_F(DBTest, HiddenValuesAreRemoved) {
    dbfull()->TEST_CompactRange(1, nullptr, &x, handles_[1]);
    ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");

    ASSERT_TRUE(Between(Size("", "pastfoo", 1), 0, 1000));
    ASSERT_OK(Size("", "pastfoo", 1, &size));
    ASSERT_TRUE(Between(size, 0, 1000));
    // ApproximateOffsetOf() is not yet implemented in plain table format,
    // which is used by Size().
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
@ -1128,27 +1128,48 @@ std::string DBTestBase::FilesPerLevel(int cf) {
#endif  // !ROCKSDB_LITE

size_t DBTestBase::CountFiles() {
  size_t count = 0;
  std::vector<std::string> files;
  EXPECT_OK(env_->GetChildren(dbname_, &files));

  std::vector<std::string> logfiles;
  if (dbname_ != last_options_.wal_dir) {
    Status s = env_->GetChildren(last_options_.wal_dir, &logfiles);
    EXPECT_TRUE(s.ok() || s.IsNotFound());
  if (env_->GetChildren(dbname_, &files).ok()) {
    count += files.size();
  }

  return files.size() + logfiles.size();
  if (dbname_ != last_options_.wal_dir) {
    if (env_->GetChildren(last_options_.wal_dir, &files).ok()) {
      count += files.size();
    }
  }

  return count;
};

Status DBTestBase::CountFiles(size_t* count) {
  std::vector<std::string> files;
  Status s = env_->GetChildren(dbname_, &files);
  if (!s.ok()) {
    return s;
  }
  size_t files_count = files.size();

  if (dbname_ != last_options_.wal_dir) {
    s = env_->GetChildren(last_options_.wal_dir, &files);
    if (!s.ok()) {
      return s;
    }
    *count = files_count + files.size();
  }

  return Status::OK();
}

uint64_t DBTestBase::Size(const Slice& start, const Slice& limit, int cf) {
Status DBTestBase::Size(const Slice& start, const Slice& limit, int cf,
                        uint64_t* size) {
  Range r(start, limit);
  uint64_t size;
  if (cf == 0) {
    db_->GetApproximateSizes(&r, 1, &size);
    return db_->GetApproximateSizes(&r, 1, size);
  } else {
    db_->GetApproximateSizes(handles_[1], &r, 1, &size);
    return db_->GetApproximateSizes(handles_[1], &r, 1, size);
  }
  return size;
}

void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit,
@ -1070,7 +1070,13 @@ class DBTestBase : public testing::Test {

  size_t CountFiles();

  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0);
  Status CountFiles(size_t* count);

  Status Size(const Slice& start, const Slice& limit, uint64_t* size) {
    return Size(start, limit, 0, size);
  }

  Status Size(const Slice& start, const Slice& limit, int cf, uint64_t* size);

  void Compact(int cf, const Slice& start, const Slice& limit,
               uint32_t target_path_id);
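With the declarations above, test code switches from asserting on a returned size to asserting on the returned Status and reading the size through an out-parameter. A short sketch of the new calling convention (hypothetical test body, not from the diff):

  uint64_t size = 0;
  ASSERT_OK(Size("k1", "k9", &size));     // default column family overload
  ASSERT_OK(Size("k1", "k9", 1, &size));  // explicit column family index
  size_t file_count = 0;
  ASSERT_OK(CountFiles(&file_count));     // Status-returning counterpart
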
@ -270,7 +270,7 @@ TEST_F(DBBasicTestWithTimestamp, GetApproximateSizes) {
  ASSERT_EQ(range_sizes[1], size);

  // Zero if not including mem table
  db_->GetApproximateSizes(&r, 1, &size);
  ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size));
  ASSERT_EQ(size, 0);

  start = Key(500);
@ -213,17 +213,16 @@ void EventHelpers::NotifyOnErrorRecoveryCompleted(
    const std::vector<std::shared_ptr<EventListener>>& listeners,
    Status old_bg_error, InstrumentedMutex* db_mutex) {
#ifndef ROCKSDB_LITE
  if (listeners.size() == 0U) {
    return;
  }
  db_mutex->AssertHeld();
  // release lock while notifying events
  db_mutex->Unlock();
  for (auto& listener : listeners) {
    listener->OnErrorRecoveryCompleted(old_bg_error);
  if (listeners.size() > 0) {
    db_mutex->AssertHeld();
    // release lock while notifying events
    db_mutex->Unlock();
    for (auto& listener : listeners) {
      listener->OnErrorRecoveryCompleted(old_bg_error);
    }
    db_mutex->Lock();
  }
  old_bg_error.PermitUncheckedError();
  db_mutex->Lock();
#else
  (void)listeners;
  (void)old_bg_error;
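The restructured function above also ensures old_bg_error is marked as checked on every path, including the case with no listeners. For reference, the opt-out idiom used throughout this commit (a sketch, assuming some fallible call named SomeOperation()):

  // Under ASSERT_STATUS_CHECKED, destroying an unexamined Status aborts.
  rocksdb::Status s = SomeOperation();
  s.PermitUncheckedError();  // explicitly declare the result as ignorable
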
@ -98,11 +98,13 @@ Status DeleteScheduler::DeleteFile(const std::string& file_path,

  // Update the total trash size
  uint64_t trash_file_size = 0;
  Status ignored =
  IOStatus io_s =
      fs_->GetFileSize(trash_file, IOOptions(), &trash_file_size, nullptr);
  ignored.PermitUncheckedError();  //**TODO: What should we do if we failed to
                                   // get the file size?
  total_trash_size_.fetch_add(trash_file_size);
  if (io_s.ok()) {
    total_trash_size_.fetch_add(trash_file_size);
  }
  //**TODO: What should we do if we failed to
  // get the file size?

  // Add file to delete queue
  {
@ -199,9 +201,7 @@ Status DeleteScheduler::MarkAsTrash(const std::string& file_path,
    cnt++;
  }
  if (s.ok()) {
    //**TODO: What should we do if this returns an error?
    sst_file_manager_->OnMoveFile(file_path, *trash_file)
        .PermitUncheckedError();
    s = sst_file_manager_->OnMoveFile(file_path, *trash_file);
  }
  return s;
}
@ -158,7 +158,7 @@ bool SstFileManagerImpl::IsMaxAllowedSpaceReachedIncludingCompactions() {

bool SstFileManagerImpl::EnoughRoomForCompaction(
    ColumnFamilyData* cfd, const std::vector<CompactionInputFiles>& inputs,
    Status bg_error) {
    const Status& bg_error) {
  MutexLock l(&mu_);
  uint64_t size_added_by_compaction = 0;
  // First check if we even have the space to do the compaction
@ -183,7 +183,7 @@ bool SstFileManagerImpl::EnoughRoomForCompaction(
  // seen a NoSpace() error. This is tin order to contain a single potentially
  // misbehaving DB instance and prevent it from slowing down compactions of
  // other DB instances
  if (bg_error == Status::NoSpace() && CheckFreeSpace()) {
  if (bg_error.IsNoSpace() && CheckFreeSpace()) {
    auto fn =
        TableFileName(cfd->ioptions()->cf_paths, inputs[0][0]->fd.GetNumber(),
                      inputs[0][0]->fd.GetPathId());
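The switch from `bg_error == Status::NoSpace()` to `bg_error.IsNoSpace()` avoids materializing a temporary Status that would itself count as unchecked, and tests the condition through the dedicated predicate instead of an equality comparison. An illustrative sketch (not from the commit):

  // Hypothetical helper: IsNoSpace() inspects the status in place, so no
  // second Status object is created that would need PermitUncheckedError().
  bool IsOutOfSpace(const rocksdb::Status& bg_error) {
    return bg_error.IsNoSpace();
  }
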
@ -22,7 +22,7 @@ namespace ROCKSDB_NAMESPACE {
class Env;
class Logger;

// SstFileManager is used to track SST files in the DB and control there
// SstFileManager is used to track SST files in the DB and control their
// deletion rate.
// All SstFileManager public functions are thread-safe.
class SstFileManagerImpl : public SstFileManager {
@ -77,7 +77,7 @@ class SstFileManagerImpl : public SstFileManager {
  // the full compaction size).
  bool EnoughRoomForCompaction(ColumnFamilyData* cfd,
                               const std::vector<CompactionInputFiles>& inputs,
                               Status bg_error);
                               const Status& bg_error);

  // Bookkeeping so total_file_sizes_ goes back to normal after compaction
  // finishes
@ -496,13 +496,13 @@ extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf(
extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes(
    rocksdb_t* db, int num_ranges, const char* const* range_start_key,
    const size_t* range_start_key_len, const char* const* range_limit_key,
    const size_t* range_limit_key_len, uint64_t* sizes);
    const size_t* range_limit_key_len, uint64_t* sizes, char** errptr);

extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf(
    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
    int num_ranges, const char* const* range_start_key,
    const size_t* range_start_key_len, const char* const* range_limit_key,
    const size_t* range_limit_key_len, uint64_t* sizes, char** errptr);

extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db,
                                                      const char* start_key,
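Callers of the C API now receive failures through the same errptr convention as other rocksdb_* functions: on error, *errptr is set to a heap-allocated message that the caller must free. A sketch of the revised call (assuming an already-opened rocksdb_t* db):

  const char* start = "a";
  const char* limit = "z";
  size_t start_len = 1, limit_len = 1;
  uint64_t size = 0;
  char* err = NULL;
  rocksdb_approximate_sizes(db, 1, &start, &start_len, &limit, &limit_len,
                            &size, &err);
  if (err != NULL) {
    fprintf(stderr, "approximate sizes failed: %s\n", err);
    free(err);  // error strings from the C API are heap-allocated
  }
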
@ -1027,20 +1027,22 @@ class DB {
  // Simpler versions of the GetApproximateSizes() method above.
  // The include_flags argumenbt must of type DB::SizeApproximationFlags
  // and can not be NONE.
  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
                                   const Range* ranges, int n, uint64_t* sizes,
                                   uint8_t include_flags = INCLUDE_FILES) {
  virtual Status GetApproximateSizes(ColumnFamilyHandle* column_family,
                                     const Range* ranges, int n,
                                     uint64_t* sizes,
                                     uint8_t include_flags = INCLUDE_FILES) {
    SizeApproximationOptions options;
    options.include_memtabtles =
        (include_flags & SizeApproximationFlags::INCLUDE_MEMTABLES) != 0;
    options.include_files =
        (include_flags & SizeApproximationFlags::INCLUDE_FILES) != 0;
    Status s = GetApproximateSizes(options, column_family, ranges, n, sizes);
    s.PermitUncheckedError();
    return GetApproximateSizes(options, column_family, ranges, n, sizes);
  }
  virtual void GetApproximateSizes(const Range* ranges, int n, uint64_t* sizes,
                                   uint8_t include_flags = INCLUDE_FILES) {
    GetApproximateSizes(DefaultColumnFamily(), ranges, n, sizes, include_flags);
  virtual Status GetApproximateSizes(const Range* ranges, int n,
                                     uint64_t* sizes,
                                     uint8_t include_flags = INCLUDE_FILES) {
    return GetApproximateSizes(DefaultColumnFamily(), ranges, n, sizes,
                               include_flags);
  }

  // The method is similar to GetApproximateSizes, except it
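For C++ callers, the simpler overloads now propagate the Status from the options-based overload instead of swallowing it with PermitUncheckedError(). A sketch of checking the result (illustrative; `db` is any open rocksdb::DB*):

  rocksdb::Range r("a", "z");
  uint64_t size = 0;
  rocksdb::Status s = db->GetApproximateSizes(&r, 1, &size);
  if (!s.ok()) {
    // The approximation failed; `size` should not be trusted.
  }
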
@ -1452,20 +1452,23 @@ void BlockBasedTableBuilder::WriteIndexBlock(
    }
  }
  // If there are more index partitions, finish them and write them out
  Status s = index_builder_status;
  while (ok() && s.IsIncomplete()) {
    s = rep_->index_builder->Finish(&index_blocks, *index_block_handle);
    if (!s.ok() && !s.IsIncomplete()) {
      rep_->SetStatus(s);
      return;
  if (index_builder_status.IsIncomplete()) {
    Status s = Status::Incomplete();
    while (ok() && s.IsIncomplete()) {
      s = rep_->index_builder->Finish(&index_blocks, *index_block_handle);
      if (!s.ok() && !s.IsIncomplete()) {
        rep_->SetStatus(s);
        return;
      }
      if (rep_->table_options.enable_index_compression) {
        WriteBlock(index_blocks.index_block_contents, index_block_handle,
                   false);
      } else {
        WriteRawBlock(index_blocks.index_block_contents, kNoCompression,
                      index_block_handle);
      }
      // The last index_block_handle will be for the partition index block
    }
    if (rep_->table_options.enable_index_compression) {
      WriteBlock(index_blocks.index_block_contents, index_block_handle, false);
    } else {
      WriteRawBlock(index_blocks.index_block_contents, kNoCompression,
                    index_block_handle);
    }
    // The last index_block_handle will be for the partition index block
  }
}

@ -2493,14 +2493,12 @@ void ApproxSizeCommand::DoCommand() {
  Range ranges[1];
  ranges[0] = Range(start_key_, end_key_);
  uint64_t sizes[1];
  db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes);
  fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
  /* Weird that GetApproximateSizes() returns void, although documentation
   * says that it returns a Status object.
  if (!st.ok()) {
    exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
  Status s = db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes);
  if (!s.ok()) {
    exec_state_ = LDBCommandExecuteResult::Failed(s.ToString());
  } else {
    fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
  }
  */
}

// ----------------------------------------------------------------------------
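With this change, the ldb approxsize command reports a failure from GetApproximateSizes() instead of printing a possibly meaningless size. Invocation itself is unchanged (a sketch; confirm the exact flags with `ldb --help`):

  ldb --db=/path/to/db approxsize --from=k1 --to=k9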