Status check enforcement for timestamp_basic_test (#7454)
Summary:
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7454

Reviewed By: riversand963

Differential Revision: D23981719

Pulled By: jay-zhuang

fbshipit-source-id: 01073f73e54c17067b886c4a2f179b2804198399
This commit is contained in: parent 8115eb520d, commit 1bdaef7a06
Makefile
@@ -587,6 +587,8 @@ ifdef ASSERT_STATUS_CHECKED
   crc32c_test \
   dbformat_test \
   db_basic_test \
+  db_with_timestamp_basic_test \
+  db_with_timestamp_compaction_test \
   db_options_test \
   options_file_test \
   defer_test \
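Adding db_with_timestamp_basic_test and db_with_timestamp_compaction_test to this list means they are now built and run in the ASSERT_STATUS_CHECKED mode, in which a rocksdb::Status that is destroyed without ever being inspected aborts the run. The test changes below therefore wrap previously ignored return values in ASSERT_OK. A minimal sketch of the pattern, assuming a standard RocksDB build (the function name and literals are illustrative, not part of this change):

#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/write_batch.h"

// Sketch: every Status returned by the API is looked at (here via ok()) before
// it goes out of scope, which is what ASSERT_STATUS_CHECKED builds enforce.
void WriteWithCheckedStatuses(rocksdb::DB* db) {
  rocksdb::WriteBatch batch;
  rocksdb::Status s = batch.Put("a", "new_value");  // previously a bare batch.Put(...)
  assert(s.ok());
  s = batch.Put("b", "new_value");
  assert(s.ok());
  s = db->Write(rocksdb::WriteOptions(), &batch);
  assert(s.ok());
}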
@@ -1162,6 +1162,7 @@ Status DBImpl::CompactFilesImpl(

   Status status = compaction_job.Install(*c->mutable_cf_options());
   if (status.ok()) {
+    assert(compaction_job.io_status().ok());
     InstallSuperVersionAndScheduleWork(c->column_family_data(),
                                        &job_context->superversion_contexts[0],
                                        *c->mutable_cf_options());
@@ -468,8 +468,8 @@ TEST_F(DBBasicTestWithTimestamp, ReseekToNextUserKey) {
   {
     std::string ts_str = Timestamp(static_cast<uint64_t>(kNumKeys + 1), 0);
     WriteBatch batch(0, 0, kTimestampSize);
-    batch.Put("a", "new_value");
-    batch.Put("b", "new_value");
+    ASSERT_OK(batch.Put("a", "new_value"));
+    ASSERT_OK(batch.Put("b", "new_value"));
     s = batch.AssignTimestamp(ts_str);
     ASSERT_OK(s);
     s = db_->Write(write_opts, &batch);
@@ -1485,9 +1485,9 @@ TEST_P(DBBasicTestWithTimestampCompressionSettings, PutAndGetWithCompaction) {
       // at higher levels.
       CompactionOptions compact_opt;
       compact_opt.compression = kNoCompression;
-      db_->CompactFiles(compact_opt, handles_[cf],
+      ASSERT_OK(db_->CompactFiles(compact_opt, handles_[cf],
                         collector->GetFlushedFiles(),
-                        static_cast<int>(kNumTimestamps - i));
+                        static_cast<int>(kNumTimestamps - i)));
       collector->ClearFlushedFiles();
     }
   }
@@ -1576,7 +1576,7 @@ TEST_F(DBBasicTestWithTimestamp, BatchWriteAndMultiGet) {
         batch.Put(handles_[cf], Key1(j),
                   "value_" + std::to_string(j) + "_" + std::to_string(i)));
       }
-      batch.AssignTimestamp(write_ts);
+      ASSERT_OK(batch.AssignTimestamp(write_ts));
       ASSERT_OK(db_->Write(wopts, &batch));

       verify_records_func(i, handles_[cf]);
@@ -345,7 +345,7 @@ Status ErrorHandler::SetBGError(const IOStatus& bg_io_err,
   Status s;
   DBRecoverContext context;
   if (bg_io_err.GetDataLoss()) {
-    // FIrst, data loss is treated as unrecoverable error. So it can directly
+    // First, data loss is treated as unrecoverable error. So it can directly
     // overwrite any existing bg_error_.
     bool auto_recovery = false;
     Status bg_err(new_bg_io_err, Status::Severity::kUnrecoverableError);
@@ -727,7 +727,7 @@ void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
     if (r->props.num_entries > r->props.num_range_deletions) {
       assert(r->internal_comparator.Compare(key, Slice(r->last_key)) > 0);
     }
-#endif  // NDEBUG
+#endif  // !NDEBUG

     auto should_flush = r->flush_block_policy->Update(key, value);
     if (should_flush) {
@@ -1653,6 +1653,11 @@ Status BlockBasedTableBuilder::Finish() {
     r->pc_rep->write_queue.finish();
     r->pc_rep->write_thread->join();
     r->pc_rep->finished = true;
+#ifndef NDEBUG
+    for (const auto& br : r->pc_rep->block_rep_buf) {
+      assert(br.status.ok());
+    }
+#endif  // !NDEBUG
   } else {
     // To make sure properties block is able to keep the accurate size of index
     // block, we will finish writing all index entries first.
@@ -1999,7 +1999,6 @@ bool BlockBasedTable::PrefixMayMatch(
   }

   bool may_match = true;
-  Status s;

   // First, try check with full filter
   FilterBlockReader* const filter = rep_->filter.get();
@@ -2585,6 +2584,10 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
                               read_options, results[idx_in_batch], &first_biter,
                               statuses[idx_in_batch]);
           reusing_block = false;
+        } else {
+          // If handler is null and result is empty, then the status is never
+          // set, which should be the initial value: ok().
+          assert(statuses[idx_in_batch].ok());
         }
         biter = &first_biter;
         idx_in_batch++;
@@ -87,9 +87,9 @@ struct KeyContext {
 class MultiGetContext {
  public:
   // Limit the number of keys in a batch to this number. Benchmarks show that
-  // there is negligible benefit for batches exceeding this. Keeping this < 64
+  // there is negligible benefit for batches exceeding this. Keeping this < 32
   // simplifies iteration, as well as reduces the amount of stack allocations
-  // htat need to be performed
+  // that need to be performed
   static const int MAX_BATCH_SIZE = 32;

   MultiGetContext(autovector<KeyContext*, MAX_BATCH_SIZE>* sorted_keys,
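For context, MAX_BATCH_SIZE caps each internal MultiGetContext batch; it does not limit how many keys a caller may pass to the public MultiGet API. A hedged sketch of a caller, assuming the default column family (the helper name GetMany is illustrative only):

#include <string>
#include <vector>

#include "rocksdb/db.h"

// Sketch: the caller passes any number of keys; grouping into batches of at
// most MAX_BATCH_SIZE (32) keys happens inside RocksDB, not here.
std::vector<rocksdb::Status> GetMany(rocksdb::DB* db,
                                     const std::vector<rocksdb::Slice>& keys) {
  std::vector<std::string> values;
  return db->MultiGet(rocksdb::ReadOptions(), keys, &values);
}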