rocksdb: Replace ASSERT* with EXPECT* in functions that do not return void
Summary:
gtest does not use exceptions to fail a unit test, by design, and `ASSERT*` is implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1 ]]), so the existing code has to be fixed. This diff does that in a generic way, with no manual changes.

To detect every existing `ASSERT*` used in a function that does not return `void`, I changed the code to generate compile errors for such cases. In `util/testharness.h` I defined `EXPECT*` assertions the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then I executed:

```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
    build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```

After that I reverted the change to `ASSERT*` in `util/testharness.h` but kept the newly introduced `EXPECT*`, which behaves the same as `ASSERT*`. It will be deleted once we switch to gtest.

This diff is independent and contains manual changes only in `util/testharness.h`.

Test Plan:
Make sure all tests are passing.

```lang=bash
% USE_CLANG=1 make check
```

Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering

Reviewed By: meyering

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D33333
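To make the detection step concrete, here is a minimal, self-contained sketch of the mechanism (the macro and function names are illustrative, not the actual `util/testharness.h` code): an assertion whose failure path is a bare `return;` compiles only inside functions returning `void`, so temporarily giving `ASSERT*` that shape turns every misplaced call site into a compile error that the perl one-liner can then rewrite.

```lang=c++
#include <cstdio>

// Illustrative stand-in for an assertion macro whose failure path is a bare
// `return;` (like gtest's ASSERT_* and the temporarily redefined ASSERT_*
// described above).
#define MY_ASSERT_TRUE(c)                                                    \
  do {                                                                       \
    if (!(c)) {                                                              \
      std::fprintf(stderr, "assertion failed: %s\n", #c);                    \
      return; /* only legal where the enclosing function returns void */     \
    }                                                                        \
  } while (0)

void VoidHelper(int x) {
  MY_ASSERT_TRUE(x > 0);  // fine: the enclosing function returns void
}

int ValueHelper(int x) {
  // Un-commenting the next line reproduces the detection step: the compiler
  // rejects it with "return-statement with no value, in function returning
  // 'int'", which is the error the perl one-liner above keys off.
  // MY_ASSERT_TRUE(x > 0);
  return x;
}

int main() {
  VoidHelper(1);
  return ValueHelper(2) - 2;  // returns 0
}
```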
Parent: d4d42c02ea
Commit: 9fd6edf81c
@@ -115,7 +115,7 @@ class ColumnFamilyTest {
int GetProperty(int cf, std::string property) {
std::string value;
- ASSERT_TRUE(dbfull()->GetProperty(handles_[cf], property, &value));
+ EXPECT_TRUE(dbfull()->GetProperty(handles_[cf], property, &value));
return std::stoi(value);
}

@@ -274,7 +274,7 @@ class ColumnFamilyTest {
break;
}
}
- ASSERT_OK(s);
+ EXPECT_OK(s);
for (const auto& wal : wal_files) {
if (wal->Type() == kAliveLogFile) {
++ret;

@@ -33,7 +33,7 @@ class CompactionJobTest {
&write_controller_)),
shutting_down_(false),
mock_table_factory_(new mock::MockTableFactory()) {
- ASSERT_OK(env_->CreateDirIfMissing(dbname_));
+ EXPECT_OK(env_->CreateDirIfMissing(dbname_));
db_options_.db_paths.emplace_back(dbname_,
std::numeric_limits<uint64_t>::max());
NewDB();

@@ -41,8 +41,7 @@ class CompactionJobTest {
cf_options_.table_factory = mock_table_factory_;
column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);
- ASSERT_OK(versions_->Recover(column_families, false));
+ EXPECT_OK(versions_->Recover(column_families, false));
}
std::string GenerateFileName(uint64_t file_number) {

@@ -82,7 +81,7 @@ class CompactionJobTest {
}
uint64_t file_number = versions_->NewFileNumber();
- ASSERT_OK(mock_table_factory_->CreateMockTable(
+ EXPECT_OK(mock_table_factory_->CreateMockTable(
env_, GenerateFileName(file_number), std::move(contents)));
VersionEdit edit;

@@ -260,12 +260,12 @@ class ComparatorDBTest {
ComparatorDBTest() : env_(Env::Default()), db_(nullptr) {
comparator = BytewiseComparator();
dbname_ = test::TmpDir() + "/comparator_db_test";
- ASSERT_OK(DestroyDB(dbname_, last_options_));
+ EXPECT_OK(DestroyDB(dbname_, last_options_));
}
~ComparatorDBTest() {
delete db_;
- ASSERT_OK(DestroyDB(dbname_, last_options_));
+ EXPECT_OK(DestroyDB(dbname_, last_options_));
comparator = BytewiseComparator();
}

@@ -23,14 +23,14 @@ class CuckooTableDBTest {
public:
CuckooTableDBTest() : env_(Env::Default()) {
dbname_ = test::TmpDir() + "/cuckoo_table_db_test";
- ASSERT_OK(DestroyDB(dbname_, Options()));
+ EXPECT_OK(DestroyDB(dbname_, Options()));
db_ = nullptr;
Reopen();
}
~CuckooTableDBTest() {
delete db_;
- ASSERT_OK(DestroyDB(dbname_, Options()));
+ EXPECT_OK(DestroyDB(dbname_, Options()));
}
Options CurrentOptions() {

@@ -83,9 +83,8 @@ class CuckooTableDBTest {
int NumTableFilesAtLevel(int level) {
std::string property;
- ASSERT_TRUE(
- db_->GetProperty("rocksdb.num-files-at-level" + NumberToString(level),
- &property));
+ EXPECT_TRUE(db_->GetProperty(
+ "rocksdb.num-files-at-level" + NumberToString(level), &property));
return atoi(property.c_str());
}

@@ -464,7 +464,7 @@ class DBTest {
env_->SetBackgroundThreads(1, Env::HIGH);
dbname_ = test::TmpDir(env_) + "/db_test";
auto options = CurrentOptions();
- ASSERT_OK(DestroyDB(dbname_, options));
+ EXPECT_OK(DestroyDB(dbname_, options));
db_ = nullptr;
Reopen(options);
}

@@ -476,7 +476,7 @@ class DBTest {
options.db_paths.emplace_back(dbname_ + "_2", 0);
options.db_paths.emplace_back(dbname_ + "_3", 0);
options.db_paths.emplace_back(dbname_ + "_4", 0);
- ASSERT_OK(DestroyDB(dbname_, options));
+ EXPECT_OK(DestroyDB(dbname_, options));
delete env_;
}

@@ -747,7 +747,7 @@ class DBTest {
const std::vector<std::string>& cfs,
const std::vector<Options>& options) {
Close();
- ASSERT_EQ(cfs.size(), options.size());
+ EXPECT_EQ(cfs.size(), options.size());
std::vector<ColumnFamilyDescriptor> column_families;
for (size_t i = 0; i < cfs.size(); ++i) {
column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));

@@ -861,13 +861,13 @@ class DBTest {
uint64_t GetNumSnapshots() {
uint64_t int_num;
- ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.num-snapshots", &int_num));
+ EXPECT_TRUE(dbfull()->GetIntProperty("rocksdb.num-snapshots", &int_num));
return int_num;
}
uint64_t GetTimeOldestSnapshots() {
uint64_t int_num;
- ASSERT_TRUE(
+ EXPECT_TRUE(
dbfull()->GetIntProperty("rocksdb.oldest-snapshot-time", &int_num));
return int_num;
}

@@ -890,11 +890,11 @@ class DBTest {
// Check reverse iteration results are the reverse of forward results
unsigned int matched = 0;
for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
- ASSERT_LT(matched, forward.size());
- ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
+ EXPECT_LT(matched, forward.size());
+ EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
matched++;
}
- ASSERT_EQ(matched, forward.size());
+ EXPECT_EQ(matched, forward.size());
delete iter;
return result;

@@ -958,10 +958,10 @@ class DBTest {
std::string property;
if (cf == 0) {
// default cfd
- ASSERT_TRUE(db_->GetProperty(
+ EXPECT_TRUE(db_->GetProperty(
"rocksdb.num-files-at-level" + NumberToString(level), &property));
} else {
- ASSERT_TRUE(db_->GetProperty(
+ EXPECT_TRUE(db_->GetProperty(
handles_[cf], "rocksdb.num-files-at-level" + NumberToString(level),
&property));
}

@@ -1137,8 +1137,8 @@ class DBTest {
const SequenceNumber seq) {
unique_ptr<TransactionLogIterator> iter;
Status status = dbfull()->GetUpdatesSince(seq, &iter);
- ASSERT_OK(status);
- ASSERT_TRUE(iter->Valid());
+ EXPECT_OK(status);
+ EXPECT_TRUE(iter->Valid());
return std::move(iter);
}

@@ -3762,8 +3762,8 @@ class KeepFilterFactory : public CompactionFilterFactory {
virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
const CompactionFilter::Context& context) override {
if (check_context_) {
- ASSERT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
- ASSERT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
+ EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
+ EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
}
return std::unique_ptr<CompactionFilter>(new KeepFilter());
}

@@ -6409,11 +6409,11 @@ TEST(DBTest, CustomComparator) {
private:
static int ToNumber(const Slice& x) {
// Check that there are no extra characters.
- ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
+ EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
<< EscapeString(x);
int val;
char ignored;
- ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
+ EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
<< EscapeString(x);
return val;
}

@@ -7652,10 +7652,10 @@ SequenceNumber ReadRecords(
BatchResult res;
while (iter->Valid()) {
res = iter->GetBatch();
- ASSERT_TRUE(res.sequence > lastSequence);
+ EXPECT_TRUE(res.sequence > lastSequence);
++count;
lastSequence = res.sequence;
- ASSERT_OK(iter->status());
+ EXPECT_OK(iter->status());
iter->Next();
}
return res.sequence;

@@ -57,7 +57,7 @@ class DeleteFileTest {
DestroyDB(dbname_, options_);
numlevels_ = 7;
- ASSERT_OK(ReopenDB(true));
+ EXPECT_OK(ReopenDB(true));
}
Status ReopenDB(bool create) {

@@ -174,7 +174,7 @@ class FaultInjectionTestEnv : public EnvWrapper {
unique_ptr<Directory>* result) override {
unique_ptr<Directory> r;
Status s = target()->NewDirectory(name, &r);
- ASSERT_OK(s);
+ EXPECT_OK(s);
if (!s.ok()) {
return s;
}

@@ -206,7 +206,7 @@ class FaultInjectionTestEnv : public EnvWrapper {
fprintf(stderr, "Cannot delete file %s: %s\n", f.c_str(),
s.ToString().c_str());
}
- ASSERT_OK(s);
+ EXPECT_OK(s);
if (s.ok()) {
UntrackFile(f);
}

@@ -450,7 +450,7 @@ class FaultInjectionTest {
NewDB();
}
- ~FaultInjectionTest() { ASSERT_OK(TearDown()); }
+ ~FaultInjectionTest() { EXPECT_OK(TearDown()); }
bool ChangeOptions() {
option_config_++;

@@ -524,7 +524,7 @@ class FaultInjectionTest {
dbname_ = test::TmpDir() + "/fault_test";
- ASSERT_OK(DestroyDB(dbname_, options_));
+ EXPECT_OK(DestroyDB(dbname_, options_));
options_.create_if_missing = true;
Status s = OpenDB();

@@ -581,7 +581,7 @@ class FaultInjectionTest {
Value(i, &value_space);
s = ReadValue(i, &val);
if (s.ok()) {
- ASSERT_EQ(value_space, val);
+ EXPECT_EQ(value_space, val);
}
if (expected == kValExpectFound) {
if (!s.ok()) {

@@ -32,7 +32,7 @@ class FlushJobTest {
&write_controller_)),
shutting_down_(false),
mock_table_factory_(new mock::MockTableFactory()) {
- ASSERT_OK(env_->CreateDirIfMissing(dbname_));
+ EXPECT_OK(env_->CreateDirIfMissing(dbname_));
db_options_.db_paths.emplace_back(dbname_,
std::numeric_limits<uint64_t>::max());
// TODO(icanadi) Remove this once we mock out VersionSet

@@ -41,7 +41,7 @@ class FlushJobTest {
cf_options_.table_factory = mock_table_factory_;
column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);
- ASSERT_OK(versions_->Recover(column_families, false));
+ EXPECT_OK(versions_->Recover(column_families, false));
}
void NewDB() {

@@ -39,7 +39,7 @@ class EventListenerTest {
public:
EventListenerTest() {
dbname_ = test::TmpDir() + "/listener_test";
- ASSERT_OK(DestroyDB(dbname_, Options()));
+ EXPECT_OK(DestroyDB(dbname_, Options()));
db_ = nullptr;
Reopen();
}

@@ -51,7 +51,7 @@ class EventListenerTest {
options.db_paths.emplace_back(dbname_ + "_2", 0);
options.db_paths.emplace_back(dbname_ + "_3", 0);
options.db_paths.emplace_back(dbname_ + "_4", 0);
- ASSERT_OK(DestroyDB(dbname_, options));
+ EXPECT_OK(DestroyDB(dbname_, options));
}
void CreateColumnFamilies(const std::vector<std::string>& cfs,

@@ -91,7 +91,7 @@ class EventListenerTest {
const std::vector<std::string>& cfs,
const std::vector<const Options*>& options) {
Close();
- ASSERT_EQ(cfs.size(), options.size());
+ EXPECT_EQ(cfs.size(), options.size());
std::vector<ColumnFamilyDescriptor> column_families;
for (size_t i = 0; i < cfs.size(); ++i) {
column_families.push_back(ColumnFamilyDescriptor(cfs[i], *options[i]));

@@ -57,7 +57,7 @@ class LogTest {
virtual Status Close() override { return Status::OK(); }
virtual Status Flush() override {
- ASSERT_TRUE(reader_contents_.size() <= last_flush_);
+ EXPECT_TRUE(reader_contents_.size() <= last_flush_);
size_t offset = last_flush_ - reader_contents_.size();
reader_contents_ = Slice(
contents_.data() + offset,

@@ -100,7 +100,7 @@ class LogTest {
returned_partial_(false) { }
virtual Status Read(size_t n, Slice* result, char* scratch) override {
- ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
+ EXPECT_TRUE(!returned_partial_) << "must not Read() after eof/error";
if (force_error_) {
if (force_error_position_ >= n) {

@@ -55,7 +55,7 @@ std::shared_ptr<DB> OpenDb(bool read_only = false) {
} else {
s = DB::OpenForReadOnly(options, kDbName, &db);
}
- ASSERT_OK(s);
+ EXPECT_OK(s);
return std::shared_ptr<DB>(db);
}

@@ -49,14 +49,14 @@ class PlainTableDBTest {
public:
PlainTableDBTest() : env_(Env::Default()) {
dbname_ = test::TmpDir() + "/plain_table_db_test";
- ASSERT_OK(DestroyDB(dbname_, Options()));
+ EXPECT_OK(DestroyDB(dbname_, Options()));
db_ = nullptr;
Reopen();
}
~PlainTableDBTest() {
delete db_;
- ASSERT_OK(DestroyDB(dbname_, Options()));
+ EXPECT_OK(DestroyDB(dbname_, Options()));
}
// Return the current option configuration.

@@ -149,9 +149,8 @@ class PlainTableDBTest {
int NumTableFilesAtLevel(int level) {
std::string property;
- ASSERT_TRUE(
- db_->GetProperty("rocksdb.num-files-at-level" + NumberToString(level),
- &property));
+ EXPECT_TRUE(db_->GetProperty(
+ "rocksdb.num-files-at-level" + NumberToString(level), &property));
return atoi(property.c_str());
}

@@ -206,23 +205,23 @@ class TestPlainTableReader : public PlainTableReader {
encoding_type, file_size, table_properties),
expect_bloom_not_match_(expect_bloom_not_match) {
Status s = MmapDataFile();
- ASSERT_TRUE(s.ok());
+ EXPECT_TRUE(s.ok());
s = PopulateIndex(const_cast<TableProperties*>(table_properties),
bloom_bits_per_key, hash_table_ratio, index_sparseness,
2 * 1024 * 1024);
- ASSERT_TRUE(s.ok());
+ EXPECT_TRUE(s.ok());
TableProperties* props = const_cast<TableProperties*>(table_properties);
if (store_index_in_file) {
auto bloom_version_ptr = props->user_collected_properties.find(
PlainTablePropertyNames::kBloomVersion);
- ASSERT_TRUE(bloom_version_ptr != props->user_collected_properties.end());
- ASSERT_EQ(bloom_version_ptr->second, std::string("1"));
+ EXPECT_TRUE(bloom_version_ptr != props->user_collected_properties.end());
+ EXPECT_EQ(bloom_version_ptr->second, std::string("1"));
if (ioptions.bloom_locality > 0) {
auto num_blocks_ptr = props->user_collected_properties.find(
PlainTablePropertyNames::kNumBloomBlocks);
- ASSERT_TRUE(num_blocks_ptr != props->user_collected_properties.end());
+ EXPECT_TRUE(num_blocks_ptr != props->user_collected_properties.end());
}
}
}

@@ -233,9 +232,9 @@ class TestPlainTableReader : public PlainTableReader {
virtual bool MatchBloom(uint32_t hash) const override {
bool ret = PlainTableReader::MatchBloom(hash);
if (*expect_bloom_not_match_) {
- ASSERT_TRUE(!ret);
+ EXPECT_TRUE(!ret);
} else {
- ASSERT_TRUE(ret);
+ EXPECT_TRUE(ret);
}
return ret;
}

@@ -262,20 +261,20 @@ class TestPlainTableFactory : public PlainTableFactory {
TableProperties* props = nullptr;
auto s = ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber,
ioptions.env, ioptions.info_log, &props);
- ASSERT_TRUE(s.ok());
+ EXPECT_TRUE(s.ok());
if (store_index_in_file_) {
BlockHandle bloom_block_handle;
s = FindMetaBlock(file.get(), file_size, kPlainTableMagicNumber,
ioptions.env, BloomBlockBuilder::kBloomBlock,
&bloom_block_handle);
- ASSERT_TRUE(s.ok());
+ EXPECT_TRUE(s.ok());
BlockHandle index_block_handle;
s = FindMetaBlock(
file.get(), file_size, kPlainTableMagicNumber, ioptions.env,
PlainTableIndexBuilder::kPlainTableIndexBlock, &index_block_handle);
- ASSERT_TRUE(s.ok());
+ EXPECT_TRUE(s.ok());
}
auto& user_props = props->user_collected_properties;

@@ -77,13 +77,13 @@ class TestKeyComparator : public Comparator {
if (key_a->prefix < key_b->prefix) return -1;
if (key_a->prefix > key_b->prefix) return 1;
} else {
- ASSERT_TRUE(key_a->prefix == key_b->prefix);
+ EXPECT_TRUE(key_a->prefix == key_b->prefix);
// note, both a and b could be prefix only
if (a.size() != b.size()) {
// one of them is prefix
- ASSERT_TRUE(
- (a.size() == sizeof(uint64_t) && b.size() == sizeof(TestKey)) ||
- (b.size() == sizeof(uint64_t) && a.size() == sizeof(TestKey)));
+ EXPECT_TRUE(
+ (a.size() == sizeof(uint64_t) && b.size() == sizeof(TestKey)) ||
+ (b.size() == sizeof(uint64_t) && a.size() == sizeof(TestKey)));
if (a.size() < b.size()) return -1;
if (a.size() > b.size()) return 1;
} else {

@@ -93,7 +93,7 @@ class TestKeyComparator : public Comparator {
}
// both a and b are whole key
- ASSERT_TRUE(a.size() == sizeof(TestKey) && b.size() == sizeof(TestKey));
+ EXPECT_TRUE(a.size() == sizeof(TestKey) && b.size() == sizeof(TestKey));
if (key_a->sorted < key_b->sorted) return -1;
if (key_a->sorted > key_b->sorted) return 1;
if (key_a->sorted == key_b->sorted) return 0;

@@ -161,7 +161,7 @@ class PrefixTest {
FLAGS_memtable_prefix_bloom_huge_page_tlb_size;
Status s = DB::Open(options, kDbName, &db);
- ASSERT_OK(s);
+ EXPECT_OK(s);
return std::shared_ptr<DB>(db);
}

@@ -86,7 +86,7 @@ class WalManagerTest {
unique_ptr<TransactionLogIterator> iter;
Status status = wal_manager_->GetUpdatesSince(
seq, &iter, TransactionLogIterator::ReadOptions(), versions_.get());
- ASSERT_OK(status);
+ EXPECT_OK(status);
return std::move(iter);
}

@@ -182,10 +182,10 @@ int CountRecords(TransactionLogIterator* iter) {
BatchResult res;
while (iter->Valid()) {
res = iter->GetBatch();
- ASSERT_TRUE(res.sequence > lastSequence);
+ EXPECT_TRUE(res.sequence > lastSequence);
++count;
lastSequence = res.sequence;
- ASSERT_OK(iter->status());
+ EXPECT_OK(iter->status());
iter->Next();
}
return count;

@@ -42,7 +42,7 @@ static std::string PrintContents(WriteBatch* b) {
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ParsedInternalKey ikey;
memset((void *)&ikey, 0, sizeof(ikey));
- ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey));
+ EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey));
switch (ikey.type) {
case kTypeValue:
state.append("Put(");

@@ -361,12 +361,12 @@ class TableConstructor: public Constructor {
} else {
builder->Add(kv.first, kv.second);
}
- ASSERT_TRUE(builder->status().ok());
+ EXPECT_TRUE(builder->status().ok());
}
Status s = builder->Finish();
- ASSERT_TRUE(s.ok()) << s.ToString();
+ EXPECT_TRUE(s.ok()) << s.ToString();
- ASSERT_EQ(sink_->contents().size(), builder->FileSize());
+ EXPECT_EQ(sink_->contents().size(), builder->FileSize());
// Open the table
uniq_id_ = cur_uniq_id_++;

@@ -502,7 +502,7 @@ class DBConstructor: public Constructor {
for (const auto kv : kv_map) {
WriteBatch batch;
batch.Put(kv.first, kv.second);
- ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
+ EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
}
return Status::OK();
}

@@ -701,8 +701,9 @@ class FixedOrLessPrefixTransform : public SliceTransform {
class HarnessTest {
public:
HarnessTest()
- : ioptions_(options_), constructor_(nullptr),
- write_buffer_(options_.db_write_buffer_size) {}
+ : ioptions_(options_),
+ constructor_(nullptr),
+ write_buffer_(options_.db_write_buffer_size) {}
void Init(const TestArgs& args) {
delete constructor_;

@@ -793,9 +794,7 @@ class HarnessTest {
ioptions_ = ImmutableCFOptions(options_);
}
- ~HarnessTest() {
- delete constructor_;
- }
+ ~HarnessTest() { delete constructor_; }
void Add(const std::string& key, const std::string& value) {
constructor_->Add(key, value);

@@ -59,9 +59,8 @@ public:
int FilesOnLevel(int level) {
std::string property;
- ASSERT_TRUE(
- db_->GetProperty("rocksdb.num-files-at-level" + NumberToString(level),
- &property));
+ EXPECT_TRUE(db_->GetProperty(
+ "rocksdb.num-files-at-level" + NumberToString(level), &property));
return atoi(property.c_str());
}

@@ -103,7 +103,7 @@ uint64_t AutoRollLoggerTest::RollLogFileByTimeTest(
uint64_t expected_create_time;
uint64_t actual_create_time;
uint64_t total_log_size;
- ASSERT_OK(env->GetFileSize(kLogFile, &total_log_size));
+ EXPECT_OK(env->GetFileSize(kLogFile, &total_log_size));
GetFileCreateTime(kLogFile, &expected_create_time);
logger->SetCallNowMicrosEveryNRecords(0);

@@ -111,14 +111,14 @@ uint64_t AutoRollLoggerTest::RollLogFileByTimeTest(
// to be finished before time.
for (int i = 0; i < 10; ++i) {
LogMessage(logger, log_message.c_str());
- ASSERT_OK(logger->GetStatus());
+ EXPECT_OK(logger->GetStatus());
// Make sure we always write to the same log file (by
// checking the create time);
GetFileCreateTime(kLogFile, &actual_create_time);
// Also make sure the log size is increasing.
- ASSERT_EQ(expected_create_time, actual_create_time);
- ASSERT_GT(logger->GetLogFileSize(), total_log_size);
+ EXPECT_EQ(expected_create_time, actual_create_time);
+ EXPECT_GT(logger->GetLogFileSize(), total_log_size);
total_log_size = logger->GetLogFileSize();
}

@@ -128,8 +128,8 @@ uint64_t AutoRollLoggerTest::RollLogFileByTimeTest(
// At this time, the new log file should be created.
GetFileCreateTime(kLogFile, &actual_create_time);
- ASSERT_GT(actual_create_time, expected_create_time);
- ASSERT_LT(logger->GetLogFileSize(), total_log_size);
+ EXPECT_GT(actual_create_time, expected_create_time);
+ EXPECT_LT(logger->GetLogFileSize(), total_log_size);
expected_create_time = actual_create_time;
return expected_create_time;
util/ldb_cmd.cc
@@ -195,11 +195,11 @@ bool LDBCommand::ParseIntOption(const map<string, string>& options,
value = stoi(itr->second);
return true;
} catch(const invalid_argument&) {
- exec_state = LDBCommandExecuteResult::Failed(option +
- " has an invalid value.");
+ exec_state =
+ LDBCommandExecuteResult::Failed(option + " has an invalid value.");
} catch(const out_of_range&) {
- exec_state = LDBCommandExecuteResult::Failed(option +
- " has a value out-of-range.");
+ exec_state = LDBCommandExecuteResult::Failed(
+ option + " has a value out-of-range.");
}
}
return false;

@@ -235,8 +235,8 @@ Options LDBCommand::PrepareOptionsForOpenDB() {
use_table_options = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(bits));
} else {
- exec_state_ = LDBCommandExecuteResult::Failed(ARG_BLOOM_BITS +
- " must be > 0.");
+ exec_state_ =
+ LDBCommandExecuteResult::Failed(ARG_BLOOM_BITS + " must be > 0.");
}
}

@@ -246,8 +246,8 @@ Options LDBCommand::PrepareOptionsForOpenDB() {
use_table_options = true;
table_options.block_size = block_size;
} else {
- exec_state_ = LDBCommandExecuteResult::Failed(ARG_BLOCK_SIZE +
- " must be > 0.");
+ exec_state_ =
+ LDBCommandExecuteResult::Failed(ARG_BLOCK_SIZE + " must be > 0.");
}
}

@@ -277,8 +277,8 @@ Options LDBCommand::PrepareOptionsForOpenDB() {
opt.compression = kLZ4HCCompression;
} else {
// Unknown compression.
- exec_state_ = LDBCommandExecuteResult::Failed(
- "Unknown compression level: " + comp);
+ exec_state_ =
+ LDBCommandExecuteResult::Failed("Unknown compression level: " + comp);
}
}

@@ -289,7 +289,7 @@ Options LDBCommand::PrepareOptionsForOpenDB() {
opt.db_write_buffer_size = db_write_buffer_size;
} else {
exec_state_ = LDBCommandExecuteResult::Failed(ARG_DB_WRITE_BUFFER_SIZE +
- " must be >= 0.");
+ " must be >= 0.");
}
}

@@ -300,7 +300,7 @@ Options LDBCommand::PrepareOptionsForOpenDB() {
opt.write_buffer_size = write_buffer_size;
} else {
exec_state_ = LDBCommandExecuteResult::Failed(ARG_WRITE_BUFFER_SIZE +
- " must be > 0.");
+ " must be > 0.");
}
}

@@ -309,8 +309,8 @@ Options LDBCommand::PrepareOptionsForOpenDB() {
if (file_size > 0) {
opt.target_file_size_base = file_size;
} else {
- exec_state_ = LDBCommandExecuteResult::Failed(ARG_FILE_SIZE +
- " must be > 0.");
+ exec_state_ =
+ LDBCommandExecuteResult::Failed(ARG_FILE_SIZE + " must be > 0.");
}
}

@@ -585,8 +585,8 @@ void ManifestDumpCommand::DoCommand() {
// containing the db for files of the form MANIFEST_[0-9]+
DIR* d = opendir(db_path_.c_str());
if (d == nullptr) {
- exec_state_ = LDBCommandExecuteResult::Failed(
- db_path_ + " is not a directory");
+ exec_state_ =
+ LDBCommandExecuteResult::Failed(db_path_ + " is not a directory");
return;
}
struct dirent* entry;

@@ -603,7 +603,7 @@ void ManifestDumpCommand::DoCommand() {
found = true;
} else {
exec_state_ = LDBCommandExecuteResult::Failed(
- "Multiple MANIFEST files found; use --path to select one");
+ "Multiple MANIFEST files found; use --path to select one");
closedir(d);
return;
}

@@ -792,8 +792,8 @@ void InternalDumpCommand::DoCommand() {
ScopedArenaIterator iter(idb->TEST_NewInternalIterator(&arena));
Status st = iter->status();
if (!st.ok()) {
- exec_state_ = LDBCommandExecuteResult::Failed("Iterator error:"
- + st.ToString());
+ exec_state_ =
+ LDBCommandExecuteResult::Failed("Iterator error:" + st.ToString());
}
if (has_from_) {

@@ -900,10 +900,10 @@ DBDumperCommand::DBDumperCommand(const vector<string>& params,
max_keys_ = stoi(itr->second);
} catch(const invalid_argument&) {
exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
- " has an invalid value");
+ " has an invalid value");
} catch(const out_of_range&) {
- exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
- " has a value out-of-range");
+ exec_state_ = LDBCommandExecuteResult::Failed(
+ ARG_MAX_KEYS + " has a value out-of-range");
}
}
itr = options.find(ARG_COUNT_DELIM);

@@ -961,8 +961,8 @@ void DBDumperCommand::DoCommand() {
Iterator* iter = db_->NewIterator(ReadOptions());
Status st = iter->status();
if (!st.ok()) {
- exec_state_ = LDBCommandExecuteResult::Failed("Iterator error."
- + st.ToString());
+ exec_state_ =
+ LDBCommandExecuteResult::Failed("Iterator error." + st.ToString());
}
if (!null_from_) {

@@ -1095,7 +1095,7 @@ ReduceDBLevelsCommand::ReduceDBLevelsCommand(const vector<string>& params,
if(new_levels_ <= 0) {
exec_state_ = LDBCommandExecuteResult::Failed(
- " Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
+ " Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
}
}

@@ -1165,8 +1165,8 @@ Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt,
void ReduceDBLevelsCommand::DoCommand() {
if (new_levels_ <= 1) {
- exec_state_ = LDBCommandExecuteResult::Failed(
- "Invalid number of levels.\n");
+ exec_state_ =
+ LDBCommandExecuteResult::Failed("Invalid number of levels.\n");
return;
}

@@ -1225,8 +1225,8 @@ ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
if (old_compaction_style_ != kCompactionStyleLevel &&
old_compaction_style_ != kCompactionStyleUniversal) {
exec_state_ = LDBCommandExecuteResult::Failed(
- "Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " +
- "style. Check ldb help for proper compaction style value.\n");
+ "Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " +
+ "style. Check ldb help for proper compaction style value.\n");
return;
}

@@ -1235,23 +1235,23 @@ ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
if (new_compaction_style_ != kCompactionStyleLevel &&
new_compaction_style_ != kCompactionStyleUniversal) {
exec_state_ = LDBCommandExecuteResult::Failed(
- "Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " +
- "style. Check ldb help for proper compaction style value.\n");
+ "Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " +
+ "style. Check ldb help for proper compaction style value.\n");
return;
}
if (new_compaction_style_ == old_compaction_style_) {
exec_state_ = LDBCommandExecuteResult::Failed(
- "Old compaction style is the same as new compaction style. "
- "Nothing to do.\n");
+ "Old compaction style is the same as new compaction style. "
+ "Nothing to do.\n");
return;
}
if (old_compaction_style_ == kCompactionStyleUniversal &&
new_compaction_style_ == kCompactionStyleLevel) {
exec_state_ = LDBCommandExecuteResult::Failed(
- "Convert from universal compaction to level compaction. "
- "Nothing to do.\n");
+ "Convert from universal compaction to level compaction. "
+ "Nothing to do.\n");
return;
}
}

@@ -1320,16 +1320,19 @@ void ChangeCompactionStyleCommand::DoCommand() {
// level 0 should have only 1 file
if (i == 0 && num_files != 1) {
- exec_state_ = LDBCommandExecuteResult::Failed("Number of db files at "
- "level 0 after compaction is " + ToString(num_files) +
- ", not 1.\n");
+ exec_state_ = LDBCommandExecuteResult::Failed(
+ "Number of db files at "
+ "level 0 after compaction is " +
+ ToString(num_files) + ", not 1.\n");
return;
}
// other levels should have no file
if (i > 0 && num_files != 0) {
- exec_state_ = LDBCommandExecuteResult::Failed("Number of db files at "
- "level " + ToString(i) + " after compaction is " +
- ToString(num_files) + ", not 0.\n");
+ exec_state_ = LDBCommandExecuteResult::Failed(
+ "Number of db files at "
+ "level " +
+ ToString(i) + " after compaction is " + ToString(num_files) +
+ ", not 0.\n");
return;
}
}

@@ -1459,8 +1462,8 @@ WALDumperCommand::WALDumperCommand(const vector<string>& params,
print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
if (wal_file_.empty()) {
- exec_state_ = LDBCommandExecuteResult::Failed(
- "Argument " + ARG_WAL_FILE + " must be specified.");
+ exec_state_ = LDBCommandExecuteResult::Failed("Argument " + ARG_WAL_FILE +
+ " must be specified.");
}
}

@@ -1487,7 +1490,7 @@ GetCommand::GetCommand(const vector<string>& params,
if (params.size() != 1) {
exec_state_ = LDBCommandExecuteResult::Failed(
- "<key> must be specified for the get command");
+ "<key> must be specified for the get command");
} else {
key_ = params.at(0);
}

@@ -1527,16 +1530,16 @@ ApproxSizeCommand::ApproxSizeCommand(const vector<string>& params,
if (options.find(ARG_FROM) != options.end()) {
start_key_ = options.find(ARG_FROM)->second;
} else {
- exec_state_ = LDBCommandExecuteResult::Failed(ARG_FROM +
- " must be specified for approxsize command");
+ exec_state_ = LDBCommandExecuteResult::Failed(
+ ARG_FROM + " must be specified for approxsize command");
return;
}
if (options.find(ARG_TO) != options.end()) {
end_key_ = options.find(ARG_TO)->second;
} else {
- exec_state_ = LDBCommandExecuteResult::Failed(ARG_TO +
- " must be specified for approxsize command");
+ exec_state_ = LDBCommandExecuteResult::Failed(
+ ARG_TO + " must be specified for approxsize command");
return;
}

@@ -1657,10 +1660,10 @@ ScanCommand::ScanCommand(const vector<string>& params,
max_keys_scanned_ = stoi(itr->second);
} catch(const invalid_argument&) {
exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
- " has an invalid value");
+ " has an invalid value");
} catch(const out_of_range&) {
- exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
- " has a value out-of-range");
+ exec_state_ = LDBCommandExecuteResult::Failed(
+ ARG_MAX_KEYS + " has a value out-of-range");
}
}
}

@@ -1743,7 +1746,7 @@ DeleteCommand::DeleteCommand(const vector<string>& params,
if (params.size() != 1) {
exec_state_ = LDBCommandExecuteResult::Failed(
- "KEY must be specified for the delete command");
+ "KEY must be specified for the delete command");
} else {
key_ = params.at(0);
if (is_key_hex_) {

@@ -1776,7 +1779,7 @@ PutCommand::PutCommand(const vector<string>& params,
if (params.size() != 2) {
exec_state_ = LDBCommandExecuteResult::Failed(
- "<key> and <value> must be specified for the put command");
+ "<key> and <value> must be specified for the put command");
} else {
key_ = params.at(0);
value_ = params.at(1);

@@ -54,12 +54,12 @@ class SliceTransformDBTest {
public:
SliceTransformDBTest() : env_(Env::Default()), db_(nullptr) {
dbname_ = test::TmpDir() + "/slice_transform_db_test";
- ASSERT_OK(DestroyDB(dbname_, last_options_));
+ EXPECT_OK(DestroyDB(dbname_, last_options_));
}
~SliceTransformDBTest() {
delete db_;
- ASSERT_OK(DestroyDB(dbname_, last_options_));
+ EXPECT_OK(DestroyDB(dbname_, last_options_));
}
DB* db() { return db_; }

@@ -78,7 +78,7 @@ int RunAllTests() {
std::string TmpDir(Env* env) {
std::string dir;
Status s = env->GetTestDirectory(&dir);
- ASSERT_TRUE(s.ok()) << s.ToString();
+ EXPECT_TRUE(s.ok()) << s.ToString();
return dir;
}

@@ -170,6 +170,22 @@ class TesterHelper {
#define ASSERT_LT(a, b) \
TEST_EXPRESSION_(::rocksdb::test::Tester().IsLt((a), (b)))
+ #define EXPECT_TRUE(c) TEST_EXPRESSION_(::rocksdb::test::Tester().Is((c), #c))
+ #define EXPECT_OK(s) TEST_EXPRESSION_(::rocksdb::test::Tester().IsOk((s)))
+ #define EXPECT_NOK(s) TEST_EXPRESSION_(::rocksdb::test::Tester().IsNotOk((s)))
+ #define EXPECT_EQ(a, b) \
+ TEST_EXPRESSION_(::rocksdb::test::Tester().IsEq((a), (b)))
+ #define EXPECT_NE(a, b) \
+ TEST_EXPRESSION_(::rocksdb::test::Tester().IsNe((a), (b)))
+ #define EXPECT_GE(a, b) \
+ TEST_EXPRESSION_(::rocksdb::test::Tester().IsGe((a), (b)))
+ #define EXPECT_GT(a, b) \
+ TEST_EXPRESSION_(::rocksdb::test::Tester().IsGt((a), (b)))
+ #define EXPECT_LE(a, b) \
+ TEST_EXPRESSION_(::rocksdb::test::Tester().IsLe((a), (b)))
+ #define EXPECT_LT(a, b) \
+ TEST_EXPRESSION_(::rocksdb::test::Tester().IsLt((a), (b)))
#define TCONCAT(a, b) TCONCAT1(a, b)
#define TCONCAT1(a, b) a##b
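For reference, a minimal sketch (a hypothetical helper, not code from this diff) of why the `EXPECT*` variants defined above are needed: a test helper that returns a value cannot contain a gtest-style `ASSERT*`, which expands to a bare `return;`, while `EXPECT*` keeps compiling there; with the interim macros above, `EXPECT*` still behaves exactly like the old `ASSERT*`.

```lang=c++
#include <cstdlib>
#include <string>

#include "rocksdb/db.h"
#include "util/testharness.h"

// Hypothetical helper in the style of the NumTableFilesAtLevel() helpers in
// this diff: it returns an int, so it must use EXPECT_TRUE, not ASSERT_TRUE.
int NumFilesAtLevel(rocksdb::DB* db, int level) {
  std::string property;
  EXPECT_TRUE(db->GetProperty(
      "rocksdb.num-files-at-level" + std::to_string(level), &property));
  return std::atoi(property.c_str());
}
```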
@@ -53,20 +53,20 @@ class DummyDB : public StackableDB {
}
virtual Status EnableFileDeletions(bool force) override {
- ASSERT_TRUE(!deletions_enabled_);
+ EXPECT_TRUE(!deletions_enabled_);
deletions_enabled_ = true;
return Status::OK();
}
virtual Status DisableFileDeletions() override {
- ASSERT_TRUE(deletions_enabled_);
+ EXPECT_TRUE(deletions_enabled_);
deletions_enabled_ = false;
return Status::OK();
}
virtual Status GetLiveFiles(std::vector<std::string>& vec, uint64_t* mfs,
bool flush_memtable = true) override {
- ASSERT_TRUE(!deletions_enabled_);
+ EXPECT_TRUE(!deletions_enabled_);
vec = live_files_;
*mfs = 100;
return Status::OK();

@@ -88,7 +88,7 @@ class DummyDB : public StackableDB {
virtual uint64_t LogNumber() const override {
// what business do you have calling this method?
- ASSERT_TRUE(false);
+ EXPECT_TRUE(false);
return 0;
}

@@ -98,13 +98,13 @@ class DummyDB : public StackableDB {
virtual SequenceNumber StartSequence() const override {
// backupabledb should not need this method
- ASSERT_TRUE(false);
+ EXPECT_TRUE(false);
return 0;
}
virtual uint64_t SizeFileBytes() const override {
// backupabledb should not need this method
- ASSERT_TRUE(false);
+ EXPECT_TRUE(false);
return 0;
}

@@ -114,7 +114,7 @@ class DummyDB : public StackableDB {
}; // DummyLogFile
virtual Status GetSortedWalFiles(VectorLogPtr& files) override {
- ASSERT_TRUE(!deletions_enabled_);
+ EXPECT_TRUE(!deletions_enabled_);
files.resize(wal_files_.size());
for (size_t i = 0; i < files.size(); ++i) {
files[i].reset(

@@ -183,7 +183,7 @@ class TestEnv : public EnvWrapper {
virtual Status DeleteFile(const std::string& fname) override {
MutexLock l(&mutex_);
- ASSERT_GT(limit_delete_files_, 0U);
+ EXPECT_GT(limit_delete_files_, 0U);
limit_delete_files_--;
return EnvWrapper::DeleteFile(fname);
}

@@ -327,7 +327,7 @@ static size_t FillDB(DB* db, int from, int to) {
std::string value = "testvalue" + std::to_string(i);
bytes_written += key.size() + value.size();
- ASSERT_OK(db->Put(WriteOptions(), Slice(key), Slice(value)));
+ EXPECT_OK(db->Put(WriteOptions(), Slice(key), Slice(value)));
}
return bytes_written;
}

@@ -382,7 +382,7 @@ class BackupableDBTest {
DB* OpenDB() {
DB* db;
- ASSERT_OK(DB::Open(options_, dbname_, &db));
+ EXPECT_OK(DB::Open(options_, dbname_, &db));
return db;
}

@@ -20,7 +20,7 @@ class GeoDBTest {
GeoDBTest() {
GeoDBOptions geodb_options;
- ASSERT_OK(DestroyDB(kDefaultDbName, options));
+ EXPECT_OK(DestroyDB(kDefaultDbName, options));
options.create_if_missing = true;
Status status = DB::Open(options, kDefaultDbName, &db);
geodb = new GeoDBImpl(db, geodb_options);

@@ -32,7 +32,7 @@ std::shared_ptr<DB> OpenNormalDb(char delim_char) {
Options options;
options.create_if_missing = true;
options.merge_operator.reset(new StringAppendOperator(delim_char));
- ASSERT_OK(DB::Open(options, kDbName, &db));
+ EXPECT_OK(DB::Open(options, kDbName, &db));
return std::shared_ptr<DB>(db);
}

@@ -42,7 +42,7 @@ std::shared_ptr<DB> OpenTtlDb(char delim_char) {
Options options;
options.create_if_missing = true;
options.merge_operator.reset(new StringAppendTESTOperator(delim_char));
- ASSERT_OK(DBWithTTL::Open(options, kDbName, &db, 123456));
+ EXPECT_OK(DBWithTTL::Open(options, kDbName, &db, 123456));
return std::shared_ptr<DB>(db);
}
} // namespace

@@ -589,7 +589,7 @@ int main(int argc, char** argv) {
{
fprintf(stderr, "Running tests with ttl db and generic operator.\n");
StringAppendOperatorTest::SetOpenDbFunction(&OpenTtlDb);
- result |=rocksdb::test::RunAllTests();
+ result |= rocksdb::test::RunAllTests();
}
return result;