diff --git a/db/db_test.cc b/db/db_test.cc index 3a9381dd5..f0df3d3f3 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -1457,20 +1457,20 @@ TEST(DBTest, FilterDeletes) { CreateAndReopenWithCF({"pikachu"}, &options); WriteBatch batch; - batch.Delete(1, "a"); + batch.Delete(handles_[1], "a"); dbfull()->Write(WriteOptions(), &batch); ASSERT_EQ(AllEntriesFor("a", 1), "[ ]"); // Delete skipped batch.Clear(); - batch.Put(1, "a", "b"); - batch.Delete(1, "a"); + batch.Put(handles_[1], "a", "b"); + batch.Delete(handles_[1], "a"); dbfull()->Write(WriteOptions(), &batch); ASSERT_EQ(Get(1, "a"), "NOT_FOUND"); ASSERT_EQ(AllEntriesFor("a", 1), "[ DEL, b ]"); // Delete issued batch.Clear(); - batch.Delete(1, "c"); - batch.Put(1, "c", "d"); + batch.Delete(handles_[1], "c"); + batch.Put(handles_[1], "c", "d"); dbfull()->Write(WriteOptions(), &batch); ASSERT_EQ(Get(1, "c"), "d"); ASSERT_EQ(AllEntriesFor("c", 1), "[ d ]"); // Delete skipped @@ -1478,7 +1478,7 @@ TEST(DBTest, FilterDeletes) { ASSERT_OK(Flush(1)); // A stray Flush - batch.Delete(1, "c"); + batch.Delete(handles_[1], "c"); dbfull()->Write(WriteOptions(), &batch); ASSERT_EQ(AllEntriesFor("c", 1), "[ DEL, d ]"); // Delete issued batch.Clear(); @@ -4882,10 +4882,10 @@ TEST(DBTest, TransactionLogIteratorBatchOperations) { DestroyAndReopen(&options); CreateAndReopenWithCF({"pikachu"}, &options); WriteBatch batch; - batch.Put(1, "key1", DummyString(1024)); - batch.Put(0, "key2", DummyString(1024)); - batch.Put(1, "key3", DummyString(1024)); - batch.Delete(0, "key2"); + batch.Put(handles_[1], "key1", DummyString(1024)); + batch.Put(handles_[0], "key2", DummyString(1024)); + batch.Put(handles_[1], "key3", DummyString(1024)); + batch.Delete(handles_[0], "key2"); dbfull()->Write(WriteOptions(), &batch); Flush(1); Flush(0); @@ -4902,12 +4902,12 @@ TEST(DBTest, TransactionLogIteratorBlobs) { CreateAndReopenWithCF({"pikachu"}, &options); { WriteBatch batch; - batch.Put(1, "key1", DummyString(1024)); - batch.Put(0, "key2", 
DummyString(1024)); + batch.Put(handles_[1], "key1", DummyString(1024)); + batch.Put(handles_[0], "key2", DummyString(1024)); batch.PutLogData(Slice("blob1")); - batch.Put(1, "key3", DummyString(1024)); + batch.Put(handles_[1], "key3", DummyString(1024)); batch.PutLogData(Slice("blob2")); - batch.Delete(0, "key2"); + batch.Delete(handles_[0], "key2"); dbfull()->Write(WriteOptions(), &batch); ReopenWithColumnFamilies({"default", "pikachu"}, &options); } @@ -5050,7 +5050,7 @@ static void MTThreadBody(void* arg) { for (int cf = 0; cf < kColumnFamilies; ++cf) { snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id, static_cast<int>(counter), cf, unique_id); - batch.Put(cf, Slice(keybuf), Slice(valbuf)); + batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf)); } ASSERT_OK(db->Write(WriteOptions(), &batch)); } else { @@ -5225,21 +5225,21 @@ class ModelDB: public DB { virtual Status Put(const WriteOptions& o, ColumnFamilyHandle* cf, const Slice& k, const Slice& v) { WriteBatch batch; - batch.Put(0, k, v); + batch.Put(cf, k, v); return Write(o, &batch); } using DB::Merge; virtual Status Merge(const WriteOptions& o, ColumnFamilyHandle* cf, const Slice& k, const Slice& v) { WriteBatch batch; - batch.Merge(0, k, v); + batch.Merge(cf, k, v); return Write(o, &batch); } using DB::Delete; virtual Status Delete(const WriteOptions& o, ColumnFamilyHandle* cf, const Slice& key) { WriteBatch batch; - batch.Delete(0, key); + batch.Delete(cf, key); return Write(o, &batch); } using DB::Get; diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index c6e17e8b7..c2f412c59 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -11,6 +11,7 @@ #include <memory> #include "db/memtable.h" +#include "db/column_family.h" #include "db/write_batch_internal.h" #include "rocksdb/env.h" #include "rocksdb/memtablerep.h" @@ -279,14 +280,27 @@ TEST(WriteBatchTest, PutGatherSlices) { ASSERT_EQ(3, batch.Count()); } +namespace { +class ColumnFamilyHandleImplDummy : public 
ColumnFamilyHandleImpl { + public: + ColumnFamilyHandleImplDummy(int id) + : ColumnFamilyHandleImpl(nullptr, nullptr, nullptr), id_(id) {} + uint32_t GetID() const override { return id_; } + + private: + uint32_t id_; +}; +} // namespace anonymous + TEST(WriteBatchTest, ColumnFamiliesBatchTest) { WriteBatch batch; - batch.Put(0, Slice("foo"), Slice("bar")); - batch.Put(2, Slice("twofoo"), Slice("bar2")); - batch.Put(8, Slice("eightfoo"), Slice("bar8")); - batch.Delete(8, Slice("eightfoo")); - batch.Merge(3, Slice("threethree"), Slice("3three")); - batch.Put(0, Slice("foo"), Slice("bar")); + ColumnFamilyHandleImplDummy zero(0), two(2), three(3), eight(8); + batch.Put(&zero, Slice("foo"), Slice("bar")); + batch.Put(&two, Slice("twofoo"), Slice("bar2")); + batch.Put(&eight, Slice("eightfoo"), Slice("bar8")); + batch.Delete(&eight, Slice("eightfoo")); + batch.Merge(&three, Slice("threethree"), Slice("3three")); + batch.Put(&zero, Slice("foo"), Slice("bar")); batch.Merge(Slice("omom"), Slice("nom")); TestHandler handler; diff --git a/tools/db_stress.cc b/tools/db_stress.cc index f14e5b2bb..3e0e41dcc 100644 --- a/tools/db_stress.cc +++ b/tools/db_stress.cc @@ -69,7 +69,7 @@ DEFINE_int64(max_key, 1 * KB* KB, DEFINE_int32(column_families, 10, "Number of column families"); DEFINE_bool(test_batches_snapshots, false, - "If set, the test uses MultiGet(), MultiPut() and MultiDelete()" + "If set, the test uses MultiGet(), MultiPut() and MultiDelete()" " which read/write/delete multiple keys in a batch. In this mode," " we do not verify db content by comparing the content with the " "pre-allocated array. 
Instead, we do partial verification inside" @@ -853,9 +853,9 @@ class StressTest { values[i] += value.ToString(); value_slices[i] = values[i]; if (FLAGS_use_merge) { - batch.Merge(column_family->GetID(), keys[i], value_slices[i]); + batch.Merge(column_family, keys[i], value_slices[i]); } else { - batch.Put(column_family->GetID(), keys[i], value_slices[i]); + batch.Put(column_family, keys[i], value_slices[i]); } } @@ -882,7 +882,7 @@ class StressTest { Status s; for (int i = 0; i < 10; i++) { keys[i] += key.ToString(); - batch.Delete(column_family->GetID(), keys[i]); + batch.Delete(column_family, keys[i]); } s = db_->Write(writeoptions, &batch);