rocksdb/util/testutil.cc
Andres Noetzli 014fd55adc Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted, the behavior is undefined. Single deletion of a
non-existent key has no effect, but multiple consecutive single deletions are
not allowed (see limitations).
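
For illustration only (this sketch is not part of the diff), the intended
usage looks roughly like the following, assuming a database opened at a
hypothetical path:

  #include <cassert>
  #include <string>
  #include "rocksdb/db.h"

  int main() {
    rocksdb::DB* db;
    rocksdb::Options options;
    options.create_if_missing = true;
    rocksdb::Status s =
        rocksdb::DB::Open(options, "/tmp/single_delete_demo", &db);
    assert(s.ok());
    // Exactly one Put for the key; SingleDelete assumes it is never overwritten.
    s = db->Put(rocksdb::WriteOptions(), "foo", "bar");
    assert(s.ok());
    // Remove the key again; during compaction the tombstone and the Put
    // can cancel each other out.
    s = db->SingleDelete(rocksdb::WriteOptions(), "foo");
    assert(s.ok());
    std::string value;
    s = db->Get(rocksdb::ReadOptions(), "foo", &value);
    assert(s.IsNotFound());  // the key is no longer visible
    delete db;
    return 0;
  }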

In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed enabling this
behavior at the granularity of a column family
( https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: it removes the SingleDelete
together with the value whenever there is no snapshot between them, while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
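
In sequence-number terms, the new rule can be sketched as follows (a
simplified illustration with a hypothetical helper name; the actual logic
lives in the compaction iterator):

  #include <cstdint>
  #include <vector>

  // Can a SingleDelete at sequence number del_seq be dropped together with the
  // Put at put_seq (put_seq < del_seq) that it covers? Only if no live snapshot
  // could still observe that Put, i.e. no snapshot sequence number lies in the
  // half-open range [put_seq, del_seq).
  bool CanElideSingleDeletePair(uint64_t put_seq, uint64_t del_seq,
                                const std::vector<uint64_t>& snapshots) {
    for (uint64_t snap : snapshots) {
      if (snap >= put_seq && snap < del_seq) {
        return false;  // a snapshot sits between the value and its tombstone
      }
    }
    return true;  // no snapshot in between: both entries can be removed
  }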

Most of the complex additions are in the Compaction Iterator; all other
changes should be relatively straightforward. The patch also includes basic
support for single deletions in db_stress and db_bench.

Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
  deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (an older version of
  this patch supported them, so support could be resurrected if needed)

Test Plan: make all check

Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor

Reviewed By: igor

Subscribers: maykov, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 11:42:56 -07:00

// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "util/testutil.h"

#include "port/port.h"
#include "util/file_reader_writer.h"
#include "util/random.h"

namespace rocksdb {
namespace test {

Slice RandomString(Random* rnd, int len, std::string* dst) {
  dst->resize(len);
  for (int i = 0; i < len; i++) {
    (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95));  // ' ' .. '~'
  }
  return Slice(*dst);
}

extern std::string RandomHumanReadableString(Random* rnd, int len) {
  std::string ret;
  ret.resize(len);
  for (int i = 0; i < len; ++i) {
    ret[i] = static_cast<char>('a' + rnd->Uniform(26));
  }
  return ret;
}

std::string RandomKey(Random* rnd, int len) {
  // Make sure to generate a wide variety of characters so we
  // test the boundary conditions for short-key optimizations.
  static const char kTestChars[] = {
      '\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'};
  std::string result;
  for (int i = 0; i < len; i++) {
    result += kTestChars[rnd->Uniform(sizeof(kTestChars))];
  }
  return result;
}

extern Slice CompressibleString(Random* rnd, double compressed_fraction,
                                int len, std::string* dst) {
  int raw = static_cast<int>(len * compressed_fraction);
  if (raw < 1) raw = 1;
  std::string raw_data;
  RandomString(rnd, raw, &raw_data);

  // Duplicate the random data until we have filled "len" bytes
  dst->clear();
  while (dst->size() < (unsigned int)len) {
    dst->append(raw_data);
  }
  dst->resize(len);
  return Slice(*dst);
}

namespace {
class Uint64ComparatorImpl : public Comparator {
 public:
  Uint64ComparatorImpl() { }

  virtual const char* Name() const override {
    return "rocksdb.Uint64Comparator";
  }

  virtual int Compare(const Slice& a, const Slice& b) const override {
    assert(a.size() == sizeof(uint64_t) && b.size() == sizeof(uint64_t));
    const uint64_t* left = reinterpret_cast<const uint64_t*>(a.data());
    const uint64_t* right = reinterpret_cast<const uint64_t*>(b.data());
    if (*left == *right) {
      return 0;
    } else if (*left < *right) {
      return -1;
    } else {
      return 1;
    }
  }

  virtual void FindShortestSeparator(std::string* start,
                                     const Slice& limit) const override {
    return;
  }

  virtual void FindShortSuccessor(std::string* key) const override {
    return;
  }
};
}  // namespace

static port::OnceType once = LEVELDB_ONCE_INIT;
static const Comparator* uint64comp;

static void InitModule() {
  uint64comp = new Uint64ComparatorImpl;
}

const Comparator* Uint64Comparator() {
  port::InitOnce(&once, InitModule);
  return uint64comp;
}

WritableFileWriter* GetWritableFileWriter(WritableFile* wf) {
  unique_ptr<WritableFile> file(wf);
  return new WritableFileWriter(std::move(file), EnvOptions());
}

RandomAccessFileReader* GetRandomAccessFileReader(RandomAccessFile* raf) {
  unique_ptr<RandomAccessFile> file(raf);
  return new RandomAccessFileReader(std::move(file));
}

SequentialFileReader* GetSequentialFileReader(SequentialFile* se) {
  unique_ptr<SequentialFile> file(se);
  return new SequentialFileReader(std::move(file));
}
void CorruptKeyType(InternalKey* ikey) {
  std::string keystr = ikey->Encode().ToString();
  // Overwrite the type byte in the 8-byte (sequence, type) footer with
  // kTypeLogData, which is not a valid key type, to simulate a corrupted key.
  keystr[keystr.size() - 8] = kTypeLogData;
  ikey->DecodeFrom(Slice(keystr.data(), keystr.size()));
}

std::string KeyStr(const std::string& user_key, const SequenceNumber& seq,
                   const ValueType& t, bool corrupt) {
  InternalKey k(user_key, seq, t);
  if (corrupt) {
    CorruptKeyType(&k);
  }
  return k.Encode().ToString();
}

} // namespace test
} // namespace rocksdb