d93812c9ae
Summary:
Preliminary support for iterator with user timestamp. The current implementation does not support the merge operator or reverse iteration, and auto compaction is disabled in unit tests.

Create an iterator with timestamp:
```
...
read_opts.timestamp = &ts;
auto* iter = db->NewIterator(read_opts);
// target is a key without timestamp.
for (iter->Seek(target); iter->Valid(); iter->Next()) {}
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {}
delete iter;

read_opts.timestamp = &ts1;
// lower_bound and upper_bound are without timestamp.
read_opts.iterate_lower_bound = &lower_bound;
read_opts.iterate_upper_bound = &upper_bound;
auto* iter1 = db->NewIterator(read_opts);
// Do Seek or SeekToFirst().
delete iter1;
```

Test plan (dev server):
```
$ make check
```

Simple benchmarking (dev server)
1. The overhead introduced by this PR even when timestamp is disabled.
   key size: 16 bytes
   value size: 100 bytes
   entries: 1000000
   Data reside in main memory, and the workload stresses the iterator.
   Repeated three times on master and on this PR.

- Seek without Next()
```
./db_bench -db=/dev/shm/rocksdbtest-1000 -benchmarks=fillseq,seekrandom -enable_pipelined_write=false -disable_wal=true -format_version=3
```
master:  159047.0 ops/sec
this PR: 158922.3 ops/sec (~0.1% drop in throughput)

- Seek and Next() 10 times
```
./db_bench -db=/dev/shm/rocksdbtest-1000 -benchmarks=fillseq,seekrandom -enable_pipelined_write=false -disable_wal=true -format_version=3 -seek_nexts=10
```
master:  109539.3 ops/sec
this PR: 107519.7 ops/sec (~2% drop in throughput)

Pull Request resolved: https://github.com/facebook/rocksdb/pull/6255
Differential Revision: D19438227
Pulled By: riversand963
fbshipit-source-id: b66b4979486f8474619f4aa6bdd88598870b0746
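The `ts` passed to `read_opts.timestamp` above is a `Slice` over a fixed-width timestamp whose size must match the timestamp size declared by the DB's user-defined comparator, and it must remain valid for as long as the iterator is in use. A minimal sketch of the surrounding setup, assuming an 8-byte timestamp; the `EncodeU64Ts` helper and its byte order are illustrative, not part of this PR:
```
// Illustrative only: encode a uint64 timestamp as the fixed-width byte
// string that read_opts.timestamp expects. The byte order must match
// whatever the user-defined comparator assumes.
#include <cstdint>
#include <cstring>
#include <string>

#include "rocksdb/db.h"

std::string EncodeU64Ts(uint64_t ts_val) {
  std::string ts_str(sizeof(ts_val), '\0');
  std::memcpy(&ts_str[0], &ts_val, sizeof(ts_val));
  return ts_str;
}

void ScanAtTimestamp(rocksdb::DB* db, uint64_t ts_val) {
  std::string ts_str = EncodeU64Ts(ts_val);
  rocksdb::Slice ts(ts_str);  // Must stay alive while iter is in use.
  rocksdb::ReadOptions read_opts;
  read_opts.timestamp = &ts;
  rocksdb::Iterator* iter = db->NewIterator(read_opts);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    // Visit iter->key() / iter->value(): the entries visible at ts.
  }
  delete iter;
}
```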
207 lines
6.8 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/dbformat.h"

#include <stdio.h>

#include <cinttypes>

#include "monitoring/perf_context_imp.h"
#include "port/port.h"
#include "util/coding.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

// kValueTypeForSeek defines the ValueType that should be passed when
// constructing a ParsedInternalKey object for seeking to a particular
// sequence number (since we sort sequence numbers in decreasing order
// and the value type is embedded as the low 8 bits in the sequence
// number in internal keys, we need to use the highest-numbered
// ValueType, not the lowest).
const ValueType kValueTypeForSeek = kTypeBlobIndex;
const ValueType kValueTypeForSeekForPrev = kTypeDeletion;
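
// The 8-byte footer of an internal key packs the sequence number into the
// high 56 bits and the ValueType tag into the low 8 bits; for example,
// PackSequenceAndType(100, kTypeValue) == (100 << 8) | kTypeValue.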
uint64_t PackSequenceAndType(uint64_t seq, ValueType t) {
  assert(seq <= kMaxSequenceNumber);
  assert(IsExtendedValueType(t));
  return (seq << 8) | t;
}

EntryType GetEntryType(ValueType value_type) {
  switch (value_type) {
    case kTypeValue:
      return kEntryPut;
    case kTypeDeletion:
      return kEntryDelete;
    case kTypeSingleDeletion:
      return kEntrySingleDelete;
    case kTypeMerge:
      return kEntryMerge;
    case kTypeRangeDeletion:
      return kEntryRangeDeletion;
    case kTypeBlobIndex:
      return kEntryBlobIndex;
    default:
      return kEntryOther;
  }
}

bool ParseFullKey(const Slice& internal_key, FullKey* fkey) {
  ParsedInternalKey ikey;
  if (!ParseInternalKey(internal_key, &ikey)) {
    return false;
  }
  fkey->user_key = ikey.user_key;
  fkey->sequence = ikey.sequence;
  fkey->type = GetEntryType(ikey.type);
  return true;
}

void UnPackSequenceAndType(uint64_t packed, uint64_t* seq, ValueType* t) {
  *seq = packed >> 8;
  *t = static_cast<ValueType>(packed & 0xff);

  assert(*seq <= kMaxSequenceNumber);
  assert(IsExtendedValueType(*t));
}

void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
  result->append(key.user_key.data(), key.user_key.size());
  PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
}
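
// Like AppendInternalKey(), but the trailing ts.size() bytes of key.user_key
// (the key's existing timestamp) are replaced with the bytes of ts.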
void AppendInternalKeyWithDifferentTimestamp(std::string* result,
                                             const ParsedInternalKey& key,
                                             const Slice& ts) {
  assert(key.user_key.size() >= ts.size());
  result->append(key.user_key.data(), key.user_key.size() - ts.size());
  result->append(ts.data(), ts.size());
  PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
}

void AppendInternalKeyFooter(std::string* result, SequenceNumber s,
                             ValueType t) {
  PutFixed64(result, PackSequenceAndType(s, t));
}

std::string ParsedInternalKey::DebugString(bool hex) const {
  char buf[50];
  snprintf(buf, sizeof(buf), "' seq:%" PRIu64 ", type:%d", sequence,
           static_cast<int>(type));
  std::string result = "'";
  result += user_key.ToString(hex);
  result += buf;
  return result;
}

std::string InternalKey::DebugString(bool hex) const {
  std::string result;
  ParsedInternalKey parsed;
  if (ParseInternalKey(rep_, &parsed)) {
    result = parsed.DebugString(hex);
  } else {
    result = "(bad)";
    result.append(EscapeString(rep_));
  }
  return result;
}

const char* InternalKeyComparator::Name() const { return name_.c_str(); }
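
// Example: with a bytewise user comparator, ("foo", seq=100, kTypeValue)
// orders before ("foo", seq=99, kTypeValue) (same user key, newer entries
// first), which in turn orders before ("goo", seq=200, kTypeValue).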
int InternalKeyComparator::Compare(const ParsedInternalKey& a,
                                   const ParsedInternalKey& b) const {
  // Order by:
  //    increasing user key (according to user-supplied comparator)
  //    decreasing sequence number
  //    decreasing type (though sequence# should be enough to disambiguate)
  int r = user_comparator_.Compare(a.user_key, b.user_key);
  if (r == 0) {
    if (a.sequence > b.sequence) {
      r = -1;
    } else if (a.sequence < b.sequence) {
      r = +1;
    } else if (a.type > b.type) {
      r = -1;
    } else if (a.type < b.type) {
      r = +1;
    }
  }
  return r;
}
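
// Example with a bytewise user comparator: shortening user key "abcdefg"
// against limit "abzzzzz" can yield "abd", which still satisfies
// "abcdefg" <= "abd" < "abzzzzz" while taking fewer bytes in index blocks.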
void InternalKeyComparator::FindShortestSeparator(std::string* start,
                                                  const Slice& limit) const {
  // Attempt to shorten the user portion of the key
  Slice user_start = ExtractUserKey(*start);
  Slice user_limit = ExtractUserKey(limit);
  std::string tmp(user_start.data(), user_start.size());
  user_comparator_.FindShortestSeparator(&tmp, user_limit);
  if (tmp.size() <= user_start.size() &&
      user_comparator_.Compare(user_start, tmp) < 0) {
    // User key has become shorter physically, but larger logically.
    // Tack on the earliest possible number to the shortened user key.
    PutFixed64(&tmp,
               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
    assert(this->Compare(*start, tmp) < 0);
    assert(this->Compare(tmp, limit) < 0);
    start->swap(tmp);
  }
}

void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
  Slice user_key = ExtractUserKey(*key);
  std::string tmp(user_key.data(), user_key.size());
  user_comparator_.FindShortSuccessor(&tmp);
  if (tmp.size() <= user_key.size() &&
      user_comparator_.Compare(user_key, tmp) < 0) {
    // User key has become shorter physically, but larger logically.
    // Tack on the earliest possible number to the shortened user key.
    PutFixed64(&tmp,
               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
    assert(this->Compare(*key, tmp) < 0);
    key->swap(tmp);
  }
}
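
// The buffer built below has the layout:
//   varint32 encoding of (user key size + timestamp size + 8)
//   user key bytes
//   timestamp bytes (only when ts is non-null)
//   fixed64 encoding of PackSequenceAndType(s, kValueTypeForSeek)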
LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s,
                     const Slice* ts) {
  size_t usize = _user_key.size();
  size_t ts_sz = (nullptr == ts) ? 0 : ts->size();
  size_t needed = usize + ts_sz + 13;  // A conservative estimate
  char* dst;
  if (needed <= sizeof(space_)) {
    dst = space_;
  } else {
    dst = new char[needed];
  }
  start_ = dst;
  // NOTE: We don't support user keys of more than 2GB :)
  dst = EncodeVarint32(dst, static_cast<uint32_t>(usize + ts_sz + 8));
  kstart_ = dst;
  memcpy(dst, _user_key.data(), usize);
  dst += usize;
  if (nullptr != ts) {
    memcpy(dst, ts->data(), ts_sz);
    dst += ts_sz;
  }
  EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
  dst += 8;
  end_ = dst;
}

void IterKey::EnlargeBuffer(size_t key_size) {
  // If the size is smaller than the buffer size, continue using the current
  // buffer, or the statically allocated one, as default.
  assert(key_size > buf_size_);
  // Need to enlarge the buffer.
  ResetBuffer();
  buf_ = new char[key_size];
  buf_size_ = key_size;
}

}  // namespace ROCKSDB_NAMESPACE