// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE

#include "table/plain/plain_table_key_coding.h"

#include <algorithm>
#include <string>

#include "db/dbformat.h"
#include "file/writable_file_writer.h"
#include "table/plain/plain_table_factory.h"
#include "table/plain/plain_table_reader.h"

namespace rocksdb {

enum PlainTableEntryType : unsigned char {
  kFullKey = 0,
  kPrefixFromPreviousKey = 1,
  kKeySuffix = 2,
};

namespace {

// Control byte:
// First two bits indicate the type of entry.
// The other six bits hold the inlined size. If they are all 1 (0x3F),
// overflow bytes are used: key_size - 0x3F is encoded as a varint32 right
// after this byte.

const unsigned char kSizeInlineLimit = 0x3F;
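// For example, a kFullKey entry for a 10-byte key is encoded as the single
// control byte 0x0A ((0 << 6) | 10), while a kKeySuffix entry for a 100-byte
// suffix is 0xBF ((2 << 6) | 0x3F) followed by varint32(100 - 0x3F) = 0x25.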

// Returns the number of bytes used to encode the size entry, including the
// control byte.
size_t EncodeSize(PlainTableEntryType type, uint32_t key_size,
                  char* out_buffer) {
  out_buffer[0] = type << 6;

  if (key_size < static_cast<uint32_t>(kSizeInlineLimit)) {
    // size inlined
    out_buffer[0] |= static_cast<char>(key_size);
    return 1;
  } else {
    out_buffer[0] |= kSizeInlineLimit;
    char* ptr = EncodeVarint32(out_buffer + 1, key_size - kSizeInlineLimit);
    return ptr - out_buffer;
  }
}
}  // namespace

// Fill bytes_read with number of bytes read.
inline Status PlainTableKeyDecoder::DecodeSize(uint32_t start_offset,
                                               PlainTableEntryType* entry_type,
                                               uint32_t* key_size,
                                               uint32_t* bytes_read) {
  Slice next_byte_slice;
  bool success = file_reader_.Read(start_offset, 1, &next_byte_slice);
  if (!success) {
    return file_reader_.status();
  }
  *entry_type = static_cast<PlainTableEntryType>(
      (static_cast<unsigned char>(next_byte_slice[0]) & ~kSizeInlineLimit) >>
      6);
  char inline_key_size = next_byte_slice[0] & kSizeInlineLimit;
  if (inline_key_size < kSizeInlineLimit) {
    *key_size = inline_key_size;
    *bytes_read = 1;
    return Status::OK();
  } else {
    uint32_t extra_size;
    uint32_t tmp_bytes_read;
    success = file_reader_.ReadVarint32(start_offset + 1, &extra_size,
                                        &tmp_bytes_read);
    if (!success) {
      return file_reader_.status();
    }
    assert(tmp_bytes_read > 0);
    *key_size = kSizeInlineLimit + extra_size;
    *bytes_read = tmp_bytes_read + 1;
    return Status::OK();
  }
}
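
// With kPrefix encoding, the first key of a prefix (and every
// index_sparseness_-th key within the same prefix) is written as a kFullKey
// entry. The second key of the prefix additionally records the prefix length
// once with a kPrefixFromPreviousKey entry; it and all later keys of the same
// prefix then store only their suffix after a kKeySuffix entry. For example,
// with a 3-byte prefix extractor, user keys "abc1", "abc2" and "abc3" are
// written as kFullKey(4) + "abc1", kPrefixFromPreviousKey(3) kKeySuffix(1) +
// "2", and kKeySuffix(1) + "3" (plus the usual sequence/type information for
// each key).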
Status PlainTableKeyEncoder::AppendKey(const Slice& key,
                                       WritableFileWriter* file,
                                       uint64_t* offset, char* meta_bytes_buf,
                                       size_t* meta_bytes_buf_size) {
  ParsedInternalKey parsed_key;
  if (!ParseInternalKey(key, &parsed_key)) {
    return Status::Corruption(Slice());
  }

  Slice key_to_write = key;  // Portion of internal key to write out.

  uint32_t user_key_size = static_cast<uint32_t>(key.size() - 8);
  if (encoding_type_ == kPlain) {
    if (fixed_user_key_len_ == kPlainTableVariableLength) {
      // Write key length
      char key_size_buf[5];  // tmp buffer for key size as varint32
      char* ptr = EncodeVarint32(key_size_buf, user_key_size);
      assert(ptr <= key_size_buf + sizeof(key_size_buf));
      auto len = ptr - key_size_buf;
      Status s = file->Append(Slice(key_size_buf, len));
      if (!s.ok()) {
        return s;
      }
      *offset += len;
    }
  } else {
    assert(encoding_type_ == kPrefix);
    char size_bytes[12];
    size_t size_bytes_pos = 0;

    Slice prefix =
        prefix_extractor_->Transform(Slice(key.data(), user_key_size));
    if (key_count_for_prefix_ == 0 || prefix != pre_prefix_.GetUserKey() ||
        key_count_for_prefix_ % index_sparseness_ == 0) {
      key_count_for_prefix_ = 1;
      pre_prefix_.SetUserKey(prefix);
      size_bytes_pos += EncodeSize(kFullKey, user_key_size, size_bytes);
      Status s = file->Append(Slice(size_bytes, size_bytes_pos));
      if (!s.ok()) {
        return s;
      }
      *offset += size_bytes_pos;
    } else {
      key_count_for_prefix_++;
      if (key_count_for_prefix_ == 2) {
        // For the second key within a prefix, need to encode the prefix length
        size_bytes_pos +=
            EncodeSize(kPrefixFromPreviousKey,
                       static_cast<uint32_t>(pre_prefix_.GetUserKey().size()),
                       size_bytes + size_bytes_pos);
      }
      uint32_t prefix_len =
          static_cast<uint32_t>(pre_prefix_.GetUserKey().size());
      size_bytes_pos += EncodeSize(kKeySuffix, user_key_size - prefix_len,
                                   size_bytes + size_bytes_pos);
      Status s = file->Append(Slice(size_bytes, size_bytes_pos));
      if (!s.ok()) {
        return s;
      }
      *offset += size_bytes_pos;
      key_to_write = Slice(key.data() + prefix_len, key.size() - prefix_len);
    }
  }

  // Encode full key
  // For value size as varint32 (up to 5 bytes).
  // If the row is of value type with seqId 0, flush the special flag together
  // in this buffer to save one file append call, which takes 1 byte.
  if (parsed_key.sequence == 0 && parsed_key.type == kTypeValue) {
    Status s =
        file->Append(Slice(key_to_write.data(), key_to_write.size() - 8));
    if (!s.ok()) {
      return s;
    }
    *offset += key_to_write.size() - 8;
    meta_bytes_buf[*meta_bytes_buf_size] = PlainTableFactory::kValueTypeSeqId0;
    *meta_bytes_buf_size += 1;
  } else {
    Status s = file->Append(key_to_write);
    if (!s.ok()) {
      return s;
    }
    *offset += key_to_write.size();
  }

  return Status::OK();
}

Slice PlainTableFileReader::GetFromBuffer(Buffer* buffer, uint32_t file_offset,
                                          uint32_t len) {
  assert(file_offset + len <= file_info_->data_end_offset);
  return Slice(buffer->buf.get() + (file_offset - buffer->buf_start_offset),
               len);
}
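
// For non-mmap reads, keep a few recently used read buffers (buffers_) so
// that access patterns such as binary search, which often come back to a
// nearby location, can be served without issuing another pread.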
bool PlainTableFileReader::ReadNonMmap(uint32_t file_offset, uint32_t len,
                                       Slice* out) {
  const uint32_t kPrefetchSize = 256u;

  // Try to read from buffers.
  for (uint32_t i = 0; i < num_buf_; i++) {
    Buffer* buffer = buffers_[num_buf_ - 1 - i].get();
    if (file_offset >= buffer->buf_start_offset &&
        file_offset + len <= buffer->buf_start_offset + buffer->buf_len) {
      *out = GetFromBuffer(buffer, file_offset, len);
      return true;
    }
  }

  Buffer* new_buffer;
  // Data needed is not in any of the buffers. Allocate a new buffer.
  if (num_buf_ < buffers_.size()) {
    // Add a new buffer
    new_buffer = new Buffer();
    buffers_[num_buf_++].reset(new_buffer);
  } else {
    // Now simply replace the last buffer. Can improve the placement policy
    // if needed.
    new_buffer = buffers_[num_buf_ - 1].get();
  }

  assert(file_offset + len <= file_info_->data_end_offset);
  uint32_t size_to_read = std::min(file_info_->data_end_offset - file_offset,
                                   std::max(kPrefetchSize, len));
  if (size_to_read > new_buffer->buf_capacity) {
    new_buffer->buf.reset(new char[size_to_read]);
    new_buffer->buf_capacity = size_to_read;
    new_buffer->buf_len = 0;
  }
  Slice read_result;
  Status s = file_info_->file->Read(file_offset, size_to_read, &read_result,
                                    new_buffer->buf.get());
  if (!s.ok()) {
    status_ = s;
    return false;
  }
  new_buffer->buf_start_offset = file_offset;
  new_buffer->buf_len = size_to_read;
  *out = GetFromBuffer(new_buffer, file_offset, len);
  return true;
}

inline bool PlainTableFileReader::ReadVarint32(uint32_t offset, uint32_t* out,
                                               uint32_t* bytes_read) {
  if (file_info_->is_mmap_mode) {
    const char* start = file_info_->file_data.data() + offset;
    const char* limit =
        file_info_->file_data.data() + file_info_->data_end_offset;
    const char* key_ptr = GetVarint32Ptr(start, limit, out);
    assert(key_ptr != nullptr);
    *bytes_read = static_cast<uint32_t>(key_ptr - start);
    return true;
  } else {
    return ReadVarint32NonMmap(offset, out, bytes_read);
  }
}

bool PlainTableFileReader::ReadVarint32NonMmap(uint32_t offset, uint32_t* out,
                                               uint32_t* bytes_read) {
  const char* start;
  const char* limit;
  const uint32_t kMaxVarInt32Size = 6u;
  uint32_t bytes_to_read =
      std::min(file_info_->data_end_offset - offset, kMaxVarInt32Size);
  Slice bytes;
  if (!Read(offset, bytes_to_read, &bytes)) {
    return false;
  }
  start = bytes.data();
  limit = bytes.data() + bytes.size();

  const char* key_ptr = GetVarint32Ptr(start, limit, out);
  *bytes_read =
      (key_ptr != nullptr) ? static_cast<uint32_t>(key_ptr - start) : 0;
  return true;
}
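
// Reads the internal key starting at file_offset whose user key part is
// user_key_size bytes long. On disk the user key is followed either by the
// one-byte kValueTypeSeqId0 flag (sequence number 0 and kTypeValue, with no
// 8-byte footer stored) or by the regular 8-byte sequence/type footer.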
Status PlainTableKeyDecoder::ReadInternalKey(
    uint32_t file_offset, uint32_t user_key_size, ParsedInternalKey* parsed_key,
    uint32_t* bytes_read, bool* internal_key_valid, Slice* internal_key) {
  Slice tmp_slice;
  bool success = file_reader_.Read(file_offset, user_key_size + 1, &tmp_slice);
  if (!success) {
    return file_reader_.status();
  }
  if (tmp_slice[user_key_size] == PlainTableFactory::kValueTypeSeqId0) {
    // Special encoding for the row with seqID=0
    parsed_key->user_key = Slice(tmp_slice.data(), user_key_size);
    parsed_key->sequence = 0;
    parsed_key->type = kTypeValue;
    *bytes_read += user_key_size + 1;
    *internal_key_valid = false;
  } else {
    success = file_reader_.Read(file_offset, user_key_size + 8, internal_key);
    if (!success) {
      return file_reader_.status();
    }
    *internal_key_valid = true;
    if (!ParseInternalKey(*internal_key, parsed_key)) {
      return Status::Corruption(
          Slice("Incorrect value type found when reading the next key"));
    }
    *bytes_read += user_key_size + 8;
  }
  return Status::OK();
}

Status PlainTableKeyDecoder::NextPlainEncodingKey(uint32_t start_offset,
                                                  ParsedInternalKey* parsed_key,
                                                  Slice* internal_key,
                                                  uint32_t* bytes_read,
                                                  bool* /*seekable*/) {
  uint32_t user_key_size = 0;
  Status s;
  if (fixed_user_key_len_ != kPlainTableVariableLength) {
    user_key_size = fixed_user_key_len_;
  } else {
    uint32_t tmp_size = 0;
    uint32_t tmp_read;
    bool success =
        file_reader_.ReadVarint32(start_offset, &tmp_size, &tmp_read);
    if (!success) {
      return file_reader_.status();
    }
    assert(tmp_read > 0);
    user_key_size = tmp_size;
    *bytes_read = tmp_read;
  }
  // dummy initial value to avoid compiler complaint
  bool decoded_internal_key_valid = true;
  Slice decoded_internal_key;
  s = ReadInternalKey(start_offset + *bytes_read, user_key_size, parsed_key,
                      bytes_read, &decoded_internal_key_valid,
                      &decoded_internal_key);
  if (!s.ok()) {
    return s;
  }
  if (!file_reader_.file_info()->is_mmap_mode) {
    cur_key_.SetInternalKey(*parsed_key);
    parsed_key->user_key =
        Slice(cur_key_.GetInternalKey().data(), user_key_size);
    if (internal_key != nullptr) {
      *internal_key = cur_key_.GetInternalKey();
    }
  } else if (internal_key != nullptr) {
    if (decoded_internal_key_valid) {
      *internal_key = decoded_internal_key;
    } else {
      // Need to copy out the internal key
      cur_key_.SetInternalKey(*parsed_key);
      *internal_key = cur_key_.GetInternalKey();
    }
  }
  return Status::OK();
}
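
// Decodes one key under kPrefix encoding: size entries are read starting at
// start_offset until a kFullKey entry, or a kKeySuffix entry following an
// optional kPrefixFromPreviousKey entry, yields a complete key. *seekable is
// set to false for entries that depend on a previously decoded prefix.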
Status PlainTableKeyDecoder::NextPrefixEncodingKey(
    uint32_t start_offset, ParsedInternalKey* parsed_key, Slice* internal_key,
    uint32_t* bytes_read, bool* seekable) {
  PlainTableEntryType entry_type;

  bool expect_suffix = false;
  Status s;
  do {
    uint32_t size = 0;
    // dummy initial value to avoid compiler complaint
    bool decoded_internal_key_valid = true;
    uint32_t my_bytes_read = 0;
    s = DecodeSize(start_offset + *bytes_read, &entry_type, &size,
                   &my_bytes_read);
    if (!s.ok()) {
      return s;
    }
    if (my_bytes_read == 0) {
      return Status::Corruption("Unexpected EOF when reading size of the key");
    }
    *bytes_read += my_bytes_read;

    switch (entry_type) {
      case kFullKey: {
        expect_suffix = false;
        Slice decoded_internal_key;
        s = ReadInternalKey(start_offset + *bytes_read, size, parsed_key,
                            bytes_read, &decoded_internal_key_valid,
                            &decoded_internal_key);
        if (!s.ok()) {
          return s;
        }
        if (!file_reader_.file_info()->is_mmap_mode ||
            (internal_key != nullptr && !decoded_internal_key_valid)) {
          // In non-mmap mode, always need to make a copy of keys returned to
          // users, because after reading value for the key, the key might
          // be invalid.
          cur_key_.SetInternalKey(*parsed_key);
          saved_user_key_ = cur_key_.GetUserKey();
          if (!file_reader_.file_info()->is_mmap_mode) {
            parsed_key->user_key =
                Slice(cur_key_.GetInternalKey().data(), size);
          }
          if (internal_key != nullptr) {
            *internal_key = cur_key_.GetInternalKey();
          }
        } else {
          if (internal_key != nullptr) {
            *internal_key = decoded_internal_key;
          }
          saved_user_key_ = parsed_key->user_key;
        }
        break;
      }
      case kPrefixFromPreviousKey: {
        if (seekable != nullptr) {
          *seekable = false;
        }
        prefix_len_ = size;
        assert(prefix_extractor_ == nullptr ||
               prefix_extractor_->Transform(saved_user_key_).size() ==
                   prefix_len_);
        // Need to read another size flag for the suffix
        expect_suffix = true;
        break;
      }
      case kKeySuffix: {
        expect_suffix = false;
        if (seekable != nullptr) {
          *seekable = false;
        }

        Slice tmp_slice;
        s = ReadInternalKey(start_offset + *bytes_read, size, parsed_key,
                            bytes_read, &decoded_internal_key_valid,
                            &tmp_slice);
        if (!s.ok()) {
          return s;
        }
        if (!file_reader_.file_info()->is_mmap_mode) {
          // In non-mmap mode, we need to make a copy of keys returned to
          // users, because after reading value for the key, the key might
          // be invalid.
          // saved_user_key_ points to cur_key_. We are making a copy of
          // the prefix part to another string, and construct the current
          // key from the prefix part and the suffix part back to cur_key_.
          std::string tmp =
              Slice(saved_user_key_.data(), prefix_len_).ToString();
          cur_key_.Reserve(prefix_len_ + size);
          cur_key_.SetInternalKey(tmp, *parsed_key);
          parsed_key->user_key =
              Slice(cur_key_.GetInternalKey().data(), prefix_len_ + size);
          saved_user_key_ = cur_key_.GetUserKey();
        } else {
          cur_key_.Reserve(prefix_len_ + size);
          cur_key_.SetInternalKey(Slice(saved_user_key_.data(), prefix_len_),
                                  *parsed_key);
        }
        parsed_key->user_key = cur_key_.GetUserKey();
        if (internal_key != nullptr) {
          *internal_key = cur_key_.GetInternalKey();
        }
        break;
      }
      default:
        return Status::Corruption("Un-identified size flag.");
    }
  } while (expect_suffix);  // Another round if suffix is expected.
  return Status::OK();
}
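
// Reads the next key starting at start_offset and then its value: a varint32
// value size followed by that many value bytes. *bytes_read is set to the
// total number of bytes consumed by the key, the value size and the value.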
Status PlainTableKeyDecoder::NextKey(uint32_t start_offset,
                                     ParsedInternalKey* parsed_key,
                                     Slice* internal_key, Slice* value,
                                     uint32_t* bytes_read, bool* seekable) {
  assert(value != nullptr);
  Status s = NextKeyNoValue(start_offset, parsed_key, internal_key, bytes_read,
                            seekable);
  if (s.ok()) {
    assert(bytes_read != nullptr);
    uint32_t value_size;
    uint32_t value_size_bytes;
    bool success = file_reader_.ReadVarint32(start_offset + *bytes_read,
                                             &value_size, &value_size_bytes);
    if (!success) {
      return file_reader_.status();
    }
    if (value_size_bytes == 0) {
      return Status::Corruption(
          "Unexpected EOF when reading the next value's size.");
    }
    *bytes_read += value_size_bytes;
    success = file_reader_.Read(start_offset + *bytes_read, value_size, value);
    if (!success) {
      return file_reader_.status();
    }
    *bytes_read += value_size;
  }
  return s;
}

Status PlainTableKeyDecoder::NextKeyNoValue(uint32_t start_offset,
                                            ParsedInternalKey* parsed_key,
                                            Slice* internal_key,
                                            uint32_t* bytes_read,
                                            bool* seekable) {
  *bytes_read = 0;
  if (seekable != nullptr) {
    *seekable = true;
  }
  Status s;
  if (encoding_type_ == kPlain) {
    return NextPlainEncodingKey(start_offset, parsed_key, internal_key,
                                bytes_read, seekable);
  } else {
    assert(encoding_type_ == kPrefix);
    return NextPrefixEncodingKey(start_offset, parsed_key, internal_key,
                                 bytes_read, seekable);
  }
}

}  // namespace rocksdb
#endif  // ROCKSDB_LITE