caf0f53a74
Summary: Since each index value is a BlockHandle, which is essentially an <offset, size> pair, we can apply delta encoding to the values. The first value at each index restart interval encodes the full BlockHandle, while the rest encode only the size; refer to IndexBlockIter::DecodeCurrentValue for the details of the encoding. This reduces the index size, which helps use the block cache more efficiently. The feature is enabled with format_version 4. It comes with a small CPU overhead, which should be paid back by the higher cache hit rate resulting from the smaller index block.

Results with sysbench read-only, 4k blocks, and an index restart interval of 16:
Format 2: 19585 rocksdb read-only range=100
Format 3: 19569 rocksdb read-only range=100
Format 4: 19352 rocksdb read-only range=100

Pull Request resolved: https://github.com/facebook/rocksdb/pull/3983
Differential Revision: D8361343
Pulled By: maysamyabandeh
fbshipit-source-id: f882ee082322acac32b0072e2bdbb0b5f854e651
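For illustration only, here is a minimal sketch (not part of this commit) of how delta-encoded index values can be reconstructed, assuming data blocks are laid out contiguously and separated by a fixed-size block trailer; the actual decoding lives in IndexBlockIter::DecodeCurrentValue. HandleSketch and DecodeIntervalSketch are illustrative names, not RocksDB APIs.

#include <cstdint>
#include <vector>

// Hypothetical stand-in for rocksdb::BlockHandle: an <offset, size> pair.
struct HandleSketch {
  uint64_t offset;
  uint64_t size;
};

// Rebuild the full handles of one index restart interval. The first entry
// carries the complete <offset, size>; every later entry carries only its
// size, because offset[i] = offset[i-1] + size[i-1] + trailer_size.
std::vector<HandleSketch> DecodeIntervalSketch(
    const HandleSketch& first, const std::vector<uint64_t>& sizes,
    uint64_t trailer_size /* assumed fixed per-block trailer */) {
  std::vector<HandleSketch> handles{first};
  for (uint64_t size : sizes) {
    const HandleSketch& prev = handles.back();
    handles.push_back({prev.offset + prev.size + trailer_size, size});
  }
  return handles;
}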
162 lines
5.8 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// BlockBuilder generates blocks where keys are prefix-compressed:
//
// When we store a key, we drop the prefix shared with the previous
// string. This helps reduce the space requirement significantly.
// Furthermore, once every K keys, we do not apply the prefix
// compression and store the entire key. We call this a "restart
// point". The tail end of the block stores the offsets of all of the
// restart points, and can be used to do a binary search when looking
// for a particular key. Values are stored as-is (without compression)
// immediately following the corresponding key.
//
// An entry for a particular key-value pair has the form:
//     shared_bytes: varint32
//     unshared_bytes: varint32
//     value_length: varint32
//     key_delta: char[unshared_bytes]
//     value: char[value_length]
// shared_bytes == 0 for restart points.
//
// The trailer of the block has the form:
//     restarts: uint32[num_restarts]
//     num_restarts: uint32
// restarts[i] contains the offset within the block of the ith restart point.
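//
// Illustrative example (assuming a restart interval larger than two and
// plain, non-delta values): adding ("apple", "v1") and then ("apricot", "v2")
// yields two entries. The first is a restart point:
//     shared=0, unshared=5, value_length=2, key_delta="apple", value="v1"
// The second shares the two-byte prefix "ap" with the previous key:
//     shared=2, unshared=5, value_length=2, key_delta="ricot", value="v2"
// The trailer is then restarts=[0], num_restarts=1.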

#include "table/block_builder.h"

#include <algorithm>
#include <assert.h>
#include "rocksdb/comparator.h"
#include "db/dbformat.h"
#include "util/coding.h"

namespace rocksdb {

BlockBuilder::BlockBuilder(int block_restart_interval, bool use_delta_encoding,
                           bool use_value_delta_encoding)
    : block_restart_interval_(block_restart_interval),
      use_delta_encoding_(use_delta_encoding),
      use_value_delta_encoding_(use_value_delta_encoding),
      restarts_(),
      counter_(0),
      finished_(false) {
  assert(block_restart_interval_ >= 1);
  restarts_.push_back(0);  // First restart point is at offset 0
  estimate_ = sizeof(uint32_t) + sizeof(uint32_t);  // restart entry + num_restarts
}

void BlockBuilder::Reset() {
  buffer_.clear();
  restarts_.clear();
  restarts_.push_back(0);  // First restart point is at offset 0
  estimate_ = sizeof(uint32_t) + sizeof(uint32_t);
  counter_ = 0;
  finished_ = false;
  last_key_.clear();
}

size_t BlockBuilder::EstimateSizeAfterKV(const Slice& key, const Slice& value)
    const {
  size_t estimate = CurrentSizeEstimate();
  // Note: this is an imprecise estimate as it accounts for the whole key size
  // instead of the non-shared key size.
  estimate += key.size();
  // In value delta encoding we estimate the value delta size as half the full
  // value size, since only the size field of the block handle is encoded.
  estimate +=
      !use_value_delta_encoding_ || (counter_ >= block_restart_interval_)
          ? value.size()
          : value.size() / 2;

  if (counter_ >= block_restart_interval_) {
    estimate += sizeof(uint32_t);  // a new restart entry.
  }

  estimate += sizeof(int32_t);  // varint for shared prefix length.
  // Note: this is an imprecise estimate, as we will actually encode two sizes,
  // one for the shared key prefix and one for the non-shared part.
  estimate += VarintLength(key.size());  // varint for key length.
  if (!use_value_delta_encoding_ || (counter_ >= block_restart_interval_)) {
    estimate += VarintLength(value.size());  // varint for value length.
  }

  return estimate;
}

Slice BlockBuilder::Finish() {
  // Append restart array
  for (size_t i = 0; i < restarts_.size(); i++) {
    PutFixed32(&buffer_, restarts_[i]);
  }
  PutFixed32(&buffer_, static_cast<uint32_t>(restarts_.size()));
  finished_ = true;
  return Slice(buffer_);
}

void BlockBuilder::Add(const Slice& key, const Slice& value,
                       const Slice* const delta_value) {
  assert(!finished_);
  assert(counter_ <= block_restart_interval_);
  assert(!use_value_delta_encoding_ || delta_value);
  size_t shared = 0;  // number of bytes shared with prev key
  if (counter_ >= block_restart_interval_) {
    // Restart compression
    restarts_.push_back(static_cast<uint32_t>(buffer_.size()));
    estimate_ += sizeof(uint32_t);
    counter_ = 0;

    if (use_delta_encoding_) {
      // Update state
      last_key_.assign(key.data(), key.size());
    }
  } else if (use_delta_encoding_) {
    Slice last_key_piece(last_key_);
    // See how much sharing to do with previous string
    shared = key.difference_offset(last_key_piece);

    // Update state
    // We used to just copy the changed data here, but it appears to be
    // faster to just copy the whole thing.
    last_key_.assign(key.data(), key.size());
  }

  const size_t non_shared = key.size() - shared;
  const size_t curr_size = buffer_.size();

  if (use_value_delta_encoding_) {
    // Add "<shared><non_shared>" to buffer_
    PutVarint32Varint32(&buffer_, static_cast<uint32_t>(shared),
                        static_cast<uint32_t>(non_shared));
  } else {
    // Add "<shared><non_shared><value_size>" to buffer_
    PutVarint32Varint32Varint32(&buffer_, static_cast<uint32_t>(shared),
                                static_cast<uint32_t>(non_shared),
                                static_cast<uint32_t>(value.size()));
  }

  // Add string delta to buffer_ followed by value
  buffer_.append(key.data() + shared, non_shared);
  // Use value delta encoding only when the key has shared bytes. This
  // simplifies decoding, which can tell which encoding was used simply by
  // looking at the shared bytes size.
  if (shared != 0 && use_value_delta_encoding_) {
    buffer_.append(delta_value->data(), delta_value->size());
  } else {
    buffer_.append(value.data(), value.size());
  }

  counter_++;
  estimate_ += buffer_.size() - curr_size;
}

} // namespace rocksdb
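// Usage sketch (illustrative): BlockBuilder is typically fed keys in sorted
// order and reused across blocks via Reset(). The restart interval and
// encoding flags below are arbitrary example values.
//
//   rocksdb::BlockBuilder builder(16 /* block_restart_interval */,
//                                 true /* use_delta_encoding */,
//                                 false /* use_value_delta_encoding */);
//   builder.Add("key1", "value1", nullptr);
//   builder.Add("key2", "value2", nullptr);
//   rocksdb::Slice contents = builder.Finish();
//   // ... persist `contents` as a block in the table file, then:
//   builder.Reset();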