f2f4c8072f
Summary: The purpose of this diff is to expose per user-call level precise timing of block read, so that we can answer questions like: a Get() costs me 100ms, is that somehow related to loading blocks from the file system, or something else? We will answer that with EXACTLY how many blocks have been read, how much time was spent on transferring the bytes from the OS, how much time was spent on checksum verification and how much time was spent on block decompression, just for that one Get. A nanosecond stopwatch was introduced to track time with higher precision. The cost/precision of the stopwatch is also measured in a unit test. On my dev box, retrieving one time instance costs about 30ns, on average. The deviation of timing results is good enough to track 100ns-1us level events. And the overhead can be safely ignored for 100us level events (10000 instances/s), for example, a viewstate thrift call. Test Plan: perf_context_test, also testing with viewstate shadow traffic. Reviewers: dhruba Reviewed By: dhruba CC: leveldb, xjin Differential Revision: https://reviews.facebook.net/D12351
191 lines
6.0 KiB
C++
191 lines
6.0 KiB
C++
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
#include "table/format.h"

#include <memory>

#include "port/port.h"
#include "rocksdb/env.h"
#include "table/block.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/perf_context_imp.h"
|
|
|
|
namespace leveldb {
|
|
|
|
void BlockHandle::EncodeTo(std::string* dst) const {
  // Serialize the handle as two varint64 fields: offset, then size.
  // An all-ones value is the sentinel left by the default constructor,
  // so encountering it here means the caller never filled the field in.
  const uint64_t kUnset = ~static_cast<uint64_t>(0);
  assert(offset_ != kUnset);
  assert(size_ != kUnset);
  (void)kUnset;  // Unused when asserts are compiled out.
  PutVarint64(dst, offset_);
  PutVarint64(dst, size_);
}
|
|
|
|
Status BlockHandle::DecodeFrom(Slice* input) {
  // Parse the two varint64 fields (offset, then size) in order,
  // consuming them from *input.  If either varint is malformed or
  // truncated, report the handle as corrupt.
  if (!GetVarint64(input, &offset_) || !GetVarint64(input, &size_)) {
    return Status::Corruption("bad block handle");
  }
  return Status::OK();
}
|
|
|
|
void Footer::EncodeTo(std::string* dst) const {
  // Appends the fixed-size footer to *dst:
  //   metaindex handle (varint64 x2), index handle (varint64 x2),
  //   zero padding up to 2 * BlockHandle::kMaxEncodedLength,
  //   then the magic number as two fixed32 words (low word first).
  const size_t original_size = dst->size();
  metaindex_handle_.EncodeTo(dst);
  index_handle_.EncodeTo(dst);
  // Pad the variable-length handle encodings out to their maximum
  // combined length so the footer always occupies kEncodedLength bytes.
  // The resize is relative to original_size: the previous absolute
  // resize(2 * kMaxEncodedLength) silently truncated *dst (and tripped
  // the assert below) whenever the string was non-empty on entry.
  dst->resize(original_size + 2 * BlockHandle::kMaxEncodedLength);
  PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber & 0xffffffffu));
  PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber >> 32));
  assert(dst->size() == original_size + kEncodedLength);
  (void)original_size;  // Silence unused-variable warning under NDEBUG.
}
|
|
|
|
Status Footer::DecodeFrom(Slice* input) {
  assert(input != nullptr);
  assert(input->size() >= kEncodedLength);

  // The magic number occupies the last eight bytes of the footer,
  // stored as two little-endian fixed32 words, low half first.
  const char* magic_ptr = input->data() + kEncodedLength - 8;
  const uint64_t magic =
      (static_cast<uint64_t>(DecodeFixed32(magic_ptr + 4)) << 32) |
      static_cast<uint64_t>(DecodeFixed32(magic_ptr));
  if (magic != kTableMagicNumber) {
    return Status::InvalidArgument("not an sstable (bad magic number)");
  }

  // Decode the two block handles that precede the padding; the second
  // is only attempted if the first succeeded.
  Status result = metaindex_handle_.DecodeFrom(input);
  if (result.ok()) {
    result = index_handle_.DecodeFrom(input);
  }
  if (result.ok()) {
    // Skip any leftover bytes (currently just padding) so that *input
    // resumes immediately past the footer.
    const char* end = magic_ptr + 8;
    *input = Slice(end, input->data() + input->size() - end);
  }
  return result;
}
|
|
|
|
// Reads the block identified by "handle" from "file", verifies its CRC
// (if requested via options), decompresses it if necessary, and fills in
// *result.  On success result->data points either into file-owned memory
// (heap_allocated == false) or to a heap buffer owned by the caller
// (heap_allocated == true).  Per-call timing and byte counts are recorded
// in perf_context.
//
// Fix over the previous version: the buffers are now held in
// std::unique_ptr<char[]> and released only at the points where ownership
// is transferred to *result, replacing eight hand-written delete[] calls
// spread across the early-return paths (any future early return would
// silently leak).  Control flow, perf-counter ordering, and all error
// messages are unchanged.
Status ReadBlockContents(RandomAccessFile* file,
                         const ReadOptions& options,
                         const BlockHandle& handle,
                         BlockContents* result,
                         Env* env) {
  result->data = Slice();
  result->cachable = false;
  result->heap_allocated = false;

  // Read the block contents as well as the type/crc footer.
  // See table_builder.cc for the code that built this structure.
  size_t n = static_cast<size_t>(handle.size());
  std::unique_ptr<char[]> buf(new char[n + kBlockTrailerSize]);
  Slice contents;

  // Time the raw read separately from checksum and decompression below.
  StopWatchNano timer(env);
  StartPerfTimer(&timer);
  Status s = file->Read(handle.offset(), n + kBlockTrailerSize, &contents,
                        buf.get());
  BumpPerfCount(&perf_context.block_read_count);
  BumpPerfCount(&perf_context.block_read_byte, n + kBlockTrailerSize);
  BumpPerfTime(&perf_context.block_read_time, &timer);

  if (!s.ok()) {
    return s;
  }
  if (contents.size() != n + kBlockTrailerSize) {
    return Status::Corruption("truncated block read");
  }

  // Check the crc of the type and the block contents.
  const char* data = contents.data();  // Pointer to where Read put the data
  if (options.verify_checksums) {
    // The stored crc covers the block payload plus the one-byte type tag.
    const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
    const uint32_t actual = crc32c::Value(data, n + 1);
    if (actual != crc) {
      return Status::Corruption("block checksum mismatch");
    }
    BumpPerfTime(&perf_context.block_checksum_time, &timer);
  }

  // data[n] is the one-byte compression type tag written by the builder.
  switch (data[n]) {
    case kNoCompression: {
      if (data != buf.get()) {
        // File implementation gave us a pointer to some other data.
        // Use it directly under the assumption that it will be live
        // while the file is open; our scratch buffer is freed on return.
        result->data = Slice(data, n);
        result->heap_allocated = false;
        result->cachable = false;  // Do not double-cache
      } else {
        // Hand our heap buffer to the caller.
        result->data = Slice(buf.release(), n);
        result->heap_allocated = true;
        result->cachable = true;
      }
      break;
    }
    case kSnappyCompression: {
      static char snappy_corrupt_msg[] =
          "Snappy not supported or corrupted Snappy compressed block contents";
      size_t ulength = 0;
      if (!port::Snappy_GetUncompressedLength(data, n, &ulength)) {
        return Status::Corruption(snappy_corrupt_msg);
      }
      std::unique_ptr<char[]> ubuf(new char[ulength]);
      if (!port::Snappy_Uncompress(data, n, ubuf.get())) {
        return Status::Corruption(snappy_corrupt_msg);
      }
      result->data = Slice(ubuf.release(), ulength);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    }
    case kZlibCompression: {
      static char zlib_corrupt_msg[] =
          "Zlib not supported or corrupted Zlib compressed block contents";
      int decompress_size = 0;
      std::unique_ptr<char[]> ubuf(
          port::Zlib_Uncompress(data, n, &decompress_size));
      if (!ubuf) {
        return Status::Corruption(zlib_corrupt_msg);
      }
      result->data = Slice(ubuf.release(), decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    }
    case kBZip2Compression: {
      static char bzip2_corrupt_msg[] =
          "Bzip2 not supported or corrupted Bzip2 compressed block contents";
      int decompress_size = 0;
      std::unique_ptr<char[]> ubuf(
          port::BZip2_Uncompress(data, n, &decompress_size));
      if (!ubuf) {
        return Status::Corruption(bzip2_corrupt_msg);
      }
      result->data = Slice(ubuf.release(), decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    }
    default:
      return Status::Corruption("bad block type");
  }

  // NOTE(review): as in the original, this timer span also covers the
  // uncompressed path and (when verify_checksums is off) everything since
  // the read — presumably the perf-timer resets on each Bump; confirm
  // against util/perf_context_imp.h.
  BumpPerfTime(&perf_context.block_decompress_time, &timer);

  return Status::OK();
}
|
|
|
|
} // namespace leveldb
|