// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/format.h"
|
|
|
|
|
|
|
|
#include "port/port.h"
|
[RocksDB] Added nano second stopwatch and new perf counters to track block read cost
Summary: The pupose of this diff is to expose per user-call level precise timing of block read, so that we can answer questions like: a Get() costs me 100ms, is that somehow related to loading blocks from file system, or sth else? We will answer that with EXACTLY how many blocks have been read, how much time was spent on transfering the bytes from os, how much time was spent on checksum verification and how much time was spent on block decompression, just for that one Get. A nano second stopwatch was introduced to track time with higher precision. The cost/precision of the stopwatch is also measured in unit-test. On my dev box, retrieving one time instance costs about 30ns, on average. The deviation of timing results is good enough to track 100ns-1us level events. And the overhead could be safely ignored for 100us level events (10000 instances/s), for example, a viewstate thrift call.
Test Plan: perf_context_test, also testing with viewstate shadow traffic.
Reviewers: dhruba
Reviewed By: dhruba
CC: leveldb, xjin
Differential Revision: https://reviews.facebook.net/D12351
2013-06-04 08:09:15 +02:00
|
|
|
#include "rocksdb/env.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "table/block.h"
|
|
|
|
#include "util/coding.h"
|
|
|
|
#include "util/crc32c.h"
|
[RocksDB] Added nano second stopwatch and new perf counters to track block read cost
Summary: The pupose of this diff is to expose per user-call level precise timing of block read, so that we can answer questions like: a Get() costs me 100ms, is that somehow related to loading blocks from file system, or sth else? We will answer that with EXACTLY how many blocks have been read, how much time was spent on transfering the bytes from os, how much time was spent on checksum verification and how much time was spent on block decompression, just for that one Get. A nano second stopwatch was introduced to track time with higher precision. The cost/precision of the stopwatch is also measured in unit-test. On my dev box, retrieving one time instance costs about 30ns, on average. The deviation of timing results is good enough to track 100ns-1us level events. And the overhead could be safely ignored for 100us level events (10000 instances/s), for example, a viewstate thrift call.
Test Plan: perf_context_test, also testing with viewstate shadow traffic.
Reviewers: dhruba
Reviewed By: dhruba
CC: leveldb, xjin
Differential Revision: https://reviews.facebook.net/D12351
2013-06-04 08:09:15 +02:00
|
|
|
#include "util/perf_context_imp.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
namespace rocksdb {

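// A BlockHandle is a pointer to the extent of a file that stores a data
// block or a meta block, encoded as a varint64 offset followed by a
// varint64 size.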
void BlockHandle::EncodeTo(std::string* dst) const {
  // Sanity check that all fields have been set
  assert(offset_ != ~static_cast<uint64_t>(0));
  assert(size_ != ~static_cast<uint64_t>(0));
  PutVarint64(dst, offset_);
  PutVarint64(dst, size_);
}

Status BlockHandle::DecodeFrom(Slice* input) {
  if (GetVarint64(input, &offset_) &&
      GetVarint64(input, &size_)) {
    return Status::OK();
  } else {
    return Status::Corruption("bad block handle");
  }
}

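// The Footer is the fixed-length tail of every table file: the metaindex
// and index block handles, zero padding up to
// 2 * BlockHandle::kMaxEncodedLength bytes, and the 8-byte magic number,
// for kEncodedLength bytes in total.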
void Footer::EncodeTo(std::string* dst) const {
#ifndef NDEBUG
  const size_t original_size = dst->size();
#endif
  metaindex_handle_.EncodeTo(dst);
  index_handle_.EncodeTo(dst);
  dst->resize(2 * BlockHandle::kMaxEncodedLength);  // Padding
  PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber & 0xffffffffu));
  PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber >> 32));
  assert(dst->size() == original_size + kEncodedLength);
}

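// Decodes the footer at the front of "*input"; on success, "*input" is
// advanced past the footer (padding and magic number included).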
Status Footer::DecodeFrom(Slice* input) {
  assert(input != nullptr);
  assert(input->size() >= kEncodedLength);

  const char* magic_ptr = input->data() + kEncodedLength - 8;
  const uint32_t magic_lo = DecodeFixed32(magic_ptr);
  const uint32_t magic_hi = DecodeFixed32(magic_ptr + 4);
  const uint64_t magic = ((static_cast<uint64_t>(magic_hi) << 32) |
                          (static_cast<uint64_t>(magic_lo)));
  if (magic != kTableMagicNumber) {
    return Status::InvalidArgument("not an sstable (bad magic number)");
  }

  Status result = metaindex_handle_.DecodeFrom(input);
  if (result.ok()) {
    result = index_handle_.DecodeFrom(input);
  }
  if (result.ok()) {
    // We skip over any leftover data (just padding for now) in "input"
    const char* end = magic_ptr + 8;
    *input = Slice(end, input->data() + input->size() - end);
  }
  return result;
}

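// Reads the block identified by "handle" from "file" into "*result",
// verifying the checksum if requested and decompressing the contents
// according to the type byte in the block trailer. On failure, returns
// a non-OK status and leaves "*result" empty.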
Status ReadBlockContents(RandomAccessFile* file,
                         const ReadOptions& options,
                         const BlockHandle& handle,
                         BlockContents* result,
                         Env* env) {
  result->data = Slice();
  result->cachable = false;
  result->heap_allocated = false;

  // Read the block contents as well as the type/crc footer.
  // See table_builder.cc for the code that built this structure.
  size_t n = static_cast<size_t>(handle.size());
  char* buf = new char[n + kBlockTrailerSize];
  Slice contents;

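  // Time the raw file read and count bytes read, so that a single user
  // call (e.g. one Get()) can see exactly how much of its latency went
  // to block I/O via perf_context.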
  StopWatchNano timer(env);
  StartPerfTimer(&timer);
  Status s = file->Read(handle.offset(), n + kBlockTrailerSize, &contents, buf);
  BumpPerfCount(&perf_context.block_read_count);
  BumpPerfCount(&perf_context.block_read_byte, n + kBlockTrailerSize);
  BumpPerfTime(&perf_context.block_read_time, &timer);

  if (!s.ok()) {
    delete[] buf;
    return s;
  }
  if (contents.size() != n + kBlockTrailerSize) {
    delete[] buf;
    return Status::Corruption("truncated block read");
  }

  // Check the crc of the type and the block contents
  const char* data = contents.data();  // Pointer to where Read put the data
  if (options.verify_checksums) {
    const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
    const uint32_t actual = crc32c::Value(data, n + 1);
    if (actual != crc) {
      delete[] buf;
      s = Status::Corruption("block checksum mismatch");
      return s;
    }
    BumpPerfTime(&perf_context.block_checksum_time, &timer);
  }

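  // data[n] is the 1-byte compression type from the block trailer
  // (kBlockTrailerSize = 1-byte type + 32-bit crc).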
  char* ubuf = nullptr;
  int decompress_size = 0;
  switch (data[n]) {
    case kNoCompression:
      if (data != buf) {
        // File implementation gave us pointer to some other data.
        // Use it directly under the assumption that it will be live
        // while the file is open.
        delete[] buf;
        result->data = Slice(data, n);
        result->heap_allocated = false;
        result->cachable = false;  // Do not double-cache
      } else {
        result->data = Slice(buf, n);
        result->heap_allocated = true;
        result->cachable = true;
      }

      // Ok
      break;
    case kSnappyCompression: {
      size_t ulength = 0;
      static char snappy_corrupt_msg[] =
          "Snappy not supported or corrupted Snappy compressed block contents";
      if (!port::Snappy_GetUncompressedLength(data, n, &ulength)) {
        delete[] buf;
        return Status::Corruption(snappy_corrupt_msg);
      }
      ubuf = new char[ulength];
      if (!port::Snappy_Uncompress(data, n, ubuf)) {
        delete[] buf;
        delete[] ubuf;
        return Status::Corruption(snappy_corrupt_msg);
      }
      delete[] buf;
      result->data = Slice(ubuf, ulength);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    }
    case kZlibCompression:
      ubuf = port::Zlib_Uncompress(data, n, &decompress_size);
      static char zlib_corrupt_msg[] =
          "Zlib not supported or corrupted Zlib compressed block contents";
      if (!ubuf) {
        delete[] buf;
        return Status::Corruption(zlib_corrupt_msg);
      }
      delete[] buf;
      result->data = Slice(ubuf, decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    case kBZip2Compression:
      ubuf = port::BZip2_Uncompress(data, n, &decompress_size);
      static char bzip2_corrupt_msg[] =
          "Bzip2 not supported or corrupted Bzip2 compressed block contents";
      if (!ubuf) {
        delete[] buf;
        return Status::Corruption(bzip2_corrupt_msg);
      }
      delete[] buf;
      result->data = Slice(ubuf, decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    default:
      delete[] buf;
      return Status::Corruption("bad block type");
  }

  BumpPerfTime(&perf_context.block_decompress_time, &timer);

  return Status::OK();
}

}  // namespace rocksdb