// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "table/block_fetcher.h"
|
2020-07-09 23:33:42 +02:00
|
|
|
|
2020-04-25 00:30:12 +02:00
|
|
|
#include "db/table_properties_collector.h"
|
2020-07-09 23:33:42 +02:00
|
|
|
#include "file/file_util.h"
|
2020-04-25 00:30:12 +02:00
|
|
|
#include "options/options_helper.h"
|
|
|
|
#include "port/port.h"
|
|
|
|
#include "port/stack_trace.h"
|
2021-01-29 07:08:46 +01:00
|
|
|
#include "rocksdb/file_system.h"
|
2020-04-25 00:30:12 +02:00
|
|
|
#include "table/block_based/binary_search_index_reader.h"
|
|
|
|
#include "table/block_based/block_based_table_builder.h"
|
|
|
|
#include "table/block_based/block_based_table_factory.h"
|
|
|
|
#include "table/block_based/block_based_table_reader.h"
|
|
|
|
#include "table/format.h"
|
|
|
|
#include "test_util/testharness.h"
|
|
|
|
|
|
|
|
namespace ROCKSDB_NAMESPACE {
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
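// A MemoryAllocator that counts allocations and deallocations so that tests
// can verify exactly which buffers BlockFetcher allocated and released.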
class CountedMemoryAllocator : public MemoryAllocator {
 public:
  const char* Name() const override { return "CountedMemoryAllocator"; }

  void* Allocate(size_t size) override {
    num_allocations_++;
    return static_cast<void*>(new char[size]);
  }

  void Deallocate(void* p) override {
    num_deallocations_++;
    delete[] static_cast<char*>(p);
  }

  int GetNumAllocations() const { return num_allocations_; }
  int GetNumDeallocations() const { return num_deallocations_; }

 private:
  int num_allocations_ = 0;
  int num_deallocations_ = 0;
};

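// Counts of memcpy calls made into each kind of destination buffer while
// fetching a single block.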
struct MemcpyStats {
  int num_stack_buf_memcpy;
  int num_heap_buf_memcpy;
  int num_compressed_buf_memcpy;
};

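// Counts of buffers allocated from the custom allocators while fetching a
// single block.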
struct BufAllocationStats {
  int num_heap_buf_allocations;
  int num_compressed_buf_allocations;
};

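// The full set of expected statistics for fetching one block in one mode.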
struct TestStats {
  MemcpyStats memcpy_stats;
  BufAllocationStats buf_allocation_stats;
};

class BlockFetcherTest : public testing::Test {
 public:
  enum class Mode {
    kBufferedRead = 0,
    kBufferedMmap,
    kDirectRead,
    kNumModes,
  };
  // use NumModes as array size to avoid "size of array '...' has non-integral
  // type" errors.
  const static int NumModes = static_cast<int>(Mode::kNumModes);

 protected:
  void SetUp() override {
    SetupSyncPointsToMockDirectIO();
    test_dir_ = test::PerThreadDBPath("block_fetcher_test");
    env_ = Env::Default();
    fs_ = FileSystem::Default();
    ASSERT_OK(fs_->CreateDir(test_dir_, IOOptions(), nullptr));
  }

  void TearDown() override { EXPECT_OK(DestroyDir(env_, test_dir_)); }

  void AssertSameBlock(const std::string& block1, const std::string& block2) {
    ASSERT_EQ(block1, block2);
  }

  // Creates a table with kv pairs (i, i) where i ranges from 0 to 8,
  // inclusive.
  void CreateTable(const std::string& table_name,
                   const CompressionType& compression_type) {
    std::unique_ptr<WritableFileWriter> writer;
    NewFileWriter(table_name, &writer);

    // Create table builder.
    ImmutableCFOptions ioptions(options_);
    InternalKeyComparator comparator(options_.comparator);
    ColumnFamilyOptions cf_options(options_);
    MutableCFOptions moptions(cf_options);
    std::vector<std::unique_ptr<IntTblPropCollectorFactory>> factories;
    std::unique_ptr<TableBuilder> table_builder(table_factory_.NewTableBuilder(
        TableBuilderOptions(ioptions, moptions, comparator, &factories,
                            compression_type, 0 /* sample_for_compression */,
                            CompressionOptions(), false /* skip_filters */,
                            kDefaultColumnFamilyName, -1 /* level */),
        0 /* column_family_id */, writer.get()));

    // Build table.
    for (int i = 0; i < 9; i++) {
      std::string key = ToInternalKey(std::to_string(i));
      // Append "00000000" to the string value to enhance the compression
      // ratio.
      std::string value = "00000000" + std::to_string(i);
      table_builder->Add(key, value);
    }
    ASSERT_OK(table_builder->Finish());
  }

  void FetchIndexBlock(const std::string& table_name,
                       CountedMemoryAllocator* heap_buf_allocator,
                       CountedMemoryAllocator* compressed_buf_allocator,
                       MemcpyStats* memcpy_stats, BlockContents* index_block,
                       std::string* result) {
    FileOptions fopt(options_);
    std::unique_ptr<RandomAccessFileReader> file;
    NewFileReader(table_name, fopt, &file);

    // Get handle of the index block.
    Footer footer;
    ReadFooter(file.get(), &footer);
    const BlockHandle& index_handle = footer.index_handle();

    CompressionType compression_type;
    FetchBlock(file.get(), index_handle, BlockType::kIndex,
               false /* compressed */, false /* do_uncompress */,
               heap_buf_allocator, compressed_buf_allocator, index_block,
               memcpy_stats, &compression_type);
    ASSERT_EQ(compression_type, CompressionType::kNoCompression);
    result->assign(index_block->data.ToString());
  }

  // Fetches the first data block in each read mode.
  //
  // compressed: whether the data blocks are compressed;
  // do_uncompress: whether the data blocks should be uncompressed on fetching.
  //
  // Expects:
  // Block contents are the same across all read modes.
  // Buffer allocation and memory copy statistics match expected_stats_by_mode.
  void TestFetchDataBlock(
      const std::string& table_name_prefix, bool compressed, bool do_uncompress,
      std::array<TestStats, NumModes> expected_stats_by_mode) {
    for (CompressionType compression_type : GetSupportedCompressions()) {
      bool do_compress = compression_type != kNoCompression;
      if (compressed != do_compress) continue;
      std::string compression_type_str =
          CompressionTypeToString(compression_type);

      std::string table_name = table_name_prefix + compression_type_str;
      CreateTable(table_name, compression_type);

      CompressionType expected_compression_type_after_fetch =
          (compressed && !do_uncompress) ? compression_type : kNoCompression;

      BlockContents blocks[NumModes];
      std::string block_datas[NumModes];
      MemcpyStats memcpy_stats[NumModes];
      CountedMemoryAllocator heap_buf_allocators[NumModes];
      CountedMemoryAllocator compressed_buf_allocators[NumModes];
      for (int i = 0; i < NumModes; ++i) {
        SetMode(static_cast<Mode>(i));
        FetchFirstDataBlock(table_name, compressed, do_uncompress,
                            expected_compression_type_after_fetch,
                            &heap_buf_allocators[i],
                            &compressed_buf_allocators[i], &blocks[i],
                            &block_datas[i], &memcpy_stats[i]);
      }

      for (int i = 0; i < NumModes - 1; ++i) {
        AssertSameBlock(block_datas[i], block_datas[i + 1]);
      }

      // Check memcpy and buffer allocation statistics.
      for (int i = 0; i < NumModes; ++i) {
        const TestStats& expected_stats = expected_stats_by_mode[i];

        ASSERT_EQ(memcpy_stats[i].num_stack_buf_memcpy,
                  expected_stats.memcpy_stats.num_stack_buf_memcpy);
        ASSERT_EQ(memcpy_stats[i].num_heap_buf_memcpy,
                  expected_stats.memcpy_stats.num_heap_buf_memcpy);
        ASSERT_EQ(memcpy_stats[i].num_compressed_buf_memcpy,
                  expected_stats.memcpy_stats.num_compressed_buf_memcpy);

        if (kXpressCompression == compression_type) {
          // XPRESS allocates memory internally, thus does not support
          // custom allocator verification.
          continue;
        } else {
          ASSERT_EQ(
              heap_buf_allocators[i].GetNumAllocations(),
              expected_stats.buf_allocation_stats.num_heap_buf_allocations);
          ASSERT_EQ(compressed_buf_allocators[i].GetNumAllocations(),
                    expected_stats.buf_allocation_stats
                        .num_compressed_buf_allocations);

          // The allocated buffers are not deallocated until
          // the block content is deleted.
          ASSERT_EQ(heap_buf_allocators[i].GetNumDeallocations(), 0);
          ASSERT_EQ(compressed_buf_allocators[i].GetNumDeallocations(), 0);
          blocks[i].allocation.reset();
          ASSERT_EQ(
              heap_buf_allocators[i].GetNumDeallocations(),
              expected_stats.buf_allocation_stats.num_heap_buf_allocations);
          ASSERT_EQ(compressed_buf_allocators[i].GetNumDeallocations(),
                    expected_stats.buf_allocation_stats
                        .num_compressed_buf_allocations);
        }
      }
    }
  }

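  // Configures options_ so that readers created afterwards use the given
  // read mode (buffered, mmap, or direct IO).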
  void SetMode(Mode mode) {
    switch (mode) {
      case Mode::kBufferedRead:
        options_.use_direct_reads = false;
        options_.allow_mmap_reads = false;
        break;
      case Mode::kBufferedMmap:
        options_.use_direct_reads = false;
        options_.allow_mmap_reads = true;
        break;
      case Mode::kDirectRead:
        options_.use_direct_reads = true;
        options_.allow_mmap_reads = false;
        break;
      case Mode::kNumModes:
        assert(false);
    }
  }

 private:
  std::string test_dir_;
  Env* env_;
  std::shared_ptr<FileSystem> fs_;
  BlockBasedTableFactory table_factory_;
  Options options_;

  std::string Path(const std::string& fname) { return test_dir_ + "/" + fname; }

  void WriteToFile(const std::string& content, const std::string& filename) {
    std::unique_ptr<FSWritableFile> f;
    ASSERT_OK(fs_->NewWritableFile(Path(filename), FileOptions(), &f, nullptr));
    ASSERT_OK(f->Append(content, IOOptions(), nullptr));
    ASSERT_OK(f->Close(IOOptions(), nullptr));
  }

  void NewFileWriter(const std::string& filename,
                     std::unique_ptr<WritableFileWriter>* writer) {
    std::string path = Path(filename);
    FileOptions file_options;
    ASSERT_OK(WritableFileWriter::Create(env_->GetFileSystem(), path,
                                         file_options, writer, nullptr));
  }

  void NewFileReader(const std::string& filename, const FileOptions& opt,
                     std::unique_ptr<RandomAccessFileReader>* reader) {
    std::string path = Path(filename);
    std::unique_ptr<FSRandomAccessFile> f;
    ASSERT_OK(fs_->NewRandomAccessFile(path, opt, &f, nullptr));
    reader->reset(new RandomAccessFileReader(std::move(f), path,
                                             env_->GetSystemClock().get()));
  }

  void NewTableReader(const ImmutableCFOptions& ioptions,
                      const FileOptions& foptions,
                      const InternalKeyComparator& comparator,
                      const std::string& table_name,
                      std::unique_ptr<BlockBasedTable>* table) {
    std::unique_ptr<RandomAccessFileReader> file;
    NewFileReader(table_name, foptions, &file);

    uint64_t file_size = 0;
    ASSERT_OK(env_->GetFileSize(Path(table_name), &file_size));

    std::unique_ptr<TableReader> table_reader;
    ReadOptions ro;
    const auto* table_options =
        table_factory_.GetOptions<BlockBasedTableOptions>();
    ASSERT_NE(table_options, nullptr);
    ASSERT_OK(BlockBasedTable::Open(ro, ioptions, EnvOptions(), *table_options,
                                    comparator, std::move(file), file_size,
                                    &table_reader));

    table->reset(reinterpret_cast<BlockBasedTable*>(table_reader.release()));
  }

  std::string ToInternalKey(const std::string& key) {
    InternalKey internal_key(key, 0, ValueType::kTypeValue);
    return internal_key.Encode().ToString();
  }

  void ReadFooter(RandomAccessFileReader* file, Footer* footer) {
    uint64_t file_size = 0;
    ASSERT_OK(env_->GetFileSize(file->file_name(), &file_size));
    IOOptions opts;
    ASSERT_OK(ReadFooterFromFile(opts, file, nullptr /* prefetch_buffer */,
                                 file_size, footer,
                                 kBlockBasedTableMagicNumber));
  }

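  // Reads the given block with a BlockFetcher and records the memcpy
  // counts the fetcher reports.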
  // NOTE: compression_type returns the compression type of the fetched block
  // contents, so if the block is fetched and uncompressed, then it's
  // kNoCompression.
  void FetchBlock(RandomAccessFileReader* file, const BlockHandle& block,
                  BlockType block_type, bool compressed, bool do_uncompress,
                  MemoryAllocator* heap_buf_allocator,
                  MemoryAllocator* compressed_buf_allocator,
                  BlockContents* contents, MemcpyStats* stats,
                  CompressionType* compression_type) {
    ImmutableCFOptions ioptions(options_);
    ReadOptions roptions;
    PersistentCacheOptions persistent_cache_options;
    Footer footer;
    ReadFooter(file, &footer);
    std::unique_ptr<BlockFetcher> fetcher(new BlockFetcher(
        file, nullptr /* prefetch_buffer */, footer, roptions, block, contents,
        ioptions, do_uncompress, compressed, block_type,
        UncompressionDict::GetEmptyDict(), persistent_cache_options,
        heap_buf_allocator, compressed_buf_allocator));

    ASSERT_OK(fetcher->ReadBlockContents());

    stats->num_stack_buf_memcpy = fetcher->TEST_GetNumStackBufMemcpy();
    stats->num_heap_buf_memcpy = fetcher->TEST_GetNumHeapBufMemcpy();
    stats->num_compressed_buf_memcpy =
        fetcher->TEST_GetNumCompressedBufMemcpy();

    *compression_type = fetcher->get_compression_type();
  }

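  // Opens the table, locates the first data block through the index, and
  // fetches it with the allocators and flags supplied by the caller.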
  // NOTE: expected_compression_type is the expected compression
  // type of the fetched block content; if the block is uncompressed,
  // then the expected compression type is kNoCompression.
  void FetchFirstDataBlock(const std::string& table_name, bool compressed,
                           bool do_uncompress,
                           CompressionType expected_compression_type,
                           MemoryAllocator* heap_buf_allocator,
                           MemoryAllocator* compressed_buf_allocator,
                           BlockContents* block, std::string* result,
                           MemcpyStats* memcpy_stats) {
    ImmutableCFOptions ioptions(options_);
    InternalKeyComparator comparator(options_.comparator);
    FileOptions foptions(options_);

    // Get block handle for the first data block.
    std::unique_ptr<BlockBasedTable> table;
    NewTableReader(ioptions, foptions, comparator, table_name, &table);

    std::unique_ptr<BlockBasedTable::IndexReader> index_reader;
    ReadOptions ro;
    ASSERT_OK(BinarySearchIndexReader::Create(
        table.get(), ro, nullptr /* prefetch_buffer */, false /* use_cache */,
        false /* prefetch */, false /* pin */, nullptr /* lookup_context */,
        &index_reader));

    std::unique_ptr<InternalIteratorBase<IndexValue>> iter(
        index_reader->NewIterator(
            ReadOptions(), false /* disable_prefix_seek */, nullptr /* iter */,
            nullptr /* get_context */, nullptr /* lookup_context */));
    ASSERT_OK(iter->status());
    iter->SeekToFirst();
    BlockHandle first_block_handle = iter->value().handle;

    // Fetch first data block.
    std::unique_ptr<RandomAccessFileReader> file;
    NewFileReader(table_name, foptions, &file);
    CompressionType compression_type;
    FetchBlock(file.get(), first_block_handle, BlockType::kData, compressed,
               do_uncompress, heap_buf_allocator, compressed_buf_allocator,
               block, memcpy_stats, &compression_type);
    ASSERT_EQ(compression_type, expected_compression_type);
    result->assign(block->data.ToString());
  }
};

// Skip the following tests in lite mode since direct I/O is unsupported.
#ifndef ROCKSDB_LITE

// Fetch index block under each read mode.
// Expects:
// the index block contents are the same for all read modes.
TEST_F(BlockFetcherTest, FetchIndexBlock) {
  for (CompressionType compression : GetSupportedCompressions()) {
    std::string table_name =
        "FetchIndexBlock" + CompressionTypeToString(compression);
    CreateTable(table_name, compression);

    CountedMemoryAllocator allocator;
    MemcpyStats memcpy_stats;
    BlockContents indexes[NumModes];
    std::string index_datas[NumModes];
    for (int i = 0; i < NumModes; ++i) {
      SetMode(static_cast<Mode>(i));
      FetchIndexBlock(table_name, &allocator, &allocator, &memcpy_stats,
                      &indexes[i], &index_datas[i]);
    }
    for (int i = 0; i < NumModes - 1; ++i) {
      AssertSameBlock(index_datas[i], index_datas[i + 1]);
    }
  }
}

// Data blocks are not compressed,
// fetch data block under direct IO, mmap IO, and non-direct IO.
// Expects:
// 1. in non-direct IO mode, allocate a heap buffer and memcpy the block
//    into the buffer;
// 2. in direct IO mode, allocate a heap buffer and memcpy from the
//    direct IO buffer to the heap buffer.
TEST_F(BlockFetcherTest, FetchUncompressedDataBlock) {
  TestStats expected_non_mmap_stats = {
      {
          0 /* num_stack_buf_memcpy */,
          1 /* num_heap_buf_memcpy */,
          0 /* num_compressed_buf_memcpy */,
      },
      {
          1 /* num_heap_buf_allocations */,
          0 /* num_compressed_buf_allocations */,
      }};
  TestStats expected_mmap_stats = {{
                                       0 /* num_stack_buf_memcpy */,
                                       0 /* num_heap_buf_memcpy */,
                                       0 /* num_compressed_buf_memcpy */,
                                   },
                                   {
                                       0 /* num_heap_buf_allocations */,
                                       0 /* num_compressed_buf_allocations */,
                                   }};
  std::array<TestStats, NumModes> expected_stats_by_mode{{
      expected_non_mmap_stats /* kBufferedRead */,
      expected_mmap_stats /* kBufferedMmap */,
      expected_non_mmap_stats /* kDirectRead */,
  }};
  TestFetchDataBlock("FetchUncompressedDataBlock", false, false,
                     expected_stats_by_mode);
}

// Data blocks are compressed,
// fetch data block under each read mode,
// but do not uncompress.
// Expects:
// 1. in non-direct IO mode, allocate a compressed buffer and memcpy the block
//    into the buffer;
// 2. in direct IO mode, allocate a compressed buffer and memcpy from the
//    direct IO buffer to the compressed buffer.
TEST_F(BlockFetcherTest, FetchCompressedDataBlock) {
  TestStats expected_non_mmap_stats = {
      {
          0 /* num_stack_buf_memcpy */,
          0 /* num_heap_buf_memcpy */,
          1 /* num_compressed_buf_memcpy */,
      },
      {
          0 /* num_heap_buf_allocations */,
          1 /* num_compressed_buf_allocations */,
      }};
  TestStats expected_mmap_stats = {{
                                       0 /* num_stack_buf_memcpy */,
                                       0 /* num_heap_buf_memcpy */,
                                       0 /* num_compressed_buf_memcpy */,
                                   },
                                   {
                                       0 /* num_heap_buf_allocations */,
                                       0 /* num_compressed_buf_allocations */,
                                   }};
  std::array<TestStats, NumModes> expected_stats_by_mode{{
      expected_non_mmap_stats /* kBufferedRead */,
      expected_mmap_stats /* kBufferedMmap */,
      expected_non_mmap_stats /* kDirectRead */,
  }};
  TestFetchDataBlock("FetchCompressedDataBlock", true, false,
                     expected_stats_by_mode);
}

// Data blocks are compressed,
// fetch and uncompress data block under each read mode.
// Expects:
// 1. in non-direct IO mode, since the block is small, it's first memcpyed
//    to the stack buffer, then a heap buffer is allocated and the block is
//    uncompressed into the heap.
// 2. in direct IO mode, allocate a heap buffer, then directly uncompress
//    and memcpy from the direct IO buffer to the heap buffer.
TEST_F(BlockFetcherTest, FetchAndUncompressCompressedDataBlock) {
  TestStats expected_buffered_read_stats = {
      {
          1 /* num_stack_buf_memcpy */,
          1 /* num_heap_buf_memcpy */,
          0 /* num_compressed_buf_memcpy */,
      },
      {
          1 /* num_heap_buf_allocations */,
          0 /* num_compressed_buf_allocations */,
      }};
  TestStats expected_mmap_stats = {{
                                       0 /* num_stack_buf_memcpy */,
                                       1 /* num_heap_buf_memcpy */,
                                       0 /* num_compressed_buf_memcpy */,
                                   },
                                   {
                                       1 /* num_heap_buf_allocations */,
                                       0 /* num_compressed_buf_allocations */,
                                   }};
  TestStats expected_direct_read_stats = {
      {
          0 /* num_stack_buf_memcpy */,
          1 /* num_heap_buf_memcpy */,
          0 /* num_compressed_buf_memcpy */,
      },
      {
          1 /* num_heap_buf_allocations */,
          0 /* num_compressed_buf_allocations */,
      }};
  std::array<TestStats, NumModes> expected_stats_by_mode{{
      expected_buffered_read_stats,
      expected_mmap_stats,
      expected_direct_read_stats,
  }};
  TestFetchDataBlock("FetchAndUncompressCompressedDataBlock", true, true,
                     expected_stats_by_mode);
}

#endif  // ROCKSDB_LITE

}  // namespace
}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}