// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/blob/blob_file_cache.h"

#include <cassert>
#include <string>

#include "db/blob/blob_log_format.h"
#include "db/blob/blob_log_writer.h"
#include "env/mock_env.h"
#include "file/filename.h"
#include "file/read_write_util.h"
#include "file/writable_file_writer.h"
#include "options/cf_options.h"
#include "rocksdb/cache.h"
#include "rocksdb/env.h"
#include "rocksdb/file_system.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"

namespace ROCKSDB_NAMESPACE {

namespace {

// Creates a test blob file with a single blob in it.
void WriteBlobFile(uint32_t column_family_id,
                   const ImmutableCFOptions& immutable_cf_options,
                   uint64_t blob_file_number) {
  assert(!immutable_cf_options.cf_paths.empty());

  const std::string blob_file_path = BlobFileName(
      immutable_cf_options.cf_paths.front().path, blob_file_number);

  std::unique_ptr<FSWritableFile> file;
  ASSERT_OK(NewWritableFile(immutable_cf_options.fs, blob_file_path, &file,
                            FileOptions()));

  std::unique_ptr<WritableFileWriter> file_writer(
      new WritableFileWriter(std::move(file), blob_file_path, FileOptions(),
                             immutable_cf_options.env->GetSystemClock()));

  constexpr Statistics* statistics = nullptr;
  constexpr bool use_fsync = false;
  // The integrated BlobDB builds blob files in background jobs and buffers
  // blobs before writing them, so there is no need to flush after each blob;
  // explicit flushing is only required by the stacked BlobDB implementation,
  // which treats blob files as logs.
  constexpr bool do_flush = false;

  BlobLogWriter blob_log_writer(
      std::move(file_writer), immutable_cf_options.env->GetSystemClock(),
      statistics, blob_file_number, use_fsync, do_flush);
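
  // A blob file consists of a header, a sequence of blob records, and a
  // footer; write a minimal file containing a single record.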
  constexpr bool has_ttl = false;
  constexpr ExpirationRange expiration_range;

  BlobLogHeader header(column_family_id, kNoCompression, has_ttl,
                       expiration_range);

  ASSERT_OK(blob_log_writer.WriteHeader(header));

  constexpr char key[] = "key";
  constexpr char blob[] = "blob";

  std::string compressed_blob;
  Slice blob_to_write;

  uint64_t key_offset = 0;
  uint64_t blob_offset = 0;

  ASSERT_OK(blob_log_writer.AddRecord(key, blob, &key_offset, &blob_offset));

  BlobLogFooter footer;
  footer.blob_count = 1;
  footer.expiration_range = expiration_range;

  std::string checksum_method;
  std::string checksum_value;

  ASSERT_OK(
      blob_log_writer.AppendFooter(footer, &checksum_method, &checksum_value));
}

}  // anonymous namespace
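
// The fixture uses MockEnv (an in-memory Env wrapping Env::Default()), so the
// blob files created by the tests never touch the real filesystem.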
class BlobFileCacheTest : public testing::Test {
 protected:
  BlobFileCacheTest() : mock_env_(Env::Default()) {}

  MockEnv mock_env_;
};

TEST_F(BlobFileCacheTest, GetBlobFileReader) {
  Options options;
  options.env = &mock_env_;
  options.statistics = CreateDBStatistics();
  options.cf_paths.emplace_back(
      test::PerThreadDBPath(&mock_env_, "BlobFileCacheTest_GetBlobFileReader"),
      0);
  options.enable_blob_files = true;

  constexpr uint32_t column_family_id = 1;
  ImmutableCFOptions immutable_cf_options(options);
  constexpr uint64_t blob_file_number = 123;

  WriteBlobFile(column_family_id, immutable_cf_options, blob_file_number);

  constexpr size_t capacity = 10;
  std::shared_ptr<Cache> backing_cache = NewLRUCache(capacity);

  FileOptions file_options;
  constexpr HistogramImpl* blob_file_read_hist = nullptr;

  BlobFileCache blob_file_cache(backing_cache.get(), &immutable_cf_options,
                                &file_options, column_family_id,
                                blob_file_read_hist, nullptr /*IOTracer*/);

  // First try: reader should be opened and put in cache
  CacheHandleGuard<BlobFileReader> first;

  ASSERT_OK(blob_file_cache.GetBlobFileReader(blob_file_number, &first));
  ASSERT_NE(first.GetValue(), nullptr);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 0);

  // Second try: reader should be served from cache
  CacheHandleGuard<BlobFileReader> second;

  ASSERT_OK(blob_file_cache.GetBlobFileReader(blob_file_number, &second));
  ASSERT_NE(second.GetValue(), nullptr);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 0);
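
  // Both lookups should hand out the same cached reader object.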
  ASSERT_EQ(first.GetValue(), second.GetValue());
}

TEST_F(BlobFileCacheTest, GetBlobFileReader_Race) {
  Options options;
  options.env = &mock_env_;
  options.statistics = CreateDBStatistics();
  options.cf_paths.emplace_back(
      test::PerThreadDBPath(&mock_env_,
                            "BlobFileCacheTest_GetBlobFileReader_Race"),
      0);
  options.enable_blob_files = true;

  constexpr uint32_t column_family_id = 1;
  ImmutableCFOptions immutable_cf_options(options);
  constexpr uint64_t blob_file_number = 123;

  WriteBlobFile(column_family_id, immutable_cf_options, blob_file_number);

  constexpr size_t capacity = 10;
  std::shared_ptr<Cache> backing_cache = NewLRUCache(capacity);

  FileOptions file_options;
  constexpr HistogramImpl* blob_file_read_hist = nullptr;

  BlobFileCache blob_file_cache(backing_cache.get(), &immutable_cf_options,
                                &file_options, column_family_id,
                                blob_file_read_hist, nullptr /*IOTracer*/);

  CacheHandleGuard<BlobFileReader> first;
  CacheHandleGuard<BlobFileReader> second;
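
  // Simulate a concurrent lookup: while the first GetBlobFileReader call is
  // at its double-check sync point, obtain the reader through a second call.
  // The first call should then find the cached reader instead of opening the
  // file again, so NO_FILE_OPENS stays at 1.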
  SyncPoint::GetInstance()->SetCallBack(
      "BlobFileCache::GetBlobFileReader:DoubleCheck", [&](void* /* arg */) {
        // Disabling sync points to prevent infinite recursion
        SyncPoint::GetInstance()->DisableProcessing();

        ASSERT_OK(
            blob_file_cache.GetBlobFileReader(blob_file_number, &second));
        ASSERT_NE(second.GetValue(), nullptr);
        ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1);
        ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 0);
      });
  SyncPoint::GetInstance()->EnableProcessing();

  ASSERT_OK(blob_file_cache.GetBlobFileReader(blob_file_number, &first));
  ASSERT_NE(first.GetValue(), nullptr);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 0);

  ASSERT_EQ(first.GetValue(), second.GetValue());

  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
}

TEST_F(BlobFileCacheTest, GetBlobFileReader_IOError) {
  Options options;
  options.env = &mock_env_;
  options.statistics = CreateDBStatistics();
  options.cf_paths.emplace_back(
      test::PerThreadDBPath(&mock_env_,
                            "BlobFileCacheTest_GetBlobFileReader_IOError"),
      0);
  options.enable_blob_files = true;

  constexpr size_t capacity = 10;
  std::shared_ptr<Cache> backing_cache = NewLRUCache(capacity);

  ImmutableCFOptions immutable_cf_options(options);
  FileOptions file_options;
  constexpr uint32_t column_family_id = 1;
  constexpr HistogramImpl* blob_file_read_hist = nullptr;

  BlobFileCache blob_file_cache(backing_cache.get(), &immutable_cf_options,
                                &file_options, column_family_id,
                                blob_file_read_hist, nullptr /*IOTracer*/);

  // Note: there is no blob file with the below number
  constexpr uint64_t blob_file_number = 123;
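
  // Attempting to open the missing file should fail with an I/O error and be
  // reflected in the NO_FILE_ERRORS ticker, while the attempt itself still
  // counts toward NO_FILE_OPENS.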
  CacheHandleGuard<BlobFileReader> reader;

  ASSERT_TRUE(
      blob_file_cache.GetBlobFileReader(blob_file_number, &reader).IsIOError());
  ASSERT_EQ(reader.GetValue(), nullptr);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 1);
}

TEST_F(BlobFileCacheTest, GetBlobFileReader_CacheFull) {
  Options options;
  options.env = &mock_env_;
  options.statistics = CreateDBStatistics();
  options.cf_paths.emplace_back(
      test::PerThreadDBPath(&mock_env_,
                            "BlobFileCacheTest_GetBlobFileReader_CacheFull"),
      0);
  options.enable_blob_files = true;

  constexpr uint32_t column_family_id = 1;
  ImmutableCFOptions immutable_cf_options(options);
  constexpr uint64_t blob_file_number = 123;

  WriteBlobFile(column_family_id, immutable_cf_options, blob_file_number);

  constexpr size_t capacity = 0;
  constexpr int num_shard_bits = -1;  // determined automatically
  constexpr bool strict_capacity_limit = true;
  std::shared_ptr<Cache> backing_cache =
      NewLRUCache(capacity, num_shard_bits, strict_capacity_limit);

  FileOptions file_options;
  constexpr HistogramImpl* blob_file_read_hist = nullptr;

  BlobFileCache blob_file_cache(backing_cache.get(), &immutable_cf_options,
                                &file_options, column_family_id,
                                blob_file_read_hist, nullptr /*IOTracer*/);

  // Insert into cache should fail since it has zero capacity and
  // strict_capacity_limit is set
  CacheHandleGuard<BlobFileReader> reader;

  ASSERT_TRUE(blob_file_cache.GetBlobFileReader(blob_file_number, &reader)
                  .IsIncomplete());
  ASSERT_EQ(reader.GetValue(), nullptr);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1);
  ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 1);
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}