From 459e00b3659ca1b9dfdd3c7cfaea93e6f4362595 Mon Sep 17 00:00:00 2001
From: Aaron Gao
Date: Fri, 5 May 2017 11:58:10 -0700
Subject: [PATCH] Roundup read bytes in ReadaheadRandomAccessFile

Summary:
Fix alignment in ReadaheadRandomAccessFile: round the readahead read size up
to the alignment boundary so reads stay sector-aligned under direct I/O.
Closes https://github.com/facebook/rocksdb/pull/2253

Differential Revision: D5012336

Pulled By: lightmark

fbshipit-source-id: 10d2c829520cb787227ef653ef63d5d701725778
---
 db/db_compaction_test.cc   |  8 ++++++++
 db/db_impl_open.cc         |  4 +++-
 util/file_reader_writer.cc | 15 ++++++++++++++-
 3 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
index bc9a8d473..f212908a8 100644
--- a/db/db_compaction_test.cc
+++ b/db/db_compaction_test.cc
@@ -2601,6 +2601,7 @@ TEST_P(DBCompactionDirectIOTest, DirectIO) {
   options.use_direct_io_for_flush_and_compaction = GetParam();
   options.env = new MockEnv(Env::Default());
   Reopen(options);
+  bool readahead = false;
   SyncPoint::GetInstance()->SetCallBack(
       "TableCache::NewIterator:for_compaction", [&](void* arg) {
         bool* use_direct_reads = static_cast<bool*>(arg);
@@ -2613,11 +2614,18 @@ TEST_P(DBCompactionDirectIOTest, DirectIO) {
         ASSERT_EQ(*use_direct_writes,
                   options.use_direct_io_for_flush_and_compaction);
       });
+  if (options.use_direct_io_for_flush_and_compaction) {
+    SyncPoint::GetInstance()->SetCallBack(
+        "SanitizeOptions:direct_io", [&](void* arg) {
+          readahead = true;
+        });
+  }
   SyncPoint::GetInstance()->EnableProcessing();
   CreateAndReopenWithCF({"pikachu"}, options);
   MakeTables(3, "p", "q", 1);
   ASSERT_EQ("1,1,1", FilesPerLevel(1));
   Compact(1, "p1", "p9");
+  ASSERT_FALSE(readahead ^ options.use_direct_io_for_flush_and_compaction);
   ASSERT_EQ("0,0,1", FilesPerLevel(1));
   Destroy(options);
   delete options.env;
diff --git a/db/db_impl_open.cc b/db/db_impl_open.cc
index 9e7d3a02d..cbdc1fe8e 100644
--- a/db/db_impl_open.cc
+++ b/db/db_impl_open.cc
@@ -97,7 +97,9 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
     result.db_paths.emplace_back(dbname, std::numeric_limits<uint64_t>::max());
   }
 
-  if (result.use_direct_reads && result.compaction_readahead_size == 0) {
+  if (result.use_direct_io_for_flush_and_compaction &&
+      result.compaction_readahead_size == 0) {
+    TEST_SYNC_POINT_CALLBACK("SanitizeOptions:direct_io", nullptr);
     result.compaction_readahead_size = 1024 * 1024 * 2;
   }
 
diff --git a/util/file_reader_writer.cc b/util/file_reader_writer.cc
index 1088ae3f4..7dbffd834 100644
--- a/util/file_reader_writer.cc
+++ b/util/file_reader_writer.cc
@@ -21,6 +21,16 @@
 
 namespace rocksdb {
 
+#ifndef NDEBUG
+namespace {
+
+bool IsSectorAligned(const size_t off, size_t sector_size) {
+  return off % sector_size == 0;
+}
+
+}
+#endif
+
 Status SequentialFileReader::Read(size_t n, Slice* result, char* scratch) {
   Status s;
   if (use_direct_io()) {
@@ -511,7 +521,8 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
     if (prefetch_offset == buffer_offset_) {
       return Status::OK();
     }
-    return ReadIntoBuffer(prefetch_offset, offset - prefetch_offset + n);
+    return ReadIntoBuffer(prefetch_offset,
+                          Roundup(offset + n, alignment_) - prefetch_offset);
   }
 
   virtual size_t GetUniqueId(char* id, size_t max_size) const override {
@@ -546,6 +557,8 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
     if (n > buffer_.Capacity()) {
       n = buffer_.Capacity();
     }
+    assert(IsSectorAligned(offset, alignment_));
+    assert(IsSectorAligned(n, alignment_));
     Slice result;
     Status s = file_->Read(offset, n, &result, buffer_.BufferStart());
     if (s.ok()) {
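
For reference, below is a minimal standalone sketch (not part of the patch above) of the alignment arithmetic the new ReadIntoBuffer call relies on. The Roundup and TruncateToPageBoundary helpers here are simplified stand-ins for RocksDB's alignment utilities, and the concrete alignment/offset/length values are made-up illustrations.

#include <cassert>
#include <cstddef>
#include <iostream>

namespace {

// Simplified stand-ins for RocksDB's alignment helpers (illustrative only).
size_t TruncateToPageBoundary(size_t page_size, size_t s) {
  return s - (s & (page_size - 1));  // assumes page_size is a power of two
}

size_t Roundup(size_t x, size_t y) { return ((x + y - 1) / y) * y; }

}  // namespace

int main() {
  const size_t alignment = 4096;  // sector size used for direct I/O
  const size_t offset = 5000;     // requested read offset (made-up value)
  const size_t n = 300;           // requested read length (made-up value)

  // Start of the aligned readahead window (prefetch_offset in the patch).
  const size_t prefetch_offset = TruncateToPageBoundary(alignment, offset);

  // Old computation: the length is not rounded up, so the read can end
  // mid-sector (1204 bytes in this example).
  const size_t old_len = offset - prefetch_offset + n;

  // New computation: round the end of the requested range up to the next
  // sector boundary, so both offset and length stay aligned.
  const size_t new_len = Roundup(offset + n, alignment) - prefetch_offset;

  assert(prefetch_offset % alignment == 0);
  assert(new_len % alignment == 0);

  std::cout << "prefetch_offset=" << prefetch_offset << " old_len=" << old_len
            << " new_len=" << new_len << std::endl;
  // Prints: prefetch_offset=4096 old_len=1204 new_len=4096
  return 0;
}

With a 4096-byte alignment, an offset of 5000 and a length of 300, the old length (1204) ends in the middle of a sector, while the rounded-up length (4096) keeps both the offset and the length passed to the underlying file sector-aligned, which is what the new IsSectorAligned asserts in ReadIntoBuffer check.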