diff --git a/HISTORY.md b/HISTORY.md
index 887079f3d..2bbf540b2 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -14,6 +14,7 @@
 * Fix incorrect results from batched MultiGet for duplicate keys, when the duplicate key matches the largest key of an SST file and the value type for the key in the file is a merge value.
 * Best-efforts recovery ignores CURRENT file completely. If CURRENT file is missing during recovery, best-efforts recovery still proceeds with MANIFEST file(s).
 * In best-efforts recovery, an error that is not Corruption or IOError::kNotFound or IOError::kPathNotFound will be overwritten silently. Fix this by checking all non-ok cases and return early.
+* The compressed block cache was mistakenly disabled for read-only DBs. This is now fixed: the compressed block cache also takes effect with read-only DBs.
 
 ### Public API Change
 * Flush(..., column_family) may return Status::ColumnFamilyDropped() instead of Status::InvalidArgument() if column_family is dropped while processing the flush request.
diff --git a/db/db_test2.cc b/db/db_test2.cc
index 467492155..be5b90a6d 100644
--- a/db/db_test2.cc
+++ b/db/db_test2.cc
@@ -86,6 +86,64 @@ TEST_F(DBTest2, OpenForReadOnlyWithColumnFamilies) {
   // With create_if_missing false, there should not be a dir in the file system
   ASSERT_NOK(env_->FileExists(dbname));
 }
+
+class TestReadOnlyWithCompressedCache
+    : public DBTestBase,
+      public testing::WithParamInterface<std::tuple<int, bool>> {
+ public:
+  TestReadOnlyWithCompressedCache()
+      : DBTestBase("/test_readonly_with_compressed_cache") {
+    max_open_files_ = std::get<0>(GetParam());
+    use_mmap_ = std::get<1>(GetParam());
+  }
+  int max_open_files_;
+  bool use_mmap_;
+};
+
+TEST_P(TestReadOnlyWithCompressedCache, ReadOnlyWithCompressedCache) {
+  if (use_mmap_ && !IsMemoryMappedAccessSupported()) {
+    return;
+  }
+  ASSERT_OK(Put("foo", "bar"));
+  ASSERT_OK(Put("foo2", "barbarbarbarbarbarbarbar"));
+  ASSERT_OK(Flush());
+
+  DB* db_ptr = nullptr;
+  Options options = CurrentOptions();
+  options.allow_mmap_reads = use_mmap_;
+  options.max_open_files = max_open_files_;
+  options.compression = kSnappyCompression;
+  BlockBasedTableOptions table_options;
+  table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
+  table_options.no_block_cache = true;
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  options.statistics = CreateDBStatistics();
+
+  ASSERT_OK(DB::OpenForReadOnly(options, dbname_, &db_ptr));
+
+  std::string v;
+  ASSERT_OK(db_ptr->Get(ReadOptions(), "foo", &v));
+  ASSERT_EQ("bar", v);
+  ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_HIT));
+  ASSERT_OK(db_ptr->Get(ReadOptions(), "foo", &v));
+  ASSERT_EQ("bar", v);
+  if (Snappy_Supported()) {
+    if (use_mmap_) {
+      ASSERT_EQ(0,
+                options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_HIT));
+    } else {
+      ASSERT_EQ(1,
+                options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_HIT));
+    }
+  }
+
+  delete db_ptr;
+}
+
+INSTANTIATE_TEST_CASE_P(TestReadOnlyWithCompressedCache,
+                        TestReadOnlyWithCompressedCache,
+                        ::testing::Combine(::testing::Values(-1, 100),
+                                           ::testing::Bool()));
 #endif  // ROCKSDB_LITE
 
 class PrefixFullBloomWithReverseComparator
diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc
index 7ce7ca55e..6bb2001e3 100644
--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -1426,10 +1426,8 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
   assert(block_entry != nullptr);
   const bool no_io = (ro.read_tier == kBlockCacheTier);
   Cache* block_cache = rep_->table_options.block_cache.get();
-  // No point to cache compressed blocks if it never goes away
   Cache* block_cache_compressed =
-      rep_->immortal_table ? nullptr
-                           : rep_->table_options.block_cache_compressed.get();
+      rep_->table_options.block_cache_compressed.get();
 
   // First, try to get the block from the cache
   //
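For reference, the minimal C++ sketch below (not part of the patch) shows the configuration this fix addresses: a compressed block cache attached to a DB opened with DB::OpenForReadOnly(). The database path /tmp/rocksdb_readonly_demo, the key "foo", and the 8 MB cache size are illustrative placeholders, and the sketch assumes RocksDB is built with Snappy support.

// Minimal usage sketch: open an existing DB read-only with a compressed
// block cache attached. Path, key, and cache size are placeholders.
#include <iostream>
#include <string>

#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

int main() {
  rocksdb::Options options;
  options.compression = rocksdb::kSnappyCompression;

  // Attach a compressed block cache; with this fix it is also honored when
  // the DB is opened read-only.
  rocksdb::BlockBasedTableOptions table_options;
  table_options.block_cache_compressed = rocksdb::NewLRUCache(8 * 1024 * 1024);
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::OpenForReadOnly(
      options, "/tmp/rocksdb_readonly_demo", &db);
  if (!s.ok()) {
    std::cerr << "OpenForReadOnly failed: " << s.ToString() << std::endl;
    return 1;
  }

  std::string value;
  s = db->Get(rocksdb::ReadOptions(), "foo", &value);
  std::cout << (s.ok() ? "foo => " + value : s.ToString()) << std::endl;

  delete db;
  return 0;
}

As exercised by the new test above, with this fix a repeated read of the same block should register a BLOCK_CACHE_COMPRESSED_HIT in statistics when mmap reads are not in use.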