diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc
index 9d9b40c2b..be76e39de 100644
--- a/db/flush_job_test.cc
+++ b/db/flush_job_test.cc
@@ -48,6 +48,8 @@ class FlushJobTestBase : public testing::Test {
     if (getenv("KEEP_DB")) {
       fprintf(stdout, "db is still in %s\n", dbname_.c_str());
     } else {
+      // Destroy versions_ to release all file handles
+      versions_.reset();
       EXPECT_OK(DestroyDir(env_, dbname_));
     }
   }
diff --git a/monitoring/stats_history_test.cc b/monitoring/stats_history_test.cc
index 391f7c442..c1a2ad989 100644
--- a/monitoring/stats_history_test.cc
+++ b/monitoring/stats_history_test.cc
@@ -245,10 +245,10 @@ TEST_F(StatsHistoryTest, InMemoryStatsHistoryPurging) {
   }
   size_t stats_history_size = dbfull()->TEST_EstimateInMemoryStatsHistorySize();
   ASSERT_GE(slice_count, kIterations - 1);
-  ASSERT_GE(stats_history_size, 13000);
-  // capping memory cost at 13000 bytes since one slice is around 10000~13000
-  ASSERT_OK(dbfull()->SetDBOptions({{"stats_history_buffer_size", "13000"}}));
-  ASSERT_EQ(13000, dbfull()->GetDBOptions().stats_history_buffer_size);
+  ASSERT_GE(stats_history_size, 14000);
+  // capping memory cost at 14000 bytes since one slice is around 10000~14000
+  ASSERT_OK(dbfull()->SetDBOptions({{"stats_history_buffer_size", "14000"}}));
+  ASSERT_EQ(14000, dbfull()->GetDBOptions().stats_history_buffer_size);
 
   // Wait for stats persist to finish
   for (int i = 0; i < kIterations; ++i) {
diff --git a/table/block_fetcher_test.cc b/table/block_fetcher_test.cc
index cf5a4151c..ed52a0c99 100644
--- a/table/block_fetcher_test.cc
+++ b/table/block_fetcher_test.cc
@@ -108,7 +108,8 @@ class BlockFetcherTest : public testing::Test {
     // Build table.
     for (int i = 0; i < 9; i++) {
       std::string key = ToInternalKey(std::to_string(i));
-      std::string value = std::to_string(i);
+      // Prepend "00000000" to the value to enhance the compression ratio
+      std::string value = "00000000" + std::to_string(i);
       table_builder->Add(key, value);
     }
     ASSERT_OK(table_builder->Finish());
@@ -190,22 +191,30 @@ class BlockFetcherTest : public testing::Test {
       ASSERT_EQ(memcpy_stats[i].num_compressed_buf_memcpy,
                 expected_stats.memcpy_stats.num_compressed_buf_memcpy);
 
-      ASSERT_EQ(heap_buf_allocators[i].GetNumAllocations(),
-                expected_stats.buf_allocation_stats.num_heap_buf_allocations);
-      ASSERT_EQ(
-          compressed_buf_allocators[i].GetNumAllocations(),
-          expected_stats.buf_allocation_stats.num_compressed_buf_allocations);
+      if (kXpressCompression == compression_type) {
+        // XPRESS allocates memory internally and thus does not support
+        // custom allocator verification
+        continue;
+      } else {
+        ASSERT_EQ(
+            heap_buf_allocators[i].GetNumAllocations(),
+            expected_stats.buf_allocation_stats.num_heap_buf_allocations);
+        ASSERT_EQ(compressed_buf_allocators[i].GetNumAllocations(),
+                  expected_stats.buf_allocation_stats
+                      .num_compressed_buf_allocations);
 
-      // The allocated buffers are not deallocated until
-      // the block content is deleted.
-      ASSERT_EQ(heap_buf_allocators[i].GetNumDeallocations(), 0);
-      ASSERT_EQ(compressed_buf_allocators[i].GetNumDeallocations(), 0);
-      blocks[i].allocation.reset();
-      ASSERT_EQ(heap_buf_allocators[i].GetNumDeallocations(),
-                expected_stats.buf_allocation_stats.num_heap_buf_allocations);
-      ASSERT_EQ(
-          compressed_buf_allocators[i].GetNumDeallocations(),
-          expected_stats.buf_allocation_stats.num_compressed_buf_allocations);
+        // The allocated buffers are not deallocated until
+        // the block content is deleted.
+        ASSERT_EQ(heap_buf_allocators[i].GetNumDeallocations(), 0);
+        ASSERT_EQ(compressed_buf_allocators[i].GetNumDeallocations(), 0);
+        blocks[i].allocation.reset();
+        ASSERT_EQ(
+            heap_buf_allocators[i].GetNumDeallocations(),
+            expected_stats.buf_allocation_stats.num_heap_buf_allocations);
+        ASSERT_EQ(compressed_buf_allocators[i].GetNumDeallocations(),
+                  expected_stats.buf_allocation_stats
+                      .num_compressed_buf_allocations);
+      }
     }
   }
 }
diff --git a/tools/sst_dump_test.cc b/tools/sst_dump_test.cc
index e0db79aa9..d590830a8 100644
--- a/tools/sst_dump_test.cc
+++ b/tools/sst_dump_test.cc
@@ -392,6 +392,8 @@ TEST_F(SSTDumpToolTest, RawOutput) {
 
   ASSERT_EQ(kNumKey, key_count);
 
+  raw_file.close();
+
   cleanup(opts, file_path);
   for (int i = 0; i < 3; i++) {
     delete[] usage[i];
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index ab8b58725..bc2514bc3 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -573,8 +573,10 @@ TEST_P(FullBloomTest, OptimizeForMemory) {
 #ifdef ROCKSDB_JEMALLOC
     fprintf(stderr, "Jemalloc detected? %d\n", HasJemalloc());
     if (HasJemalloc()) {
+#ifdef ROCKSDB_MALLOC_USABLE_SIZE
       // More than 5% internal fragmentation
       EXPECT_GE(total_mem, total_size * 105 / 100);
+#endif  // ROCKSDB_MALLOC_USABLE_SIZE
     }
 #endif  // ROCKSDB_JEMALLOC
     // No storage penalty, just usual overhead