Fix testcase failures on windows (#7992)
Summary: Fixed 5 test case failures found on Windows 10/Windows Server 2016 1. In `flush_job_test`, the DestroyDir function fails in the destructor because some file handles are still being held by VersionSet. This happens on Windows Server 2016, so we need to manually reset the versions_ pointer to release all file handles. 2. In `StatsHistoryTest.InMemoryStatsHistoryPurging` test, the capping memory cost of stats_history_size on Windows becomes 14000 bytes with the latest changes, not just 13000 bytes. 3. In `SSTDumpToolTest.RawOutput` test, the output file handle is not closed at the end. 4. In `FullBloomTest.OptimizeForMemory` test, ROCKSDB_MALLOC_USABLE_SIZE is undefined on Windows, so `total_mem` is always equal to `total_size`. The internal memory fragmentation assertion does not apply in this case. 5. In `BlockFetcherTest.FetchAndUncompressCompressedDataBlock` test, XPRESS cannot reach an 87.5% compression ratio with the original CreateTable method, so I append extra zeros to the string value to enhance the compression ratio. Besides, since XPRESS allocates memory internally and thus does not support custom allocator verification, we will skip the allocator verification for XPRESS. Pull Request resolved: https://github.com/facebook/rocksdb/pull/7992 Reviewed By: jay-zhuang Differential Revision: D26615283 Pulled By: ajkr fbshipit-source-id: 3632612f84b99e2b9c77c403b112b6bedf3b125d
This commit is contained in:
parent
75c6ffb9de
commit
e017af15c1
@ -48,6 +48,8 @@ class FlushJobTestBase : public testing::Test {
|
||||
if (getenv("KEEP_DB")) {
|
||||
fprintf(stdout, "db is still in %s\n", dbname_.c_str());
|
||||
} else {
|
||||
// destroy versions_ to release all file handles
|
||||
versions_.reset();
|
||||
EXPECT_OK(DestroyDir(env_, dbname_));
|
||||
}
|
||||
}
|
||||
|
@ -245,10 +245,10 @@ TEST_F(StatsHistoryTest, InMemoryStatsHistoryPurging) {
|
||||
}
|
||||
size_t stats_history_size = dbfull()->TEST_EstimateInMemoryStatsHistorySize();
|
||||
ASSERT_GE(slice_count, kIterations - 1);
|
||||
ASSERT_GE(stats_history_size, 13000);
|
||||
// capping memory cost at 13000 bytes since one slice is around 10000~13000
|
||||
ASSERT_OK(dbfull()->SetDBOptions({{"stats_history_buffer_size", "13000"}}));
|
||||
ASSERT_EQ(13000, dbfull()->GetDBOptions().stats_history_buffer_size);
|
||||
ASSERT_GE(stats_history_size, 14000);
|
||||
// capping memory cost at 14000 bytes since one slice is around 10000~14000
|
||||
ASSERT_OK(dbfull()->SetDBOptions({{"stats_history_buffer_size", "14000"}}));
|
||||
ASSERT_EQ(14000, dbfull()->GetDBOptions().stats_history_buffer_size);
|
||||
|
||||
// Wait for stats persist to finish
|
||||
for (int i = 0; i < kIterations; ++i) {
|
||||
|
@ -108,7 +108,8 @@ class BlockFetcherTest : public testing::Test {
|
||||
// Build table.
|
||||
for (int i = 0; i < 9; i++) {
|
||||
std::string key = ToInternalKey(std::to_string(i));
|
||||
std::string value = std::to_string(i);
|
||||
// Append "00000000" to string value to enhance compression ratio
|
||||
std::string value = "00000000" + std::to_string(i);
|
||||
table_builder->Add(key, value);
|
||||
}
|
||||
ASSERT_OK(table_builder->Finish());
|
||||
@ -190,22 +191,30 @@ class BlockFetcherTest : public testing::Test {
|
||||
ASSERT_EQ(memcpy_stats[i].num_compressed_buf_memcpy,
|
||||
expected_stats.memcpy_stats.num_compressed_buf_memcpy);
|
||||
|
||||
ASSERT_EQ(heap_buf_allocators[i].GetNumAllocations(),
|
||||
expected_stats.buf_allocation_stats.num_heap_buf_allocations);
|
||||
if (kXpressCompression == compression_type) {
|
||||
// XPRESS allocates memory internally, thus does not support
|
||||
// custom allocator verification
|
||||
continue;
|
||||
} else {
|
||||
ASSERT_EQ(
|
||||
compressed_buf_allocators[i].GetNumAllocations(),
|
||||
expected_stats.buf_allocation_stats.num_compressed_buf_allocations);
|
||||
heap_buf_allocators[i].GetNumAllocations(),
|
||||
expected_stats.buf_allocation_stats.num_heap_buf_allocations);
|
||||
ASSERT_EQ(compressed_buf_allocators[i].GetNumAllocations(),
|
||||
expected_stats.buf_allocation_stats
|
||||
.num_compressed_buf_allocations);
|
||||
|
||||
// The allocated buffers are not deallocated until
|
||||
// the block content is deleted.
|
||||
ASSERT_EQ(heap_buf_allocators[i].GetNumDeallocations(), 0);
|
||||
ASSERT_EQ(compressed_buf_allocators[i].GetNumDeallocations(), 0);
|
||||
blocks[i].allocation.reset();
|
||||
ASSERT_EQ(heap_buf_allocators[i].GetNumDeallocations(),
|
||||
expected_stats.buf_allocation_stats.num_heap_buf_allocations);
|
||||
ASSERT_EQ(
|
||||
compressed_buf_allocators[i].GetNumDeallocations(),
|
||||
expected_stats.buf_allocation_stats.num_compressed_buf_allocations);
|
||||
heap_buf_allocators[i].GetNumDeallocations(),
|
||||
expected_stats.buf_allocation_stats.num_heap_buf_allocations);
|
||||
ASSERT_EQ(compressed_buf_allocators[i].GetNumDeallocations(),
|
||||
expected_stats.buf_allocation_stats
|
||||
.num_compressed_buf_allocations);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -392,6 +392,8 @@ TEST_F(SSTDumpToolTest, RawOutput) {
|
||||
|
||||
ASSERT_EQ(kNumKey, key_count);
|
||||
|
||||
raw_file.close();
|
||||
|
||||
cleanup(opts, file_path);
|
||||
for (int i = 0; i < 3; i++) {
|
||||
delete[] usage[i];
|
||||
|
@ -573,8 +573,10 @@ TEST_P(FullBloomTest, OptimizeForMemory) {
|
||||
#ifdef ROCKSDB_JEMALLOC
|
||||
fprintf(stderr, "Jemalloc detected? %d\n", HasJemalloc());
|
||||
if (HasJemalloc()) {
|
||||
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
|
||||
// More than 5% internal fragmentation
|
||||
EXPECT_GE(total_mem, total_size * 105 / 100);
|
||||
#endif // ROCKSDB_MALLOC_USABLE_SIZE
|
||||
}
|
||||
#endif // ROCKSDB_JEMALLOC
|
||||
// No storage penalty, just usual overhead
|
||||
|
Loading…
Reference in New Issue
Block a user