Enable db_test running in Centos 32 bit OS and Alpine 32 bit OS (#9294)

Summary: Closes https://github.com/facebook/rocksdb/issues/9271
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9294
Reviewed By: riversand963, hx235
Differential Revision: D33586002
Pulled By: pdillinger
fbshipit-source-id: 3d1a2fa71023e108613ff03dbd37a5f954fc4920

Parent: 5602b1d3d9
Commit: 93b1de4f45
db/db_test.cc

@@ -2576,9 +2576,7 @@ static const int kNumKeys = 1000;
 
 struct MTState {
   DBTest* test;
-  std::atomic<bool> stop;
   std::atomic<int> counter[kNumThreads];
-  std::atomic<bool> thread_done[kNumThreads];
 };
 
 struct MTThread {
@@ -2592,10 +2590,13 @@ static void MTThreadBody(void* arg) {
   int id = t->id;
   DB* db = t->state->test->db_;
   int counter = 0;
+  std::shared_ptr<SystemClock> clock = SystemClock::Default();
+  auto end_micros = clock->NowMicros() + kTestSeconds * 1000000U;
+
   fprintf(stderr, "... starting thread %d\n", id);
   Random rnd(1000 + id);
   char valbuf[1500];
-  while (t->state->stop.load(std::memory_order_acquire) == false) {
+  while (clock->NowMicros() < end_micros) {
     t->state->counter[id].store(counter, std::memory_order_release);
 
     int key = rnd.Uniform(kNumKeys);
@@ -2692,7 +2693,6 @@ static void MTThreadBody(void* arg) {
     }
     counter++;
   }
-  t->state->thread_done[id].store(true, std::memory_order_release);
  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
 }
@@ -2731,10 +2731,8 @@ TEST_P(MultiThreadedDBTest, MultiThreaded) {
   // Initialize state
   MTState mt;
   mt.test = this;
-  mt.stop.store(false, std::memory_order_release);
   for (int id = 0; id < kNumThreads; id++) {
     mt.counter[id].store(0, std::memory_order_release);
-    mt.thread_done[id].store(false, std::memory_order_release);
   }
 
   // Start threads
@@ -2746,16 +2744,7 @@ TEST_P(MultiThreadedDBTest, MultiThreaded) {
     env_->StartThread(MTThreadBody, &thread[id]);
   }
 
-  // Let them run for a while
-  env_->SleepForMicroseconds(kTestSeconds * 1000000);
-  // Stop the threads and wait for them to finish
-  mt.stop.store(true, std::memory_order_release);
-  for (int id = 0; id < kNumThreads; id++) {
-    while (mt.thread_done[id].load(std::memory_order_acquire) == false) {
-      env_->SleepForMicroseconds(100000);
-    }
-  }
   env_->WaitForJoin();
 }
 
 INSTANTIATE_TEST_CASE_P(
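Taken together, these hunks replace the stop/thread_done handshake with a wall-clock budget: each MTThreadBody loops until SystemClock reports that kTestSeconds have elapsed, and the test simply joins the threads. A minimal stand-alone sketch of that loop shape, assuming the public rocksdb/system_clock.h header (RunForSeconds and the op counter are illustrative, not RocksDB code):

#include <cstdint>
#include <cstdio>
#include <memory>

#include "rocksdb/system_clock.h"

// Run a worker loop for a fixed wall-clock budget instead of polling a
// shared stop flag; this is the pattern the MultiThreaded test now uses.
void RunForSeconds(uint32_t test_seconds) {
  std::shared_ptr<ROCKSDB_NAMESPACE::SystemClock> clock =
      ROCKSDB_NAMESPACE::SystemClock::Default();
  const uint64_t end_micros = clock->NowMicros() + test_seconds * 1000000U;
  uint64_t ops = 0;
  while (clock->NowMicros() < end_micros) {
    ++ops;  // one unit of test work would go here
  }
  std::fprintf(stderr, "... did %llu ops\n",
               static_cast<unsigned long long>(ops));
}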
@@ -4942,6 +4931,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(NumTableFilesAtLevel(1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(2), 0);
 
+  ASSERT_LT(SizeAtLevel(0) + SizeAtLevel(3) + SizeAtLevel(4),
+            120U * 4000U + 50U * 24);
   // Make sure data in files in L3 is not compacted by removing all files
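For reference, the asserted upper bound evaluates to 120U * 4000U + 50U * 24 = 480,000 + 1,200 = 481,200 bytes, written with unsigned literals so the whole expression stays unsigned alongside the unsigned level sizes returned by SizeAtLevel().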
@@ -7010,8 +7000,9 @@ TEST_F(DBTest, MemoryUsageWithMaxWriteBufferSizeToMaintain) {
     if ((size_all_mem_table > cur_active_mem) &&
         (cur_active_mem >=
          static_cast<uint64_t>(options.max_write_buffer_size_to_maintain)) &&
-        (size_all_mem_table > options.max_write_buffer_size_to_maintain +
-                                  options.write_buffer_size)) {
+        (size_all_mem_table >
+         static_cast<uint64_t>(options.max_write_buffer_size_to_maintain) +
+             options.write_buffer_size)) {
       ASSERT_FALSE(memory_limit_exceeded);
       memory_limit_exceeded = true;
     } else {
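The rewritten condition widens the signed max_write_buffer_size_to_maintain before the addition, so the whole right-hand side is evaluated as uint64_t regardless of whether size_t is 32 or 64 bits wide. A small stand-alone illustration of the pattern (values and variable names are made up):

#include <cstdint>
#include <cstdio>

int main() {
  // Stand-ins for the option fields: a signed 64-bit limit and a size_t
  // buffer size (size_t is only 32 bits wide on a 32-bit build).
  int64_t max_to_maintain = 2LL * 1024 * 1024 * 1024;       // 2 GiB
  size_t write_buffer_size = 64 * 1024 * 1024;              // 64 MiB
  uint64_t size_all_mem_table = 3ULL * 1024 * 1024 * 1024;  // 3 GiB

  // Casting the signed limit first forces the sum and the comparison into
  // uint64_t on every platform.
  bool exceeded = size_all_mem_table >
                  static_cast<uint64_t>(max_to_maintain) + write_buffer_size;
  std::printf("exceeded = %d\n", exceeded ? 1 : 0);
  return 0;
}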
env/fs_posix.cc
@@ -238,7 +238,7 @@ class PosixFileSystem : public FileSystem {
     }
     SetFD_CLOEXEC(fd, &options);
 
-    if (options.use_mmap_reads && sizeof(void*) >= 8) {
+    if (options.use_mmap_reads) {
       // Use of mmap for random reads has been removed because it
       // kills performance when storage is fast.
       // Use mmap when virtual address-space is plentiful.
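For context, sizeof(void*) >= 8 is a compile-time proxy for "this build has a 64-bit virtual address space"; with that guard in place, use_mmap_reads is effectively ignored on 32-bit builds. A tiny stand-alone illustration of the proxy (not RocksDB code):

#include <cstdio>

int main() {
  // sizeof(void*) is 8 on common 64-bit ABIs (LP64/LLP64) and 4 on 32-bit
  // builds, so it can stand in for "is the virtual address space plentiful?".
  constexpr bool kHas64BitAddressSpace = sizeof(void*) >= 8;
  std::printf("pointer size: %zu bytes, 64-bit address space: %d\n",
              sizeof(void*), kHas64BitAddressSpace ? 1 : 0);
  return 0;
}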
include/rocksdb/table.h

@@ -259,7 +259,7 @@ struct BlockBasedTableOptions {
   // block size specified here corresponds to uncompressed data. The
   // actual size of the unit read from disk may be smaller if
   // compression is enabled. This parameter can be changed dynamically.
-  size_t block_size = 4 * 1024;
+  uint64_t block_size = 4 * 1024;
 
   // This is used to close a block before it reaches the configured
   // 'block_size'. If the percentage of free space in the current block is less
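block_size is normally set through BlockBasedTableOptions and installed with NewBlockBasedTableFactory; changing the field's type to uint64_t does not affect typical call sites. A minimal sketch of such usage (the 16 KiB value is arbitrary):

#include "rocksdb/options.h"
#include "rocksdb/table.h"

ROCKSDB_NAMESPACE::Options MakeOptions() {
  ROCKSDB_NAMESPACE::BlockBasedTableOptions table_options;
  table_options.block_size = 16 * 1024;  // uncompressed user data per block
  ROCKSDB_NAMESPACE::Options options;
  options.table_factory.reset(
      ROCKSDB_NAMESPACE::NewBlockBasedTableFactory(table_options));
  return options;
}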
table/block_based/block_based_table_builder.cc

@@ -406,7 +406,8 @@ struct BlockBasedTableBuilder::Rep {
         file(f),
         offset(0),
         alignment(table_options.block_align
-                      ? std::min(table_options.block_size, kDefaultPageSize)
+                      ? std::min(static_cast<size_t>(table_options.block_size),
+                                 kDefaultPageSize)
                       : 0),
         data_block(table_options.block_restart_interval,
                    table_options.use_delta_encoding,
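The cast is what makes this compile on 32-bit targets: std::min deduces a single template type, and once block_size is a uint64_t it no longer matches kDefaultPageSize (a size_t) when size_t is 32 bits wide. A small stand-alone sketch of the same construct (kDefaultPageSize and Alignment here are illustrative stand-ins):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Stand-in for the builder's page-size constant.
constexpr size_t kDefaultPageSize = 4 * 1024;

size_t Alignment(bool block_align, uint64_t block_size) {
  // Without the static_cast, std::min(block_size, kDefaultPageSize) fails to
  // compile on 32-bit builds because uint64_t and size_t are different types
  // and the single template parameter of std::min cannot be deduced.
  return block_align
             ? std::min(static_cast<size_t>(block_size), kDefaultPageSize)
             : 0;
}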
table/block_based/block_based_table_factory.cc

@@ -778,7 +778,7 @@ std::string BlockBasedTableFactory::GetPrintableOptions() const {
     ret.append(buffer);
     ret.append(table_options_.persistent_cache->GetPrintableOptions());
   }
-  snprintf(buffer, kBufferSize, "  block_size: %" ROCKSDB_PRIszt "\n",
+  snprintf(buffer, kBufferSize, "  block_size: %" PRIu64 "\n",
            table_options_.block_size);
   ret.append(buffer);
   snprintf(buffer, kBufferSize, "  block_size_deviation: %d\n",
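ROCKSDB_PRIszt is RocksDB's format macro for size_t, so it stops matching once block_size is a uint64_t; PRIu64 from <cinttypes> picks the right length modifier on every platform. A minimal stand-alone illustration:

#include <cinttypes>
#include <cstdio>

int main() {
  uint64_t block_size = 4 * 1024;
  char buffer[64];
  // PRIu64 expands to the conversion specifier that matches uint64_t
  // ("llu" on 32-bit Linux, "lu" on LP64), so the format string is correct
  // on both 32- and 64-bit builds.
  std::snprintf(buffer, sizeof(buffer), "  block_size: %" PRIu64 "\n",
                block_size);
  std::printf("%s", buffer);
  return 0;
}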