Apply formatter to recent 200+ commits. (#5830)

Summary: Further apply formatter to more recent commits.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5830
Test Plan: Run all existing tests.
Differential Revision: D17488031
fbshipit-source-id: 137458fd94d56dd271b8b40c522b03036943a2ab

parent a5fa8735e9
commit e8263dbdaa
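The diff below is mechanical restyling. As a minimal sketch of the two rewrites that dominate it — collapsing short function bodies onto one line and re-aligning wrapped call arguments — here is a hypothetical, self-contained example. The Counter struct and PrintPair helper are illustrative only, and the exact clang-format profile is assumed to be the Google-based style RocksDB checks in; the commit itself does not name it.

#include <cstdio>
#include <string>

// Illustrative type, not from RocksDB.
struct Counter {
  // Before formatting, a short accessor like this was often split over three
  // lines; the formatter collapses it onto one, as in the num_found_compressed
  // changes below:
  int num_found() const { return num_found_; }
  int num_found_ = 0;
};

// Wrapped parameters are aligned under the opening parenthesis, matching the
// MultiGet call sites reformatted below.
static void PrintPair(const std::string& label, int first_value,
                      int second_value) {
  std::printf("%s: %d %d\n", label.c_str(), first_value, second_value);
}

int main() {
  Counter c;
  PrintPair("counts", c.num_found(), 0);
  return 0;
}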
cache/cache_bench.cc
@@ -11,9 +11,9 @@ int main() {
 }
 #else

-#include <cinttypes>
-#include <sys/types.h>
 #include <stdio.h>
+#include <sys/types.h>
+#include <cinttypes>

 #include "port/port.h"
 #include "rocksdb/cache.h"
@@ -9,11 +9,11 @@

 #include "db/column_family.h"

-#include <cinttypes>
-#include <vector>
-#include <string>
 #include <algorithm>
+#include <cinttypes>
 #include <limits>
+#include <string>
+#include <vector>

 #include "db/compaction/compaction_picker.h"
 #include "db/compaction/compaction_picker_fifo.h"
@@ -7,8 +7,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include <cinttypes>
 #include <algorithm>
+#include <cinttypes>
 #include <functional>
 #include <list>
 #include <memory>
@@ -7,8 +7,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include <cinttypes>
 #include <algorithm>
+#include <cinttypes>
 #include <iostream>
 #include <mutex>
 #include <queue>
@@ -5,9 +5,9 @@

 #ifndef ROCKSDB_LITE

-#include <cinttypes>
 #include <algorithm>
 #include <array>
+#include <cinttypes>
 #include <map>
 #include <string>
 #include <tuple>
@@ -1356,9 +1356,9 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevel) {
 // Test class for batched MultiGet with prefix extractor
 // Param bool - If true, use partitioned filters
 //              If false, use full filter block
-class MultiGetPrefixExtractorTest
-    : public DBBasicTest,
-      public ::testing::WithParamInterface<bool> {};
+class MultiGetPrefixExtractorTest : public DBBasicTest,
+                                    public ::testing::WithParamInterface<bool> {
+};

 TEST_P(MultiGetPrefixExtractorTest, Batched) {
   Options options = CurrentOptions();
@@ -1396,14 +1396,12 @@ TEST_P(MultiGetPrefixExtractorTest, Batched) {
   ASSERT_EQ(get_perf_context()->bloom_sst_hit_count, 4);
 }

-INSTANTIATE_TEST_CASE_P(
-    MultiGetPrefix, MultiGetPrefixExtractorTest,
-    ::testing::Bool());
+INSTANTIATE_TEST_CASE_P(MultiGetPrefix, MultiGetPrefixExtractorTest,
+                        ::testing::Bool());

 #ifndef ROCKSDB_LITE
-class DBMultiGetRowCacheTest
-    : public DBBasicTest,
-      public ::testing::WithParamInterface<bool> {};
+class DBMultiGetRowCacheTest : public DBBasicTest,
+                               public ::testing::WithParamInterface<bool> {};

 TEST_P(DBMultiGetRowCacheTest, MultiGetBatched) {
   do {
@@ -1543,10 +1541,9 @@ TEST_F(DBBasicTest, GetAllKeyVersions) {

 class DBBasicTestWithParallelIO
     : public DBTestBase,
-      public testing::WithParamInterface<std::tuple<bool,bool,bool,bool>> {
+      public testing::WithParamInterface<std::tuple<bool, bool, bool, bool>> {
  public:
-  DBBasicTestWithParallelIO()
-      : DBTestBase("/db_basic_test_with_parallel_io") {
+  DBBasicTestWithParallelIO() : DBTestBase("/db_basic_test_with_parallel_io") {
     bool compressed_cache = std::get<0>(GetParam());
     bool uncompressed_cache = std::get<1>(GetParam());
     compression_enabled_ = std::get<2>(GetParam());
@@ -1570,7 +1567,7 @@ class DBBasicTestWithParallelIO
     table_options.block_cache = uncompressed_cache_;
     table_options.block_cache_compressed = compressed_cache_;
     table_options.flush_block_policy_factory.reset(
-          new MyFlushBlockPolicyFactory());
+        new MyFlushBlockPolicyFactory());
     options.table_factory.reset(new BlockBasedTableFactory(table_options));
     if (!compression_enabled_) {
       options.compression = kNoCompression;
@@ -1598,15 +1595,9 @@ class DBBasicTestWithParallelIO
   int num_found() { return uncompressed_cache_->num_found(); }
   int num_inserts() { return uncompressed_cache_->num_inserts(); }

-  int num_lookups_compressed() {
-    return compressed_cache_->num_lookups();
-  }
-  int num_found_compressed() {
-    return compressed_cache_->num_found();
-  }
-  int num_inserts_compressed() {
-    return compressed_cache_->num_inserts();
-  }
+  int num_lookups_compressed() { return compressed_cache_->num_lookups(); }
+  int num_found_compressed() { return compressed_cache_->num_found(); }
+  int num_inserts_compressed() { return compressed_cache_->num_inserts(); }

   bool fill_cache() { return fill_cache_; }

@@ -1614,8 +1605,7 @@ class DBBasicTestWithParallelIO
   static void TearDownTestCase() {}

  private:
-  class MyFlushBlockPolicyFactory
-      : public FlushBlockPolicyFactory {
+  class MyFlushBlockPolicyFactory : public FlushBlockPolicyFactory {
    public:
     MyFlushBlockPolicyFactory() {}

@@ -1630,11 +1620,10 @@ class DBBasicTestWithParallelIO
     }
   };

-  class MyFlushBlockPolicy
-      : public FlushBlockPolicy {
+  class MyFlushBlockPolicy : public FlushBlockPolicy {
    public:
     explicit MyFlushBlockPolicy(const BlockBuilder& data_block_builder)
-      : num_keys_(0), data_block_builder_(data_block_builder) {}
+        : num_keys_(0), data_block_builder_(data_block_builder) {}

     bool Update(const Slice& /*key*/, const Slice& /*value*/) override {
       if (data_block_builder_.empty()) {
@@ -1656,11 +1645,10 @@ class DBBasicTestWithParallelIO
     const BlockBuilder& data_block_builder_;
   };

-  class MyBlockCache
-      : public Cache {
+  class MyBlockCache : public Cache {
    public:
     explicit MyBlockCache(std::shared_ptr<Cache>& target)
-      : target_(target), num_lookups_(0), num_found_(0), num_inserts_(0) {}
+        : target_(target), num_lookups_(0), num_found_(0), num_inserts_(0) {}

     virtual const char* Name() const override { return "MyBlockCache"; }

@@ -1682,9 +1670,7 @@ class DBBasicTestWithParallelIO
       return handle;
     }

-    virtual bool Ref(Handle* handle) override {
-      return target_->Ref(handle);
-    }
+    virtual bool Ref(Handle* handle) override { return target_->Ref(handle); }

     virtual bool Release(Handle* handle, bool force_erase = false) override {
       return target_->Release(handle, force_erase);
@@ -1694,12 +1680,8 @@ class DBBasicTestWithParallelIO
       return target_->Value(handle);
     }

-    virtual void Erase(const Slice& key) override {
-      target_->Erase(key);
-    }
-    virtual uint64_t NewId() override {
-      return target_->NewId();
-    }
+    virtual void Erase(const Slice& key) override { target_->Erase(key); }
+    virtual uint64_t NewId() override { return target_->NewId(); }

     virtual void SetCapacity(size_t capacity) override {
       target_->SetCapacity(capacity);
@@ -1717,9 +1699,7 @@ class DBBasicTestWithParallelIO
       return target_->GetCapacity();
     }

-    virtual size_t GetUsage() const override {
-      return target_->GetUsage();
-    }
+    virtual size_t GetUsage() const override { return target_->GetUsage(); }

     virtual size_t GetUsage(Handle* handle) const override {
       return target_->GetUsage(handle);
@@ -1745,6 +1725,7 @@ class DBBasicTestWithParallelIO
     int num_found() { return num_found_; }
+
     int num_inserts() { return num_inserts_; }

    private:
     std::shared_ptr<Cache> target_;
     int num_lookups_;
@@ -1777,7 +1758,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
   statuses.resize(keys.size());

   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
-        keys.data(), values.data(), statuses.data(), true);
+                     keys.data(), values.data(), statuses.data(), true);
   ASSERT_TRUE(CheckValue(0, values[0].ToString()));
   ASSERT_TRUE(CheckValue(50, values[1].ToString()));

@@ -1789,7 +1770,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
   values[0].Reset();
   values[1].Reset();
   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
-        keys.data(), values.data(), statuses.data(), true);
+                     keys.data(), values.data(), statuses.data(), true);
   ASSERT_TRUE(CheckValue(1, values[0].ToString()));
   ASSERT_TRUE(CheckValue(51, values[1].ToString()));

@@ -1798,7 +1779,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {

   keys.resize(10);
   statuses.resize(10);
-  std::vector<int> key_ints{1,2,15,16,55,81,82,83,84,85};
+  std::vector<int> key_ints{1, 2, 15, 16, 55, 81, 82, 83, 84, 85};
   for (size_t i = 0; i < key_ints.size(); ++i) {
     key_data[i] = Key(key_ints[i]);
     keys[i] = Slice(key_data[i]);
@@ -1806,7 +1787,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
     values[i].Reset();
   }
   dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
-        keys.data(), values.data(), statuses.data(), true);
+                     keys.data(), values.data(), statuses.data(), true);
   for (size_t i = 0; i < key_ints.size(); ++i) {
     ASSERT_OK(statuses[i]);
     ASSERT_TRUE(CheckValue(key_ints[i], values[i].ToString()));
@@ -4664,7 +4664,7 @@ TEST_F(DBCompactionTest, ConsistencyFailTest) {

   rocksdb::SyncPoint::GetInstance()->SetCallBack(
       "VersionBuilder::CheckConsistency", [&](void* arg) {
-      auto p =
+        auto p =
             reinterpret_cast<std::pair<FileMetaData**, FileMetaData**>*>(arg);
         // just swap the two FileMetaData so that we hit error
         // in CheckConsistency funcion
@@ -6,9 +6,9 @@

 #ifndef ROCKSDB_LITE

-#include <cinttypes>
 #include <stdint.h>
 #include <algorithm>
+#include <cinttypes>
 #include <string>
 #include "db/db_impl/db_impl.h"
 #include "db/job_context.h"
@@ -172,7 +172,6 @@ Status DBImpl::GetCurrentWalFile(std::unique_ptr<LogFile>* current_log_file) {

   return wal_manager_.GetLiveWalFile(current_logfile_number, current_log_file);
 }
-
 }

 #endif  // ROCKSDB_LITE
@@ -3286,9 +3286,8 @@ Status DestroyDB(const std::string& dbname, const Options& options,
     if (type == kMetaDatabase) {
       del = DestroyDB(path_to_delete, options);
     } else if (type == kTableFile || type == kLogFile) {
-      del =
-          DeleteDBFile(&soptions, path_to_delete, dbname,
-                       /*force_bg=*/false, /*force_fg=*/!wal_in_db_path);
+      del = DeleteDBFile(&soptions, path_to_delete, dbname,
+                         /*force_bg=*/false, /*force_fg=*/!wal_in_db_path);
     } else {
       del = env->DeleteFile(path_to_delete);
     }
@@ -4003,8 +4002,7 @@ Status DBImpl::IngestExternalFiles(
 Status DBImpl::CreateColumnFamilyWithImport(
     const ColumnFamilyOptions& options, const std::string& column_family_name,
     const ImportColumnFamilyOptions& import_options,
-    const ExportImportFilesMetaData& metadata,
-    ColumnFamilyHandle** handle) {
+    const ExportImportFilesMetaData& metadata, ColumnFamilyHandle** handle) {
   assert(handle != nullptr);
   assert(*handle == nullptr);
   std::string cf_comparator_name = options.comparator->Name();
@@ -4045,8 +4043,7 @@ Status DBImpl::CreateColumnFamilyWithImport(
     // reuse the file number that has already assigned to the internal file,
     // and this will overwrite the external file. To protect the external
     // file, we have to make sure the file number will never being reused.
-    next_file_number =
-        versions_->FetchAddFileNumber(metadata.files.size());
+    next_file_number = versions_->FetchAddFileNumber(metadata.files.size());
     auto cf_options = cfd->GetLatestMutableCFOptions();
     status = versions_->LogAndApply(cfd, *cf_options, &dummy_edit, &mutex_,
                                     directories_.GetDbDir());
@@ -1565,7 +1565,8 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
       if (stats_cf_flush_needed) {
         ROCKS_LOG_INFO(immutable_db_options_.info_log,
                        "Force flushing stats CF with manual flush of %s "
-                       "to avoid holding old logs", cfd->GetName().c_str());
+                       "to avoid holding old logs",
+                       cfd->GetName().c_str());
         s = SwitchMemtable(cfd_stats, &context);
         flush_memtable_id = cfd_stats->imm()->GetLatestMemTableID();
         flush_req.emplace_back(cfd_stats, flush_memtable_id);
@@ -135,9 +135,9 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
     std::vector<std::string> filenames;
     result.env->GetChildren(result.wal_dir, &filenames);
     for (std::string& filename : filenames) {
-      if (filename.find(".log.trash",
-                        filename.length() - std::string(".log.trash").length()) !=
-          std::string::npos) {
+      if (filename.find(".log.trash", filename.length() -
+                                          std::string(".log.trash").length()) !=
+          std::string::npos) {
         std::string trash_file = result.wal_dir + "/" + filename;
         result.env->DeleteFile(trash_file);
       }
@@ -1352,8 +1352,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
     return s;
   }

-  impl->wal_in_db_path_ =
-      IsWalDirSameAsDBPath(&impl->immutable_db_options_);
+  impl->wal_in_db_path_ = IsWalDirSameAsDBPath(&impl->immutable_db_options_);

   impl->mutex_.Lock();
   // Handles create_if_missing, error_if_exists
@@ -588,8 +588,7 @@ Status DB::OpenAsSecondary(
       &impl->write_controller_));
   impl->column_family_memtables_.reset(
       new ColumnFamilyMemTablesImpl(impl->versions_->GetColumnFamilySet()));
-  impl->wal_in_db_path_ =
-      IsWalDirSameAsDBPath(&impl->immutable_db_options_);
+  impl->wal_in_db_path_ = IsWalDirSameAsDBPath(&impl->immutable_db_options_);

   impl->mutex_.Lock();
   s = impl->Recover(column_families, true, false, false);
@@ -605,9 +605,9 @@ TEST_F(DBSecondaryTest, SwitchWAL) {
 TEST_F(DBSecondaryTest, SwitchWALMultiColumnFamilies) {
   const int kNumKeysPerMemtable = 1;
   SyncPoint::GetInstance()->DisableProcessing();
-  SyncPoint::GetInstance()->LoadDependency({
-      {"DBImpl::BackgroundCallFlush:ContextCleanedUp",
-       "DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp"}});
+  SyncPoint::GetInstance()->LoadDependency(
+      {{"DBImpl::BackgroundCallFlush:ContextCleanedUp",
+        "DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp"}});
   SyncPoint::GetInstance()->EnableProcessing();
   const std::string kCFName1 = "pikachu";
   Options options;
@@ -662,7 +662,8 @@ TEST_F(DBSecondaryTest, SwitchWALMultiColumnFamilies) {
         Put(0 /*cf*/, "key" + std::to_string(k), "value" + std::to_string(k)));
     ASSERT_OK(
         Put(1 /*cf*/, "key" + std::to_string(k), "value" + std::to_string(k)));
-  TEST_SYNC_POINT("DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp");
+  TEST_SYNC_POINT(
+      "DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp");
   ASSERT_OK(db_secondary_->TryCatchUpWithPrimary());
   verify_db(dbfull(), handles_, db_secondary_, handles_secondary_);
   SyncPoint::GetInstance()->ClearTrace();
@@ -5,10 +5,10 @@

 #include "db/db_info_dumper.h"

-#include <cinttypes>
 #include <stdio.h>
-#include <string>
 #include <algorithm>
+#include <cinttypes>
+#include <string>
 #include <vector>

 #include "file/filename.h"
@@ -2793,7 +2793,8 @@ class ModelDB : public DB {
     return Status::OK();
   }

-  Status GetCurrentWalFile(std::unique_ptr<LogFile>* /*current_log_file*/) override {
+  Status GetCurrentWalFile(
+      std::unique_ptr<LogFile>* /*current_log_file*/) override {
     return Status::OK();
   }

@@ -6265,7 +6266,7 @@ TEST_F(DBTest, LargeBlockSizeTest) {
   CreateAndReopenWithCF({"pikachu"}, options);
   ASSERT_OK(Put(0, "foo", "bar"));
   BlockBasedTableOptions table_options;
-  table_options.block_size = 8LL*1024*1024*1024LL;
+  table_options.block_size = 8LL * 1024 * 1024 * 1024LL;
   options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   ASSERT_NOK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
 }
@@ -48,9 +48,7 @@ ROT13BlockCipher rot13Cipher_(16);
 #endif  // ROCKSDB_LITE

 DBTestBase::DBTestBase(const std::string path)
-    : mem_env_(nullptr),
-      encrypted_env_(nullptr),
-      option_config_(kDefault) {
+    : mem_env_(nullptr), encrypted_env_(nullptr), option_config_(kDefault) {
   Env* base_env = Env::Default();
 #ifndef ROCKSDB_LITE
   const char* test_env_uri = getenv("TEST_ENV_URI");
@@ -452,7 +452,7 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionSizeAmplification) {
   ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                      &mutable_cf_options));
   ASSERT_EQ(110u, mutable_cf_options.compaction_options_universal
-                         .max_size_amplification_percent);
+                      .max_size_amplification_percent);

   dbfull()->TEST_WaitForCompact();
   // Verify that size amplification did happen
@@ -534,8 +534,10 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionReadAmplification) {
   ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
                                                      &mutable_cf_options));
   ASSERT_EQ(mutable_cf_options.compaction_options_universal.size_ratio, 100u);
-  ASSERT_EQ(mutable_cf_options.compaction_options_universal.min_merge_width, 2u);
-  ASSERT_EQ(mutable_cf_options.compaction_options_universal.max_merge_width, 2u);
+  ASSERT_EQ(mutable_cf_options.compaction_options_universal.min_merge_width,
+            2u);
+  ASSERT_EQ(mutable_cf_options.compaction_options_universal.max_merge_width,
+            2u);

   dbfull()->TEST_WaitForCompact();

@@ -8,8 +8,8 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 #include "db/dbformat.h"

-#include <cinttypes>
 #include <stdio.h>
+#include <cinttypes>
 #include "monitoring/perf_context_imp.h"
 #include "port/port.h"
 #include "util/coding.h"
@@ -86,8 +86,7 @@ Status ExternalSstFileIngestionJob::Prepare(
       return Status::InvalidArgument("File contain no entries");
     }

-    if (!f.smallest_internal_key.Valid() ||
-        !f.largest_internal_key.Valid()) {
+    if (!f.smallest_internal_key.Valid() || !f.largest_internal_key.Valid()) {
       return Status::Corruption("Generated table have corrupted keys");
     }
   }
@@ -448,8 +447,10 @@ Status ExternalSstFileIngestionJob::GetIngestedFileInfo(
       table_reader->NewRangeTombstoneIterator(ro));

   // Get first (smallest) and last (largest) key from file.
-  file_to_ingest->smallest_internal_key = InternalKey("", 0, ValueType::kTypeValue);
-  file_to_ingest->largest_internal_key = InternalKey("", 0, ValueType::kTypeValue);
+  file_to_ingest->smallest_internal_key =
+      InternalKey("", 0, ValueType::kTypeValue);
+  file_to_ingest->largest_internal_key =
+      InternalKey("", 0, ValueType::kTypeValue);
   bool bounds_set = false;
   iter->SeekToFirst();
   if (iter->Valid()) {
@@ -485,11 +486,15 @@ Status ExternalSstFileIngestionJob::GetIngestedFileInfo(
       RangeTombstone tombstone(key, range_del_iter->value());

       InternalKey start_key = tombstone.SerializeKey();
-      if (!bounds_set || sstableKeyCompare(ucmp, start_key, file_to_ingest->smallest_internal_key) < 0) {
+      if (!bounds_set ||
+          sstableKeyCompare(ucmp, start_key,
+                            file_to_ingest->smallest_internal_key) < 0) {
         file_to_ingest->smallest_internal_key = start_key;
       }
       InternalKey end_key = tombstone.SerializeEndKey();
-      if (!bounds_set || sstableKeyCompare(ucmp, end_key, file_to_ingest->largest_internal_key) > 0) {
+      if (!bounds_set ||
+          sstableKeyCompare(ucmp, end_key,
+                            file_to_ingest->largest_internal_key) > 0) {
         file_to_ingest->largest_internal_key = end_key;
       }
       bounds_set = true;
@@ -531,9 +536,10 @@ Status ExternalSstFileIngestionJob::AssignLevelAndSeqnoForIngestedFile(

     if (vstorage->NumLevelFiles(lvl) > 0) {
       bool overlap_with_level = false;
-      status = sv->current->OverlapWithLevelIterator(ro, env_options_,
-          file_to_ingest->smallest_internal_key.user_key(), file_to_ingest->largest_internal_key.user_key(),
-          lvl, &overlap_with_level);
+      status = sv->current->OverlapWithLevelIterator(
+          ro, env_options_, file_to_ingest->smallest_internal_key.user_key(),
+          file_to_ingest->largest_internal_key.user_key(), lvl,
+          &overlap_with_level);
       if (!status.ok()) {
         return status;
       }
@@ -672,7 +678,8 @@ bool ExternalSstFileIngestionJob::IngestedFileFitInLevel(
   }

   auto* vstorage = cfd_->current()->storage_info();
-  Slice file_smallest_user_key(file_to_ingest->smallest_internal_key.user_key());
+  Slice file_smallest_user_key(
+      file_to_ingest->smallest_internal_key.user_key());
   Slice file_largest_user_key(file_to_ingest->largest_internal_key.user_key());

   if (vstorage->OverlapInLevel(level, &file_smallest_user_key,
@@ -2,8 +2,8 @@

 #include "db/import_column_family_job.h"

-#include <cinttypes>
 #include <algorithm>
+#include <cinttypes>
 #include <string>
 #include <vector>

@@ -64,7 +64,8 @@ Status ImportColumnFamilyJob::Prepare(uint64_t next_file_number,

   for (size_t i = 0; i < sorted_files.size() - 1; i++) {
     if (sstableKeyCompare(ucmp, sorted_files[i]->largest_internal_key,
-                          sorted_files[i + 1]->smallest_internal_key) >= 0) {
+                          sorted_files[i + 1]->smallest_internal_key) >=
+        0) {
       return Status::InvalidArgument("Files have overlapping ranges");
     }
   }
@@ -76,8 +77,7 @@ Status ImportColumnFamilyJob::Prepare(uint64_t next_file_number,
       return Status::InvalidArgument("File contain no entries");
     }

-    if (!f.smallest_internal_key.Valid() ||
-        !f.largest_internal_key.Valid()) {
+    if (!f.smallest_internal_key.Valid() || !f.largest_internal_key.Valid()) {
       return Status::Corruption("File has corrupted keys");
     }
   }
@@ -198,8 +198,8 @@ Status ImportColumnFamilyJob::GetIngestedFileInfo(
   if (!status.ok()) {
     return status;
   }
-  sst_file_reader.reset(new RandomAccessFileReader(std::move(sst_file),
-                                                   external_file));
+  sst_file_reader.reset(
+      new RandomAccessFileReader(std::move(sst_file), external_file));

   status = cfd_->ioptions()->table_factory->NewTableReader(
       TableReaderOptions(*cfd_->ioptions(),
|
@ -20,11 +20,11 @@ namespace rocksdb {
|
||||
// to ExternalSstFileIngestionJob.
|
||||
class ImportColumnFamilyJob {
|
||||
public:
|
||||
ImportColumnFamilyJob(
|
||||
Env* env, VersionSet* versions, ColumnFamilyData* cfd,
|
||||
const ImmutableDBOptions& db_options, const EnvOptions& env_options,
|
||||
const ImportColumnFamilyOptions& import_options,
|
||||
const std::vector<LiveFileMetaData>& metadata)
|
||||
ImportColumnFamilyJob(Env* env, VersionSet* versions, ColumnFamilyData* cfd,
|
||||
const ImmutableDBOptions& db_options,
|
||||
const EnvOptions& env_options,
|
||||
const ImportColumnFamilyOptions& import_options,
|
||||
const std::vector<LiveFileMetaData>& metadata)
|
||||
: env_(env),
|
||||
versions_(versions),
|
||||
cfd_(cfd),
|
||||
|
@@ -45,8 +45,7 @@ class ImportColumnFamilyTest : public DBTestBase {
     test::DestroyDir(env_, export_files_dir_);
   }

-  LiveFileMetaData LiveFileMetaDataInit(std::string name,
-                                        std::string path,
+  LiveFileMetaData LiveFileMetaDataInit(std::string name, std::string path,
                                         int level,
                                         SequenceNumber smallest_seqno,
                                         SequenceNumber largest_seqno) {
@@ -64,7 +63,7 @@ class ImportColumnFamilyTest : public DBTestBase {
   std::string export_files_dir_;
   ColumnFamilyHandle* import_cfh_;
   ColumnFamilyHandle* import_cfh2_;
-  ExportImportFilesMetaData *metadata_ptr_;
+  ExportImportFilesMetaData* metadata_ptr_;
 };

 TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFiles) {
@@ -545,7 +544,6 @@ TEST_F(ImportColumnFamilyTest, ImportColumnFamilyNegativeTest) {
                                          metadata, &import_cfh_));
     ASSERT_NE(import_cfh_, nullptr);
   }
-
 }

 }  // namespace rocksdb
@@ -10,8 +10,8 @@

 #include "db/internal_stats.h"

-#include <cinttypes>
 #include <algorithm>
+#include <cinttypes>
 #include <limits>
 #include <string>
 #include <utility>
@@ -745,20 +745,19 @@ TEST_P(PlainTableDBTest, BloomSchema) {
   Options options = CurrentOptions();
   options.create_if_missing = true;
   for (int bloom_locality = 0; bloom_locality <= 1; bloom_locality++) {
-      options.bloom_locality = bloom_locality;
+    options.bloom_locality = bloom_locality;
     PlainTableOptions plain_table_options;
     plain_table_options.user_key_len = 16;
-    plain_table_options.bloom_bits_per_key = 3; // high FP rate for test
+    plain_table_options.bloom_bits_per_key = 3;  // high FP rate for test
     plain_table_options.hash_table_ratio = 0.75;
     plain_table_options.index_sparseness = 16;
     plain_table_options.huge_page_tlb_size = 0;
     plain_table_options.encoding_type = kPlain;
-

     bool expect_bloom_not_match = false;
     options.table_factory.reset(new TestPlainTableFactory(
-        &expect_bloom_not_match, plain_table_options,
-        0 /* column_family_id */, kDefaultColumnFamilyName));
+        &expect_bloom_not_match, plain_table_options, 0 /* column_family_id */,
+        kDefaultColumnFamilyName));
     DestroyAndReopen(&options);

     for (unsigned i = 0; i < 2345; ++i) {
@@ -782,7 +781,7 @@ TEST_P(PlainTableDBTest, BloomSchema) {
       pattern = 163905UL;
     }
     bool expect_fp = pattern & (1UL << i);
-    //fprintf(stderr, "expect_fp@%u: %d\n", i, (int)expect_fp);
+    // fprintf(stderr, "expect_fp@%u: %d\n", i, (int)expect_fp);
     expect_bloom_not_match = !expect_fp;
     ASSERT_EQ("NOT_FOUND", Get(NthKey(i, 'n')));
   }
@@ -9,8 +9,8 @@
 #include <functional>
 #include <set>

-#include <cinttypes>
 #include <stdio.h>
+#include <cinttypes>

 #include "util/autovector.h"
 #include "util/kv_map.h"
@@ -277,10 +277,11 @@ Status TableCache::GetRangeTombstoneIterator(
 }

 #ifndef ROCKSDB_LITE
-void TableCache::CreateRowCacheKeyPrefix(
-    const ReadOptions& options,
-    const FileDescriptor& fd, const Slice& internal_key,
-    GetContext* get_context, IterKey& row_cache_key) {
+void TableCache::CreateRowCacheKeyPrefix(const ReadOptions& options,
+                                         const FileDescriptor& fd,
+                                         const Slice& internal_key,
+                                         GetContext* get_context,
+                                         IterKey& row_cache_key) {
   uint64_t fd_number = fd.GetNumber();
   // We use the user key as cache key instead of the internal key,
   // otherwise the whole cache would be invalidated every time the
@@ -312,13 +313,11 @@ void TableCache::CreateRowCacheKeyPrefix(
   AppendVarint64(&row_cache_key, seq_no);
 }

-bool TableCache::GetFromRowCache(
-    const Slice& user_key, IterKey& row_cache_key,
-    size_t prefix_size, GetContext* get_context) {
+bool TableCache::GetFromRowCache(const Slice& user_key, IterKey& row_cache_key,
+                                 size_t prefix_size, GetContext* get_context) {
   bool found = false;

-  row_cache_key.TrimAppend(prefix_size, user_key.data(),
-                           user_key.size());
+  row_cache_key.TrimAppend(prefix_size, user_key.data(), user_key.size());
   if (auto row_handle =
           ioptions_.row_cache->Lookup(row_cache_key.GetUserKey())) {
     // Cleanable routine to release the cache entry
@@ -327,8 +326,8 @@ bool TableCache::GetFromRowCache(
       void* cache_handle) {
     ((Cache*)cache_to_clean)->Release((Cache::Handle*)cache_handle);
   };
-  auto found_row_cache_entry = static_cast<const std::string*>(
-      ioptions_.row_cache->Value(row_handle));
+  auto found_row_cache_entry =
+      static_cast<const std::string*>(ioptions_.row_cache->Value(row_handle));
   // If it comes here value is located on the cache.
   // found_row_cache_entry points to the value on cache,
   // and value_pinner has cleanup procedure for the cached entry.
@@ -442,14 +441,15 @@ Status TableCache::MultiGet(const ReadOptions& options,
   Status s;
   TableReader* t = fd.table_reader;
   Cache::Handle* handle = nullptr;
-  MultiGetRange table_range(*mget_range, mget_range->begin(), mget_range->end());
+  MultiGetRange table_range(*mget_range, mget_range->begin(),
+                            mget_range->end());
 #ifndef ROCKSDB_LITE
   autovector<std::string, MultiGetContext::MAX_BATCH_SIZE> row_cache_entries;
   IterKey row_cache_key;
   size_t row_cache_key_prefix_size = 0;
   KeyContext& first_key = *table_range.begin();
-  bool lookup_row_cache = ioptions_.row_cache &&
-      !first_key.get_context->NeedToReadSequence();
+  bool lookup_row_cache =
+      ioptions_.row_cache && !first_key.get_context->NeedToReadSequence();

   // Check row cache if enabled. Since row cache does not currently store
   // sequence numbers, we cannot use it if we need to fetch the sequence.
@@ -459,8 +459,10 @@ Status TableCache::MultiGet(const ReadOptions& options,
                             row_cache_key);
     row_cache_key_prefix_size = row_cache_key.Size();

-    for (auto miter = table_range.begin(); miter != table_range.end(); ++miter) {
-      const Slice& user_key = miter->ukey;;
+    for (auto miter = table_range.begin(); miter != table_range.end();
+         ++miter) {
+      const Slice& user_key = miter->ukey;
+      ;
       GetContext* get_context = miter->get_context;

       if (GetFromRowCache(user_key, row_cache_key, row_cache_key_prefix_size,
@@ -519,9 +521,11 @@ Status TableCache::MultiGet(const ReadOptions& options,
   if (lookup_row_cache) {
     size_t row_idx = 0;

-    for (auto miter = table_range.begin(); miter != table_range.end(); ++miter) {
+    for (auto miter = table_range.begin(); miter != table_range.end();
+         ++miter) {
       std::string& row_cache_entry = row_cache_entries[row_idx++];
-      const Slice& user_key = miter->ukey;;
+      const Slice& user_key = miter->ukey;
+      ;
       GetContext* get_context = miter->get_context;

       get_context->SetReplayLog(nullptr);
@@ -208,8 +208,7 @@ class TableCache {
   void CreateRowCacheKeyPrefix(const ReadOptions& options,
                                const FileDescriptor& fd,
                                const Slice& internal_key,
-                               GetContext* get_context,
-                               IterKey& row_cache_key);
+                               GetContext* get_context, IterKey& row_cache_key);

   // Helper function to lookup the row cache for a key. It appends the
   // user key to row_cache_key at offset prefix_size
|
@ -9,9 +9,9 @@
|
||||
|
||||
#include "db/version_builder.h"
|
||||
|
||||
#include <cinttypes>
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <cinttypes>
|
||||
#include <functional>
|
||||
#include <map>
|
||||
#include <set>
|
||||
@@ -173,14 +173,13 @@ class VersionBuilder::Rep {
                 " vs. file with global_seqno %" PRIu64 "\n",
                 f1->fd.smallest_seqno, f1->fd.largest_seqno,
                 external_file_seqno);
-        return Status::Corruption("L0 file with seqno " +
-                                  NumberToString(f1->fd.smallest_seqno) +
-                                  " " +
-                                  NumberToString(f1->fd.largest_seqno) +
-                                  " vs. file with global_seqno" +
-                                  NumberToString(external_file_seqno) +
-                                  " with fileNumber " +
-                                  NumberToString(f1->fd.GetNumber()));
+        return Status::Corruption(
+            "L0 file with seqno " +
+            NumberToString(f1->fd.smallest_seqno) + " " +
+            NumberToString(f1->fd.largest_seqno) +
+            " vs. file with global_seqno" +
+            NumberToString(external_file_seqno) + " with fileNumber " +
+            NumberToString(f1->fd.GetNumber()));
       }
     } else if (f1->fd.smallest_seqno <= f2->fd.smallest_seqno) {
       fprintf(stderr,
@@ -52,7 +52,7 @@ struct FileDescriptor {
         smallest_seqno(_smallest_seqno),
         largest_seqno(_largest_seqno) {}

-  FileDescriptor(const FileDescriptor& fd) { *this=fd; }
+  FileDescriptor(const FileDescriptor& fd) { *this = fd; }

   FileDescriptor& operator=(const FileDescriptor& fd) {
     table_reader = fd.table_reader;
@@ -5727,7 +5727,8 @@ Status ReactiveVersionSet::ApplyOneVersionEditToBuilder(
     return Status::OK();
   }
   if (active_version_builders_.find(edit.column_family_) ==
-      active_version_builders_.end() && !cfd->IsDropped()) {
+          active_version_builders_.end() &&
+      !cfd->IsDropped()) {
     std::unique_ptr<BaseReferencedVersionBuilder> builder_guard(
         new BaseReferencedVersionBuilder(cfd));
     active_version_builders_.insert(
@@ -9,10 +9,10 @@

 #include "db/wal_manager.h"

-#include <cinttypes>
 #include <algorithm>
-#include <vector>
+#include <cinttypes>
 #include <memory>
+#include <vector>

 #include "db/log_reader.h"
 #include "db/log_writer.h"
@@ -415,12 +415,13 @@ Status WalManager::ReadFirstRecord(const WalFileType type,
   return s;
 }

-Status WalManager::GetLiveWalFile(uint64_t number, std::unique_ptr<LogFile>* log_file) {
+Status WalManager::GetLiveWalFile(uint64_t number,
+                                  std::unique_ptr<LogFile>* log_file) {
   if (!log_file) {
     return Status::InvalidArgument("log_file not preallocated.");
   }

-  if(!number) {
+  if (!number) {
     return Status::PathNotFound("log file not available");
   }

@@ -433,16 +434,13 @@ Status WalManager::GetLiveWalFile(uint64_t number,
     return s;
   }

-  log_file->reset(new LogFileImpl(
-      number,
-      kAliveLogFile,
-      0, // SequenceNumber
-      size_bytes));
+  log_file->reset(new LogFileImpl(number, kAliveLogFile,
+                                  0,  // SequenceNumber
+                                  size_bytes));

   return Status::OK();
 }
-

 // the function returns status.ok() and sequence == 0 if the file exists, but is
 // empty
 Status WalManager::ReadFirstLine(const std::string& fname,
@@ -334,9 +334,7 @@ void WriteBatch::Clear() {
   wal_term_point_.clear();
 }

-uint32_t WriteBatch::Count() const {
-  return WriteBatchInternal::Count(this);
-}
+uint32_t WriteBatch::Count() const { return WriteBatchInternal::Count(this); }

 uint32_t WriteBatch::ComputeContentFlags() const {
   auto rv = content_flags_.load(std::memory_order_relaxed);
env/env_test.cc
@@ -1110,7 +1110,8 @@ TEST_P(EnvPosixTestWithParam, MultiRead) {
   // Create file.
   {
     std::unique_ptr<WritableFile> wfile;
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
+#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \
+    !defined(OS_AIX)
     if (soptions.use_direct_writes) {
       soptions.use_direct_writes = false;
     }
@@ -1137,7 +1138,8 @@ TEST_P(EnvPosixTestWithParam, MultiRead) {
       data.emplace_back(NewAligned(kSectorSize, 0));
       reqs[i].scratch = data.back().get();
     }
-#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && !defined(OS_AIX)
+#if !defined(OS_MACOSX) && !defined(OS_WIN) && !defined(OS_SOLARIS) && \
+    !defined(OS_AIX)
     if (soptions.use_direct_reads) {
       soptions.use_direct_reads = false;
     }
@@ -1145,7 +1147,7 @@ TEST_P(EnvPosixTestWithParam, MultiRead) {
     ASSERT_OK(env_->NewRandomAccessFile(fname, &file, soptions));
     ASSERT_OK(file->MultiRead(reqs.data(), reqs.size()));
     for (size_t i = 0; i < reqs.size(); ++i) {
-      auto buf = NewAligned(kSectorSize * 8, static_cast<char>(i*2 + 1));
+      auto buf = NewAligned(kSectorSize * 8, static_cast<char>(i * 2 + 1));
       ASSERT_OK(reqs[i].status);
       ASSERT_EQ(memcmp(reqs[i].scratch, buf.get(), kSectorSize), 0);
     }
|
@ -14,8 +14,8 @@
|
||||
// run for a while, tailing the logs of the primary. After process with primary
|
||||
// instance exits, this process will keep running until you hit 'CTRL+C'.
|
||||
|
||||
#include <cinttypes>
|
||||
#include <chrono>
|
||||
#include <cinttypes>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <ctime>
|
||||
|
@@ -3,8 +3,8 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

-#include <cinttypes>
 #include <atomic>
+#include <cinttypes>
 #include <thread>
 #include <vector>

@@ -1137,7 +1137,8 @@ class DB {
   //
   // Additionally, for the sake of optimization current_log_file->StartSequence
   // would always be set to 0
-  virtual Status GetCurrentWalFile(std::unique_ptr<LogFile>* current_log_file) = 0;
+  virtual Status GetCurrentWalFile(
+      std::unique_ptr<LogFile>* current_log_file) = 0;

   // Note: this API is not yet consistent with WritePrepared transactions.
   // Sets iter to an iterator that is positioned at a write-batch containing
@@ -378,7 +378,8 @@ class StackableDB : public DB {
     return db_->GetSortedWalFiles(files);
   }

-  virtual Status GetCurrentWalFile(std::unique_ptr<LogFile>* current_log_file) override {
+  virtual Status GetCurrentWalFile(
+      std::unique_ptr<LogFile>* current_log_file) override {
     return db_->GetCurrentWalFile(current_log_file);
   }

@@ -13,9 +13,8 @@ import java.util.List;
 *
 * Taken from include/rocksdb/advanced_options.h
 */
-public interface AdvancedColumnFamilyOptionsInterface
-    <T extends AdvancedColumnFamilyOptionsInterface<T>> {
-
+public interface AdvancedColumnFamilyOptionsInterface<
+    T extends AdvancedColumnFamilyOptionsInterface<T>> {
  /**
   * The minimum number of write buffers that will be merged together
   * before writing to storage. If set to 1, then
@@ -11,9 +11,8 @@ package org.rocksdb;
 * Taken from include/rocksdb/advanced_options.h
 * and MutableCFOptions in util/cf_options.h
 */
-public interface AdvancedMutableColumnFamilyOptionsInterface
-    <T extends AdvancedMutableColumnFamilyOptionsInterface<T>> {
-
+public interface AdvancedMutableColumnFamilyOptionsInterface<
+    T extends AdvancedMutableColumnFamilyOptionsInterface<T>> {
  /**
   * The maximum number of write buffers that are built up in memory.
   * The default is 2, so that when 1 write buffer is being flushed to
@@ -5,10 +5,8 @@

 package org.rocksdb;

-public interface ColumnFamilyOptionsInterface
-    <T extends ColumnFamilyOptionsInterface<T>>
-    extends AdvancedColumnFamilyOptionsInterface<T> {
-
+public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInterface<T>>
+    extends AdvancedColumnFamilyOptionsInterface<T> {
   /**
    * Use this if your DB is very small (like under 1GB) and you don't want to
    * spend lots of memory for memtables.
@@ -9,7 +9,6 @@ import java.util.Collection;
 import java.util.List;

 public interface DBOptionsInterface<T extends DBOptionsInterface<T>> {
-
   /**
    * Use this if your DB is very small (like under 1GB) and you don't want to
    * spend lots of memory for memtables.
@@ -5,10 +5,9 @@

 package org.rocksdb;

-public interface MutableColumnFamilyOptionsInterface
-    <T extends MutableColumnFamilyOptionsInterface<T>>
-    extends AdvancedMutableColumnFamilyOptionsInterface<T> {
-
+public interface MutableColumnFamilyOptionsInterface<
+    T extends MutableColumnFamilyOptionsInterface<T>>
+    extends AdvancedMutableColumnFamilyOptionsInterface<T> {
   /**
    * Amount of data to build up in memory (backed by an unsorted log
    * on disk) before converting to a sorted on-disk file.
@@ -2,7 +2,6 @@
 package org.rocksdb;

 public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T>> {
-
   /**
    * Specifies the maximum number of concurrent background jobs (both flushes
    * and compactions combined).
@@ -64,9 +64,9 @@ public class Environment {
   public static String getJniLibraryName(final String name) {
     if (isUnix()) {
       final String arch = is64Bit() ? "64" : "32";
-      if(isPowerPC() || isAarch64()) {
+      if (isPowerPC() || isAarch64()) {
         return String.format("%sjni-linux-%s", name, ARCH);
-      } else if(isS390x()) {
+      } else if (isS390x()) {
         return String.format("%sjni-linux%s", name, ARCH);
       } else {
         return String.format("%sjni-linux%s", name, arch);
@@ -136,16 +136,12 @@ public class EnvironmentTest {
     assertThat(Environment.isUnix()).isTrue();
     assertThat(Environment.isAarch64()).isTrue();
     assertThat(Environment.is64Bit()).isTrue();
-    assertThat(Environment.getJniLibraryExtension()).
-        isEqualTo(".so");
-    assertThat(Environment.getSharedLibraryName("rocksdb")).
-        isEqualTo("rocksdbjni");
-    assertThat(Environment.getJniLibraryName("rocksdb")).
-        isEqualTo("rocksdbjni-linux-aarch64");
-    assertThat(Environment.getJniLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni-linux-aarch64.so");
-    assertThat(Environment.getSharedLibraryFileName("rocksdb")).
-        isEqualTo("librocksdbjni.so");
+    assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so");
+    assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni");
+    assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-aarch64");
+    assertThat(Environment.getJniLibraryFileName("rocksdb"))
+        .isEqualTo("librocksdbjni-linux-aarch64.so");
+    assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so");
   }

   private void setEnvironmentClassFields(String osName,
@@ -11,10 +11,10 @@

 #pragma once

-#include <time.h>
 #include <atomic>
 #include <memory>
 #include "port/sys_time.h"
+#include <time.h>

 #include "file/writable_file_writer.h"
 #include "monitoring/iostats_context_imp.h"
@@ -4,8 +4,8 @@
 // (found in the LICENSE.Apache file in the root directory).
 //

-#include "env/mock_env.h"
 #include "logging/env_logger.h"
+#include "env/mock_env.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"

@@ -44,9 +44,7 @@ class EnvLoggerTest : public testing::Test {
     return result;
   }

-  void DeleteLogFile() {
-    ASSERT_OK(env_->DeleteFile(kLogFile));
-  }
+  void DeleteLogFile() { ASSERT_OK(env_->DeleteFile(kLogFile)); }

   static const std::string kSampleMessage;
   static const std::string kTestDir;
@@ -5,8 +5,8 @@

 #include "logging/event_logger.h"

-#include <cinttypes>
 #include <cassert>
+#include <cinttypes>
 #include <sstream>
 #include <string>

@@ -9,10 +9,10 @@

 #include "monitoring/histogram.h"

-#include <cinttypes>
-#include <cassert>
 #include <math.h>
 #include <stdio.h>
+#include <cassert>
+#include <cinttypes>

 #include "port/port.h"
 #include "util/cast_util.h"
@@ -27,7 +27,8 @@ extern thread_local PerfContext perf_context;
 #define PERF_TIMER_GUARD(metric)
 #define PERF_TIMER_GUARD_WITH_ENV(metric, env)
 #define PERF_CPU_TIMER_GUARD(metric, env)
-#define PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(metric, condition, stats, ticker_type)
+#define PERF_CONDITIONAL_TIMER_FOR_MUTEX_GUARD(metric, condition, stats, \
+                                               ticker_type)
 #define PERF_TIMER_MEASURE(metric)
 #define PERF_COUNTER_ADD(metric, value)
 #define PERF_COUNTER_BY_LEVEL_ADD(metric, value, level)
@@ -5,11 +5,11 @@
 //
 #include "monitoring/statistics.h"

-#include <cinttypes>
-#include "rocksdb/statistics.h"
-#include "port/likely.h"
 #include <algorithm>
+#include <cinttypes>
 #include <cstdio>
+#include "port/likely.h"
+#include "rocksdb/statistics.h"

 namespace rocksdb {

@@ -5,15 +5,15 @@

 #include "options/cf_options.h"

-#include <cinttypes>
 #include <cassert>
+#include <cinttypes>
 #include <limits>
 #include <string>
 #include "options/db_options.h"
 #include "port/port.h"
+#include "rocksdb/concurrent_task_limiter.h"
 #include "rocksdb/env.h"
 #include "rocksdb/options.h"
-#include "rocksdb/concurrent_task_limiter.h"

 namespace rocksdb {

@@ -8,9 +8,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

 #include <cctype>
-#include <cinttypes>
 #include <cstring>
 #include <unordered_map>
+#include <cinttypes>

 #include "cache/lru_cache.h"
 #include "cache/sharded_cache.h"
@@ -32,8 +32,8 @@

 #if defined(OS_WIN) && defined(_MSC_VER)

-// MSVC does not have weak symbol support. As long as ROCKSDB_JEMALLOC is defined,
-// Jemalloc memory allocator is used.
+// MSVC does not have weak symbol support. As long as ROCKSDB_JEMALLOC is
+// defined, Jemalloc memory allocator is used.
 static inline bool HasJemalloc() { return true; }

 #else
@@ -979,7 +979,8 @@ uint64_t WinEnvIO::NowMicros() {
     return li.QuadPart;
   }
   using namespace std::chrono;
-  return duration_cast<microseconds>(system_clock::now().time_since_epoch()).count();
+  return duration_cast<microseconds>(system_clock::now().time_since_epoch())
+      .count();
 }

 uint64_t WinEnvIO::NowNanos() {
@@ -7,8 +7,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include <cinttypes>
 #include <stdint.h>
+#include <cinttypes>

 #include <memory>
 #include <string>
@@ -2019,9 +2019,10 @@ IndexBlockIter* BlockBasedTable::InitBlockIterator<IndexBlockIter>(
 // If input_iter is null, new a iterator
 // If input_iter is not null, update this iter and return it
 template <typename TBlockIter>
-TBlockIter* BlockBasedTable::NewDataBlockIterator(
-    const ReadOptions& ro, CachableEntry<Block>& block, TBlockIter* input_iter,
-    Status s) const {
+TBlockIter* BlockBasedTable::NewDataBlockIterator(const ReadOptions& ro,
+                                                  CachableEntry<Block>& block,
+                                                  TBlockIter* input_iter,
+                                                  Status s) const {
   PERF_TIMER_GUARD(new_table_block_iter_nanos);

   TBlockIter* iter = input_iter != nullptr ? input_iter : new TBlockIter;
@@ -2167,11 +2168,10 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
         SequenceNumber seq_no = rep_->get_global_seqno(block_type);
         // If filling cache is allowed and a cache is configured, try to put the
         // block to the cache.
-        s = PutDataBlockToCache(key, ckey, block_cache, block_cache_compressed,
-                                block_entry, contents,
-                                raw_block_comp_type, uncompression_dict, seq_no,
-                                GetMemoryAllocator(rep_->table_options),
-                                block_type, get_context);
+        s = PutDataBlockToCache(
+            key, ckey, block_cache, block_cache_compressed, block_entry,
+            contents, raw_block_comp_type, uncompression_dict, seq_no,
+            GetMemoryAllocator(rep_->table_options), block_type, get_context);
       }
     }
   }
@@ -2344,12 +2344,10 @@ void BlockBasedTable::RetrieveMultipleBlocks(
       // BlockContents so it can free the memory
       assert(req.result.data() == req.scratch);
       std::unique_ptr<char[]> raw_block(req.scratch);
-      raw_block_contents = BlockContents(std::move(raw_block),
-                                         handle.size());
+      raw_block_contents = BlockContents(std::move(raw_block), handle.size());
     } else {
       // We used the scratch buffer, so no need to free anything
-      raw_block_contents = BlockContents(Slice(req.scratch,
-                                               handle.size()));
+      raw_block_contents = BlockContents(Slice(req.scratch, handle.size()));
     }
 #ifndef NDEBUG
     raw_block_contents.is_raw_block = true;
@@ -2370,35 +2368,36 @@ void BlockBasedTable::RetrieveMultipleBlocks(
       // MaybeReadBlockAndLoadToCache will insert into the block caches if
      // necessary. Since we're passing the raw block contents, it will
      // avoid looking up the block cache
-      s = MaybeReadBlockAndLoadToCache(nullptr, options, handle,
-          uncompression_dict, block_entry, BlockType::kData,
-          mget_iter->get_context, &lookup_data_block_context,
-          &raw_block_contents);
+      s = MaybeReadBlockAndLoadToCache(
+          nullptr, options, handle, uncompression_dict, block_entry,
+          BlockType::kData, mget_iter->get_context,
+          &lookup_data_block_context, &raw_block_contents);
     } else {
       CompressionType compression_type =
-                raw_block_contents.get_compression_type();
+          raw_block_contents.get_compression_type();
       BlockContents contents;
       if (compression_type != kNoCompression) {
         UncompressionContext context(compression_type);
         UncompressionInfo info(context, uncompression_dict, compression_type);
         s = UncompressBlockContents(info, req.result.data(), handle.size(),
-                                    &contents, footer.version(), rep_->ioptions,
-                                    memory_allocator);
+                                    &contents, footer.version(),
+                                    rep_->ioptions, memory_allocator);
       } else {
         if (scratch != nullptr) {
           // If we used the scratch buffer, then the contents need to be
           // copied to heap
           Slice raw = Slice(req.result.data(), handle.size());
-          contents = BlockContents(CopyBufferToHeap(
-              GetMemoryAllocator(rep_->table_options), raw),
-              handle.size());
+          contents = BlockContents(
+              CopyBufferToHeap(GetMemoryAllocator(rep_->table_options), raw),
+              handle.size());
         } else {
           contents = std::move(raw_block_contents);
         }
       }
       if (s.ok()) {
-        (*results)[idx_in_batch].SetOwnedValue(new Block(std::move(contents),
-            global_seqno, read_amp_bytes_per_bit, ioptions.statistics));
+        (*results)[idx_in_batch].SetOwnedValue(
+            new Block(std::move(contents), global_seqno,
+                      read_amp_bytes_per_bit, ioptions.statistics));
       }
     }
   }
@@ -3036,7 +3035,8 @@ void BlockBasedTableIterator<TBlockIter, TValue>::CheckOutOfBound() {
 }

 template <class TBlockIter, typename TValue>
-void BlockBasedTableIterator<TBlockIter, TValue>::CheckDataBlockWithinUpperBound() {
+void BlockBasedTableIterator<TBlockIter,
+                             TValue>::CheckDataBlockWithinUpperBound() {
   if (read_options_.iterate_upper_bound != nullptr &&
       block_iter_points_to_real_block_) {
     data_block_within_upper_bound_ =
@@ -3047,7 +3047,8 @@ void BlockBasedTableIterator<TBlockIter, TValue>::CheckDataBlockWithinUpperBound

 InternalIterator* BlockBasedTable::NewIterator(
     const ReadOptions& read_options, const SliceTransform* prefix_extractor,
-    Arena* arena, bool skip_filters, TableReaderCaller caller, size_t compaction_readahead_size) {
+    Arena* arena, bool skip_filters, TableReaderCaller caller,
+    size_t compaction_readahead_size) {
   BlockCacheLookupContext lookup_context{caller};
   bool need_upper_bound_check =
       PrefixExtractorChanged(rep_->table_properties.get(), prefix_extractor);
@@ -3068,10 +3069,11 @@ InternalIterator* BlockBasedTable::NewIterator(
         arena->AllocateAligned(sizeof(BlockBasedTableIterator<DataBlockIter>));
     return new (mem) BlockBasedTableIterator<DataBlockIter>(
         this, read_options, rep_->internal_comparator,
-        NewIndexIterator(read_options, need_upper_bound_check &&
-            rep_->index_type == BlockBasedTableOptions::kHashSearch,
-            /*input_iter=*/nullptr, /*get_context=*/nullptr,
-            &lookup_context),
+        NewIndexIterator(
+            read_options,
+            need_upper_bound_check &&
+                rep_->index_type == BlockBasedTableOptions::kHashSearch,
+            /*input_iter=*/nullptr, /*get_context=*/nullptr, &lookup_context),
         !skip_filters && !read_options.total_order_seek &&
             prefix_extractor != nullptr,
         need_upper_bound_check, prefix_extractor, BlockType::kData, caller,
@@ -3395,7 +3397,7 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
     ro.read_tier = kBlockCacheTier;

     for (auto miter = data_block_range.begin();
-        miter != data_block_range.end(); ++miter) {
+         miter != data_block_range.end(); ++miter) {
       const Slice& key = miter->ikey;
       iiter->Seek(miter->ikey);

@@ -3405,9 +3407,9 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
       }
       if (!iiter->Valid() ||
           (!v.first_internal_key.empty() && !skip_filters &&
-          UserComparatorWrapper(rep_->internal_comparator.user_comparator())
-              .Compare(ExtractUserKey(key),
-                       ExtractUserKey(v.first_internal_key)) < 0)) {
+           UserComparatorWrapper(rep_->internal_comparator.user_comparator())
+                   .Compare(ExtractUserKey(key),
+                            ExtractUserKey(v.first_internal_key)) < 0)) {
         // The requested key falls between highest key in previous block and
         // lowest key in current block.
         *(miter->s) = iiter->status();
@@ -138,8 +138,8 @@ class FilterBlockReader {
     GetContext* const get_context = iter->get_context;
     if (prefix_extractor->InDomain(ukey) &&
         !PrefixMayMatch(prefix_extractor->Transform(ukey), prefix_extractor,
-                       block_offset, no_io, &ikey, get_context,
-                       lookup_context)) {
+                        block_offset, no_io, &ikey, get_context,
+                        lookup_context)) {
       range->SkipKey(iter);
     }
   }
@@ -3,8 +3,8 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).

-#include <array>
 #include "table/block_based/full_filter_block.h"
+#include <array>

 #include "monitoring/perf_context_imp.h"
 #include "port/malloc.h"
@@ -218,8 +218,7 @@ void FullFilterBlockReader::PrefixesMayMatch(
 }

 void FullFilterBlockReader::MayMatch(
-    MultiGetRange* range, bool no_io,
-    const SliceTransform* prefix_extractor,
+    MultiGetRange* range, bool no_io, const SliceTransform* prefix_extractor,
     BlockCacheLookupContext* lookup_context) const {
   CachableEntry<BlockContents> filter_block;

@ -252,8 +251,7 @@ void FullFilterBlockReader::MayMatch(
|
||||
autovector<Slice, MultiGetContext::MAX_BATCH_SIZE> prefixes;
|
||||
int num_keys = 0;
|
||||
MultiGetRange filter_range(*range, range->begin(), range->end());
|
||||
for (auto iter = filter_range.begin();
|
||||
iter != filter_range.end(); ++iter) {
|
||||
for (auto iter = filter_range.begin(); iter != filter_range.end(); ++iter) {
|
||||
if (!prefix_extractor) {
|
||||
keys[num_keys++] = &iter->ukey;
|
||||
} else if (prefix_extractor->InDomain(iter->ukey)) {
|
||||
@ -266,15 +264,14 @@ void FullFilterBlockReader::MayMatch(
|
||||
filter_bits_reader->MayMatch(num_keys, &keys[0], &may_match[0]);
|
||||
|
||||
int i = 0;
|
||||
for (auto iter = filter_range.begin();
|
||||
iter != filter_range.end(); ++iter) {
|
||||
for (auto iter = filter_range.begin(); iter != filter_range.end(); ++iter) {
|
||||
if (!may_match[i]) {
|
||||
// Update original MultiGet range to skip this key. The filter_range
|
||||
// was temporarily used just to skip keys not in prefix_extractor domain
|
||||
range->SkipKey(iter);
|
||||
PERF_COUNTER_ADD(bloom_sst_miss_count, 1);
|
||||
} else {
|
||||
//PERF_COUNTER_ADD(bloom_sst_hit_count, 1);
|
||||
// PERF_COUNTER_ADD(bloom_sst_hit_count, 1);
|
||||
PerfContext* perf_ctx = get_perf_context();
|
||||
perf_ctx->bloom_sst_hit_count++;
|
||||
}
|
||||
|
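The MayMatch hunks above batch one filter probe across a whole MultiGet range: gather the keys (or their prefixes, when in the extractor's domain), query the filter once for the batch, then drop only the definite misses. A minimal sketch of the same pattern, assuming a hypothetical FilterProbe in place of FilterBitsReader:

#include <cstddef>
#include <string>
#include <vector>

// Hypothetical stand-in for FilterBitsReader::MayMatch: may_match[i] is set
// to false only when keys[i] is definitely not present.
struct FilterProbe {
  void MayMatch(const std::vector<const std::string*>& keys,
                std::vector<bool>* may_match) const {
    may_match->assign(keys.size(), true);  // trivially conservative
  }
};

// Returns the keys that survive the batched filter probe; callers skip the
// rest, as range->SkipKey(iter) does in the diff above.
std::vector<const std::string*> FilterBatch(
    const std::vector<std::string>& keys, const FilterProbe& filter) {
  std::vector<const std::string*> batch;
  batch.reserve(keys.size());
  for (const auto& k : keys) batch.push_back(&k);
  std::vector<bool> may_match;
  filter.MayMatch(batch, &may_match);
  std::vector<const std::string*> survivors;
  for (size_t i = 0; i < batch.size(); ++i) {
    if (may_match[i]) survivors.push_back(batch[i]);
  }
  return survivors;
}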
@@ -102,12 +102,12 @@ size_t UncompressionDictReader::ApproximateMemoryUsage() const {
           : 0;

 #ifdef ROCKSDB_MALLOC_USABLE_SIZE
   usage += malloc_usable_size(const_cast<UncompressionDictReader*>(this));
 #else
   usage += sizeof(*this);
 #endif  // ROCKSDB_MALLOC_USABLE_SIZE

   return usage;
 }

 bool UncompressionDictReader::cache_dictionary_blocks() const {
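ApproximateMemoryUsage() above prefers malloc_usable_size() because the allocator may round an allocation up to a size class, so the real footprint can exceed sizeof(*this); the #else branch is the portable fallback. A minimal sketch of the idiom, assuming glibc's <malloc.h> location for malloc_usable_size and reusing the project's ROCKSDB_MALLOC_USABLE_SIZE feature macro:

#include <cstddef>
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
#include <malloc.h>
#endif

template <typename T>
size_t ApproximateHeapSize(const T* obj) {
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
  // Actual usable size of the heap block, including size-class padding.
  return malloc_usable_size(const_cast<T*>(obj));
#else
  // Portable lower bound when the allocator cannot be queried.
  return sizeof(*obj);
#endif
}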
@@ -51,7 +51,8 @@ class CuckooTableReader: public TableReader {
   InternalIterator* NewIterator(const ReadOptions&,
                                 const SliceTransform* prefix_extractor,
                                 Arena* arena, bool skip_filters,
-                                TableReaderCaller caller, size_t compaction_readahead_size = 0) override;
+                                TableReaderCaller caller,
+                                size_t compaction_readahead_size = 0) override;
   void Prepare(const Slice& target) override;

   // Report an approximation of how much memory has been used.

@@ -14,9 +14,9 @@ int main() {
 #else

 #include <cinttypes>
-#include <vector>
-#include <string>
 #include <map>
+#include <string>
+#include <vector>

 #include "memory/arena.h"
 #include "table/cuckoo/cuckoo_table_builder.h"

@@ -28,7 +28,8 @@ stl_wrappers::KVMap MakeMockFile(

 InternalIterator* MockTableReader::NewIterator(
     const ReadOptions&, const SliceTransform* /* prefix_extractor */,
-    Arena* /*arena*/, bool /*skip_filters*/, TableReaderCaller /*caller*/, size_t /*compaction_readahead_size*/) {
+    Arena* /*arena*/, bool /*skip_filters*/, TableReaderCaller /*caller*/,
+    size_t /*compaction_readahead_size*/) {
   return new MockTableIterator(table_);
 }

@@ -42,7 +42,7 @@ class MockTableReader : public TableReader {
                                 const SliceTransform* prefix_extractor,
                                 Arena* arena, bool skip_filters,
                                 TableReaderCaller caller,
                                 size_t compaction_readahead_size = 0) override;

   Status Get(const ReadOptions& readOptions, const Slice& key,
              GetContext* get_context, const SliceTransform* prefix_extractor,

@@ -5,13 +5,12 @@

 #include "table/plain/plain_table_bloom.h"

-#include <string>
 #include <algorithm>
+#include <string>
 #include "util/dynamic_bloom.h"

 #include "memory/allocator.h"
-

 namespace rocksdb {

 namespace {
@@ -28,7 +27,7 @@ uint32_t GetTotalBitsForLocality(uint32_t total_bits) {

   return num_blocks * (CACHE_LINE_SIZE * 8);
 }
-}
+}  // namespace

 PlainTableBloomV1::PlainTableBloomV1(uint32_t num_probes)
     : kTotalBits(0), kNumBlocks(0), kNumProbes(num_probes), data_(nullptr) {}
@@ -40,10 +39,10 @@ void PlainTableBloomV1::SetRawData(char* raw_data, uint32_t total_bits,
   kNumBlocks = num_blocks;
 }

-void PlainTableBloomV1::SetTotalBits(Allocator* allocator,
-                                     uint32_t total_bits, uint32_t locality,
-                                     size_t huge_page_tlb_size,
-                                     Logger* logger) {
+void PlainTableBloomV1::SetTotalBits(Allocator* allocator, uint32_t total_bits,
+                                     uint32_t locality,
+                                     size_t huge_page_tlb_size,
+                                     Logger* logger) {
   kTotalBits = (locality > 0) ? GetTotalBitsForLocality(total_bits)
                               : (total_bits + 7) / 8 * 8;
   kNumBlocks = (locality > 0) ? (kTotalBits / (CACHE_LINE_SIZE * 8)) : 0;
@@ -66,7 +65,8 @@ void PlainTableBloomV1::SetTotalBits(Allocator* allocator,
   data_ = raw;
 }

-void BloomBlockBuilder::AddKeysHashes(const std::vector<uint32_t>& keys_hashes) {
+void BloomBlockBuilder::AddKeysHashes(
+    const std::vector<uint32_t>& keys_hashes) {
   for (auto hash : keys_hashes) {
     bloom_.AddHash(hash);
   }

@@ -4,8 +4,8 @@
 // (found in the LICENSE.Apache file in the root directory).
 #pragma once

-#include <vector>
 #include <string>
+#include <vector>

 #include "rocksdb/slice.h"

@@ -83,7 +83,8 @@ class PlainTableReader: public TableReader {
   InternalIterator* NewIterator(const ReadOptions&,
                                 const SliceTransform* prefix_extractor,
                                 Arena* arena, bool skip_filters,
-                                TableReaderCaller caller, size_t compaction_readahead_size = 0) override;
+                                TableReaderCaller caller,
+                                size_t compaction_readahead_size = 0) override;

   void Prepare(const Slice& target) override;

@@ -45,11 +45,12 @@ class TableReader {
   // all the states but those allocated in arena.
   // skip_filters: disables checking the bloom filters even if they exist. This
   // option is effective only for block-based table format.
-  // compaction_readahead_size: its value will only be used if caller = kCompaction
-  virtual InternalIterator* NewIterator(const ReadOptions&,
-                                        const SliceTransform* prefix_extractor,
-                                        Arena* arena, bool skip_filters,
-                                        TableReaderCaller caller, size_t compaction_readahead_size = 0) = 0;
+  // compaction_readahead_size: its value will only be used if caller =
+  // kCompaction
+  virtual InternalIterator* NewIterator(
+      const ReadOptions&, const SliceTransform* prefix_extractor, Arena* arena,
+      bool skip_filters, TableReaderCaller caller,
+      size_t compaction_readahead_size = 0) = 0;

   virtual FragmentedRangeTombstoneIterator* NewRangeTombstoneIterator(
       const ReadOptions& /*read_options*/) {

@@ -6,8 +6,8 @@

 #include "test_util/transaction_test_util.h"

-#include <cinttypes>
 #include <algorithm>
+#include <cinttypes>
 #include <numeric>
 #include <random>
 #include <string>

@@ -16,12 +16,12 @@
 #include <unistd.h>
 #endif
 #include <fcntl.h>
-#include <cinttypes>
 #include <math.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/types.h>
 #include <atomic>
+#include <cinttypes>
 #include <condition_variable>
 #include <cstddef>
 #include <memory>

@@ -29,13 +29,13 @@ int main() {
 #else

 #include <fcntl.h>
-#include <cinttypes>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/types.h>
 #include <algorithm>
 #include <array>
 #include <chrono>
+#include <cinttypes>
 #include <exception>
 #include <queue>
 #include <thread>

@@ -226,8 +226,8 @@ LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) {
                                         parsed_params.flags);
   } else if (parsed_params.cmd == DropColumnFamilyCommand::Name()) {
     return new DropColumnFamilyCommand(parsed_params.cmd_params,
                                        parsed_params.option_map,
                                        parsed_params.flags);
   } else if (parsed_params.cmd == DBFileDumperCommand::Name()) {
     return new DBFileDumperCommand(parsed_params.cmd_params,
                                    parsed_params.option_map,
@@ -1176,7 +1176,7 @@ DropColumnFamilyCommand::DropColumnFamilyCommand(
     const std::vector<std::string>& params,
     const std::map<std::string, std::string>& options,
     const std::vector<std::string>& flags)
     : LDBCommand(options, flags, true, {ARG_DB}) {
   if (params.size() != 1) {
     exec_state_ = LDBCommandExecuteResult::Failed(
         "The name of column family to drop must be specified");

@@ -573,8 +573,9 @@ Status TraceAnalyzer::MakeStatistics() {
     // output the access count distribution
     if (FLAGS_output_access_count_stats && stat.second.a_count_dist_f) {
       for (auto& record : stat.second.a_count_stats) {
-        ret = snprintf(buffer_, sizeof(buffer_), "access_count: %" PRIu64 " num: %" PRIu64 "\n",
-                       record.first, record.second);
+        ret = snprintf(buffer_, sizeof(buffer_),
+                       "access_count: %" PRIu64 " num: %" PRIu64 "\n",
+                       record.first, record.second);
         if (ret < 0) {
           return Status::IOError("Format the output failed");
         }
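The snprintf reflows in this file all splice PRIu64 into the format string. That is the portable way to print a uint64_t: its printf length modifier differs across platforms (e.g. %lu vs %llu), and <cinttypes> defines the right macro per platform, which is also why so many hunks in this commit move #include <cinttypes> into sorted position. A minimal, self-contained example:

#include <cinttypes>
#include <cstdio>

void PrintAccessCount(uint64_t access_count, uint64_t num) {
  char buffer[64];
  // Adjacent string literals concatenate around the PRIu64 macro at
  // compile time, producing one format string.
  int ret = snprintf(buffer, sizeof(buffer),
                     "access_count: %" PRIu64 " num: %" PRIu64 "\n",
                     access_count, num);
  if (ret < 0) {
    return;  // snprintf signals an encoding error with a negative value
  }
  fputs(buffer, stdout);
}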
@@ -597,8 +598,8 @@ Status TraceAnalyzer::MakeStatistics() {
       get_mid = true;
     }
     if (FLAGS_output_key_distribution && stat.second.a_key_size_f) {
-      ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %" PRIu64 "\n", record.first,
-                     record.second);
+      ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %" PRIu64 "\n",
+                     record.first, record.second);
       if (ret < 0) {
         return Status::IOError("Format output failed");
       }
@@ -626,9 +627,9 @@ Status TraceAnalyzer::MakeStatistics() {
           (type == TraceOperationType::kPut ||
            type == TraceOperationType::kMerge)) {
         ret = snprintf(buffer_, sizeof(buffer_),
                        "Number_of_value_size_between %" PRIu64 " and %" PRIu64
                        " is: %" PRIu64 "\n",
                        v_begin, v_end, record.second);
         if (ret < 0) {
           return Status::IOError("Format output failed");
         }
@@ -676,9 +677,10 @@ Status TraceAnalyzer::MakeStatisticKeyStatsOrPrefix(TraceStats& stats) {
       succ_ratio = (static_cast<double>(record.second.succ_count)) /
                    record.second.access_count;
     }
-    ret = snprintf(buffer_, sizeof(buffer_), "%u %zu %" PRIu64 " %" PRIu64 " %f\n",
-                   record.second.cf_id, record.second.value_size,
-                   record.second.key_id, record.second.access_count, succ_ratio);
+    ret = snprintf(buffer_, sizeof(buffer_),
+                   "%u %zu %" PRIu64 " %" PRIu64 " %f\n", record.second.cf_id,
+                   record.second.value_size, record.second.key_id,
+                   record.second.access_count, succ_ratio);
     if (ret < 0) {
       return Status::IOError("Format output failed");
     }
@@ -704,9 +706,11 @@ Status TraceAnalyzer::MakeStatisticKeyStatsOrPrefix(TraceStats& stats) {
       prefix_succ_ratio =
           (static_cast<double>(prefix_succ_access)) / prefix_access;
     }
-    ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %" PRIu64 " %" PRIu64 " %f %f %s\n",
-                   record.second.key_id, prefix_access, prefix_count,
-                   prefix_ave_access, prefix_succ_ratio, prefix_out.c_str());
+    ret =
+        snprintf(buffer_, sizeof(buffer_),
+                 "%" PRIu64 " %" PRIu64 " %" PRIu64 " %f %f %s\n",
+                 record.second.key_id, prefix_access, prefix_count,
+                 prefix_ave_access, prefix_succ_ratio, prefix_out.c_str());
     if (ret < 0) {
       return Status::IOError("Format output failed");
     }
@@ -871,7 +875,8 @@ Status TraceAnalyzer::MakeStatisticQPS() {
       cur_ratio = (static_cast<double>(find_time->second)) / cur_uni_key;
       cur_num = find_time->second;
     }
-    ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %.12f\n", cur_num, cur_ratio);
+    ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %.12f\n",
+                   cur_num, cur_ratio);
     if (ret < 0) {
       return Status::IOError("Format the output failed");
     }
@@ -889,8 +894,8 @@ Status TraceAnalyzer::MakeStatisticQPS() {
     if (FLAGS_output_prefix_cut > 0 && stat.second.a_top_qps_prefix_f) {
       while (!stat.second.top_k_qps_sec.empty()) {
         ret = snprintf(buffer_, sizeof(buffer_), "At time: %u with QPS: %u\n",
                        stat.second.top_k_qps_sec.top().second,
                        stat.second.top_k_qps_sec.top().first);
         if (ret < 0) {
           return Status::IOError("Format the output failed");
         }
@@ -907,8 +912,9 @@ Status TraceAnalyzer::MakeStatisticQPS() {
       for (auto& qps_prefix : stat.second.a_qps_prefix_stats[qps_time]) {
         std::string qps_prefix_out =
             rocksdb::LDBCommand::StringToHex(qps_prefix.first);
-        ret = snprintf(buffer_, sizeof(buffer_), "The prefix: %s Access count: %u\n",
-                       qps_prefix_out.c_str(), qps_prefix.second);
+        ret = snprintf(buffer_, sizeof(buffer_),
+                       "The prefix: %s Access count: %u\n",
+                       qps_prefix_out.c_str(), qps_prefix.second);
         if (ret < 0) {
           return Status::IOError("Format the output failed");
         }
@@ -1017,9 +1023,10 @@ Status TraceAnalyzer::ReProcessing() {
       if (found != stat.a_key_stats.end()) {
         key_id = found->second.key_id;
       }
-      ret = snprintf(buffer_, sizeof(buffer_), "%u %" PRIu64 " %" PRIu64 "\n",
-                     stat.time_series.front().type,
-                     stat.time_series.front().ts, key_id);
+      ret =
+          snprintf(buffer_, sizeof(buffer_), "%u %" PRIu64 " %" PRIu64 "\n",
+                   stat.time_series.front().type,
+                   stat.time_series.front().ts, key_id);
       if (ret < 0) {
         return Status::IOError("Format the output failed");
       }
@@ -1065,9 +1072,9 @@ Status TraceAnalyzer::ReProcessing() {
       TraceStats& stat = ta_[type].stats[cf_id];
       if (stat.w_key_f) {
         if (stat.a_key_stats.find(input_key) != stat.a_key_stats.end()) {
-          ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %" PRIu64 "\n",
-                         cfs_[cf_id].w_count,
-                         stat.a_key_stats[input_key].access_count);
+          ret = snprintf(buffer_, sizeof(buffer_),
+                         "%" PRIu64 " %" PRIu64 "\n", cfs_[cf_id].w_count,
+                         stat.a_key_stats[input_key].access_count);
           if (ret < 0) {
             return Status::IOError("Format the output failed");
           }
@@ -1087,8 +1094,8 @@ Status TraceAnalyzer::ReProcessing() {
           prefix[type] = input_key.substr(0, FLAGS_output_prefix_cut);
           std::string prefix_out =
               rocksdb::LDBCommand::StringToHex(prefix[type]);
-          ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %s\n", cfs_[cf_id].w_count,
-                         prefix_out.c_str());
+          ret = snprintf(buffer_, sizeof(buffer_), "%" PRIu64 " %s\n",
+                         cfs_[cf_id].w_count, prefix_out.c_str());
          if (ret < 0) {
            return Status::IOError("Format the output failed");
          }
@@ -1904,8 +1911,8 @@ Status TraceAnalyzer::WriteTraceSequence(const uint32_t& type,
                                          const uint64_t ts) {
   std::string hex_key = rocksdb::LDBCommand::StringToHex(key);
   int ret;
-  ret =
-      snprintf(buffer_, sizeof(buffer_), "%u %u %zu %" PRIu64 "\n", type, cf_id, value_size, ts);
+  ret = snprintf(buffer_, sizeof(buffer_), "%u %u %zu %" PRIu64 "\n", type,
+                 cf_id, value_size, ts);
   if (ret < 0) {
     return Status::IOError("failed to format the output");
   }

@@ -56,8 +56,8 @@ int main() {
 }
 #else

-#include <cinttypes>
 #include <atomic>
+#include <cinttypes>
 #include <random>
 #include <set>
 #include <string>

@@ -23,8 +23,8 @@ int main() {
 #include "table/full_filter_bits_builder.h"
 #include "test_util/testharness.h"
 #include "test_util/testutil.h"
-#include "util/hash.h"
 #include "util/gflags_compat.h"
+#include "util/hash.h"

 using GFLAGS_NAMESPACE::ParseCommandLineFlags;

@@ -98,9 +98,7 @@ class BloomTest : public testing::Test {
     return filter_.size();
   }

-  Slice FilterData() const {
-    return Slice(filter_);
-  }
+  Slice FilterData() const { return Slice(filter_); }

   void DumpFilter() {
     fprintf(stderr, "F(");
@@ -190,28 +188,28 @@ TEST_F(BloomTest, VaryingLengths) {
 TEST_F(BloomTest, Schema) {
   char buffer[sizeof(int)];

-  ResetPolicy(NewBloomFilterPolicy(8)); // num_probes = 5
+  ResetPolicy(NewBloomFilterPolicy(8));  // num_probes = 5
   for (int key = 0; key < 87; key++) {
     Add(Key(key, buffer));
   }
   Build();
   ASSERT_EQ(BloomHash(FilterData()), 3589896109U);

-  ResetPolicy(NewBloomFilterPolicy(9)); // num_probes = 6
+  ResetPolicy(NewBloomFilterPolicy(9));  // num_probes = 6
   for (int key = 0; key < 87; key++) {
     Add(Key(key, buffer));
   }
   Build();
   ASSERT_EQ(BloomHash(FilterData()), 969445585);

-  ResetPolicy(NewBloomFilterPolicy(11)); // num_probes = 7
+  ResetPolicy(NewBloomFilterPolicy(11));  // num_probes = 7
   for (int key = 0; key < 87; key++) {
     Add(Key(key, buffer));
   }
   Build();
   ASSERT_EQ(BloomHash(FilterData()), 1694458207);

-  ResetPolicy(NewBloomFilterPolicy(10)); // num_probes = 6
+  ResetPolicy(NewBloomFilterPolicy(10));  // num_probes = 6
   for (int key = 0; key < 87; key++) {
     Add(Key(key, buffer));
   }
@@ -235,7 +233,6 @@ TEST_F(BloomTest, Schema) {
   ResetPolicy();
 }

-
 // Different bits-per-byte

 class FullBloomTest : public testing::Test {
@@ -287,9 +284,7 @@ class FullBloomTest : public testing::Test {
     return filter_size_;
   }

-  Slice FilterData() {
-    return Slice(buf_.get(), filter_size_);
-  }
+  Slice FilterData() { return Slice(buf_.get(), filter_size_); }

   bool Matches(const Slice& s) {
     if (bits_reader_ == nullptr) {
@@ -381,9 +376,8 @@ TEST_F(FullBloomTest, FullVaryingLengths) {
 }

 namespace {
-inline uint32_t SelectByCacheLineSize(uint32_t for64,
-                                      uint32_t for128,
-                                      uint32_t for256) {
+inline uint32_t SelectByCacheLineSize(uint32_t for64, uint32_t for128,
+                                      uint32_t for256) {
   (void)for64;
   (void)for128;
   (void)for256;
@@ -394,10 +388,10 @@ inline uint32_t SelectByCacheLineSize(uint32_t for64, uint32_t for128,
 #elif CACHE_LINE_SIZE == 256
   return for256;
 #else
 #error "CACHE_LINE_SIZE unknown or unrecognized"
 #endif
 }
 }  // namespace

 // Ensure the implementation doesn't accidentally change in an
 // incompatible way
@@ -407,7 +401,7 @@ TEST_F(FullBloomTest, Schema) {
   // Use enough keys so that changing bits / key by 1 is guaranteed to
   // change number of allocated cache lines. So keys > max cache line bits.

-  ResetPolicy(NewBloomFilterPolicy(8)); // num_probes = 5
+  ResetPolicy(NewBloomFilterPolicy(8));  // num_probes = 5
   for (int key = 0; key < 2087; key++) {
     Add(Key(key, buffer));
   }
@@ -415,7 +409,7 @@ TEST_F(FullBloomTest, Schema) {
   ASSERT_EQ(BloomHash(FilterData()),
             SelectByCacheLineSize(1302145999, 2811644657U, 756553699));

-  ResetPolicy(NewBloomFilterPolicy(9)); // num_probes = 6
+  ResetPolicy(NewBloomFilterPolicy(9));  // num_probes = 6
   for (int key = 0; key < 2087; key++) {
     Add(Key(key, buffer));
   }
@@ -423,7 +417,7 @@ TEST_F(FullBloomTest, Schema) {
   ASSERT_EQ(BloomHash(FilterData()),
             SelectByCacheLineSize(2092755149, 661139132, 1182970461));

-  ResetPolicy(NewBloomFilterPolicy(11)); // num_probes = 7
+  ResetPolicy(NewBloomFilterPolicy(11));  // num_probes = 7
   for (int key = 0; key < 2087; key++) {
     Add(Key(key, buffer));
   }
@@ -431,7 +425,7 @@ TEST_F(FullBloomTest, Schema) {
   ASSERT_EQ(BloomHash(FilterData()),
             SelectByCacheLineSize(3755609649U, 1812694762, 1449142939));

-  ResetPolicy(NewBloomFilterPolicy(10)); // num_probes = 6
+  ResetPolicy(NewBloomFilterPolicy(10));  // num_probes = 6
   for (int key = 0; key < 2087; key++) {
     Add(Key(key, buffer));
   }
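The Schema tests above pin the exact filter bit layout to golden hash values. The comment's reasoning: with 2087 keys, changing bits-per-key by one changes total bits by 2087, which exceeds the largest supported cache line in bits (256 * 8 = 2048), so the number of allocated cache lines, and hence the hashed golden value, must change. A small sketch of that cache-line arithmetic; NumCacheLines is illustrative, not a RocksDB function:

#include <cstdint>

// Number of cache lines a cache-line-local bloom filter allocates.
uint32_t NumCacheLines(uint32_t num_keys, uint32_t bits_per_key,
                       uint32_t cache_line_bytes) {
  uint64_t total_bits = static_cast<uint64_t>(num_keys) * bits_per_key;
  uint64_t line_bits = static_cast<uint64_t>(cache_line_bytes) * 8;
  // Round up to whole cache lines.
  return static_cast<uint32_t>((total_bits + line_bits - 1) / line_bits);
}
// With num_keys = 2087 and cache_line_bytes = 256, bumping bits_per_key by 1
// adds 2087 bits > 2048 line bits, so the result grows by at least one line.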
@@ -15,21 +15,22 @@

 #ifdef HAVE_ARM64_CRYPTO
 /* unfolding to compute 8 * 3 = 24 bytes parallelly */
-#define CRC32C24BYTES(ITR) \
-  crc1 = crc32c_u64(crc1, *(buf64 + BLK_LENGTH + (ITR)));\
-  crc2 = crc32c_u64(crc2, *(buf64 + BLK_LENGTH*2 + (ITR)));\
+#define CRC32C24BYTES(ITR)                                     \
+  crc1 = crc32c_u64(crc1, *(buf64 + BLK_LENGTH + (ITR)));     \
+  crc2 = crc32c_u64(crc2, *(buf64 + BLK_LENGTH * 2 + (ITR))); \
   crc0 = crc32c_u64(crc0, *(buf64 + (ITR)));

 /* unfolding to compute 24 * 7 = 168 bytes parallelly */
-#define CRC32C7X24BYTES(ITR) do {\
-  CRC32C24BYTES((ITR)*7+0) \
-  CRC32C24BYTES((ITR)*7+1) \
-  CRC32C24BYTES((ITR)*7+2) \
-  CRC32C24BYTES((ITR)*7+3) \
-  CRC32C24BYTES((ITR)*7+4) \
-  CRC32C24BYTES((ITR)*7+5) \
-  CRC32C24BYTES((ITR)*7+6) \
-} while(0)
+#define CRC32C7X24BYTES(ITR)   \
+  do {                         \
+    CRC32C24BYTES((ITR)*7 + 0) \
+    CRC32C24BYTES((ITR)*7 + 1) \
+    CRC32C24BYTES((ITR)*7 + 2) \
+    CRC32C24BYTES((ITR)*7 + 3) \
+    CRC32C24BYTES((ITR)*7 + 4) \
+    CRC32C24BYTES((ITR)*7 + 5) \
+    CRC32C24BYTES((ITR)*7 + 6) \
+  } while (0)
 #endif

 uint32_t crc32c_runtime_check(void) {
@@ -45,15 +46,15 @@ uint32_t crc32c_arm64(uint32_t crc, unsigned char const *data,
   crc ^= 0xffffffff;

 #ifdef HAVE_ARM64_CRYPTO
   /* Crc32c Parallel computation
    * Algorithm comes from Intel whitepaper:
    * crc-iscsi-polynomial-crc32-instruction-paper
    *
    * Input data is divided into three equal-sized blocks
    * Three parallel blocks (crc0, crc1, crc2) for 1024 Bytes
    * One Block: 42(BLK_LENGTH) * 8(step length: crc32c_u64) bytes
    */
 #define BLK_LENGTH 42
   while (length >= 1024) {
     uint64_t t0, t1;
     uint32_t crc0 = 0, crc1 = 0, crc2 = 0;
@@ -97,31 +98,29 @@ uint32_t crc32c_arm64(uint32_t crc, unsigned char const *data,
     length -= 1024;
   }

-  if (length == 0)
-    return crc ^ (0xffffffffU);
+  if (length == 0) return crc ^ (0xffffffffU);
 #endif
   buf8 = (const uint8_t *)buf64;
   while (length >= 8) {
-    crc = crc32c_u64(crc, *(const uint64_t*)buf8);
+    crc = crc32c_u64(crc, *(const uint64_t *)buf8);
     buf8 += 8;
     length -= 8;
   }

   /* The following is more efficient than the straight loop */
   if (length >= 4) {
-    crc = crc32c_u32(crc, *(const uint32_t*)buf8);
+    crc = crc32c_u32(crc, *(const uint32_t *)buf8);
     buf8 += 4;
     length -= 4;
   }

   if (length >= 2) {
-    crc = crc32c_u16(crc, *(const uint16_t*)buf8);
+    crc = crc32c_u16(crc, *(const uint16_t *)buf8);
     buf8 += 2;
     length -= 2;
   }

-  if (length >= 1)
-    crc = crc32c_u8(crc, *buf8);
+  if (length >= 1) crc = crc32c_u8(crc, *buf8);

   crc ^= 0xffffffff;
   return crc;
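clang-format's layout of CRC32C7X24BYTES above makes the do { ... } while (0) wrapper easy to see. That wrapper is the standard idiom for multi-statement macros: the expansion is a single statement, so it nests safely under an unbraced if/else. A minimal sketch of the idiom, unrelated to CRC math itself:

#include <cstdio>

#define LOG_TWICE(msg)      \
  do {                      \
    std::printf("%s", msg); \
    std::printf("%s", msg); \
  } while (0)

void Example(bool verbose) {
  // Without the do/while wrapper, only the first printf would be guarded
  // by the if, and the stray semicolon would break the else binding.
  if (verbose)
    LOG_TWICE("hello\n");
  else
    std::printf("quiet\n");
}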
@@ -13,7 +13,7 @@
 #ifdef __ARM_FEATURE_CRC32
 #define HAVE_ARM64_CRC
 #include <arm_acle.h>
 #define crc32c_u8(crc, v) __crc32cb(crc, v)
 #define crc32c_u16(crc, v) __crc32ch(crc, v)
 #define crc32c_u32(crc, v) __crc32cw(crc, v)
 #define crc32c_u64(crc, v) __crc32cd(crc, v)

@@ -23,16 +23,15 @@ uint32_t roundUpToPow2(uint32_t x) {
   }
   return rv;
 }
-
 }

 DynamicBloom::DynamicBloom(Allocator* allocator, uint32_t total_bits,
-                           uint32_t num_probes,
-                           size_t huge_page_tlb_size, Logger* logger)
+                           uint32_t num_probes, size_t huge_page_tlb_size,
+                           Logger* logger)
     // Round down, except round up with 1
     : kNumDoubleProbes((num_probes + (num_probes == 1)) / 2) {
-  assert(num_probes % 2 == 0); // limitation of current implementation
-  assert(num_probes <= 10); // limitation of current implementation
+  assert(num_probes % 2 == 0);  // limitation of current implementation
+  assert(num_probes <= 10);     // limitation of current implementation
   assert(kNumDoubleProbes > 0);

   // Determine how much to round off + align by so that x ^ i (that's xor) is

@@ -44,10 +44,8 @@ class DynamicBloom {
   // it to be allocated, like:
   // sysctl -w vm.nr_hugepages=20
   // See linux doc Documentation/vm/hugetlbpage.txt
-  explicit DynamicBloom(Allocator* allocator,
-                        uint32_t total_bits,
-                        uint32_t num_probes = 6,
-                        size_t huge_page_tlb_size = 0,
+  explicit DynamicBloom(Allocator* allocator, uint32_t total_bits,
+                        uint32_t num_probes = 6, size_t huge_page_tlb_size = 0,
                         Logger* logger = nullptr);

   ~DynamicBloom() {}
@@ -159,8 +157,8 @@ inline bool DynamicBloom::MayContainHash(uint32_t h32) const {
   uint64_t h = 0x9e3779b97f4a7c13ULL * h32;
   for (unsigned i = 0;; ++i) {
     // Two bit probes per uint64_t probe
-    uint64_t mask = ((uint64_t)1 << (h & 63))
-        | ((uint64_t)1 << ((h >> 6) & 63));
+    uint64_t mask =
+        ((uint64_t)1 << (h & 63)) | ((uint64_t)1 << ((h >> 6) & 63));
     uint64_t val = data_[a ^ i].load(std::memory_order_relaxed);
     if (i + 1 >= kNumDoubleProbes) {
       return (val & mask) == mask;
@@ -179,8 +177,8 @@ inline void DynamicBloom::AddHash(uint32_t h32, const OrFunc& or_func) {
   uint64_t h = 0x9e3779b97f4a7c13ULL * h32;
   for (unsigned i = 0;; ++i) {
     // Two bit probes per uint64_t probe
-    uint64_t mask = ((uint64_t)1 << (h & 63))
-        | ((uint64_t)1 << ((h >> 6) & 63));
+    uint64_t mask =
+        ((uint64_t)1 << (h & 63)) | ((uint64_t)1 << ((h >> 6) & 63));
     or_func(&data_[a ^ i], mask);
     if (i + 1 >= kNumDoubleProbes) {
       return;
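The two hunks above show DynamicBloom's core trick: multiply the 32-bit hash by a 64-bit golden-ratio constant, then set or test two bits per 64-bit word, stepping to neighboring words with XOR so every probe stays in the same cache neighborhood. A compact standalone sketch of the scheme; simplified, the word count must be a power of two >= 4, and the rotation step is this sketch's choice rather than RocksDB's exact derivation:

#include <cstdint>
#include <vector>

class MiniDoubleProbeBloom {
 public:
  // num_words must be a power of two >= 4 so that a ^ i stays in bounds.
  explicit MiniDoubleProbeBloom(size_t num_words) : data_(num_words, 0) {}

  void AddHash(uint32_t h32) { Probe(h32, /*add=*/true); }
  bool MayContainHash(uint32_t h32) { return Probe(h32, /*add=*/false); }

 private:
  bool Probe(uint32_t h32, bool add) {
    uint64_t h = 0x9e3779b97f4a7c13ULL * h32;  // golden-ratio mix, as above
    size_t a = static_cast<size_t>(h >> 32) & (data_.size() - 1);
    for (unsigned i = 0; i < kNumDoubleProbes; ++i) {
      // Two bit probes per uint64_t word, exactly as in the diff above.
      uint64_t mask =
          ((uint64_t)1 << (h & 63)) | ((uint64_t)1 << ((h >> 6) & 63));
      if (add) {
        data_[a ^ i] |= mask;
      } else if ((data_[a ^ i] & mask) != mask) {
        return false;  // definitely absent
      }
      h = (h >> 12) | (h << 52);  // rotate to get fresh bit indices
    }
    return true;  // possibly present
  }

  static constexpr unsigned kNumDoubleProbes = 3;  // ~six bit probes total
  std::vector<uint64_t> data_;
};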
@@ -11,9 +11,9 @@ int main() {
 }
 #else

-#include <cinttypes>
 #include <algorithm>
 #include <atomic>
+#include <cinttypes>
 #include <functional>
 #include <memory>
 #include <thread>

@@ -253,8 +253,9 @@ TEST_F(DynamicBloomTest, concurrent_with_perf) {
     threads.pop_back();
   }

-  fprintf(stderr, "dynamic bloom, avg parallel add latency %3g"
-          " nanos/key\n",
+  fprintf(stderr,
+          "dynamic bloom, avg parallel add latency %3g"
+          " nanos/key\n",
           static_cast<double>(elapsed) / num_threads / num_keys);

   elapsed = 0;
@@ -276,8 +277,9 @@ TEST_F(DynamicBloomTest, concurrent_with_perf) {
     threads.pop_back();
   }

-  fprintf(stderr, "dynamic bloom, avg parallel hit latency %3g"
-          " nanos/key\n",
+  fprintf(stderr,
+          "dynamic bloom, avg parallel hit latency %3g"
+          " nanos/key\n",
           static_cast<double>(elapsed) / num_threads / num_keys);

   elapsed = 0;
@@ -286,8 +288,7 @@ TEST_F(DynamicBloomTest, concurrent_with_perf) {
       KeyMaker km;
       StopWatchNano timer(Env::Default());
       timer.Start();
-      for (uint64_t i = num_keys + 1 + t; i <= 2 * num_keys;
-           i += num_threads) {
+      for (uint64_t i = num_keys + 1 + t; i <= 2 * num_keys; i += num_threads) {
         bool f = std_bloom.MayContain(km.Seq(i));
         if (f) {
           ++false_positives;
@@ -303,8 +304,9 @@ TEST_F(DynamicBloomTest, concurrent_with_perf) {
     threads.pop_back();
   }

-  fprintf(stderr, "dynamic bloom, avg parallel miss latency %3g"
-          " nanos/key, %f%% false positive rate\n",
+  fprintf(stderr,
+          "dynamic bloom, avg parallel miss latency %3g"
+          " nanos/key, %f%% false positive rate\n",
           static_cast<double>(elapsed) / num_threads / num_keys,
           false_positives.load() * 100.0 / num_keys);
 }

@@ -52,10 +52,14 @@ TEST(HashTest, Values) {
   EXPECT_EQ(Hash("\x38\xd6\xf7\x28\x20\xb4\x8a\xe9", 8, kSeed), 3530274698u);
   EXPECT_EQ(Hash("\xbb\x18\x5d\xf4\x12\x03\xf7\x99", 8, kSeed), 1974545809u);
   EXPECT_EQ(Hash("\x80\xd4\x3b\x3b\xae\x22\xa2\x78", 8, kSeed), 3563570120u);
-  EXPECT_EQ(Hash("\x1a\xb5\xd0\xfe\xab\xc3\x61\xb2\x99", 9, kSeed), 2706087434u);
-  EXPECT_EQ(Hash("\x8e\x4a\xc3\x18\x20\x2f\x06\xe6\x3c", 9, kSeed), 1534654151u);
-  EXPECT_EQ(Hash("\xb6\xc0\xdd\x05\x3f\xc4\x86\x4c\xef", 9, kSeed), 2355554696u);
-  EXPECT_EQ(Hash("\x9a\x5f\x78\x0d\xaf\x50\xe1\x1f\x55", 9, kSeed), 1400800912u);
+  EXPECT_EQ(Hash("\x1a\xb5\xd0\xfe\xab\xc3\x61\xb2\x99", 9, kSeed),
+            2706087434u);
+  EXPECT_EQ(Hash("\x8e\x4a\xc3\x18\x20\x2f\x06\xe6\x3c", 9, kSeed),
+            1534654151u);
+  EXPECT_EQ(Hash("\xb6\xc0\xdd\x05\x3f\xc4\x86\x4c\xef", 9, kSeed),
+            2355554696u);
+  EXPECT_EQ(Hash("\x9a\x5f\x78\x0d\xaf\x50\xe1\x1f\x55", 9, kSeed),
+            1400800912u);
   EXPECT_EQ(Hash("\x22\x6f\x39\x1f\xf8\xdd\x4f\x52\x17\x94", 10, kSeed),
             3420325137u);
   EXPECT_EQ(Hash("\x32\x89\x2a\x75\x48\x3a\x4a\x02\x69\xdd", 10, kSeed),

@@ -9,8 +9,8 @@

 #include "util/rate_limiter.h"

-#include <cinttypes>
 #include <chrono>
+#include <cinttypes>
 #include <limits>

 #include "db/db_test_util.h"
@@ -98,24 +98,23 @@ struct ThreadPoolImpl::Impl {
   void SetThreadPriority(Env::Priority priority) { priority_ = priority; }

  private:
   static void BGThreadWrapper(void* arg);
-
   bool low_io_priority_;
   bool low_cpu_priority_;
   Env::Priority priority_;
   Env* env_;

   int total_threads_limit_;
   std::atomic_uint queue_len_;  // Queue length. Used for stats reporting
   bool exit_all_threads_;
   bool wait_for_jobs_to_complete_;

   // Entry per Schedule()/Submit() call
   struct BGItem {
     void* tag = nullptr;
     std::function<void()> function;
     std::function<void()> unschedFunction;
   };

   using BGQueue = std::deque<BGItem>;

@@ -24,10 +24,10 @@
 #include "util/string_util.h"
 #include "utilities/checkpoint/checkpoint_impl.h"

-#include <cinttypes>
 #include <stdlib.h>
 #include <algorithm>
 #include <atomic>
+#include <cinttypes>
 #include <functional>
 #include <future>
 #include <limits>

@@ -6,8 +6,8 @@
 #ifndef ROCKSDB_LITE
 #include "utilities/blob_db/blob_file.h"

-#include <cinttypes>
 #include <stdio.h>
+#include <cinttypes>

 #include <algorithm>
 #include <limits>

@@ -11,8 +11,8 @@

 #include "utilities/checkpoint/checkpoint_impl.h"

-#include <cinttypes>
 #include <algorithm>
+#include <cinttypes>
 #include <string>
 #include <vector>

@@ -313,106 +313,105 @@ TEST_F(CheckpointTest, GetSnapshotLink) {
 }

 TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) {
   // Create a database
   Status s;
   auto options = CurrentOptions();
   options.create_if_missing = true;
   CreateAndReopenWithCF({}, options);

   // Helper to verify the number of files in metadata and export dir
   auto verify_files_exported = [&](const ExportImportFilesMetaData& metadata,
                                    int num_files_expected) {
     ASSERT_EQ(metadata.files.size(), num_files_expected);
     std::vector<std::string> subchildren;
     env_->GetChildren(export_path_, &subchildren);
     int num_children = 0;
     for (const auto& child : subchildren) {
       if (child != "." && child != "..") {
         ++num_children;
       }
     }
     ASSERT_EQ(num_children, num_files_expected);
   };

   // Test DefaultColumnFamily
   {
     const auto key = std::string("foo");
     ASSERT_OK(Put(key, "v1"));

     Checkpoint* checkpoint;
     ASSERT_OK(Checkpoint::Create(db_, &checkpoint));

     // Export the Tables and verify
     ASSERT_OK(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(),
                                              export_path_, &metadata_));
     verify_files_exported(*metadata_, 1);
     ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name());
     test::DestroyDir(env_, export_path_);
     delete metadata_;
     metadata_ = nullptr;

     // Check again after compaction
     CompactAll();
     ASSERT_OK(Put(key, "v2"));
     ASSERT_OK(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(),
                                              export_path_, &metadata_));
     verify_files_exported(*metadata_, 2);
     ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name());
     test::DestroyDir(env_, export_path_);
     delete metadata_;
     metadata_ = nullptr;
     delete checkpoint;
   }

   // Test non default column family with non default comparator
   {
     auto cf_options = CurrentOptions();
     cf_options.comparator = ReverseBytewiseComparator();
     ASSERT_OK(db_->CreateColumnFamily(cf_options, "yoyo", &cfh_reverse_comp_));

     const auto key = std::string("foo");
     ASSERT_OK(db_->Put(WriteOptions(), cfh_reverse_comp_, key, "v1"));

     Checkpoint* checkpoint;
     ASSERT_OK(Checkpoint::Create(db_, &checkpoint));

     // Export the Tables and verify
     ASSERT_OK(checkpoint->ExportColumnFamily(cfh_reverse_comp_, export_path_,
                                              &metadata_));
     verify_files_exported(*metadata_, 1);
     ASSERT_EQ(metadata_->db_comparator_name,
               ReverseBytewiseComparator()->Name());
     delete checkpoint;
   }
 }

 TEST_F(CheckpointTest, ExportColumnFamilyNegativeTest) {
   // Create a database
   Status s;
   auto options = CurrentOptions();
   options.create_if_missing = true;
   CreateAndReopenWithCF({}, options);

   const auto key = std::string("foo");
   ASSERT_OK(Put(key, "v1"));

   Checkpoint* checkpoint;
   ASSERT_OK(Checkpoint::Create(db_, &checkpoint));

   // Export onto existing directory
   env_->CreateDirIfMissing(export_path_);
   ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(),
                                            export_path_, &metadata_),
             Status::InvalidArgument("Specified export_dir exists"));
   test::DestroyDir(env_, export_path_);

   // Export with invalid directory specification
   export_path_ = "";
   ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(),
                                            export_path_, &metadata_),
             Status::InvalidArgument("Specified export_dir invalid"));
   delete checkpoint;
 }

 TEST_F(CheckpointTest, CheckpointCF) {
@@ -2,10 +2,10 @@
 // This source code is licensed under both the GPLv2 (found in the
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
+#include "utilities/merge_operators/sortlist.h"
 #include "rocksdb/merge_operator.h"
 #include "rocksdb/slice.h"
 #include "utilities/merge_operators.h"
-#include "utilities/merge_operators/sortlist.h"

 using rocksdb::Logger;
 using rocksdb::MergeOperator;

@@ -8,8 +8,8 @@
 #include "utilities/persistent_cache/persistent_cache_tier.h"

 #include <cinttypes>
-#include <string>
 #include <sstream>
+#include <string>

 namespace rocksdb {

@@ -235,7 +235,9 @@ class SimCacheImpl : public SimCache {
     return cache_->GetUsage(handle);
   }

-  size_t GetCharge(Handle* handle) const override { return cache_->GetCharge(handle); }
+  size_t GetCharge(Handle* handle) const override {
+    return cache_->GetCharge(handle);
+  }

   size_t GetPinnedUsage() const override { return cache_->GetPinnedUsage(); }

@@ -9,7 +9,6 @@
 #include <string>
 #include <thread>

-
 #include "db/db_impl/db_impl.h"
 #include "logging/logging.h"
 #include "port/port.h"

@@ -638,8 +638,7 @@ void TransactionBaseImpl::TrackKey(TransactionKeyMap* key_map, uint32_t cfh_id,
   // in case it is not already in the map
   auto result = cf_key_map.try_emplace(key, seq);
   auto iter = result.first;
-  if (!result.second &&
-      seq < iter->second.seq) {
+  if (!result.second && seq < iter->second.seq) {
     // Now tracking this key with an earlier sequence number
     iter->second.seq = seq;
   }
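The TrackKey hunk above uses map::try_emplace (C++17), which inserts only when the key is absent and reports whether it did via the returned pair; on a collision the code keeps the smaller, earlier sequence number. A minimal sketch of the same keep-the-earliest-sequence pattern on a plain std::map:

#include <cstdint>
#include <map>
#include <string>

void TrackEarliestSeq(std::map<std::string, uint64_t>* key_map,
                      const std::string& key, uint64_t seq) {
  // try_emplace inserts {key, seq} only if key is absent; result.second
  // tells us whether the insertion happened.
  auto result = key_map->try_emplace(key, seq);
  auto iter = result.first;
  if (!result.second && seq < iter->second) {
    // Now tracking this key with an earlier sequence number
    iter->second = seq;
  }
}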
@@ -5,8 +5,8 @@

 #pragma once

-#include <cinttypes>
 #include <algorithm>
+#include <cinttypes>
 #include <functional>
 #include <string>
 #include <thread>

@@ -1445,8 +1445,8 @@ TEST_P(WritePreparedTransactionTest, MaxCatchupWithUnbackedSnapshot) {
     s = s_vec[0];
     ASSERT_TRUE(s.ok() || s.IsTryAgain());
     Slice key("key");
-    txn->MultiGet(ropt, db->DefaultColumnFamily(), 1, &key, &pinnable_val,
-                  &s, true);
+    txn->MultiGet(ropt, db->DefaultColumnFamily(), 1, &key, &pinnable_val, &s,
+                  true);
     ASSERT_TRUE(s.ok() || s.IsTryAgain());
     delete txn;
   }

@@ -43,7 +43,7 @@ Status WritePreparedTxnDB::Initialize(
     ordered_seq_cnt[seq] = cnt;
   }
   // AddPrepared must be called in order
-  for (auto seq_cnt: ordered_seq_cnt) {
+  for (auto seq_cnt : ordered_seq_cnt) {
     auto seq = seq_cnt.first;
     auto cnt = seq_cnt.second;
     for (size_t i = 0; i < cnt; i++) {

@@ -411,9 +411,8 @@ Status WriteUnpreparedTxn::FlushWriteBatchWithSavePointToDB() {
     bool trailing_batch = i == unflushed_save_points_->size();
     SavePointBatchHandler sp_handler(&write_batch_,
                                      *wupt_db_->GetCFHandleMap().get());
-    size_t curr_boundary = trailing_batch
-                               ? wb.GetWriteBatch()->GetDataSize()
-                               : (*unflushed_save_points_)[i];
+    size_t curr_boundary = trailing_batch ? wb.GetWriteBatch()->GetDataSize()
+                                          : (*unflushed_save_points_)[i];

     // Construct the partial write batch up to the savepoint.
     //

@@ -293,7 +293,7 @@ Status WriteUnpreparedTxnDB::Initialize(
     }
   }
   // AddPrepared must be called in order
-  for (auto seq_cnt: ordered_seq_cnt) {
+  for (auto seq_cnt : ordered_seq_cnt) {
     auto seq = seq_cnt.first;
     auto cnt = seq_cnt.second;
     for (size_t i = 0; i < cnt; i++) {

@@ -103,8 +103,8 @@ class DBWithTTLImpl : public DBWithTTL {
   void SetTtl(ColumnFamilyHandle *h, int32_t ttl) override;

  private:
   // remember whether the Close completes or not
   bool closed_;
 };

 class TtlIterator : public Iterator {

@@ -87,14 +87,10 @@ class TtlTest : public testing::Test {
   }

   // Call db_ttl_->Close() before delete db_ttl_
-  void CloseTtl() {
-    CloseTtlHelper(true);
-  }
+  void CloseTtl() { CloseTtlHelper(true); }

   // No db_ttl_->Close() before delete db_ttl_
-  void CloseTtlNoDBClose() {
-    CloseTtlHelper(false);
-  }
+  void CloseTtlNoDBClose() { CloseTtlHelper(false); }

   void CloseTtlHelper(bool close_db) {
     if (db_ttl_ != nullptr) {
@@ -416,7 +412,6 @@ TEST_F(TtlTest, NoEffect) {
   CloseTtl();
 }

-
 // Rerun the NoEffect test with a different version of CloseTtl
 // function, where db is directly deleted without close.
 TEST_F(TtlTest, DestructWithoutClose) {
@@ -425,18 +420,18 @@ TEST_F(TtlTest, DestructWithoutClose) {
   int64_t boundary2 = 2 * boundary1;

   OpenTtl();
-  PutValues(0, boundary1); //T=0: Set1 never deleted
-  SleepCompactCheck(1, 0, boundary1); //T=1: Set1 still there
+  PutValues(0, boundary1);             // T=0: Set1 never deleted
+  SleepCompactCheck(1, 0, boundary1);  // T=1: Set1 still there
   CloseTtlNoDBClose();

   OpenTtl(0);
-  PutValues(boundary1, boundary2 - boundary1); //T=1: Set2 never deleted
-  SleepCompactCheck(1, 0, boundary2); //T=2: Sets1 & 2 still there
+  PutValues(boundary1, boundary2 - boundary1);  // T=1: Set2 never deleted
+  SleepCompactCheck(1, 0, boundary2);           // T=2: Sets1 & 2 still there
   CloseTtlNoDBClose();

   OpenTtl(-1);
-  PutValues(boundary2, kSampleSize_ - boundary2); //T=3: Set3 never deleted
-  SleepCompactCheck(1, 0, kSampleSize_, true); //T=4: Sets 1,2,3 still there
+  PutValues(boundary2, kSampleSize_ - boundary2);  // T=3: Set3 never deleted
+  SleepCompactCheck(1, 0, kSampleSize_, true);  // T=4: Sets 1,2,3 still there
   CloseTtlNoDBClose();
 }

@@ -918,9 +918,9 @@ Status WriteBatchWithIndex::GetFromBatchAndDB(

     if (merge_operator) {
       std::string merge_result;
-      s = MergeHelper::TimedFullMerge(
-          merge_operator, key, merge_data, merge_context.GetOperands(),
-          &merge_result, logger, statistics, env);
+      s = MergeHelper::TimedFullMerge(merge_operator, key, merge_data,
+                                      merge_context.GetOperands(),
+                                      &merge_result, logger, statistics, env);
       pinnable_val->Reset();
       *pinnable_val->GetSelf() = std::move(merge_result);
       pinnable_val->PinSelf();