diff --git a/db/c.cc b/db/c.cc
index 87e08f806..a5d0133b4 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -1784,11 +1784,11 @@ rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base(
 }
 
 rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_iterator_t* base_iterator,
+    rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator,
     rocksdb_column_family_handle_t* column_family) {
   rocksdb_iterator_t* result = new rocksdb_iterator_t;
-  result->rep = wbwi->rep->NewIteratorWithBase(column_family->rep, base_iterator->rep);
+  result->rep =
+      wbwi->rep->NewIteratorWithBase(column_family->rep, base_iterator->rep);
   delete base_iterator;
   return result;
 }
diff --git a/db/c_test.c b/db/c_test.c
index 4f1c0da9f..39b8b1cfb 100644
--- a/db/c_test.c
+++ b/db/c_test.c
@@ -917,7 +917,8 @@ int main(int argc, char** argv) {
     rocksdb_writebatch_wi_t* wbi = rocksdb_writebatch_wi_create(0, 1);
     rocksdb_writebatch_wi_put(wbi, "bar", 3, "b", 1);
     rocksdb_writebatch_wi_delete(wbi, "foo", 3);
-    rocksdb_iterator_t* iter = rocksdb_writebatch_wi_create_iterator_with_base(wbi, base_iter);
+    rocksdb_iterator_t* iter =
+        rocksdb_writebatch_wi_create_iterator_with_base(wbi, base_iter);
     CheckCondition(!rocksdb_iter_valid(iter));
     rocksdb_iter_seek_to_first(iter);
     CheckCondition(rocksdb_iter_valid(iter));
@@ -1527,7 +1528,7 @@ int main(int argc, char** argv) {
     const rocksdb_snapshot_t* snapshot;
     snapshot = rocksdb_transactiondb_create_snapshot(txn_db);
     rocksdb_readoptions_set_snapshot(roptions, snapshot);
-    
+
     rocksdb_transactiondb_put(txn_db, woptions, "foo", 3, "hey", 3, &err);
     CheckNoError(err);
 
diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc
index 1d7cf78a0..bee5d62cf 100644
--- a/db/compact_files_test.cc
+++ b/db/compact_files_test.cc
@@ -357,6 +357,51 @@ TEST_F(CompactFilesTest, SentinelCompressionType) {
   }
 }
 
+TEST_F(CompactFilesTest, GetCompactionJobInfo) {
+  Options options;
+  options.create_if_missing = true;
+  // Disable RocksDB background compaction.
+  options.compaction_style = kCompactionStyleNone;
+  options.level0_slowdown_writes_trigger = 1000;
+  options.level0_stop_writes_trigger = 1000;
+  options.write_buffer_size = 65536;
+  options.max_write_buffer_number = 2;
+  options.compression = kNoCompression;
+  options.max_compaction_bytes = 5000;
+
+  // Add listener
+  FlushedFileCollector* collector = new FlushedFileCollector();
+  options.listeners.emplace_back(collector);
+
+  DB* db = nullptr;
+  DestroyDB(db_name_, options);
+  Status s = DB::Open(options, db_name_, &db);
+  assert(s.ok());
+  assert(db);
+
+  // create couple files
+  for (int i = 0; i < 500; ++i) {
+    db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
+  }
+  reinterpret_cast<DBImpl*>(db)->TEST_WaitForFlushMemTable();
+  auto l0_files_1 = collector->GetFlushedFiles();
+  CompactionOptions co;
+  co.compression = CompressionType::kLZ4Compression;
+  CompactionJobInfo compaction_job_info;
+  ASSERT_OK(
+      db->CompactFiles(co, l0_files_1, 0, -1, nullptr, &compaction_job_info));
+  ASSERT_EQ(compaction_job_info.base_input_level, 0);
+  ASSERT_EQ(compaction_job_info.cf_id, db->DefaultColumnFamily()->GetID());
+  ASSERT_EQ(compaction_job_info.cf_name, db->DefaultColumnFamily()->GetName());
+  ASSERT_EQ(compaction_job_info.compaction_reason,
+            CompactionReason::kManualCompaction);
+  ASSERT_EQ(compaction_job_info.compression, CompressionType::kLZ4Compression);
+  ASSERT_EQ(compaction_job_info.output_level, 0);
+  ASSERT_OK(compaction_job_info.status);
+  // no assertion failure
+  delete db;
+}
+
 }  // namespace rocksdb
 
 int main(int argc, char** argv) {
diff --git a/db/db_impl.h b/db/db_impl.h
index 1147b2929..d99f19b87 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -192,13 +192,13 @@ class DBImpl : public DB {
                               const Slice* begin, const Slice* end) override;
 
   using DB::CompactFiles;
-  virtual Status CompactFiles(const CompactionOptions& compact_options,
-                              ColumnFamilyHandle* column_family,
-                              const std::vector<std::string>& input_file_names,
-                              const int output_level,
-                              const int output_path_id = -1,
-                              std::vector<std::string>* const output_file_names
-                              = nullptr) override;
+  virtual Status CompactFiles(
+      const CompactionOptions& compact_options,
+      ColumnFamilyHandle* column_family,
+      const std::vector<std::string>& input_file_names, const int output_level,
+      const int output_path_id = -1,
+      std::vector<std::string>* const output_file_names = nullptr,
+      CompactionJobInfo* compaction_job_info = nullptr) override;
 
   virtual Status PauseBackgroundWork() override;
   virtual Status ContinueBackgroundWork() override;
@@ -1054,7 +1054,8 @@ class DBImpl : public DB {
                           const std::vector<std::string>& input_file_names,
                           std::vector<std::string>* const output_file_names,
                           const int output_level, int output_path_id,
-                          JobContext* job_context, LogBuffer* log_buffer);
+                          JobContext* job_context, LogBuffer* log_buffer,
+                          CompactionJobInfo* compaction_job_info);
 
   // Wait for current IngestExternalFile() calls to finish.
  // REQUIRES: mutex_ held
@@ -1572,6 +1573,13 @@ class DBImpl : public DB {
   bool ShouldntRunManualCompaction(ManualCompactionState* m);
   bool HaveManualCompaction(ColumnFamilyData* cfd);
   bool MCOverlap(ManualCompactionState* m, ManualCompactionState* m1);
+#ifndef ROCKSDB_LITE
+  void BuildCompactionJobInfo(const ColumnFamilyData* cfd, Compaction* c,
+                              const Status& st,
+                              const CompactionJobStats& compaction_job_stats,
+                              const int job_id, const Version* current,
+                              CompactionJobInfo* compaction_job_info) const;
+#endif
 
   bool ShouldPurge(uint64_t file_number) const;
   void MarkAsGrabbedForPurge(uint64_t file_number);
diff --git a/db/db_impl_compaction_flush.cc b/db/db_impl_compaction_flush.cc
index c2623161f..795e4164d 100644
--- a/db/db_impl_compaction_flush.cc
+++ b/db/db_impl_compaction_flush.cc
@@ -727,7 +727,8 @@ Status DBImpl::CompactFiles(const CompactionOptions& compact_options,
                             ColumnFamilyHandle* column_family,
                             const std::vector<std::string>& input_file_names,
                             const int output_level, const int output_path_id,
-                            std::vector<std::string>* const output_file_names) {
+                            std::vector<std::string>* const output_file_names,
+                            CompactionJobInfo* compaction_job_info) {
 #ifdef ROCKSDB_LITE
   (void)compact_options;
   (void)column_family;
@@ -735,6 +736,7 @@ Status DBImpl::CompactFiles(const CompactionOptions& compact_options,
   (void)output_level;
   (void)output_path_id;
   (void)output_file_names;
+  (void)compaction_job_info;
   // not supported in lite version
   return Status::NotSupported("Not supported in ROCKSDB LITE");
 #else
@@ -766,7 +768,7 @@ Status DBImpl::CompactFiles(const CompactionOptions& compact_options,
 
     s = CompactFilesImpl(compact_options, cfd, current, input_file_names,
                          output_file_names, output_level, output_path_id,
-                         &job_context, &log_buffer);
+                         &job_context, &log_buffer, compaction_job_info);
 
     current->Unref();
   }
@@ -806,7 +808,8 @@ Status DBImpl::CompactFilesImpl(
     const CompactionOptions& compact_options, ColumnFamilyData* cfd,
     Version* version, const std::vector<std::string>& input_file_names,
     std::vector<std::string>* const output_file_names, const int output_level,
-    int output_path_id, JobContext* job_context, LogBuffer* log_buffer) {
+    int output_path_id, JobContext* job_context, LogBuffer* log_buffer,
+    CompactionJobInfo* compaction_job_info) {
   mutex_.AssertHeld();
 
   if (shutting_down_.load(std::memory_order_acquire)) {
@@ -892,6 +895,7 @@ Status DBImpl::CompactFilesImpl(
     snapshot_checker = DisableGCSnapshotChecker::Instance();
   }
   assert(is_snapshot_supported_ || snapshots_.empty());
+  CompactionJobStats compaction_job_stats;
   CompactionJob compaction_job(
       job_context->job_id, c.get(), immutable_db_options_,
       env_options_for_compaction_, versions_.get(), &shutting_down_,
@@ -901,19 +905,7 @@ Status DBImpl::CompactFilesImpl(
       snapshot_checker, table_cache_, &event_logger_,
       c->mutable_cf_options()->paranoid_file_checks,
       c->mutable_cf_options()->report_bg_io_stats, dbname_,
-      nullptr);  // Here we pass a nullptr for CompactionJobStats because
-                 // CompactFiles does not trigger OnCompactionCompleted(),
-                 // which is the only place where CompactionJobStats is
-                 // returned.  The idea of not triggering OnCompationCompleted()
-                 // is that CompactFiles runs in the caller thread, so the user
-                 // should always know when it completes.  As a result, it makes
-                 // less sense to notify the users something they should already
-                 // know.
-                 //
-                 // In the future, if we would like to add CompactionJobStats
-                 // support for CompactFiles, we should have CompactFiles API
-                 // pass a pointer of CompactionJobStats as the out-value
-                 // instead of using EventListener.
+      &compaction_job_stats);
 
   // Creating a compaction influences the compaction score because the score
   // takes running compactions into account (by skipping files that are already
@@ -950,6 +942,11 @@ Status DBImpl::CompactFilesImpl(
 
   ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
 
+  if (compaction_job_info != nullptr) {
+    BuildCompactionJobInfo(cfd, c.get(), s, compaction_job_stats,
+                           job_context->job_id, version, compaction_job_info);
+  }
+
   if (status.ok()) {
     // Done
   } else if (status.IsShutdownInProgress()) {
@@ -1092,36 +1089,8 @@ void DBImpl::NotifyOnCompactionCompleted(
   TEST_SYNC_POINT("DBImpl::NotifyOnCompactionCompleted::UnlockMutex");
   {
     CompactionJobInfo info;
-    info.cf_id = cfd->GetID();
-    info.cf_name = cfd->GetName();
-    info.status = st;
-    info.thread_id = env_->GetThreadID();
-    info.job_id = job_id;
-    info.base_input_level = c->start_level();
-    info.output_level = c->output_level();
-    info.stats = compaction_job_stats;
-    info.table_properties = c->GetOutputTableProperties();
-    info.compaction_reason = c->compaction_reason();
-    info.compression = c->output_compression();
-    for (size_t i = 0; i < c->num_input_levels(); ++i) {
-      for (const auto fmd : *c->inputs(i)) {
-        auto fn = TableFileName(c->immutable_cf_options()->cf_paths,
-                                fmd->fd.GetNumber(), fmd->fd.GetPathId());
-        info.input_files.push_back(fn);
-        if (info.table_properties.count(fn) == 0) {
-          std::shared_ptr<const TableProperties> tp;
-          auto s = current->GetTableProperties(&tp, fmd, &fn);
-          if (s.ok()) {
-            info.table_properties[fn] = tp;
-          }
-        }
-      }
-    }
-    for (const auto newf : c->edit()->GetNewFiles()) {
-      info.output_files.push_back(TableFileName(
-          c->immutable_cf_options()->cf_paths, newf.second.fd.GetNumber(),
-          newf.second.fd.GetPathId()));
-    }
+    BuildCompactionJobInfo(cfd, c, st, compaction_job_stats, job_id, current,
+                           &info);
     for (auto listener : immutable_db_options_.listeners) {
       listener->OnCompactionCompleted(this, info);
     }
@@ -2762,6 +2731,45 @@ bool DBImpl::MCOverlap(ManualCompactionState* m, ManualCompactionState* m1) {
   return true;
 }
 
+#ifndef ROCKSDB_LITE
+void DBImpl::BuildCompactionJobInfo(
+    const ColumnFamilyData* cfd, Compaction* c, const Status& st,
+    const CompactionJobStats& compaction_job_stats, const int job_id,
+    const Version* current, CompactionJobInfo* compaction_job_info) const {
+  assert(compaction_job_info != nullptr);
+  compaction_job_info->cf_id = cfd->GetID();
+  compaction_job_info->cf_name = cfd->GetName();
+  compaction_job_info->status = st;
+  compaction_job_info->thread_id = env_->GetThreadID();
+  compaction_job_info->job_id = job_id;
+  compaction_job_info->base_input_level = c->start_level();
+  compaction_job_info->output_level = c->output_level();
+  compaction_job_info->stats = compaction_job_stats;
+  compaction_job_info->table_properties = c->GetOutputTableProperties();
+  compaction_job_info->compaction_reason = c->compaction_reason();
+  compaction_job_info->compression = c->output_compression();
+  for (size_t i = 0; i < c->num_input_levels(); ++i) {
+    for (const auto fmd : *c->inputs(i)) {
+      auto fn = TableFileName(c->immutable_cf_options()->cf_paths,
+                              fmd->fd.GetNumber(), fmd->fd.GetPathId());
+      compaction_job_info->input_files.push_back(fn);
+      if (compaction_job_info->table_properties.count(fn) == 0) {
+        std::shared_ptr<const TableProperties> tp;
+        auto s = current->GetTableProperties(&tp, fmd, &fn);
+        if (s.ok()) {
+          compaction_job_info->table_properties[fn] = tp;
+        }
+      }
+    }
+  }
+  for (const auto& newf : c->edit()->GetNewFiles()) {
+    compaction_job_info->output_files.push_back(
+        TableFileName(c->immutable_cf_options()->cf_paths,
+                      newf.second.fd.GetNumber(), newf.second.fd.GetPathId()));
+  }
+}
+#endif
+
 // SuperVersionContext gets created and destructed outside of the lock --
 // we use this conveniently to:
 // * malloc one SuperVersion() outside of the lock -- new_superversion
diff --git a/db/db_impl_readonly.h b/db/db_impl_readonly.h
index 2d77dbac0..d96b58a3e 100644
--- a/db/db_impl_readonly.h
+++ b/db/db_impl_readonly.h
@@ -77,8 +77,8 @@ class DBImplReadOnly : public DBImpl {
       ColumnFamilyHandle* /*column_family*/,
       const std::vector<std::string>& /*input_file_names*/,
       const int /*output_level*/, const int /*output_path_id*/ = -1,
-      std::vector<std::string>* const /*output_file_names*/ = nullptr
-      ) override {
+      std::vector<std::string>* const /*output_file_names*/ = nullptr,
+      CompactionJobInfo* /*compaction_job_info*/ = nullptr) override {
     return Status::NotSupported("Not supported operation in read only mode.");
   }
 
diff --git a/db/db_test.cc b/db/db_test.cc
index 5a9c39469..81a7fc9da 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2606,8 +2606,8 @@ class ModelDB : public DB {
       ColumnFamilyHandle* /*column_family*/,
       const std::vector<std::string>& /*input_file_names*/,
       const int /*output_level*/, const int /*output_path_id*/ = -1,
-      std::vector<std::string>* const /*output_file_names*/ = nullptr
-      ) override {
+      std::vector<std::string>* const /*output_file_names*/ = nullptr,
+      CompactionJobInfo* /*compaction_job_info*/ = nullptr) override {
     return Status::NotSupported("Not supported operation.");
   }
 
diff --git a/include/rocksdb/c.h b/include/rocksdb/c.h
index cf46054aa..c0cd83cd4 100644
--- a/include/rocksdb/c.h
+++ b/include/rocksdb/c.h
@@ -637,7 +637,6 @@ extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_writebatch_wi_create_iter
     rocksdb_iterator_t* base_iterator,
     rocksdb_column_family_handle_t* cf);
 
-
 /* Block based table options */
 
diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h
index 6a37084c5..53fb52c94 100644
--- a/include/rocksdb/db.h
+++ b/include/rocksdb/db.h
@@ -53,6 +53,9 @@ class WriteBatch;
 class Env;
 class EventListener;
 class TraceWriter;
+#ifdef ROCKSDB_LITE
+class CompactionJobInfo;
+#endif
 
 using std::unique_ptr;
 
@@ -834,18 +837,20 @@ class DB {
   virtual Status CompactFiles(
       const CompactionOptions& compact_options,
       ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1,
-      std::vector<std::string>* const output_file_names = nullptr) = 0;
+      const std::vector<std::string>& input_file_names, const int output_level,
+      const int output_path_id = -1,
+      std::vector<std::string>* const output_file_names = nullptr,
+      CompactionJobInfo* compaction_job_info = nullptr) = 0;
 
   virtual Status CompactFiles(
       const CompactionOptions& compact_options,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1,
-      std::vector<std::string>* const output_file_names = nullptr) {
+      const std::vector<std::string>& input_file_names, const int output_level,
+      const int output_path_id = -1,
+      std::vector<std::string>* const output_file_names = nullptr,
+      CompactionJobInfo* compaction_job_info = nullptr) {
     return CompactFiles(compact_options, DefaultColumnFamily(),
                         input_file_names, output_level, output_path_id,
-                        output_file_names);
+                        output_file_names, compaction_job_info);
   }
 
   // This function will wait until all currently running background processes
diff --git a/include/rocksdb/sst_file_reader.h b/include/rocksdb/sst_file_reader.h
index e58c84792..517907dd5 100644
--- a/include/rocksdb/sst_file_reader.h
+++ b/include/rocksdb/sst_file_reader.h
@@ -7,9 +7,9 @@
 
 #ifndef ROCKSDB_LITE
 
-#include "rocksdb/slice.h"
-#include "rocksdb/options.h"
 #include "rocksdb/iterator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/slice.h"
 #include "rocksdb/table_properties.h"
 
 namespace rocksdb {
diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h
index a99c8bf6e..e3407dada 100644
--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -454,8 +454,8 @@ class TableFactory {
   //     and cache the table object returned.
   // (2) SstFileDumper (for SST Dump) opens the table and dump the table
   //     contents using the iterator of the table.
-  // (3) DBImpl::IngestExternalFile() calls this function to read the contents of
-  //     the sst file it's attempting to add
+  // (3) DBImpl::IngestExternalFile() calls this function to read the contents
+  //     of the sst file it's attempting to add
   //
   // table_reader_options is a TableReaderOptions which contain all the
   // needed parameters and configuration to open the table.
diff --git a/include/rocksdb/utilities/stackable_db.h b/include/rocksdb/utilities/stackable_db.h
index eae3a85ea..1242befed 100644
--- a/include/rocksdb/utilities/stackable_db.h
+++ b/include/rocksdb/utilities/stackable_db.h
@@ -218,12 +218,13 @@ class StackableDB : public DB {
   virtual Status CompactFiles(
       const CompactionOptions& compact_options,
       ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1,
-      std::vector<std::string>* const output_file_names = nullptr) override {
-    return db_->CompactFiles(
-        compact_options, column_family, input_file_names,
-        output_level, output_path_id, output_file_names);
+      const std::vector<std::string>& input_file_names, const int output_level,
+      const int output_path_id = -1,
+      std::vector<std::string>* const output_file_names = nullptr,
+      CompactionJobInfo* compaction_job_info = nullptr) override {
+    return db_->CompactFiles(compact_options, column_family, input_file_names,
+                             output_level, output_path_id, output_file_names,
+                             compaction_job_info);
   }
 
   virtual Status PauseBackgroundWork() override {
diff --git a/java/rocksjni/statisticsjni.cc b/java/rocksjni/statisticsjni.cc
index 8fddc437a..f59ace4df 100644
--- a/java/rocksjni/statisticsjni.cc
+++ b/java/rocksjni/statisticsjni.cc
@@ -10,25 +10,23 @@
 
 namespace rocksdb {
 
-  StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats)
-      : StatisticsImpl(stats), m_ignore_histograms() {
+StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats)
+    : StatisticsImpl(stats), m_ignore_histograms() {}
+
+StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats,
+                             const std::set<uint32_t> ignore_histograms)
+    : StatisticsImpl(stats), m_ignore_histograms(ignore_histograms) {}
+
+bool StatisticsJni::HistEnabledForType(uint32_t type) const {
+  if (type >= HISTOGRAM_ENUM_MAX) {
+    return false;
   }
-  StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats,
-      const std::set<uint32_t> ignore_histograms) : StatisticsImpl(stats),
-      m_ignore_histograms(ignore_histograms) {
+  if (m_ignore_histograms.count(type) > 0) {
+    return false;
   }
-  bool StatisticsJni::HistEnabledForType(uint32_t type) const {
-    if (type >= HISTOGRAM_ENUM_MAX) {
-      return false;
-    }
-
-    if (m_ignore_histograms.count(type) > 0) {
-      return false;
-    }
-
-    return true;
-  }
+  return true;
+}
 // @lint-ignore TXT4 T25377293 Grandfathered in
 };
\ No newline at end of file
diff --git a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
index 2c0350837..2ad91042d 100644
--- a/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
+++ b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
@@ -129,12 +129,10 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
   public RocksIterator newIteratorWithBase(
       final ColumnFamilyHandle columnFamilyHandle,
       final RocksIterator baseIterator) {
-    RocksIterator iterator = new RocksIterator(
-        baseIterator.parent_,
-        iteratorWithBase(nativeHandle_,
-            columnFamilyHandle.nativeHandle_,
-            baseIterator.nativeHandle_));
-    //when the iterator is deleted it will also delete the baseIterator
+    RocksIterator iterator = new RocksIterator(baseIterator.parent_,
+        iteratorWithBase(
+            nativeHandle_, columnFamilyHandle.nativeHandle_, baseIterator.nativeHandle_));
+    // when the iterator is deleted it will also delete the baseIterator
     baseIterator.disOwnNativeHandle();
     return iterator;
   }
@@ -151,8 +149,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
    * point-in-time from baseIterator and modifications made in this write batch.
    */
   public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) {
-    return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(),
-        baseIterator);
+    return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator);
   }
 
   /**
@@ -295,8 +292,8 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
       final boolean overwriteKey);
   private native long iterator0(final long handle);
   private native long iterator1(final long handle, final long cfHandle);
-  private native long iteratorWithBase(final long handle,
-      final long baseIteratorHandle, final long cfHandle);
+  private native long iteratorWithBase(
+      final long handle, final long baseIteratorHandle, final long cfHandle);
   private native byte[] getFromBatch(final long handle, final long optHandle,
       final byte[] key, final int keyLen);
   private native byte[] getFromBatch(final long handle, final long optHandle,
diff --git a/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java b/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
index 061af2b8f..3a872c412 100644
--- a/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
+++ b/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
@@ -47,7 +47,6 @@ public class WriteBatchWithIndexTest {
     try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
          final RocksIterator base = db.newIterator();
          final RocksIterator it = wbwi.newIteratorWithBase(base)) {
-
       it.seek(k1);
       assertThat(it.isValid()).isTrue();
       assertThat(it.key()).isEqualTo(k1);
@@ -421,8 +420,8 @@ public class WriteBatchWithIndexTest {
       final ReadOptions readOptions, final WriteBatchWithIndex wbwi,
       final String skey) {
     final byte[] key = skey.getBytes();
-    try(final RocksIterator baseIterator = db.newIterator(readOptions);
-        final RocksIterator iterator = wbwi.newIteratorWithBase(baseIterator)) {
+    try (final RocksIterator baseIterator = db.newIterator(readOptions);
+         final RocksIterator iterator = wbwi.newIteratorWithBase(baseIterator)) {
       iterator.seek(key);
 
       // Arrays.equals(key, iterator.key()) ensures an exact match in Rocks,
diff --git a/monitoring/statistics.cc b/monitoring/statistics.cc
index cba427ae4..285287d20 100644
--- a/monitoring/statistics.cc
+++ b/monitoring/statistics.cc
@@ -357,11 +357,12 @@ std::string StatisticsImpl::ToString() const {
       getHistogramImplLocked(h.first)->Data(&hData);
       // don't handle failures - buffer should always be big enough and arguments
       // should be provided correctly
-      int ret = snprintf(
-          buffer, kTmpStrBufferSize,
"%s P50 : %f P95 : %f P99 : %f P100 : %f COUNT : %" PRIu64 " SUM : %" - PRIu64 "\n", h.second.c_str(), hData.median, hData.percentile95, - hData.percentile99, hData.max, hData.count, hData.sum); + int ret = + snprintf(buffer, kTmpStrBufferSize, + "%s P50 : %f P95 : %f P99 : %f P100 : %f COUNT : %" PRIu64 + " SUM : %" PRIu64 "\n", + h.second.c_str(), hData.median, hData.percentile95, + hData.percentile99, hData.max, hData.count, hData.sum); if (ret < 0 || ret >= kTmpStrBufferSize) { assert(false); continue; diff --git a/table/sst_file_reader.cc b/table/sst_file_reader.cc index a915449be..e8b7173e7 100644 --- a/table/sst_file_reader.cc +++ b/table/sst_file_reader.cc @@ -10,8 +10,8 @@ #include "db/db_iter.h" #include "options/cf_options.h" #include "table/get_context.h" -#include "table/table_reader.h" #include "table/table_builder.h" +#include "table/table_reader.h" #include "util/file_reader_writer.h" namespace rocksdb { @@ -31,8 +31,7 @@ struct SstFileReader::Rep { moptions(ColumnFamilyOptions(options)) {} }; -SstFileReader::SstFileReader(const Options& options) - : rep_(new Rep(options)) {} +SstFileReader::SstFileReader(const Options& options) : rep_(new Rep(options)) {} SstFileReader::~SstFileReader() {} @@ -60,18 +59,19 @@ Status SstFileReader::Open(const std::string& file_path) { Iterator* SstFileReader::NewIterator(const ReadOptions& options) { auto r = rep_.get(); - auto sequence = options.snapshot != nullptr ? - options.snapshot->GetSequenceNumber() : - kMaxSequenceNumber; - auto internal_iter = r->table_reader->NewIterator( - options, r->moptions.prefix_extractor.get()); + auto sequence = options.snapshot != nullptr + ? options.snapshot->GetSequenceNumber() + : kMaxSequenceNumber; + auto internal_iter = + r->table_reader->NewIterator(options, r->moptions.prefix_extractor.get()); return NewDBIterator(r->options.env, options, r->ioptions, r->moptions, r->ioptions.user_comparator, internal_iter, sequence, r->moptions.max_sequential_skip_in_iterations, nullptr /* read_callback */); } -std::shared_ptr SstFileReader::GetTableProperties() const { +std::shared_ptr SstFileReader::GetTableProperties() + const { return rep_->table_reader->GetTableProperties(); } diff --git a/table/sst_file_reader_test.cc b/table/sst_file_reader_test.cc index 8da366fd7..3a95584fc 100644 --- a/table/sst_file_reader_test.cc +++ b/table/sst_file_reader_test.cc @@ -39,8 +39,8 @@ class SstFileReaderTest : public testing::Test { ASSERT_OK(writer.Open(sst_name_)); for (size_t i = 0; i + 2 < keys.size(); i += 3) { ASSERT_OK(writer.Put(keys[i], keys[i])); - ASSERT_OK(writer.Merge(keys[i+1], EncodeAsUint64(i+1))); - ASSERT_OK(writer.Delete(keys[i+2])); + ASSERT_OK(writer.Merge(keys[i + 1], EncodeAsUint64(i + 1))); + ASSERT_OK(writer.Delete(keys[i + 2])); } ASSERT_OK(writer.Finish()); @@ -56,8 +56,8 @@ class SstFileReaderTest : public testing::Test { ASSERT_EQ(iter->value().compare(keys[i]), 0); iter->Next(); ASSERT_TRUE(iter->Valid()); - ASSERT_EQ(iter->key().compare(keys[i+1]), 0); - ASSERT_EQ(iter->value().compare(EncodeAsUint64(i+1)), 0); + ASSERT_EQ(iter->key().compare(keys[i + 1]), 0); + ASSERT_EQ(iter->value().compare(EncodeAsUint64(i + 1)), 0); iter->Next(); } ASSERT_FALSE(iter->Valid()); @@ -99,7 +99,8 @@ int main(int argc, char** argv) { #include int main(int /*argc*/, char** /*argv*/) { - fprintf(stderr, "SKIPPED as SstFileReader is not supported in ROCKSDB_LITE\n"); + fprintf(stderr, + "SKIPPED as SstFileReader is not supported in ROCKSDB_LITE\n"); return 0; } diff --git a/tools/db_bench_tool.cc 
index 75f1d449d..3bd7f92da 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -4580,8 +4580,8 @@ void VerifyDBFromDB(std::string& truth_db_name) {
         if (FLAGS_max_scan_distance != 0) {
           if (FLAGS_reverse_iterator) {
             GenerateKeyFromInt(
-                (uint64_t)std::max((int64_t)0,
-                                   seek_pos - FLAGS_max_scan_distance),
+                static_cast<uint64_t>(std::max(
+                    static_cast<int64_t>(0), seek_pos - FLAGS_max_scan_distance)),
                 FLAGS_num, &lower_bound);
             options.iterate_lower_bound = &lower_bound;
           } else {
diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc
index 997718ef2..a99afac2e 100644
--- a/tools/ldb_cmd.cc
+++ b/tools/ldb_cmd.cc
@@ -2846,7 +2846,8 @@ void DumpSstFile(std::string filename, bool output_hex, bool show_properties) {
   }
   // no verification
   rocksdb::SstFileDumper dumper(filename, false, output_hex);
-  Status st = dumper.ReadSequential(true, std::numeric_limits<uint64_t>::max(), false,  // has_from
+  Status st = dumper.ReadSequential(true, std::numeric_limits<uint64_t>::max(),
+                                    false,            // has_from
                                     from_key, false,  // has_to
                                     to_key);
   if (!st.ok()) {
diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc
index 25699777e..6da90d388 100644
--- a/tools/sst_dump_tool.cc
+++ b/tools/sst_dump_tool.cc
@@ -570,8 +570,7 @@ int SSTDumpTool::Run(int argc, char** argv) {
         filename = std::string(dir_or_file) + "/" + filename;
       }
 
-      rocksdb::SstFileDumper dumper(filename, verify_checksum,
-                                    output_hex);
+      rocksdb::SstFileDumper dumper(filename, verify_checksum, output_hex);
       if (!dumper.getStatus().ok()) {
         fprintf(stderr, "%s: %s\n", filename.c_str(),
                 dumper.getStatus().ToString().c_str());
diff --git a/utilities/transactions/transaction_base.cc b/utilities/transactions/transaction_base.cc
index 23cc41be1..2b0519cf8 100644
--- a/utilities/transactions/transaction_base.cc
+++ b/utilities/transactions/transaction_base.cc
@@ -180,7 +180,7 @@ Status TransactionBaseImpl::RollbackToSavePoint() {
     return Status::NotFound();
   }
 }
-
+
 Status TransactionBaseImpl::PopSavePoint() {
   if (save_points_ == nullptr ||
       save_points_->empty()) {
@@ -189,7 +189,7 @@ Status TransactionBaseImpl::PopSavePoint() {
     return Status::NotFound();
   }
 
-  assert(!save_points_->empty());
+  assert(!save_points_->empty());
   save_points_->pop();
   return write_batch_.PopSavePoint();
 }
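---
Usage note (not part of the patch): the sketch below shows how a caller might consume
the new CompactionJobInfo out-parameter added to DB::CompactFiles, mirroring the
GetCompactionJobInfo test above. The database path and the empty input-file list are
placeholders for illustration only; a real caller would obtain input files from
GetColumnFamilyMetaData() or a flush listener, as the test does.

#include <iostream>
#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/listener.h"  // defines CompactionJobInfo (non-LITE builds)

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::DB* db = nullptr;
  // "/tmp/compact_files_demo" is a placeholder path.
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/compact_files_demo", &db);
  if (!s.ok()) {
    return 1;
  }

  // Placeholder: fill with SST file names for the default column family.
  std::vector<std::string> input_files;
  rocksdb::CompactionOptions co;
  rocksdb::CompactionJobInfo job_info;
  // New signature: output_file_names may stay nullptr while still
  // receiving per-job details through the trailing out-parameter.
  s = db->CompactFiles(co, input_files, /*output_level=*/0,
                       /*output_path_id=*/-1, /*output_file_names=*/nullptr,
                       &job_info);
  if (s.ok()) {
    std::cout << "job " << job_info.job_id << " wrote "
              << job_info.output_files.size() << " files to level "
              << job_info.output_level << std::endl;
  }
  delete db;
  return 0;
}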