Get CompactionJobInfo from CompactFiles
Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/4716

Differential Revision: D13207677

Pulled By: ajkr

fbshipit-source-id: d0ccf5a66df6cbb07288b0c5ebad81fd9df3926b
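The headline change threaded through the diff below is that DB::CompactFiles() gains an optional CompactionJobInfo* out-parameter, so a caller of a manual compaction can read back what the compaction did without registering an EventListener. A minimal usage sketch follows; the database path and the way input files are gathered here are illustrative assumptions (the new test in this patch collects flushed files through a listener instead):

#include <cassert>
#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/listener.h"  // CompactionJobInfo
#include "rocksdb/metadata.h"  // ColumnFamilyMetaData

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  rocksdb::DB* db = nullptr;
  // Hypothetical path, for illustration only.
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/compact_files_demo", &db);
  assert(s.ok());

  // ... write some data and flush so there are SST files to compact ...

  // Collect the live L0 files of the default column family (illustrative;
  // any list of SST file names accepted by CompactFiles would do).
  rocksdb::ColumnFamilyMetaData cf_meta;
  db->GetColumnFamilyMetaData(&cf_meta);
  std::vector<std::string> input_files;
  for (const auto& file : cf_meta.levels[0].files) {
    input_files.push_back(file.name);
  }

  rocksdb::CompactionOptions co;
  co.compression = rocksdb::kNoCompression;

  // New in this change: pass a CompactionJobInfo* as the last argument.
  rocksdb::CompactionJobInfo job_info;
  s = db->CompactFiles(co, input_files, 0 /* output_level */,
                       -1 /* output_path_id */,
                       nullptr /* output_file_names */, &job_info);
  if (s.ok()) {
    // job_info now describes the compaction: column family, job id,
    // input/output files, levels, compression, status, and stats.
  }

  delete db;
  return 0;
}

On success the out-parameter carries the same fields an OnCompactionCompleted() listener would have seen, since the diff extracts that field-filling logic into the shared helper DBImpl::BuildCompactionJobInfo().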
This commit is contained in:
parent a8b9891f95
commit 2670fe8c73

db/c.cc
@@ -1784,11 +1784,11 @@ rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base(
 }
 
 rocksdb_iterator_t* rocksdb_writebatch_wi_create_iterator_with_base_cf(
-    rocksdb_writebatch_wi_t* wbwi,
-    rocksdb_iterator_t* base_iterator,
+    rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator,
     rocksdb_column_family_handle_t* column_family) {
   rocksdb_iterator_t* result = new rocksdb_iterator_t;
-  result->rep = wbwi->rep->NewIteratorWithBase(column_family->rep, base_iterator->rep);
+  result->rep =
+      wbwi->rep->NewIteratorWithBase(column_family->rep, base_iterator->rep);
   delete base_iterator;
   return result;
 }
@@ -917,7 +917,8 @@ int main(int argc, char** argv) {
   rocksdb_writebatch_wi_t* wbi = rocksdb_writebatch_wi_create(0, 1);
   rocksdb_writebatch_wi_put(wbi, "bar", 3, "b", 1);
   rocksdb_writebatch_wi_delete(wbi, "foo", 3);
-  rocksdb_iterator_t* iter = rocksdb_writebatch_wi_create_iterator_with_base(wbi, base_iter);
+  rocksdb_iterator_t* iter =
+      rocksdb_writebatch_wi_create_iterator_with_base(wbi, base_iter);
   CheckCondition(!rocksdb_iter_valid(iter));
   rocksdb_iter_seek_to_first(iter);
   CheckCondition(rocksdb_iter_valid(iter));
@@ -1527,7 +1528,7 @@ int main(int argc, char** argv) {
   const rocksdb_snapshot_t* snapshot;
   snapshot = rocksdb_transactiondb_create_snapshot(txn_db);
   rocksdb_readoptions_set_snapshot(roptions, snapshot);
 
   rocksdb_transactiondb_put(txn_db, woptions, "foo", 3, "hey", 3, &err);
   CheckNoError(err);
 
@@ -357,6 +357,51 @@ TEST_F(CompactFilesTest, SentinelCompressionType) {
   }
 }
 
+TEST_F(CompactFilesTest, GetCompactionJobInfo) {
+  Options options;
+  options.create_if_missing = true;
+  // Disable RocksDB background compaction.
+  options.compaction_style = kCompactionStyleNone;
+  options.level0_slowdown_writes_trigger = 1000;
+  options.level0_stop_writes_trigger = 1000;
+  options.write_buffer_size = 65536;
+  options.max_write_buffer_number = 2;
+  options.compression = kNoCompression;
+  options.max_compaction_bytes = 5000;
+
+  // Add listener
+  FlushedFileCollector* collector = new FlushedFileCollector();
+  options.listeners.emplace_back(collector);
+
+  DB* db = nullptr;
+  DestroyDB(db_name_, options);
+  Status s = DB::Open(options, db_name_, &db);
+  assert(s.ok());
+  assert(db);
+
+  // create couple files
+  for (int i = 0; i < 500; ++i) {
+    db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
+  }
+  reinterpret_cast<DBImpl*>(db)->TEST_WaitForFlushMemTable();
+  auto l0_files_1 = collector->GetFlushedFiles();
+  CompactionOptions co;
+  co.compression = CompressionType::kLZ4Compression;
+  CompactionJobInfo compaction_job_info;
+  ASSERT_OK(
+      db->CompactFiles(co, l0_files_1, 0, -1, nullptr, &compaction_job_info));
+  ASSERT_EQ(compaction_job_info.base_input_level, 0);
+  ASSERT_EQ(compaction_job_info.cf_id, db->DefaultColumnFamily()->GetID());
+  ASSERT_EQ(compaction_job_info.cf_name, db->DefaultColumnFamily()->GetName());
+  ASSERT_EQ(compaction_job_info.compaction_reason,
+            CompactionReason::kManualCompaction);
+  ASSERT_EQ(compaction_job_info.compression, CompressionType::kLZ4Compression);
+  ASSERT_EQ(compaction_job_info.output_level, 0);
+  ASSERT_OK(compaction_job_info.status);
+  // no assertion failure
+  delete db;
+}
+
 }  // namespace rocksdb
 
 int main(int argc, char** argv) {
db/db_impl.h
@@ -192,13 +192,13 @@ class DBImpl : public DB {
                               const Slice* begin, const Slice* end) override;
 
   using DB::CompactFiles;
-  virtual Status CompactFiles(const CompactionOptions& compact_options,
-                              ColumnFamilyHandle* column_family,
-                              const std::vector<std::string>& input_file_names,
-                              const int output_level,
-                              const int output_path_id = -1,
-                              std::vector<std::string>* const output_file_names
-                                  = nullptr) override;
+  virtual Status CompactFiles(
+      const CompactionOptions& compact_options,
+      ColumnFamilyHandle* column_family,
+      const std::vector<std::string>& input_file_names, const int output_level,
+      const int output_path_id = -1,
+      std::vector<std::string>* const output_file_names = nullptr,
+      CompactionJobInfo* compaction_job_info = nullptr) override;
 
   virtual Status PauseBackgroundWork() override;
   virtual Status ContinueBackgroundWork() override;
@@ -1054,7 +1054,8 @@ class DBImpl : public DB {
                           const std::vector<std::string>& input_file_names,
                           std::vector<std::string>* const output_file_names,
                           const int output_level, int output_path_id,
-                          JobContext* job_context, LogBuffer* log_buffer);
+                          JobContext* job_context, LogBuffer* log_buffer,
+                          CompactionJobInfo* compaction_job_info);
 
   // Wait for current IngestExternalFile() calls to finish.
   // REQUIRES: mutex_ held
@@ -1572,6 +1573,13 @@ class DBImpl : public DB {
   bool ShouldntRunManualCompaction(ManualCompactionState* m);
   bool HaveManualCompaction(ColumnFamilyData* cfd);
   bool MCOverlap(ManualCompactionState* m, ManualCompactionState* m1);
+#ifndef ROCKSDB_LITE
+  void BuildCompactionJobInfo(const ColumnFamilyData* cfd, Compaction* c,
+                              const Status& st,
+                              const CompactionJobStats& compaction_job_stats,
+                              const int job_id, const Version* current,
+                              CompactionJobInfo* compaction_job_info) const;
+#endif
 
   bool ShouldPurge(uint64_t file_number) const;
   void MarkAsGrabbedForPurge(uint64_t file_number);
@@ -727,7 +727,8 @@ Status DBImpl::CompactFiles(const CompactionOptions& compact_options,
                             ColumnFamilyHandle* column_family,
                             const std::vector<std::string>& input_file_names,
                             const int output_level, const int output_path_id,
-                            std::vector<std::string>* const output_file_names) {
+                            std::vector<std::string>* const output_file_names,
+                            CompactionJobInfo* compaction_job_info) {
 #ifdef ROCKSDB_LITE
   (void)compact_options;
   (void)column_family;
@@ -735,6 +736,7 @@ Status DBImpl::CompactFiles(const CompactionOptions& compact_options,
   (void)output_level;
   (void)output_path_id;
   (void)output_file_names;
+  (void)compaction_job_info;
   // not supported in lite version
   return Status::NotSupported("Not supported in ROCKSDB LITE");
 #else
@@ -766,7 +768,7 @@ Status DBImpl::CompactFiles(const CompactionOptions& compact_options,
 
     s = CompactFilesImpl(compact_options, cfd, current, input_file_names,
                          output_file_names, output_level, output_path_id,
-                         &job_context, &log_buffer);
+                         &job_context, &log_buffer, compaction_job_info);
 
     current->Unref();
   }
@@ -806,7 +808,8 @@ Status DBImpl::CompactFilesImpl(
     const CompactionOptions& compact_options, ColumnFamilyData* cfd,
     Version* version, const std::vector<std::string>& input_file_names,
     std::vector<std::string>* const output_file_names, const int output_level,
-    int output_path_id, JobContext* job_context, LogBuffer* log_buffer) {
+    int output_path_id, JobContext* job_context, LogBuffer* log_buffer,
+    CompactionJobInfo* compaction_job_info) {
   mutex_.AssertHeld();
 
   if (shutting_down_.load(std::memory_order_acquire)) {
@@ -892,6 +895,7 @@ Status DBImpl::CompactFilesImpl(
     snapshot_checker = DisableGCSnapshotChecker::Instance();
   }
   assert(is_snapshot_supported_ || snapshots_.empty());
+  CompactionJobStats compaction_job_stats;
   CompactionJob compaction_job(
       job_context->job_id, c.get(), immutable_db_options_,
       env_options_for_compaction_, versions_.get(), &shutting_down_,
@@ -901,19 +905,7 @@ Status DBImpl::CompactFilesImpl(
       snapshot_checker, table_cache_, &event_logger_,
       c->mutable_cf_options()->paranoid_file_checks,
       c->mutable_cf_options()->report_bg_io_stats, dbname_,
-      nullptr);  // Here we pass a nullptr for CompactionJobStats because
-                 // CompactFiles does not trigger OnCompactionCompleted(),
-                 // which is the only place where CompactionJobStats is
-                 // returned. The idea of not triggering OnCompationCompleted()
-                 // is that CompactFiles runs in the caller thread, so the user
-                 // should always know when it completes. As a result, it makes
-                 // less sense to notify the users something they should already
-                 // know.
-                 //
-                 // In the future, if we would like to add CompactionJobStats
-                 // support for CompactFiles, we should have CompactFiles API
-                 // pass a pointer of CompactionJobStats as the out-value
-                 // instead of using EventListener.
+      &compaction_job_stats);
 
   // Creating a compaction influences the compaction score because the score
   // takes running compactions into account (by skipping files that are already
@@ -950,6 +942,11 @@ Status DBImpl::CompactFilesImpl(
 
   ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
 
+  if (compaction_job_info != nullptr) {
+    BuildCompactionJobInfo(cfd, c.get(), s, compaction_job_stats,
+                           job_context->job_id, version, compaction_job_info);
+  }
+
   if (status.ok()) {
     // Done
   } else if (status.IsShutdownInProgress()) {
@@ -1092,36 +1089,8 @@ void DBImpl::NotifyOnCompactionCompleted(
   TEST_SYNC_POINT("DBImpl::NotifyOnCompactionCompleted::UnlockMutex");
   {
     CompactionJobInfo info;
-    info.cf_id = cfd->GetID();
-    info.cf_name = cfd->GetName();
-    info.status = st;
-    info.thread_id = env_->GetThreadID();
-    info.job_id = job_id;
-    info.base_input_level = c->start_level();
-    info.output_level = c->output_level();
-    info.stats = compaction_job_stats;
-    info.table_properties = c->GetOutputTableProperties();
-    info.compaction_reason = c->compaction_reason();
-    info.compression = c->output_compression();
-    for (size_t i = 0; i < c->num_input_levels(); ++i) {
-      for (const auto fmd : *c->inputs(i)) {
-        auto fn = TableFileName(c->immutable_cf_options()->cf_paths,
-                                fmd->fd.GetNumber(), fmd->fd.GetPathId());
-        info.input_files.push_back(fn);
-        if (info.table_properties.count(fn) == 0) {
-          std::shared_ptr<const TableProperties> tp;
-          auto s = current->GetTableProperties(&tp, fmd, &fn);
-          if (s.ok()) {
-            info.table_properties[fn] = tp;
-          }
-        }
-      }
-    }
-    for (const auto newf : c->edit()->GetNewFiles()) {
-      info.output_files.push_back(TableFileName(
-          c->immutable_cf_options()->cf_paths, newf.second.fd.GetNumber(),
-          newf.second.fd.GetPathId()));
-    }
+    BuildCompactionJobInfo(cfd, c, st, compaction_job_stats, job_id, current,
+                           &info);
     for (auto listener : immutable_db_options_.listeners) {
       listener->OnCompactionCompleted(this, info);
     }
@@ -2762,6 +2731,45 @@ bool DBImpl::MCOverlap(ManualCompactionState* m, ManualCompactionState* m1) {
   return true;
 }
 
+#ifndef ROCKSDB_LITE
+void DBImpl::BuildCompactionJobInfo(
+    const ColumnFamilyData* cfd, Compaction* c, const Status& st,
+    const CompactionJobStats& compaction_job_stats, const int job_id,
+    const Version* current, CompactionJobInfo* compaction_job_info) const {
+  assert(compaction_job_info != nullptr);
+  compaction_job_info->cf_id = cfd->GetID();
+  compaction_job_info->cf_name = cfd->GetName();
+  compaction_job_info->status = st;
+  compaction_job_info->thread_id = env_->GetThreadID();
+  compaction_job_info->job_id = job_id;
+  compaction_job_info->base_input_level = c->start_level();
+  compaction_job_info->output_level = c->output_level();
+  compaction_job_info->stats = compaction_job_stats;
+  compaction_job_info->table_properties = c->GetOutputTableProperties();
+  compaction_job_info->compaction_reason = c->compaction_reason();
+  compaction_job_info->compression = c->output_compression();
+  for (size_t i = 0; i < c->num_input_levels(); ++i) {
+    for (const auto fmd : *c->inputs(i)) {
+      auto fn = TableFileName(c->immutable_cf_options()->cf_paths,
+                              fmd->fd.GetNumber(), fmd->fd.GetPathId());
+      compaction_job_info->input_files.push_back(fn);
+      if (compaction_job_info->table_properties.count(fn) == 0) {
+        shared_ptr<const TableProperties> tp;
+        auto s = current->GetTableProperties(&tp, fmd, &fn);
+        if (s.ok()) {
+          compaction_job_info->table_properties[fn] = tp;
+        }
+      }
+    }
+  }
+  for (const auto& newf : c->edit()->GetNewFiles()) {
+    compaction_job_info->output_files.push_back(
+        TableFileName(c->immutable_cf_options()->cf_paths,
+                      newf.second.fd.GetNumber(), newf.second.fd.GetPathId()));
+  }
+}
+#endif
+
 // SuperVersionContext gets created and destructed outside of the lock --
 // we use this conveniently to:
 // * malloc one SuperVersion() outside of the lock -- new_superversion
@@ -77,8 +77,8 @@ class DBImplReadOnly : public DBImpl {
       ColumnFamilyHandle* /*column_family*/,
       const std::vector<std::string>& /*input_file_names*/,
       const int /*output_level*/, const int /*output_path_id*/ = -1,
-      std::vector<std::string>* const /*output_file_names*/ = nullptr
-      ) override {
+      std::vector<std::string>* const /*output_file_names*/ = nullptr,
+      CompactionJobInfo* /*compaction_job_info*/ = nullptr) override {
     return Status::NotSupported("Not supported operation in read only mode.");
   }
 
@@ -2606,8 +2606,8 @@ class ModelDB : public DB {
       ColumnFamilyHandle* /*column_family*/,
       const std::vector<std::string>& /*input_file_names*/,
       const int /*output_level*/, const int /*output_path_id*/ = -1,
-      std::vector<std::string>* const /*output_file_names*/ = nullptr
-      ) override {
+      std::vector<std::string>* const /*output_file_names*/ = nullptr,
+      CompactionJobInfo* /*compaction_job_info*/ = nullptr) override {
     return Status::NotSupported("Not supported operation.");
   }
 
@@ -637,7 +637,6 @@ extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_writebatch_wi_create_iter
     rocksdb_iterator_t* base_iterator,
     rocksdb_column_family_handle_t* cf);
 
 /* Block based table options */
 
 extern ROCKSDB_LIBRARY_API rocksdb_block_based_table_options_t*
@@ -53,6 +53,9 @@ class WriteBatch;
 class Env;
 class EventListener;
 class TraceWriter;
+#ifdef ROCKSDB_LITE
+class CompactionJobInfo;
+#endif
 
 using std::unique_ptr;
 
@@ -834,18 +837,20 @@ class DB {
   virtual Status CompactFiles(
       const CompactionOptions& compact_options,
       ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1,
-      std::vector<std::string>* const output_file_names = nullptr) = 0;
+      const std::vector<std::string>& input_file_names, const int output_level,
+      const int output_path_id = -1,
+      std::vector<std::string>* const output_file_names = nullptr,
+      CompactionJobInfo* compaction_job_info = nullptr) = 0;
 
   virtual Status CompactFiles(
       const CompactionOptions& compact_options,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1,
-      std::vector<std::string>* const output_file_names = nullptr) {
+      const std::vector<std::string>& input_file_names, const int output_level,
+      const int output_path_id = -1,
+      std::vector<std::string>* const output_file_names = nullptr,
+      CompactionJobInfo* compaction_job_info = nullptr) {
     return CompactFiles(compact_options, DefaultColumnFamily(),
                         input_file_names, output_level, output_path_id,
-                        output_file_names);
+                        output_file_names, compaction_job_info);
   }
 
   // This function will wait until all currently running background processes
@@ -7,9 +7,9 @@
 
 #ifndef ROCKSDB_LITE
 
-#include "rocksdb/slice.h"
-#include "rocksdb/options.h"
 #include "rocksdb/iterator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/slice.h"
 #include "rocksdb/table_properties.h"
 
 namespace rocksdb {
@@ -454,8 +454,8 @@ class TableFactory {
   //     and cache the table object returned.
   // (2) SstFileDumper (for SST Dump) opens the table and dump the table
   //     contents using the iterator of the table.
-  // (3) DBImpl::IngestExternalFile() calls this function to read the contents of
-  //     the sst file it's attempting to add
+  // (3) DBImpl::IngestExternalFile() calls this function to read the contents
+  //     of the sst file it's attempting to add
   //
   // table_reader_options is a TableReaderOptions which contain all the
   //     needed parameters and configuration to open the table.
@@ -218,12 +218,13 @@ class StackableDB : public DB {
   virtual Status CompactFiles(
       const CompactionOptions& compact_options,
       ColumnFamilyHandle* column_family,
-      const std::vector<std::string>& input_file_names,
-      const int output_level, const int output_path_id = -1,
-      std::vector<std::string>* const output_file_names = nullptr) override {
-    return db_->CompactFiles(
-        compact_options, column_family, input_file_names,
-        output_level, output_path_id, output_file_names);
+      const std::vector<std::string>& input_file_names, const int output_level,
+      const int output_path_id = -1,
+      std::vector<std::string>* const output_file_names = nullptr,
+      CompactionJobInfo* compaction_job_info = nullptr) override {
+    return db_->CompactFiles(compact_options, column_family, input_file_names,
+                             output_level, output_path_id, output_file_names,
+                             compaction_job_info);
   }
 
   virtual Status PauseBackgroundWork() override {
@@ -10,25 +10,23 @@
 
 namespace rocksdb {
 
-  StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats)
-      : StatisticsImpl(stats), m_ignore_histograms() {
-  }
-
-  StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats,
-      const std::set<uint32_t> ignore_histograms) : StatisticsImpl(stats),
-      m_ignore_histograms(ignore_histograms) {
-  }
-
-  bool StatisticsJni::HistEnabledForType(uint32_t type) const {
-    if (type >= HISTOGRAM_ENUM_MAX) {
-      return false;
-    }
-
-    if (m_ignore_histograms.count(type) > 0) {
-      return false;
-    }
-
-    return true;
-  }
+StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats)
+    : StatisticsImpl(stats), m_ignore_histograms() {}
+
+StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats,
+                             const std::set<uint32_t> ignore_histograms)
+    : StatisticsImpl(stats), m_ignore_histograms(ignore_histograms) {}
+
+bool StatisticsJni::HistEnabledForType(uint32_t type) const {
+  if (type >= HISTOGRAM_ENUM_MAX) {
+    return false;
+  }
+
+  if (m_ignore_histograms.count(type) > 0) {
+    return false;
+  }
+
+  return true;
+}
 // @lint-ignore TXT4 T25377293 Grandfathered in
 };
@@ -129,12 +129,10 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
   public RocksIterator newIteratorWithBase(
       final ColumnFamilyHandle columnFamilyHandle,
       final RocksIterator baseIterator) {
-    RocksIterator iterator = new RocksIterator(
-        baseIterator.parent_,
-        iteratorWithBase(nativeHandle_,
-            columnFamilyHandle.nativeHandle_,
-            baseIterator.nativeHandle_));
-    //when the iterator is deleted it will also delete the baseIterator
+    RocksIterator iterator = new RocksIterator(baseIterator.parent_,
+        iteratorWithBase(
+            nativeHandle_, columnFamilyHandle.nativeHandle_, baseIterator.nativeHandle_));
+    // when the iterator is deleted it will also delete the baseIterator
     baseIterator.disOwnNativeHandle();
     return iterator;
   }
@@ -151,8 +149,7 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
    * point-in-timefrom baseIterator and modifications made in this write batch.
    */
   public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) {
-    return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(),
-        baseIterator);
+    return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator);
   }
 
   /**
@@ -295,8 +292,8 @@ public class WriteBatchWithIndex extends AbstractWriteBatch {
       final boolean overwriteKey);
   private native long iterator0(final long handle);
   private native long iterator1(final long handle, final long cfHandle);
-  private native long iteratorWithBase(final long handle,
-      final long baseIteratorHandle, final long cfHandle);
+  private native long iteratorWithBase(
+      final long handle, final long baseIteratorHandle, final long cfHandle);
   private native byte[] getFromBatch(final long handle, final long optHandle,
       final byte[] key, final int keyLen);
   private native byte[] getFromBatch(final long handle, final long optHandle,
@@ -47,7 +47,6 @@ public class WriteBatchWithIndexTest {
     try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
          final RocksIterator base = db.newIterator();
          final RocksIterator it = wbwi.newIteratorWithBase(base)) {
-
       it.seek(k1);
       assertThat(it.isValid()).isTrue();
       assertThat(it.key()).isEqualTo(k1);
@@ -421,8 +420,8 @@ public class WriteBatchWithIndexTest {
       final ReadOptions readOptions, final WriteBatchWithIndex wbwi,
       final String skey) {
     final byte[] key = skey.getBytes();
-    try(final RocksIterator baseIterator = db.newIterator(readOptions);
-        final RocksIterator iterator = wbwi.newIteratorWithBase(baseIterator)) {
+    try (final RocksIterator baseIterator = db.newIterator(readOptions);
+         final RocksIterator iterator = wbwi.newIteratorWithBase(baseIterator)) {
       iterator.seek(key);
 
       // Arrays.equals(key, iterator.key()) ensures an exact match in Rocks,
@@ -357,11 +357,12 @@ std::string StatisticsImpl::ToString() const {
       getHistogramImplLocked(h.first)->Data(&hData);
       // don't handle failures - buffer should always be big enough and arguments
       // should be provided correctly
-      int ret = snprintf(
-          buffer, kTmpStrBufferSize,
-          "%s P50 : %f P95 : %f P99 : %f P100 : %f COUNT : %" PRIu64 " SUM : %"
-          PRIu64 "\n", h.second.c_str(), hData.median, hData.percentile95,
-          hData.percentile99, hData.max, hData.count, hData.sum);
+      int ret =
+          snprintf(buffer, kTmpStrBufferSize,
+                   "%s P50 : %f P95 : %f P99 : %f P100 : %f COUNT : %" PRIu64
+                   " SUM : %" PRIu64 "\n",
+                   h.second.c_str(), hData.median, hData.percentile95,
+                   hData.percentile99, hData.max, hData.count, hData.sum);
       if (ret < 0 || ret >= kTmpStrBufferSize) {
         assert(false);
         continue;
@@ -10,8 +10,8 @@
 #include "db/db_iter.h"
 #include "options/cf_options.h"
 #include "table/get_context.h"
-#include "table/table_reader.h"
 #include "table/table_builder.h"
+#include "table/table_reader.h"
 #include "util/file_reader_writer.h"
 
 namespace rocksdb {
@@ -31,8 +31,7 @@ struct SstFileReader::Rep {
         moptions(ColumnFamilyOptions(options)) {}
 };
 
-SstFileReader::SstFileReader(const Options& options)
-    : rep_(new Rep(options)) {}
+SstFileReader::SstFileReader(const Options& options) : rep_(new Rep(options)) {}
 
 SstFileReader::~SstFileReader() {}
 
@@ -60,18 +59,19 @@ Status SstFileReader::Open(const std::string& file_path) {
 
 Iterator* SstFileReader::NewIterator(const ReadOptions& options) {
   auto r = rep_.get();
-  auto sequence = options.snapshot != nullptr ?
-                  options.snapshot->GetSequenceNumber() :
-                  kMaxSequenceNumber;
-  auto internal_iter = r->table_reader->NewIterator(
-      options, r->moptions.prefix_extractor.get());
+  auto sequence = options.snapshot != nullptr
+                      ? options.snapshot->GetSequenceNumber()
+                      : kMaxSequenceNumber;
+  auto internal_iter =
+      r->table_reader->NewIterator(options, r->moptions.prefix_extractor.get());
   return NewDBIterator(r->options.env, options, r->ioptions, r->moptions,
                        r->ioptions.user_comparator, internal_iter, sequence,
                        r->moptions.max_sequential_skip_in_iterations,
                        nullptr /* read_callback */);
 }
 
-std::shared_ptr<const TableProperties> SstFileReader::GetTableProperties() const {
+std::shared_ptr<const TableProperties> SstFileReader::GetTableProperties()
+    const {
   return rep_->table_reader->GetTableProperties();
 }
 
@@ -39,8 +39,8 @@ class SstFileReaderTest : public testing::Test {
     ASSERT_OK(writer.Open(sst_name_));
     for (size_t i = 0; i + 2 < keys.size(); i += 3) {
       ASSERT_OK(writer.Put(keys[i], keys[i]));
-      ASSERT_OK(writer.Merge(keys[i+1], EncodeAsUint64(i+1)));
-      ASSERT_OK(writer.Delete(keys[i+2]));
+      ASSERT_OK(writer.Merge(keys[i + 1], EncodeAsUint64(i + 1)));
+      ASSERT_OK(writer.Delete(keys[i + 2]));
     }
     ASSERT_OK(writer.Finish());
 
@@ -56,8 +56,8 @@ class SstFileReaderTest : public testing::Test {
       ASSERT_EQ(iter->value().compare(keys[i]), 0);
       iter->Next();
       ASSERT_TRUE(iter->Valid());
-      ASSERT_EQ(iter->key().compare(keys[i+1]), 0);
-      ASSERT_EQ(iter->value().compare(EncodeAsUint64(i+1)), 0);
+      ASSERT_EQ(iter->key().compare(keys[i + 1]), 0);
+      ASSERT_EQ(iter->value().compare(EncodeAsUint64(i + 1)), 0);
       iter->Next();
     }
     ASSERT_FALSE(iter->Valid());
@@ -99,7 +99,8 @@ int main(int argc, char** argv) {
 #include <stdio.h>
 
 int main(int /*argc*/, char** /*argv*/) {
-  fprintf(stderr, "SKIPPED as SstFileReader is not supported in ROCKSDB_LITE\n");
+  fprintf(stderr,
+          "SKIPPED as SstFileReader is not supported in ROCKSDB_LITE\n");
   return 0;
 }
 
@@ -4580,8 +4580,8 @@ void VerifyDBFromDB(std::string& truth_db_name) {
       if (FLAGS_max_scan_distance != 0) {
         if (FLAGS_reverse_iterator) {
           GenerateKeyFromInt(
-              (uint64_t)std::max((int64_t)0,
-                                 seek_pos - FLAGS_max_scan_distance),
+              static_cast<uint64_t>(std::max(
+                  static_cast<int64_t>(0), seek_pos - FLAGS_max_scan_distance)),
               FLAGS_num, &lower_bound);
           options.iterate_lower_bound = &lower_bound;
         } else {
@@ -2846,7 +2846,8 @@ void DumpSstFile(std::string filename, bool output_hex, bool show_properties) {
   }
   // no verification
   rocksdb::SstFileDumper dumper(filename, false, output_hex);
-  Status st = dumper.ReadSequential(true, std::numeric_limits<uint64_t>::max(), false,  // has_from
+  Status st = dumper.ReadSequential(true, std::numeric_limits<uint64_t>::max(),
+                                    false,            // has_from
                                     from_key, false,  // has_to
                                     to_key);
   if (!st.ok()) {
@@ -570,8 +570,7 @@ int SSTDumpTool::Run(int argc, char** argv) {
       filename = std::string(dir_or_file) + "/" + filename;
     }
 
-    rocksdb::SstFileDumper dumper(filename, verify_checksum,
-                                  output_hex);
+    rocksdb::SstFileDumper dumper(filename, verify_checksum, output_hex);
     if (!dumper.getStatus().ok()) {
       fprintf(stderr, "%s: %s\n", filename.c_str(),
               dumper.getStatus().ToString().c_str());
@@ -180,7 +180,7 @@ Status TransactionBaseImpl::RollbackToSavePoint() {
     return Status::NotFound();
   }
 }
 
 Status TransactionBaseImpl::PopSavePoint() {
   if (save_points_ == nullptr ||
       save_points_->empty()) {
@@ -189,7 +189,7 @@ Status TransactionBaseImpl::PopSavePoint() {
     return Status::NotFound();
   }
 
-  assert(!save_points_->empty());
+  assert(!save_points_->empty());
   save_points_->pop();
   return write_batch_.PopSavePoint();
 }