Remove some unneeded code (#8736)
Summary:
* FullKey and ParseFullKey appear to serve no purpose in the public API (or anywhere else), so they were removed. Their only use, in one test, was updated.
* NumberToString serves no purpose vs. ToString, so it was removed and its numerous call sites were updated.
* Remove unnecessary forward declarations in metadata.h by re-arranging the struct definitions.
* Remove some unneeded semicolons.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/8736

Test Plan: existing tests

Reviewed By: mrambacher

Differential Revision: D30700039

Pulled By: pdillinger

fbshipit-source-id: 1e436a576f511a6ed8b4d97af7cc8216bc729af2
Parent: 32752551b9
Commit: c9cd5d25a8
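For downstream code that still used the removed helpers, the summary above implies a straightforward migration: replace `NumberToString(n)` with `ToString(n)`, and replace `FullKey`/`ParseFullKey` with `ParsedInternalKey`/`ParseInternalKey`, as the updated tests in this diff do. The sketch below is not part of the PR: the helper names are hypothetical, and it assumes compilation inside the RocksDB source tree, since `ParsedInternalKey` and `ToString` live in internal headers.

```cpp
// Hedged sketch (not from this PR): migrating off the removed helpers.
#include <string>

#include "db/dbformat.h"       // ParsedInternalKey, ParseInternalKey, kTypeValue
#include "util/string_util.h"  // ToString (the retained replacement for NumberToString)

namespace ROCKSDB_NAMESPACE {

// Hypothetical helper: build a property name the way the updated tests do,
// i.e. ToString(level) instead of the removed NumberToString(level).
inline std::string FilesAtLevelProperty(int level) {
  return "rocksdb.num-files-at-level" + ToString(level);
}

// Hypothetical helper: the old FullKey/ParseFullKey pattern rewritten in terms
// of ParsedInternalKey/ParseInternalKey, mirroring the updated iterator test.
inline bool DescribeInternalKey(const Slice& internal_key, std::string* out) {
  ParsedInternalKey ikey;
  if (!ParseInternalKey(internal_key, &ikey, false /* log_err_key */).ok()) {
    return false;  // ParseFullKey also reported failure with a bool
  }
  *out = ikey.user_key.ToString() + "@" + ToString(ikey.sequence) +
         (ikey.type == kTypeValue ? " [put]" : " [other]");
  return true;
}

}  // namespace ROCKSDB_NAMESPACE
```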
@@ -11,6 +11,9 @@
 ### New Features
 * RemoteCompaction's interface now includes `db_name`, `db_id`, `session_id`, which could help the user uniquely identify compaction job between db instances and sessions.
 
+### Public API change
+* Remove obsolete implementation details FullKey and ParseFullKey from public API
+
 ## 6.24.0 (2021-08-20)
 ### Bug Fixes
 * If the primary's CURRENT file is missing or inaccessible, the secondary instance should not hang repeatedly trying to switch to a new MANIFEST. It should instead return the error code encountered while accessing the file.
@@ -203,7 +203,7 @@ Status CheckCFPathsSupported(const DBOptions& db_options,
 namespace {
 const uint64_t kDefaultTtl = 0xfffffffffffffffe;
 const uint64_t kDefaultPeriodicCompSecs = 0xfffffffffffffffe;
-};  // namespace
+}  // namespace
 
 ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
                                     const ColumnFamilyOptions& src) {
@@ -269,10 +269,10 @@ class CompactionJobStatsTest : public testing::Test,
     if (cf == 0) {
       // default cfd
       EXPECT_TRUE(db_->GetProperty(
-          "rocksdb.num-files-at-level" + NumberToString(level), &property));
+          "rocksdb.num-files-at-level" + ToString(level), &property));
     } else {
       EXPECT_TRUE(db_->GetProperty(
-          handles_[cf], "rocksdb.num-files-at-level" + NumberToString(level),
+          handles_[cf], "rocksdb.num-files-at-level" + ToString(level),
           &property));
     }
     return atoi(property.c_str());
@@ -94,8 +94,8 @@ class CuckooTableDBTest : public testing::Test {
 
   int NumTableFilesAtLevel(int level) {
     std::string property;
-    EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(level), &property));
+    EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level),
+                                 &property));
     return atoi(property.c_str());
   }
 
@@ -318,7 +318,7 @@ bool CompareCandidateFile(const JobContext::CandidateFileInfo& first,
     return (first.file_path > second.file_path);
   }
 }
-};  // namespace
+}  // namespace
 
 // Delete obsolete files and log status and information of file deletion
 void DBImpl::DeleteObsoleteFileImpl(int job_id, const std::string& fname,
@@ -2588,10 +2588,11 @@ TEST_F(DBIteratorTest, DBIteratorTestDifferentialSnapshots) {
   std::string values[4] = {"1c", "2c", "3c", "4b"};
   int i = 0;
   for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) {
-    FullKey fkey;
-    ParseFullKey(db_iter->key(), &fkey);
+    ParsedInternalKey fkey;
+    ASSERT_OK(
+        ParseInternalKey(db_iter->key(), &fkey, true /* log_err_key */));
     ASSERT_EQ(user_keys[i], fkey.user_key.ToString());
-    ASSERT_EQ(EntryType::kEntryPut, fkey.type);
+    ASSERT_EQ(kTypeValue, fkey.type);
     ASSERT_EQ(seqnums[i], fkey.sequence);
     ASSERT_EQ(values[i], db_iter->value().ToString());
     i++;
@@ -2620,14 +2621,15 @@ TEST_F(DBIteratorTest, DBIteratorTestDifferentialSnapshots) {
                              nullptr /* read_callback */));
   // Expecting InternalKeys in [5,8] range with correct type
   int seqnums[4] = {5,8,11,13};
-  EntryType key_types[4] = {EntryType::kEntryDelete,EntryType::kEntryDelete,
-                            EntryType::kEntryDelete,EntryType::kEntryPut};
+  ValueType key_types[4] = {kTypeDeletion, kTypeDeletion, kTypeDeletion,
+                            kTypeValue};
   std::string user_keys[4] = {"1","2","3","4"};
   std::string values[4] = {"", "", "", "4b"};
   int i = 0;
   for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) {
-    FullKey fkey;
-    ParseFullKey(db_iter->key(), &fkey);
+    ParsedInternalKey fkey;
+    ASSERT_OK(
+        ParseInternalKey(db_iter->key(), &fkey, true /* log_err_key */));
     ASSERT_EQ(user_keys[i], fkey.user_key.ToString());
     ASSERT_EQ(key_types[i], fkey.type);
     ASSERT_EQ(seqnums[i], fkey.sequence);
@@ -1684,9 +1684,9 @@ class CompactionCompressionListener : public EventListener {
     int bottommost_level = 0;
     for (int level = 0; level < db->NumberLevels(); level++) {
       std::string files_at_level;
-      ASSERT_TRUE(
-          db->GetProperty("rocksdb.num-files-at-level" + NumberToString(level),
-                          &files_at_level));
+      ASSERT_TRUE(db->GetProperty(
+          "rocksdb.num-files-at-level" + ROCKSDB_NAMESPACE::ToString(level),
+          &files_at_level));
       if (files_at_level != "0") {
         bottommost_level = level;
       }
@@ -1070,12 +1070,12 @@ int DBTestBase::NumTableFilesAtLevel(int level, int cf) {
   std::string property;
   if (cf == 0) {
     // default cfd
-    EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(level), &property));
+    EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level),
+                                 &property));
   } else {
-    EXPECT_TRUE(db_->GetProperty(
-        handles_[cf], "rocksdb.num-files-at-level" + NumberToString(level),
-        &property));
+    EXPECT_TRUE(db_->GetProperty(handles_[cf],
+                                 "rocksdb.num-files-at-level" + ToString(level),
+                                 &property));
   }
   return atoi(property.c_str());
 }
@@ -1085,12 +1085,10 @@ double DBTestBase::CompressionRatioAtLevel(int level, int cf) {
   if (cf == 0) {
     // default cfd
     EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.compression-ratio-at-level" + NumberToString(level),
-        &property));
+        "rocksdb.compression-ratio-at-level" + ToString(level), &property));
   } else {
     EXPECT_TRUE(db_->GetProperty(
-        handles_[cf],
-        "rocksdb.compression-ratio-at-level" + NumberToString(level),
+        handles_[cf], "rocksdb.compression-ratio-at-level" + ToString(level),
         &property));
   }
   return std::stod(property);
@@ -47,18 +47,6 @@ EntryType GetEntryType(ValueType value_type) {
   }
 }
 
-bool ParseFullKey(const Slice& internal_key, FullKey* fkey) {
-  ParsedInternalKey ikey;
-  if (!ParseInternalKey(internal_key, &ikey, false /*log_err_key */)
-           .ok()) {  // TODO
-    return false;
-  }
-  fkey->user_key = ikey.user_key;
-  fkey->sequence = ikey.sequence;
-  fkey->type = GetEntryType(ikey.type);
-  return true;
-}
-
 void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
   result->append(key.user_key.data(), key.user_key.size());
   PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
@@ -1576,11 +1576,11 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {
     std::string prop;
     ASSERT_EQ(listener[i]->WaitForRecovery(5000000), true);
     ASSERT_OK(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true));
-    EXPECT_TRUE(db[i]->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(0), &prop));
+    EXPECT_TRUE(
+        db[i]->GetProperty("rocksdb.num-files-at-level" + ToString(0), &prop));
     EXPECT_EQ(atoi(prop.c_str()), 0);
-    EXPECT_TRUE(db[i]->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(1), &prop));
+    EXPECT_TRUE(
+        db[i]->GetProperty("rocksdb.num-files-at-level" + ToString(1), &prop));
     EXPECT_EQ(atoi(prop.c_str()), 1);
   }
 
@@ -1713,11 +1713,11 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
     if (i == 1) {
      ASSERT_OK(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true));
     }
-    EXPECT_TRUE(db[i]->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(0), &prop));
+    EXPECT_TRUE(
+        db[i]->GetProperty("rocksdb.num-files-at-level" + ToString(0), &prop));
     EXPECT_EQ(atoi(prop.c_str()), 0);
-    EXPECT_TRUE(db[i]->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(1), &prop));
+    EXPECT_TRUE(
+        db[i]->GetProperty("rocksdb.num-files-at-level" + ToString(1), &prop));
     EXPECT_EQ(atoi(prop.c_str()), 1);
   }
 
@@ -220,8 +220,8 @@ class PlainTableDBTest : public testing::Test,
 
   int NumTableFilesAtLevel(int level) {
     std::string property;
-    EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(level), &property));
+    EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level),
+                                 &property));
     return atoi(property.c_str());
   }
 
@@ -312,21 +312,20 @@ class VersionBuilder::Rep {
           if (!(external_file_seqno < f1->fd.largest_seqno ||
                 external_file_seqno == 0)) {
             return Status::Corruption(
-                "L0 file with seqno " +
-                NumberToString(f1->fd.smallest_seqno) + " " +
-                NumberToString(f1->fd.largest_seqno) +
+                "L0 file with seqno " + ToString(f1->fd.smallest_seqno) +
+                " " + ToString(f1->fd.largest_seqno) +
                 " vs. file with global_seqno" +
-                NumberToString(external_file_seqno) + " with fileNumber " +
-                NumberToString(f1->fd.GetNumber()));
+                ToString(external_file_seqno) + " with fileNumber " +
+                ToString(f1->fd.GetNumber()));
           }
         } else if (f1->fd.smallest_seqno <= f2->fd.smallest_seqno) {
-          return Status::Corruption(
-              "L0 files seqno " + NumberToString(f1->fd.smallest_seqno) +
-              " " + NumberToString(f1->fd.largest_seqno) + " " +
-              NumberToString(f1->fd.GetNumber()) + " vs. " +
-              NumberToString(f2->fd.smallest_seqno) + " " +
-              NumberToString(f2->fd.largest_seqno) + " " +
-              NumberToString(f2->fd.GetNumber()));
+          return Status::Corruption("L0 files seqno " +
+                                    ToString(f1->fd.smallest_seqno) + " " +
+                                    ToString(f1->fd.largest_seqno) + " " +
+                                    ToString(f1->fd.GetNumber()) + " vs. " +
+                                    ToString(f2->fd.smallest_seqno) + " " +
+                                    ToString(f2->fd.largest_seqno) + " " +
+                                    ToString(f2->fd.GetNumber()));
         }
       } else {
 #ifndef NDEBUG
@@ -335,21 +334,20 @@ class VersionBuilder::Rep {
 #endif
       if (!level_nonzero_cmp_(f1, f2)) {
         return Status::Corruption(
-            "L" + NumberToString(level) +
+            "L" + ToString(level) +
             " files are not sorted properly: files #" +
-            NumberToString(f1->fd.GetNumber()) + ", #" +
-            NumberToString(f2->fd.GetNumber()));
+            ToString(f1->fd.GetNumber()) + ", #" +
+            ToString(f2->fd.GetNumber()));
       }
 
       // Make sure there is no overlap in levels > 0
       if (vstorage->InternalComparator()->Compare(f1->largest,
                                                   f2->smallest) >= 0) {
         return Status::Corruption(
-            "L" + NumberToString(level) +
-            " have overlapping ranges: file #" +
-            NumberToString(f1->fd.GetNumber()) +
+            "L" + ToString(level) + " have overlapping ranges: file #" +
+            ToString(f1->fd.GetNumber()) +
             " largest key: " + (f1->largest).DebugString(true) +
-            " vs. file #" + NumberToString(f2->fd.GetNumber()) +
+            " vs. file #" + ToString(f2->fd.GetNumber()) +
             " smallest key: " + (f2->smallest).DebugString(true));
       }
     }
@@ -116,7 +116,7 @@ static std::string PrintContents(WriteBatch* b,
         break;
     }
     state.append("@");
-    state.append(NumberToString(ikey.sequence));
+    state.append(ToString(ikey.sequence));
   }
   EXPECT_OK(iter->status());
 }
@@ -2087,7 +2087,7 @@ void StressTest::PrintEnv() const {
           (unsigned long)FLAGS_ops_per_thread);
   std::string ttl_state("unused");
   if (FLAGS_ttl > 0) {
-    ttl_state = NumberToString(FLAGS_ttl);
+    ttl_state = ToString(FLAGS_ttl);
   }
   fprintf(stdout, "Time to live(sec) : %s\n", ttl_state.c_str());
   fprintf(stdout, "Read percentage : %d%%\n", FLAGS_readpercent);
@@ -15,50 +15,6 @@
 #include "rocksdb/types.h"
 
 namespace ROCKSDB_NAMESPACE {
-struct BlobMetaData;
-struct ColumnFamilyMetaData;
-struct LevelMetaData;
-struct SstFileMetaData;
-
-// The metadata that describes a column family.
-struct ColumnFamilyMetaData {
-  ColumnFamilyMetaData() : size(0), file_count(0), name("") {}
-  ColumnFamilyMetaData(const std::string& _name, uint64_t _size,
-                       const std::vector<LevelMetaData>&& _levels)
-      : size(_size), name(_name), levels(_levels) {}
-
-  // The size of this column family in bytes, which is equal to the sum of
-  // the file size of its "levels".
-  uint64_t size;
-  // The number of files in this column family.
-  size_t file_count;
-  // The name of the column family.
-  std::string name;
-  // The metadata of all levels in this column family.
-  std::vector<LevelMetaData> levels;
-
-  // The total size of all blob files
-  uint64_t blob_file_size = 0;
-  // The number of blob files in this column family.
-  size_t blob_file_count = 0;
-  // The metadata of the blobs in this column family
-  std::vector<BlobMetaData> blob_files;
-};
-
-// The metadata that describes a level.
-struct LevelMetaData {
-  LevelMetaData(int _level, uint64_t _size,
-                const std::vector<SstFileMetaData>&& _files)
-      : level(_level), size(_size), files(_files) {}
-
-  // The level which this meta data describes.
-  const int level;
-  // The size of this level in bytes, which is equal to the sum of
-  // the file size of its "files".
-  const uint64_t size;
-  // The metadata of all sst files in this level.
-  const std::vector<SstFileMetaData> files;
-};
-
 // The metadata that describes a SST file.
 struct SstFileMetaData {
@@ -199,6 +155,46 @@ struct BlobMetaData {
   std::string checksum_value;
 };
 
+// The metadata that describes a level.
+struct LevelMetaData {
+  LevelMetaData(int _level, uint64_t _size,
+                const std::vector<SstFileMetaData>&& _files)
+      : level(_level), size(_size), files(_files) {}
+
+  // The level which this meta data describes.
+  const int level;
+  // The size of this level in bytes, which is equal to the sum of
+  // the file size of its "files".
+  const uint64_t size;
+  // The metadata of all sst files in this level.
+  const std::vector<SstFileMetaData> files;
+};
+
+// The metadata that describes a column family.
+struct ColumnFamilyMetaData {
+  ColumnFamilyMetaData() : size(0), file_count(0), name("") {}
+  ColumnFamilyMetaData(const std::string& _name, uint64_t _size,
+                       const std::vector<LevelMetaData>&& _levels)
+      : size(_size), name(_name), levels(_levels) {}
+
+  // The size of this column family in bytes, which is equal to the sum of
+  // the file size of its "levels".
+  uint64_t size;
+  // The number of files in this column family.
+  size_t file_count;
+  // The name of the column family.
+  std::string name;
+  // The metadata of all levels in this column family.
+  std::vector<LevelMetaData> levels;
+
+  // The total size of all blob files
+  uint64_t blob_file_size = 0;
+  // The number of blob files in this column family.
+  size_t blob_file_count = 0;
+  // The metadata of the blobs in this column family
+  std::vector<BlobMetaData> blob_files;
+};
+
 // Metadata returned as output from ExportColumnFamily() and used as input to
 // CreateColumnFamiliesWithImport().
 struct ExportImportFilesMetaData {
@@ -55,27 +55,4 @@ enum EntryType {
   kEntryOther,
 };
 
-// <user key, sequence number, and entry type> tuple.
-struct FullKey {
-  Slice user_key;
-  SequenceNumber sequence;
-  EntryType type;
-
-  FullKey() : sequence(0) {}  // Intentionally left uninitialized (for speed)
-  FullKey(const Slice& u, const SequenceNumber& seq, EntryType t)
-      : user_key(u), sequence(seq), type(t) {}
-  std::string DebugString(bool hex = false) const;
-
-  void clear() {
-    user_key.clear();
-    sequence = 0;
-    type = EntryType::kEntryPut;
-  }
-};
-
-// Parse slice representing internal key to FullKey
-// Parsed FullKey is valid for as long as the memory pointed to by
-// internal_key is alive.
-bool ParseFullKey(const Slice& internal_key, FullKey* result);
-
 }  // namespace ROCKSDB_NAMESPACE
@@ -119,7 +119,7 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(JNIEnv* env,
         break;
     }
     state.append("@");
-    state.append(ROCKSDB_NAMESPACE::NumberToString(ikey.sequence));
+    state.append(ROCKSDB_NAMESPACE::ToString(ikey.sequence));
   }
   if (!s.ok()) {
     state.append(s.ToString());
@@ -132,4 +132,4 @@ class BloomBlockBuilder {
   PlainTableBloomV1 bloom_;
 };
 
-};  // namespace ROCKSDB_NAMESPACE
+}  // namespace ROCKSDB_NAMESPACE
@@ -206,6 +206,6 @@ Slice PlainTableIndexBuilder::FillIndexes(
 
 const std::string PlainTableIndexBuilder::kPlainTableIndexBlock =
     "PlainTableIndexBlock";
-};  // namespace ROCKSDB_NAMESPACE
+}  // namespace ROCKSDB_NAMESPACE
 
 #endif  // ROCKSDB_LITE
@@ -244,6 +244,6 @@ class PlainTableIndexBuilder {
   static const size_t kRecordsPerGroup = 256;
 };
 
-};  // namespace ROCKSDB_NAMESPACE
+}  // namespace ROCKSDB_NAMESPACE
 
 #endif  // ROCKSDB_LITE
@@ -2163,8 +2163,7 @@ void ChangeCompactionStyleCommand::DoCommand() {
   std::string property;
   std::string files_per_level;
   for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
-    db_->GetProperty(GetCfHandle(),
-                     "rocksdb.num-files-at-level" + NumberToString(i),
+    db_->GetProperty(GetCfHandle(), "rocksdb.num-files-at-level" + ToString(i),
                      &property);
 
     // format print string
@@ -2192,8 +2191,7 @@ void ChangeCompactionStyleCommand::DoCommand() {
   files_per_level = "";
   int num_files = 0;
   for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
-    db_->GetProperty(GetCfHandle(),
-                     "rocksdb.num-files-at-level" + NumberToString(i),
+    db_->GetProperty(GetCfHandle(), "rocksdb.num-files-at-level" + ToString(i),
                      &property);
 
     // format print string
@@ -70,8 +70,8 @@ public:
 
   int FilesOnLevel(int level) {
     std::string property;
-    EXPECT_TRUE(db_->GetProperty(
-        "rocksdb.num-files-at-level" + NumberToString(level), &property));
+    EXPECT_TRUE(db_->GetProperty("rocksdb.num-files-at-level" + ToString(level),
+                                 &property));
     return atoi(property.c_str());
   }
 
@@ -112,12 +112,6 @@ void AppendEscapedStringTo(std::string* str, const Slice& value) {
   }
 }
 
-std::string NumberToString(uint64_t num) {
-  std::string r;
-  AppendNumberTo(&r, num);
-  return r;
-}
-
 std::string NumberToHumanString(int64_t num) {
   char buf[19];
   int64_t absnum = num < 0 ? -num : num;
@@ -39,9 +39,6 @@ extern void AppendNumberTo(std::string* str, uint64_t num);
 // Escapes any non-printable characters found in "value".
 extern void AppendEscapedStringTo(std::string* str, const Slice& value);
 
-// Return a string printout of "num"
-extern std::string NumberToString(uint64_t num);
-
 // Put n digits from v in base kBase to (*buf)[0] to (*buf)[n-1] and
 // advance *buf to the position after what was written.
 template <size_t kBase>