Fix all the lint errors.

Summary:
Used a script to remove all trailing spaces and convert all tabs to
spaces.

Also fixed other lint errors.
All lint errors from this point forward should be taken seriously.
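A minimal sketch (not the actual script used for this change) of the kind of
cleanup described above, written in C++ for illustration: expand tabs to two
spaces and strip trailing whitespace from one file named on the command line.

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char** argv) {
  if (argc != 2) {
    std::cerr << "usage: " << argv[0] << " <file>\n";
    return 1;
  }
  std::ifstream in(argv[1]);
  std::ostringstream out;
  std::string line;
  while (std::getline(in, line)) {
    std::string fixed;
    for (char c : line) {
      if (c == '\t') fixed += "  ";                       // tab -> two spaces
      else fixed += c;
    }
    size_t end = fixed.find_last_not_of(" \t");           // trailing whitespace
    fixed.erase(end == std::string::npos ? 0 : end + 1);
    out << fixed << '\n';
  }
  in.close();
  std::ofstream(argv[1]) << out.str();                    // rewrite in place
  return 0;
}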

Test Plan: make all check

Reviewers: dhruba

Reviewed By: dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D7059
Abhishek Kona 2012-11-28 16:42:36 -08:00
parent 9b838535d1
commit d29f181923
20 changed files with 120 additions and 118 deletions

db/c.cc

@@ -445,7 +445,7 @@ void leveldb_options_set_block_restart_interval(leveldb_options_t* opt, int n) {
 }

 void leveldb_options_set_target_file_size_base(
     leveldb_options_t* opt, uint64_t n) {
   opt->rep.target_file_size_base = n;
 }
@@ -455,47 +455,47 @@ void leveldb_options_set_target_file_size_multiplier(
 }

 void leveldb_options_set_max_bytes_for_level_base(
     leveldb_options_t* opt, uint64_t n) {
   opt->rep.max_bytes_for_level_base = n;
 }

 void leveldb_options_set_max_bytes_for_level_multiplier(
     leveldb_options_t* opt, int n) {
   opt->rep.max_bytes_for_level_multiplier = n;
 }

 void leveldb_options_set_expanded_compaction_factor(
     leveldb_options_t* opt, int n) {
   opt->rep.expanded_compaction_factor = n;
 }

 void leveldb_options_set_max_grandparent_overlap_factor(
     leveldb_options_t* opt, int n) {
   opt->rep.max_grandparent_overlap_factor = n;
 }

 void leveldb_options_set_num_levels(leveldb_options_t* opt, int n) {
   opt->rep.num_levels = n;
 }

 void leveldb_options_set_level0_file_num_compaction_trigger(
     leveldb_options_t* opt, int n) {
   opt->rep.level0_file_num_compaction_trigger = n;
 }

 void leveldb_options_set_level0_slowdown_writes_trigger(
     leveldb_options_t* opt, int n) {
   opt->rep.level0_slowdown_writes_trigger = n;
 }

 void leveldb_options_set_level0_stop_writes_trigger(
     leveldb_options_t* opt, int n) {
   opt->rep.level0_stop_writes_trigger = n;
 }

 void leveldb_options_set_max_mem_compaction_level(
     leveldb_options_t* opt, int n) {
   opt->rep.max_mem_compaction_level = n;
 }

 void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
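The setters above are plain pass-throughs into the underlying Options struct.
A small usage sketch (not from this commit; it assumes the public C header
leveldb/c.h and the standard leveldb_options_create()/leveldb_options_destroy()
entry points) showing how the setters in this hunk are called:

#include "leveldb/c.h"

// Build an options object tuned via the setters shown in the hunk above.
leveldb_options_t* MakeTunedOptions() {
  leveldb_options_t* opt = leveldb_options_create();
  leveldb_options_set_num_levels(opt, 5);
  leveldb_options_set_target_file_size_base(opt, 2 * 1048576);      // 2 MB
  leveldb_options_set_max_bytes_for_level_base(opt, 10 * 1048576);  // 10 MB
  leveldb_options_set_max_bytes_for_level_multiplier(opt, 10);
  leveldb_options_set_level0_file_num_compaction_trigger(opt, 4);
  leveldb_options_set_level0_slowdown_writes_trigger(opt, 8);
  leveldb_options_set_level0_stop_writes_trigger(opt, 12);
  return opt;  // caller frees with leveldb_options_destroy(opt)
}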


@@ -452,7 +452,7 @@ struct ThreadState {
   Stats stats;
   SharedState* shared;

-  ThreadState(int index)
+  /* implicit */ ThreadState(int index)
       : tid(index),
         rand(1000 + index) {
   }
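The only textual change in this hunk is the /* implicit */ marker on the
one-argument constructor. A standalone sketch (hypothetical class names) of
the lint rule it addresses: cpplint wants single-argument constructors
declared explicit unless the implicit conversion is intended, in which case
the annotation documents that intent.

class Explicit {
 public:
  explicit Explicit(int v) : v_(v) {}        // no silent conversion from int
 private:
  int v_;
};

class Implicit {
 public:
  /* implicit */ Implicit(int v) : v_(v) {}  // conversion intended; say so
 private:
  int v_;
};

void TakesExplicit(const Explicit&) {}
void TakesImplicit(const Implicit&) {}

int main() {
  // TakesExplicit(42);  // does not compile: int does not convert to Explicit
  TakesImplicit(42);     // fine: int converts implicitly to Implicit
  return 0;
}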
@@ -1320,7 +1320,7 @@ int main(int argc, char** argv) {
         exit(1);
       }
     } else if (sscanf(argv[i], "--table_cache_numshardbits=%d%c",
                       &n, &junk) == 1) {
       if (n <= 0 || n > 20) {
         fprintf(stderr, "The cache cannot be sharded into 2**%d pieces\n", n);
         exit(1);
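The sscanf pattern above relies on the "%d%c" trick: the call returns 1 only
when the integer is the last thing in the argument, so flags with trailing
junk are rejected. A standalone sketch with a hypothetical flag name:

#include <cstdio>

int main() {
  const char* args[] = {"--shards=4", "--shards=4x"};
  for (const char* arg : args) {
    int n;
    char junk;
    if (std::sscanf(arg, "--shards=%d%c", &n, &junk) == 1) {
      std::printf("%s -> parsed %d\n", arg, n);  // nothing after the number
    } else {
      std::printf("%s -> rejected\n", arg);      // %c matched the trailing 'x'
    }
  }
  return 0;
}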


@@ -1877,7 +1877,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
       if (!s.ok()) {
         // Avoid chewing through file number space in a tight loop.
         versions_->ReuseFileNumber(new_log_number);
         break;
       }
       delete log_;


@@ -1703,7 +1703,7 @@ TEST(DBTest, DeletionMarkers2) {
 TEST(DBTest, OverlapInLevel0) {
   do {
     int tmp = dbfull()->MaxMemCompactionLevel();
     ASSERT_EQ(tmp, 2) << "Fix test to match config";

     // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
@@ -2429,17 +2429,17 @@ class ModelDB: public DB {
   virtual int NumberLevels()
   {
     return 1;
   }

   virtual int MaxMemCompactionLevel()
   {
     return 1;
   }

   virtual int Level0StopWriteTrigger()
   {
     return -1;
   }

   virtual Status Flush(const leveldb::FlushOptions& options) {


@@ -1,9 +1,11 @@
 // Copyright (c) 2012 Facebook.
+#include "db/memtablelist.h"
 #include <string>
 #include "leveldb/db.h"
 #include "db/memtable.h"
-#include "db/memtablelist.h"
 #include "leveldb/env.h"
 #include "leveldb/iterator.h"
 #include "util/coding.h"


@@ -29,9 +29,9 @@ struct FileMetaData {
 class VersionEdit {
  public:
-  VersionEdit(int number_levels) :
+  /* implicit */ VersionEdit(int number_levels) :
       number_levels_(number_levels) {
     Clear();
   }

   ~VersionEdit() { }


@@ -1690,11 +1690,11 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
 }

 double VersionSet::MaxBytesForLevel(int level) {
   // Note: the result for level zero is not really used since we set
   // the level-0 compaction threshold based on number of files.
   assert(level >= 0);
   assert(level < NumberLevels());
   return level_max_bytes_[level];
 }

 uint64_t VersionSet::MaxFileSizeForLevel(int level) {
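For orientation, a hedged sketch (assumed for illustration, not code from this
file) of how a geometric per-level budget can be derived from the
max_bytes_for_level_base and max_bytes_for_level_multiplier options seen in
the db/c.cc hunk earlier: level 1 gets the base, and each deeper level is
multiplier times larger.

#include <cstdint>
#include <cstdio>

// Hypothetical helper: geometric level budgets from a base size and multiplier.
uint64_t LevelBudget(int level, uint64_t base, int multiplier) {
  uint64_t bytes = base;
  for (int l = 2; l <= level; ++l) bytes *= multiplier;
  return bytes;
}

int main() {
  for (int level = 1; level <= 4; ++level) {
    std::printf("level %d budget: %llu bytes\n", level,
                static_cast<unsigned long long>(
                    LevelBudget(level, 10u << 20, 10)));
  }
  return 0;
}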
@@ -1887,7 +1887,7 @@ Compaction* VersionSet::PickCompaction() {
   if (c == NULL && (current_->file_to_compact_ != NULL)) {
     level = current_->file_to_compact_level_;
     c = new Compaction(level, MaxFileSizeForLevel(level),
                        MaxGrandParentOverlapBytes(level), NumberLevels(), true);
     c->inputs_[0].push_back(current_->file_to_compact_);
   }
@@ -2085,8 +2085,8 @@ Compaction::Compaction(int level, uint64_t target_file_size,
 }

 Compaction::~Compaction() {
   delete[] level_ptrs_;
   delete edit_;
   if (input_version_ != NULL) {
     input_version_->Unref();
   }


@@ -296,7 +296,7 @@ uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
   uint64_t l = crc ^ 0xffffffffu;

   // Align n to (1 << m) byte boundary
 #define ALIGN(n, m) ((n + ((1 << m) - 1)) & ~((1 << m) - 1))

 #define STEP1 do {                              \
     int c = (l & 0xff) ^ *p++;                  \
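The ALIGN macro above rounds n up to the next multiple of (1 << m). A tiny
standalone check (not part of the diff):

#include <cstdio>

#define ALIGN(n, m) ((n + ((1 << m) - 1)) & ~((1 << m) - 1))

int main() {
  std::printf("%d\n", ALIGN(13, 2));  // 16: rounded up to a multiple of 4
  std::printf("%d\n", ALIGN(16, 2));  // 16: already aligned, unchanged
  std::printf("%d\n", ALIGN(1, 3));   // 8: rounded up to a multiple of 8
  return 0;
}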


@@ -153,7 +153,7 @@ class HdfsReadableFile: virtual public SequentialFile, virtual public RandomAccessFile {
       size = pFileInfo->mSize;
       hdfsFreeFileInfo(pFileInfo, 1);
     } else {
-      throw new leveldb::HdfsFatalException("fileSize on unknown file " +
-                                            filename_);
+      throw leveldb::HdfsFatalException("fileSize on unknown file " +
+                                        filename_);
     }
     return size;
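The change above drops the new: C++ exceptions are normally thrown by value
and caught by reference, whereas throw new X(...) throws a pointer that every
handler would have to delete. A standalone sketch with a hypothetical
exception type:

#include <cstdio>
#include <stdexcept>
#include <string>

struct FatalException : std::runtime_error {
  explicit FatalException(const std::string& what)
      : std::runtime_error(what) {}
};

int main() {
  try {
    throw FatalException("fileSize on unknown file");  // thrown by value
  } catch (const FatalException& e) {                  // caught by const reference
    std::printf("caught: %s\n", e.what());
  }
  return 0;
}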


@@ -127,7 +127,7 @@ Options::Dump(
   Log(log," Options.no_block_cache: %d",
       no_block_cache);
   Log(log," Options.table_cache_numshardbits: %d",
       table_cache_numshardbits);
   Log(log," Options.delete_obsolete_files_period_micros: %ld",
       delete_obsolete_files_period_micros);
   Log(log," Options.max_background_compactions: %d",