Misc cleanup on performance branch

Summary:

Some trivial cleanup:

* Add more comments.
* Fix compiler warnings about uninitialized variables (a sketch of the pattern follows the list).
* Other minor fixes.
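
All of the warning fixes follow one pattern: a local variable is passed by address to a decoding helper, the compiler cannot prove that every path writes it before it is read, so the local gets an explicit initializer. A minimal sketch of the idea, using hypothetical names rather than code from this commit:

#include <cstdint>
#include <cstdio>

// Hypothetical decoder, for illustration only: on success it writes *out and
// returns the position just past what it consumed; on failure it returns
// nullptr and leaves *out untouched, which is the case the warning is about.
static const char* DecodeLength(const char* p, const char* limit, uint32_t* out) {
  if (p >= limit) return nullptr;
  *out = static_cast<uint32_t>(static_cast<unsigned char>(*p));
  return p + 1;
}

int main() {
  const char buf[] = {42};
  uint32_t len = 0;  // explicit initializer: the compiler cannot always prove
                     // that every path through DecodeLength assigns len
  const char* next = DecodeLength(buf, buf + sizeof(buf), &len);
  if (next != nullptr) {
    std::printf("decoded length: %u\n", len);
  }
  return 0;
}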

Test Plan:

make check
Kai Liu 2014-01-17 12:22:39 -08:00
parent 8079dd5d24
commit ef602f6275
5 changed files with 19 additions and 19 deletions

View File

@@ -47,7 +47,6 @@ fi
# ln -s `git rev-parse --show-toplevel`/build_tools/format-diff.sh $PRE_COMMIT_SCRIPT_PATH
# fi
# fi
set -e
uncommitted_code=`git diff HEAD`
@@ -55,7 +54,6 @@ uncommitted_code=`git diff HEAD`
# If there's no uncommitted changes, we assume user are doing post-commit
# format check, in which case we'll check the modified lines from latest commit.
# Otherwise, we'll check format of the uncommitted code only.
format_last_commit=0
if [ -z "$uncommitted_code" ]
then
# Check the format of last commit

View File

@@ -232,7 +232,7 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
// sequence number since the Seek() call above should have skipped
// all entries with overly large sequence numbers.
const char* entry = iter->key();
uint32_t key_length;
uint32_t key_length = 0;
const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
if (comparator_.comparator.user_comparator()->Compare(
Slice(key_ptr, key_length - 8), key.user_key()) == 0) {
@@ -337,7 +337,7 @@ void MemTable::Update(SequenceNumber seq,
// sequence number since the Seek() call above should have skipped
// all entries with overly large sequence numbers.
const char* entry = iter->key();
uint32_t key_length;
uint32_t key_length = 0;
const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
if (comparator_.comparator.user_comparator()->Compare(
Slice(key_ptr, key_length - 8), lkey.user_key()) == 0) {
@@ -401,7 +401,7 @@ bool MemTable::UpdateCallback(SequenceNumber seq,
// sequence number since the Seek() call above should have skipped
// all entries with overly large sequence numbers.
const char* entry = iter->key();
uint32_t key_length;
uint32_t key_length = 0;
const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
if (comparator_.comparator.user_comparator()->Compare(
Slice(key_ptr, key_length - 8), lkey.user_key()) == 0) {
@@ -466,7 +466,7 @@ size_t MemTable::CountSuccessiveMergeEntries(const LookupKey& key) {
for (; iter->Valid(); iter->Next()) {
const char* entry = iter->key();
uint32_t key_length;
uint32_t key_length = 0;
const char* iter_key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
if (!comparator_.comparator.user_comparator()->Compare(
Slice(iter_key_ptr, key_length - 8), key.user_key()) == 0) {
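
For context on the decode in these three hunks: each memtable entry begins with a varint32 length of the internal key, and the internal key's last 8 bytes hold the sequence number and value type, which is why the user key is Slice(key_ptr, key_length - 8). A self-contained sketch of that step, assuming this layout and using a toy one-byte varint decoder in place of GetVarint32Ptr:

#include <cstdint>
#include <cstdio>
#include <string>

// Toy one-byte varint decoder standing in for GetVarint32Ptr (good enough for
// lengths below 128 in this example).
static const char* DecodeVarint32Small(const char* p, uint32_t* out) {
  *out = static_cast<unsigned char>(*p);
  return p + 1;
}

int main() {
  // Build one entry in the assumed memtable layout:
  //   varint32 internal_key_length | user_key | 8-byte (sequence << 8 | type)
  std::string user_key = "foo";
  std::string entry;
  entry.push_back(static_cast<char>(user_key.size() + 8));  // internal key length
  entry += user_key;
  entry.append(8, '\0');  // sequence/type tag, zeroed for the example

  uint32_t key_length = 0;  // initialized, as in this commit
  const char* key_ptr = DecodeVarint32Small(entry.data(), &key_length);
  // The trailing 8 bytes are metadata, so the user key is key_length - 8 bytes.
  std::printf("user key: %.*s\n", static_cast<int>(key_length - 8), key_ptr);
  return 0;
}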

View File

@@ -405,7 +405,7 @@ Status PlainTableReader::GetOffset(const Slice& target, const Slice& prefix,
uint64_t prefix_index_offset = bucket_value ^ kSubIndexMask;
const char* index_ptr = sub_index_ + prefix_index_offset;
uint32_t upper_bound;
uint32_t upper_bound = 0;
const uint32_t* base_ptr = (const uint32_t*) GetVarint32Ptr(index_ptr,
index_ptr + 4,
&upper_bound);
@@ -464,17 +464,17 @@ bool PlainTableReader::MayHavePrefix(uint32_t hash) {
Status PlainTableReader::ReadKey(const char* row_ptr, Slice* key,
size_t& bytes_read) {
const char* key_ptr;
const char* key_ptr = nullptr;
bytes_read = 0;
size_t internal_key_size;
size_t internal_key_size = 0;
if (IsFixedLength()) {
internal_key_size = GetFixedInternalKeyLength();
key_ptr = row_ptr;
} else {
uint32_t key_size;
uint32_t key_size = 0;
key_ptr = GetVarint32Ptr(row_ptr, file_data_.data() + data_end_offset_,
&key_size);
internal_key_size = (size_t) key_size;
internal_key_size = (size_t)key_size;
bytes_read = key_ptr - row_ptr;
}
if (row_ptr + internal_key_size >= file_data_.data() + data_end_offset_) {
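
The ReadKey change initializes key_ptr and internal_key_size even though both branches of the if assign them; because GetVarint32Ptr only writes its output on success, some compilers can still report a possible use of an uninitialized value. A stripped-down sketch of the same two-branch shape, with hypothetical names and a toy one-byte length decoder standing in for GetVarint32Ptr:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Toy length decoder, single byte only; illustration, not the real helper.
static const char* GetLen(const char* p, uint32_t* out) {
  *out = static_cast<unsigned char>(*p);
  return p + 1;
}

// Mirrors the two-branch shape of ReadKey: a key is either a fixed number of
// bytes, or prefixed by its encoded length.
static void ReadKeySketch(const char* row_ptr, bool fixed, size_t fixed_size,
                          const char** key, size_t* key_size, size_t* bytes_read) {
  const char* key_ptr = nullptr;  // initialized up front, as in the commit
  size_t internal_key_size = 0;
  *bytes_read = 0;
  if (fixed) {
    internal_key_size = fixed_size;
    key_ptr = row_ptr;
  } else {
    uint32_t key_size32 = 0;
    key_ptr = GetLen(row_ptr, &key_size32);
    internal_key_size = static_cast<size_t>(key_size32);
    *bytes_read = static_cast<size_t>(key_ptr - row_ptr);
  }
  *key = key_ptr;
  *key_size = internal_key_size;
}

int main() {
  const char row[] = {3, 'f', 'o', 'o'};
  const char* key = nullptr;
  size_t key_size = 0;
  size_t bytes_read = 0;
  ReadKeySketch(row, /*fixed=*/false, 0, &key, &key_size, &bytes_read);
  std::printf("key: %.*s, length prefix bytes: %zu\n",
              static_cast<int>(key_size), key, bytes_read);
  return 0;
}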

View File

@@ -41,7 +41,7 @@ TEST(Coding, Fixed64) {
const char* p = s.data();
for (int power = 0; power <= 63; power++) {
uint64_t v = static_cast<uint64_t>(1) << power;
uint64_t actual;
uint64_t actual = 0;
actual = DecodeFixed64(p);
ASSERT_EQ(v-1, actual);
p += sizeof(uint64_t);
@@ -90,7 +90,7 @@ TEST(Coding, Varint32) {
const char* limit = p + s.size();
for (uint32_t i = 0; i < (32 * 32); i++) {
uint32_t expected = (i / 32) << (i % 32);
uint32_t actual;
uint32_t actual = 0;
const char* start = p;
p = GetVarint32Ptr(p, limit, &actual);
ASSERT_TRUE(p != nullptr);
@@ -125,7 +125,7 @@ TEST(Coding, Varint64) {
const char* limit = p + s.size();
for (unsigned int i = 0; i < values.size(); i++) {
ASSERT_TRUE(p < limit);
uint64_t actual;
uint64_t actual = 0;
const char* start = p;
p = GetVarint64Ptr(p, limit, &actual);
ASSERT_TRUE(p != nullptr);
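
For reference, the coding utilities exercised by these tests round-trip like this. The sketch assumes the companion encoders PutFixed64 and PutVarint32 from the same util/coding.h header and an include path inside the RocksDB tree:

#include <cassert>
#include <cstdint>
#include <string>
#include "util/coding.h"

int main() {
  std::string buf;
  rocksdb::PutFixed64(&buf, 12345);   // 8 fixed-width bytes
  rocksdb::PutVarint32(&buf, 300);    // 2 bytes as a varint

  uint64_t fixed = 0;                 // initialized, matching this commit
  uint32_t var = 0;
  fixed = rocksdb::DecodeFixed64(buf.data());
  const char* p = rocksdb::GetVarint32Ptr(buf.data() + 8,
                                          buf.data() + buf.size(), &var);
  assert(p != nullptr);
  assert(fixed == 12345 && var == 300);
  return 0;
}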

View File

@@ -23,16 +23,16 @@ class DynamicBloom {
explicit DynamicBloom(uint32_t total_bits, uint32_t num_probes = 6);
// Assuming single threaded access to Add
void Add(const Slice& key) { AddHash(hash_func_(key)); }
// Assuming single threaded access to this function.
void Add(const Slice& key);
// Assuming single threaded access to Add
// Assuming single threaded access to this function.
void AddHash(uint32_t hash);
// Multithreaded access to MayContain is OK
// Multithreaded access to this function is OK
bool MayContain(const Slice& key);
// Multithreaded access to MayContain is OK
// Multithreaded access to this function is OK
bool MayContainHash(uint32_t hash);
private:
@@ -42,6 +42,8 @@ class DynamicBloom {
std::unique_ptr<unsigned char[]> data_;
};
inline void DynamicBloom::Add(const Slice& key) { AddHash(hash_func_(key)); }
inline bool DynamicBloom::MayContain(const Slice& key) {
return (MayContainHash(hash_func_(key)));
}
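
The reworded comments pin down the threading contract: Add and AddHash assume a single writer, while MayContain and MayContainHash may be called from many threads. A usage sketch under that contract, with the header path and namespace assumed from the RocksDB tree and the construction parameters taken from the declaration above:

#include <cassert>
#include "rocksdb/slice.h"
#include "util/dynamic_bloom.h"

int main() {
  // Roughly one million bits, 6 probes (the default in the declaration above).
  rocksdb::DynamicBloom bloom(1024 * 1024, 6);

  // Writes: single-threaded, per the updated comments.
  bloom.Add(rocksdb::Slice("key1"));

  // Reads: safe from multiple threads once the writer is done.
  assert(bloom.MayContain(rocksdb::Slice("key1")));  // never a false negative
  // MayContain can return true for absent keys (false positives), so a true
  // result only means "possibly present".
  return 0;
}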