// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once
#include <stddef.h>
#include <stdint.h>
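
// ROCKSDB_MALLOC_USABLE_SIZE is expected to be defined by the build on
// platforms that provide malloc_usable_size() (noted as non-portable but
// available on Linux when this accounting was introduced); it enables
// Block::usable_size() below to report allocator-level memory usage.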
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
#include <malloc.h>
#endif
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "db/dbformat.h"
#include "table/block_prefix_index.h"
#include "table/block_hash_index.h"
#include "table/internal_iterator.h"
#include "format.h"
namespace rocksdb {
struct BlockContents;
class Comparator;
class BlockIter;
class BlockHashIndex;
class BlockPrefixIndex;

class Block {
 public:
  // Initialize the block with the specified contents.
  explicit Block(BlockContents&& contents);

  ~Block() = default;

  size_t size() const { return size_; }
  const char* data() const { return data_; }
  bool cachable() const { return contents_.cachable; }
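
  // Memory actually consumed by this block's buffer. When
  // ROCKSDB_MALLOC_USABLE_SIZE is defined this asks the allocator via
  // malloc_usable_size(), which can exceed size(): per the change that
  // introduced this accounting, a ~4.5KB block may occupy ~8KB of allocator
  // memory. Charging this value (rather than size()) keeps a block cache
  // configured for N bytes close to N bytes of real memory.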
  size_t usable_size() const {
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    if (contents_.allocation.get() != nullptr) {
      return malloc_usable_size(contents_.allocation.get());
    }
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return size_;
  }
  uint32_t NumRestarts() const;
  CompressionType compression_type() const {
    return contents_.compression_type;
  }

  // If hash index lookup is enabled and `use_hash_index` is true, this block
  // will do a hash lookup for the key prefix.
  //
  // NOTE: for the hash based lookup, if a key prefix doesn't match any key,
  // the iterator will simply be set as "invalid", rather than returning
  // the key that is just past the target key.
  //
  // If iter is null, return a new Iterator.
  // If iter is not null, update it in place and return it as Iterator*.
  //
  // If total_order_seek is true, hash_index_ and prefix_index_ are ignored.
  // This option only applies to index blocks. For data blocks, hash_index_
  // and prefix_index_ are null, so this option does not matter.
  InternalIterator* NewIterator(const Comparator* comparator,
                                BlockIter* iter = nullptr,
                                bool total_order_seek = true);
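  //
  // Illustrative usage (a sketch, not an excerpt from the codebase; `cmp`
  // stands for whatever Comparator the block's keys were written with, and
  // `contents` for a BlockContents read from an SST file):
  //
  //   Block block(std::move(contents));
  //   std::unique_ptr<InternalIterator> it(block.NewIterator(cmp));
  //   for (it->SeekToFirst(); it->Valid(); it->Next()) {
  //     // it->key() / it->value() are valid here
  //   }
  //
  //   // Or reuse a caller-owned BlockIter to avoid an allocation:
  //   BlockIter iter;
  //   block.NewIterator(cmp, &iter);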

  // Takes ownership of the passed index object (stored in the unique_ptr
  // members below).
  void SetBlockHashIndex(BlockHashIndex* hash_index);
  void SetBlockPrefixIndex(BlockPrefixIndex* prefix_index);

  // Report an approximation of how much memory has been used.
  size_t ApproximateMemoryUsage() const;

 private:
  BlockContents contents_;
  const char* data_;         // contents_.data.data()
  size_t size_;              // contents_.data.size()
  uint32_t restart_offset_;  // Offset in data_ of restart array
  std::unique_ptr<BlockHashIndex> hash_index_;
  std::unique_ptr<BlockPrefixIndex> prefix_index_;

  // No copying allowed
  Block(const Block&);
  void operator=(const Block&);
};
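
// An InternalIterator over the entries of a single Block, handed out by
// Block::NewIterator() above. A default-constructed BlockIter is populated
// via Initialize() (intended to be called only once, per the assert there);
// passing a BlockIter* to NewIterator() lets the caller supply the iterator
// object instead of having a new one allocated.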
class BlockIter : public InternalIterator {
 public:
  BlockIter()
      : comparator_(nullptr),
        data_(nullptr),
        restarts_(0),
        num_restarts_(0),
        current_(0),
        restart_index_(0),
        status_(Status::OK()),
        hash_index_(nullptr),
        prefix_index_(nullptr) {}

  BlockIter(const Comparator* comparator, const char* data, uint32_t restarts,
            uint32_t num_restarts, BlockHashIndex* hash_index,
            BlockPrefixIndex* prefix_index)
      : BlockIter() {
    Initialize(comparator, data, restarts, num_restarts,
               hash_index, prefix_index);
  }

  void Initialize(const Comparator* comparator, const char* data,
                  uint32_t restarts, uint32_t num_restarts,
                  BlockHashIndex* hash_index, BlockPrefixIndex* prefix_index) {
    assert(data_ == nullptr);  // Ensure it is called only once
    assert(num_restarts > 0);  // Ensure the param is valid

    comparator_ = comparator;
    data_ = data;
    restarts_ = restarts;
    num_restarts_ = num_restarts;
    current_ = restarts_;
    restart_index_ = num_restarts_;
    hash_index_ = hash_index;
    prefix_index_ = prefix_index;
  }

  void SetStatus(Status s) {
    status_ = s;
  }

  virtual bool Valid() const override { return current_ < restarts_; }
  virtual Status status() const override { return status_; }
  virtual Slice key() const override {
    assert(Valid());
    return key_.GetKey();
  }
  virtual Slice value() const override {
    assert(Valid());
    return value_;
  }

  virtual void Next() override;
  virtual void Prev() override;
  virtual void Seek(const Slice& target) override;
  virtual void SeekToFirst() override;
  virtual void SeekToLast() override;

 private:
  const Comparator* comparator_;
  const char* data_;       // underlying block contents
  uint32_t restarts_;      // Offset of restart array (list of fixed32)
  uint32_t num_restarts_;  // Number of uint32_t entries in restart array

  // current_ is offset in data_ of current entry.  >= restarts_ if !Valid
  uint32_t current_;
  uint32_t restart_index_;  // Index of restart block in which current_ falls
  IterKey key_;
  Slice value_;
  Status status_;
  BlockHashIndex* hash_index_;
  BlockPrefixIndex* prefix_index_;

  inline int Compare(const Slice& a, const Slice& b) const {
    return comparator_->Compare(a, b);
  }

  // Return the offset in data_ just past the end of the current entry.
  inline uint32_t NextEntryOffset() const {
    // NOTE: We don't support files bigger than 2GB
    return static_cast<uint32_t>((value_.data() + value_.size()) - data_);
  }
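
  // The restart array is a list of fixed32 offsets into data_, beginning at
  // restarts_; each restart point marks an entry whose key is stored without
  // prefix compression, so seeks can binary-search the restart points and
  // then scan forward with ParseNextKey().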
  uint32_t GetRestartPoint(uint32_t index) {
    assert(index < num_restarts_);
    return DecodeFixed32(data_ + restarts_ + index * sizeof(uint32_t));
  }

  void SeekToRestartPoint(uint32_t index) {
    key_.Clear();
    restart_index_ = index;
    // current_ will be fixed by ParseNextKey();

    // ParseNextKey() starts at the end of value_, so set value_ accordingly
    uint32_t offset = GetRestartPoint(index);
    value_ = Slice(data_ + offset, 0);
  }

  void CorruptionError();

  bool ParseNextKey();
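
  // Seek helpers (descriptive summary only; see block.cc for the actual
  // behavior): BinarySeek() binary-searches the restart array for the target,
  // CompareBlockKey() compares the target against the key at a given restart
  // point, BinaryBlockIndexSeek() searches a candidate list of restart blocks
  // supplied by an index, and HashSeek()/PrefixSeek() consult hash_index_ /
  // prefix_index_ to produce those candidates.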
  bool BinarySeek(const Slice& target, uint32_t left, uint32_t right,
                  uint32_t* index);

  int CompareBlockKey(uint32_t block_index, const Slice& target);

  bool BinaryBlockIndexSeek(const Slice& target, uint32_t* block_ids,
                            uint32_t left, uint32_t right,
                            uint32_t* index);

  bool HashSeek(const Slice& target, uint32_t* index);

  bool PrefixSeek(const Slice& target, uint32_t* index);
};
} // namespace rocksdb