// Copyright (c) 2013, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// The representation of a DBImpl consists of a set of Versions.  The
// newest version is called "current".  Older versions may be kept
// around to provide a consistent view to live iterators.
//
// Each Version keeps track of a set of Table files per level.  The
// entire set of versions is maintained in a VersionSet.
//
// Version,VersionSet are thread-compatible, but require external
// synchronization on all accesses.

#pragma once
#include <atomic>
#include <deque>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include "db/dbformat.h"
#include "db/version_edit.h"
#include "port/port.h"
#include "db/table_cache.h"

namespace rocksdb {

namespace log { class Writer; }

class Compaction;
class Iterator;
class MemTable;
class TableCache;
class Version;
class VersionSet;
class MergeContext;

// Return the smallest index i such that files[i]->largest >= key.
// Return files.size() if there is no such file.
// REQUIRES: "files" contains a sorted list of non-overlapping files.
extern int FindFile(const InternalKeyComparator& icmp,
                    const std::vector<FileMetaData*>& files,
                    const Slice& key);

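// Illustrative sketch (not part of this header's API; "icmp", "ucmp",
// "internal_key", and "user_key" are placeholders for the caller's own
// state): FindFile is the binary search used to locate the single candidate
// file for a key within one sorted, non-overlapping level:
//
//   int index = FindFile(icmp, files, internal_key);
//   if (index < static_cast<int>(files.size()) &&
//       ucmp->Compare(user_key, files[index]->smallest.user_key()) >= 0) {
//     // files[index] is the only file in this level that may contain key
//   }
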
// Returns true iff some file in "files" overlaps the user key range
// [*smallest,*largest].
// smallest==nullptr represents a key smaller than all keys in the DB.
// largest==nullptr represents a key larger than all keys in the DB.
// REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges
//           in sorted order.
extern bool SomeFileOverlapsRange(
    const InternalKeyComparator& icmp,
    bool disjoint_sorted_files,
    const std::vector<FileMetaData*>& files,
    const Slice* smallest_user_key,
    const Slice* largest_user_key);

class Version {
 public:
  // Append to *iters a sequence of iterators that will
  // yield the contents of this Version when merged together.
  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  void AddIterators(const ReadOptions&, const EnvOptions& soptions,
                    std::vector<Iterator*>* iters);

  // Lookup the value for key.  If found, store it in *val and
  // return OK.  Else return a non-OK status.  Fills *stats.
  // Uses *merge_context to store merge_operator operations encountered along
  // the way, so that they can be applied later.
  // REQUIRES: lock is not held
  struct GetStats {
    FileMetaData* seek_file;
    int seek_file_level;
  };
  void Get(const ReadOptions&, const LookupKey& key, std::string* val,
           Status* status, MergeContext* merge_context,
           GetStats* stats, const Options& db_option,
           bool* value_found = nullptr);

  // Adds "stats" into the current state.  Returns true if a new
  // compaction may need to be triggered, false otherwise.
  // REQUIRES: lock is held
  bool UpdateStats(const GetStats& stats);

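  // Illustrative sketch (assumption, not the actual DBImpl code; "current",
  // "lkey", and "MaybeScheduleCompaction" are placeholders): a read fills
  // GetStats, and the caller feeds the stats back so a seek-triggered
  // compaction can be scheduled when one file is probed too often:
  //
  //   Version::GetStats stats;
  //   current->Get(options, lkey, &value, &s, &merge_context, &stats,
  //                db_options);
  //   if (current->UpdateStats(stats)) {    // REQUIRES: lock is held
  //     MaybeScheduleCompaction();
  //   }
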
  // Reference count management (so Versions do not disappear out from
  // under live iterators)
  void Ref();
  void Unref();

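  // Illustrative sketch (assumption): since Version requires external
  // synchronization, a caller pins the current Version under the DB mutex,
  // reads from it without the mutex, and unpins it when done:
  //
  //   mutex_.Lock();
  //   Version* v = versions_->current();
  //   v->Ref();                  // keep this Version alive
  //   mutex_.Unlock();
  //   ... read from v ...
  //   mutex_.Lock();
  //   v->Unref();                // may delete v if this was the last ref
  //   mutex_.Unlock();
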
  void GetOverlappingInputs(
      int level,
      const InternalKey* begin,         // nullptr means before all keys
      const InternalKey* end,           // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index = -1,              // index of overlap file
      int* file_index = nullptr);       // return index of overlap file

  void GetOverlappingInputsBinarySearch(
      int level,
      const Slice& begin,         // nullptr means before all keys
      const Slice& end,           // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index,             // index of overlap file
      int* file_index);           // return index of overlap file

  void ExtendOverlappingInputs(
      int level,
      const Slice& begin,         // nullptr means before all keys
      const Slice& end,           // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      unsigned int index);        // start extending from this index

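  // Illustrative sketch (assumption): when picking compaction inputs, the
  // caller collects every file in the parent level that overlaps the key
  // range of the files already chosen, e.g.:
  //
  //   std::vector<FileMetaData*> parent_inputs;
  //   current->GetOverlappingInputs(level + 1, &smallest, &largest,
  //                                 &parent_inputs);
  //   // parent_inputs now holds the level+1 files whose key ranges
  //   // intersect [smallest, largest] and must join the compaction.
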
  // Returns true iff some file in the specified level overlaps
  // some part of [*smallest_user_key,*largest_user_key].
  // smallest_user_key==nullptr represents a key smaller than all keys in the DB.
  // largest_user_key==nullptr represents a key larger than all keys in the DB.
  bool OverlapInLevel(int level,
                      const Slice* smallest_user_key,
                      const Slice* largest_user_key);

  // Returns true iff the first or last file in inputs contains
  // an overlapping user key to the file "just outside" of it (i.e.
  // just after the last file, or just before the first file)
  // REQUIRES: "*inputs" is a sorted list of non-overlapping files
  bool HasOverlappingUserKey(const std::vector<FileMetaData*>* inputs,
                             int level);

  // Return the level at which we should place a new memtable compaction
  // result that covers the range [smallest_user_key,largest_user_key].
  int PickLevelForMemTableOutput(const Slice& smallest_user_key,
                                 const Slice& largest_user_key);

  int NumFiles(int level) const { return files_[level].size(); }

  // Return a human readable string that describes this version's contents.
  std::string DebugString(bool hex = false) const;

  // Returns the version number of this version
  uint64_t GetVersionNumber() {
    return version_number_;
  }

 private:
  friend class Compaction;
  friend class VersionSet;
  friend class DBImpl;
  friend struct ColumnFamilyData;

  class LevelFileNumIterator;
  Iterator* NewConcatenatingIterator(const ReadOptions&,
                                     const EnvOptions& soptions,
                                     int level) const;
  bool PrefixMayMatch(const ReadOptions& options, const EnvOptions& soptions,
                      const Slice& internal_prefix, Iterator* level_iter) const;

  VersionSet* vset_;            // VersionSet to which this Version belongs
  Version* next_;               // Next version in linked list
  Version* prev_;               // Previous version in linked list
  int refs_;                    // Number of live refs to this version

  // List of files per level, files in each level are arranged
  // in increasing order of keys
  std::vector<FileMetaData*>* files_;

  // A list for the same set of files that are stored in files_,
  // but files in each level are now sorted based on file
  // size. The file with the largest size is at the front.
  // This vector stores the index of the file from files_.
  std::vector< std::vector<int> > files_by_size_;

  // An index into files_by_size_ that specifies the first
  // file that is not yet compacted
  std::vector<int> next_file_to_compact_by_size_;

  // Only the first few entries of files_by_size_ are sorted.
  // There is no need to sort all the files because it is likely
  // that on a running system, we need to look at only the first
  // few largest files because a new version is created every few
  // seconds/minutes (because of concurrent compactions).
  static const int number_of_files_to_sort_ = 50;

  // Next file to compact based on seek stats.
  FileMetaData* file_to_compact_;
  int file_to_compact_level_;

  // Level that should be compacted next and its compaction score.
  // Score < 1 means compaction is not strictly needed.  These fields
  // are initialized by Finalize().
  // The most critical level to be compacted is listed first.
  // These are used to pick the best compaction level.
  std::vector<double> compaction_score_;
  std::vector<int> compaction_level_;
  double max_compaction_score_;     // max score in l1 to ln-1
  int max_compaction_score_level_;  // level on which max score occurs

  // A version number that uniquely represents this version. This is
  // used for debugging and logging purposes only.
  uint64_t version_number_;

  explicit Version(VersionSet* vset, uint64_t version_number = 0);

  ~Version();

  // re-initializes the index that is used to offset into files_by_size_
  // to find the next compaction candidate file.
  void ResetNextCompactionIndex(int level) {
    next_file_to_compact_by_size_[level] = 0;
  }

  // No copying allowed
  Version(const Version&);
  void operator=(const Version&);
};

// column family metadata
struct ColumnFamilyData {
  std::string name;
  Version dummy_versions;  // Head of circular doubly-linked list of versions.
  Version* current;        // == dummy_versions.prev_
  ColumnFamilyOptions options;
  int refs;

  void Ref() {
    ++refs;
  }

  void Unref() {
    assert(refs > 0);
    if (refs == 1) {
      delete this;
    } else {
      --refs;
    }
  }

  ColumnFamilyData(const std::string& name,
                   VersionSet* vset,
                   const ColumnFamilyOptions& options)
      : name(name),
        dummy_versions(vset),
        current(nullptr),
        options(options),
        refs(1) {}
  ~ColumnFamilyData() {}
};

class VersionSet {
 public:
  VersionSet(const std::string& dbname,
             const Options* options,
             const EnvOptions& storage_options,
             TableCache* table_cache,
             const InternalKeyComparator*);
  ~VersionSet();

  // Apply *edit to the current version to form a new descriptor that
  // is both saved to persistent state and installed as the new
  // current version.  Will release *mu while actually writing to the file.
  // REQUIRES: *mu is held on entry.
  // REQUIRES: no other thread concurrently calls LogAndApply()
  Status LogAndApply(ColumnFamilyData* column_family_data,
                     VersionEdit* edit,
                     port::Mutex* mu,
                     bool new_descriptor_log = false);

  Status LogAndApply(VersionEdit* edit,
                     port::Mutex* mu,
                     bool new_descriptor_log = false) {
    return LogAndApply(
        column_family_data_.find(0)->second, edit, mu, new_descriptor_log);
  }

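  // Illustrative sketch (assumption; "mutex_", "out_number", "out_size", and
  // "old_number" are the caller's own state): a flush or compaction records
  // its result in a VersionEdit and installs it while holding the DB mutex,
  // which LogAndApply releases temporarily while writing the MANIFEST:
  //
  //   VersionEdit edit;
  //   edit.AddFile(level + 1, out_number, out_size, smallest, largest);
  //   edit.DeleteFile(level, old_number);
  //   mutex_.AssertHeld();
  //   Status s = versions_->LogAndApply(&edit, &mutex_);
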
  // Recover the last saved descriptor from persistent storage.
  Status Recover();

  // Try to reduce the number of levels. This call is valid only when
  // at most one of the levels, from the new max level up to the old
  // max level, contains files.
  // For example, a db currently has 7 levels [0-6], and a call
  // to reduce to 5 [0-4] can only be executed when only one level
  // among [4-6] contains files.
  Status ReduceNumberOfLevels(int new_levels, port::Mutex* mu);

  // Return the current version.
  Version* current() const {
    // TODO this only works for default column family now
    return column_family_data_.find(0)->second->current;
  }

  // Return the current manifest file number
  uint64_t ManifestFileNumber() const { return manifest_file_number_; }

  // Allocate and return a new file number
  uint64_t NewFileNumber() { return next_file_number_++; }

  // Arrange to reuse "file_number" unless a newer file number has
  // already been allocated.
  // REQUIRES: "file_number" was returned by a call to NewFileNumber().
  void ReuseFileNumber(uint64_t file_number) {
    if (next_file_number_ == file_number + 1) {
      next_file_number_ = file_number;
    }
  }

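  // Illustrative sketch (assumption): a caller that allocates a file number
  // but then fails to create the file can hand the number back so it is not
  // wasted:
  //
  //   uint64_t number = versions_->NewFileNumber();
  //   Status s = env_->NewWritableFile(TableFileName(dbname_, number),
  //                                    &file, soptions);
  //   if (!s.ok()) {
  //     versions_->ReuseFileNumber(number);  // nothing was written
  //   }
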
  // Return the number of Table files at the specified level.
  int NumLevelFiles(int level) const;

  // Return the combined file size of all files at the specified level.
  int64_t NumLevelBytes(int level) const;

  // Return the last sequence number.
  uint64_t LastSequence() const {
    return last_sequence_.load(std::memory_order_acquire);
  }

  // Set the last sequence number to s.
  void SetLastSequence(uint64_t s) {
    assert(s >= last_sequence_);
    last_sequence_.store(s, std::memory_order_release);
  }

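  // Note (assumption about the intended memory-ordering contract): the
  // release store in SetLastSequence() pairs with the acquire load in
  // LastSequence(), so a reader that observes a sequence number also
  // observes every write that was published before that number was set.
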
  // Mark the specified file number as used.
  void MarkFileNumberUsed(uint64_t number);

  // Return the current log file number.
  uint64_t LogNumber() const { return log_number_; }

  // Return the log file number for the log file that is currently
  // being compacted, or zero if there is no such log file.
  uint64_t PrevLogNumber() const { return prev_log_number_; }

  int NumberLevels() const { return num_levels_; }

  // Pick level and inputs for a new compaction.
  // Returns nullptr if there is no compaction to be done.
  // Otherwise returns a pointer to a heap-allocated object that
  // describes the compaction.  Caller should delete the result.
  Compaction* PickCompaction();

  // Return a compaction object for compacting the range [begin,end] in
  // the specified level.  Returns nullptr if there is nothing in that
  // level that overlaps the specified range.  Caller should delete
  // the result.
  Compaction* CompactRange(
      int level,
      const InternalKey* begin,
      const InternalKey* end);

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t MaxNextLevelOverlappingBytes();

  // Create an iterator that reads over the compaction inputs for "*c".
  // The caller should delete the iterator when no longer needed.
  Iterator* MakeInputIterator(Compaction* c);

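  // Illustrative sketch (assumption; "status" is the caller's own state): a
  // background compaction thread picks a compaction, merges its inputs
  // through a single iterator, and releases everything when done:
  //
  //   Compaction* c = versions_->PickCompaction();
  //   if (c != nullptr) {
  //     Iterator* input = versions_->MakeInputIterator(c);
  //     ... merge input into new output files ...
  //     delete input;
  //     versions_->ReleaseCompactionFiles(c, status);
  //     delete c;
  //   }
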
  // Returns true iff some level needs a compaction because it has
  // exceeded its target size.
  bool NeedsSizeCompaction() const {
    // In the universal compaction case, this check does not evaluate the
    // real compaction condition; it only checks the number-of-files
    // threshold. We will not miss any compaction opportunity, but some
    // scheduled compactions may end up with nothing to do.
    // TODO: improve this function to be accurate for universal compactions.
    // TODO this only works for default column family now
    Version* version = column_family_data_.find(0)->second->current;
    int num_levels_to_check =
        (options_->compaction_style != kCompactionStyleUniversal) ?
            NumberLevels() - 1 : 1;
    for (int i = 0; i < num_levels_to_check; i++) {
      if (version->compaction_score_[i] >= 1) {
        return true;
      }
    }
    return false;
  }

  // Returns true iff some level needs a compaction.
  bool NeedsCompaction() const {
    // TODO this only works for default column family now
    Version* version = column_family_data_.find(0)->second->current;
    return ((version->file_to_compact_ != nullptr) || NeedsSizeCompaction());
  }

  // Returns the maximum compaction score for levels 1 to max
  double MaxCompactionScore() const {
    // TODO this only works for default column family now
    Version* version = column_family_data_.find(0)->second->current;
    return version->max_compaction_score_;
  }

  // See field declaration
  int MaxCompactionScoreLevel() const {
    // TODO this only works for default column family now
    Version* version = column_family_data_.find(0)->second->current;
    return version->max_compaction_score_level_;
  }

  // Add all files listed in any live version to *live_list.
  void AddLiveFiles(std::vector<uint64_t>* live_list);

  // Add all files listed in the current version to *live.
  void AddLiveFilesCurrentVersion(std::set<uint64_t>* live);

  // Return the approximate offset in the database of the data for
  // "key" as of version "v".
  uint64_t ApproximateOffsetOf(Version* v, const InternalKey& key);

  // Return a human-readable short (single-line) summary of the number
  // of files per level.  Uses *scratch as backing store.
  struct LevelSummaryStorage {
    char buffer[100];
  };
  struct FileSummaryStorage {
    char buffer[1000];
  };
  const char* LevelSummary(LevelSummaryStorage* scratch) const;

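  // Illustrative sketch (assumption): the scratch struct is a caller-owned
  // buffer that the returned C string points into, so logging a summary does
  // not allocate:
  //
  //   VersionSet::LevelSummaryStorage tmp;
  //   Log(options_.info_log, "Level summary: %s",
  //       versions_->LevelSummary(&tmp));
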
  // printf contents (for debugging)
  Status DumpManifest(Options& options, std::string& manifestFileName,
                      bool verbose, bool hex = false);

  // Return a human-readable short (single-line) summary of the data size
  // of files per level.  Uses *scratch as backing store.
  const char* LevelDataSizeSummary(LevelSummaryStorage* scratch) const;

  // Return a human-readable short (single-line) summary of files
  // in a specified level.  Uses *scratch as backing store.
  const char* LevelFileSummary(Version* version,
                               FileSummaryStorage* scratch,
                               int level) const;

  // Return the size of the current manifest file
  const uint64_t ManifestFileSize() { return manifest_file_size_; }

  // For the specified level, pick a compaction.
  // Returns nullptr if there is no compaction to be done.
  // If level is 0 and there is already a compaction on that level, this
  // function will return nullptr.
  Compaction* PickCompactionBySize(int level, double score);

  // Pick files to compact in Universal mode
  Compaction* PickCompactionUniversal(int level, double score);

  // Pick Universal compaction to limit read amplification
  Compaction* PickCompactionUniversalReadAmp(int level, double score,
                                             unsigned int ratio,
                                             unsigned int num_files);

  // Pick Universal compaction to limit space amplification.
  Compaction* PickCompactionUniversalSizeAmp(int level, double score);

  // Free up the files that participated in a compaction
  void ReleaseCompactionFiles(Compaction* c, Status status);

  // verify that the files that we started with for a compaction
  // still exist in the current version and in the same original level.
  // This ensures that a concurrent compaction did not erroneously
  // pick the same files to compact.
  bool VerifyCompactionFileConsistency(Compaction* c);

  // used to sort files by size
  typedef struct fsize {
    int index;
    FileMetaData* file;
  } Fsize;

  // Sort all files for this version based on their file size and
  // record results in files_by_size_. The largest files are listed first.
  void UpdateFilesBySize(Version* v);

  // Get the max file size in a given level.
  uint64_t MaxFileSizeForLevel(int level);

  double MaxBytesForLevel(int level);

  Status GetMetadataForFile(
      uint64_t number, int* filelevel, FileMetaData* metadata);

  void GetLiveFilesMetaData(
      std::vector<LiveFileMetaData>* metadata);

  void GetObsoleteFiles(std::vector<FileMetaData*>* files);

  ColumnFamilyData* CreateColumnFamily(const ColumnFamilyOptions& options,
                                       VersionEdit* edit);

  void DropColumnFamily(VersionEdit* edit);

  std::unordered_map<std::string, uint32_t> column_families_;
  std::unordered_map<uint32_t, ColumnFamilyData*> column_family_data_;
  uint32_t max_column_family_;

 private:
  class Builder;
  struct ManifestWriter;

  friend class Compaction;
  friend class Version;

  void Init(int num_levels);

  // Finalize the compaction scores of *v, given the amount of data
  // currently being compacted at each level (see SizeBeingCompacted()).
  void Finalize(Version* v, std::vector<uint64_t>&);

  void GetRange(const std::vector<FileMetaData*>& inputs,
                InternalKey* smallest,
                InternalKey* largest);

  void GetRange2(const std::vector<FileMetaData*>& inputs1,
                 const std::vector<FileMetaData*>& inputs2,
                 InternalKey* smallest,
                 InternalKey* largest);

  void ExpandWhileOverlapping(Compaction* c);

  void SetupOtherInputs(Compaction* c);

  // Save current contents to *log
  Status WriteSnapshot(log::Writer* log);

  void AppendVersion(ColumnFamilyData* column_family_data, Version* v);

  bool ManifestContains(const std::string& record) const;

  uint64_t ExpandedCompactionByteSizeLimit(int level);

  uint64_t MaxGrandParentOverlapBytes(int level);

  Env* const env_;
  const std::string dbname_;
  const Options* const options_;
  TableCache* const table_cache_;
  const InternalKeyComparator icmp_;
  uint64_t next_file_number_;
  uint64_t manifest_file_number_;
  std::atomic<uint64_t> last_sequence_;
  uint64_t log_number_;
  uint64_t prev_log_number_;  // 0 or backing store for memtable being compacted

  int num_levels_;

  // Opened lazily
  unique_ptr<log::Writer> descriptor_log_;

  // Per-level key at which the next compaction at that level should start.
  // Either an empty string, or a valid InternalKey.
  std::string* compact_pointer_;

  // Per-level target file size.
  uint64_t* max_file_size_;

  // Per-level max bytes
  uint64_t* level_max_bytes_;

  // record all the ongoing compactions for all levels
  std::vector<std::set<Compaction*> > compactions_in_progress_;

  // generates an increasing version number for every new version
  uint64_t current_version_number_;

  // Queue of writers to the manifest file
  std::deque<ManifestWriter*> manifest_writers_;

  // size of manifest file
  uint64_t manifest_file_size_;

  std::vector<FileMetaData*> obsolete_files_;

  // storage options for all reads and writes except compactions
  const EnvOptions& storage_options_;

  // storage options used for compactions. This is a copy of
  // storage_options_ but with readaheads set to readahead_compactions_.
  const EnvOptions storage_options_compactions_;

  // No copying allowed
  VersionSet(const VersionSet&);
  void operator=(const VersionSet&);

  // Return the total amount of data that is undergoing
  // compactions per level
  void SizeBeingCompacted(std::vector<uint64_t>&);

  // Returns true if any one of the parent files are being compacted
  bool ParentRangeInCompaction(const InternalKey* smallest,
                               const InternalKey* largest, int level,
                               int* index);

  // Returns true if any one of the specified files are being compacted
  bool FilesInCompaction(std::vector<FileMetaData*>& files);

  void LogAndApplyHelper(Builder* b, Version* v,
                         VersionEdit* edit, port::Mutex* mu);
};

// A Compaction encapsulates information about a compaction.
class Compaction {
 public:
  ~Compaction();

  // Return the level that is being compacted.  Inputs from "level"
  // will be merged.
  int level() const { return level_; }

  // Outputs will go to this level
  int output_level() const { return out_level_; }

  // Return the object that holds the edits to the descriptor done
  // by this compaction.
  VersionEdit* edit() { return edit_; }

  // "which" must be either 0 or 1
  int num_input_files(int which) const { return inputs_[which].size(); }

  // Return the ith input file at "level()+which" ("which" must be 0 or 1).
  FileMetaData* input(int which, int i) const { return inputs_[which][i]; }

  // Maximum size of files to build during this compaction.
  uint64_t MaxOutputFileSize() const { return max_output_file_size_; }

  // Whether compression will be enabled for compaction outputs
  bool enable_compression() const { return enable_compression_; }

  // Is this a trivial compaction that can be implemented by just
  // moving a single input file to the next level (no merging or splitting)
  bool IsTrivialMove() const;

  // Add all inputs to this compaction as delete operations to *edit.
  void AddInputDeletions(VersionEdit* edit);

  // Returns true if the information we have available guarantees that
  // the compaction is producing data in "level+1" for which no data exists
  // in levels greater than "level+1".
  bool IsBaseLevelForKey(const Slice& user_key);

  // Returns true iff we should stop building the current output
  // before processing "internal_key".
  bool ShouldStopBefore(const Slice& internal_key);

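  // Illustrative sketch (assumption, simplified; "input", "is_deletion", and
  // "user_key" are the caller's own state): the two predicates above are
  // consulted once per input key during the compaction loop:
  //
  //   for (input->SeekToFirst(); input->Valid(); input->Next()) {
  //     Slice key = input->key();
  //     if (compaction->ShouldStopBefore(key)) {
  //       // finish the current output file before writing "key"
  //     }
  //     if (is_deletion && compaction->IsBaseLevelForKey(user_key)) {
  //       // the deletion marker can be dropped: no older data exists below
  //     }
  //   }
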
  // Release the input version for the compaction, once the compaction
  // is successful.
  void ReleaseInputs();

  void Summary(char* output, int len);

  // Return the score that was used to pick this compaction run.
  double score() const { return score_; }

  // Is this compaction creating a file in the bottom most level?
  bool BottomMostLevel() { return bottommost_level_; }

  // Does this compaction include all sst files?
  bool IsFullCompaction() { return is_full_compaction_; }

 private:
  friend class Version;
  friend class VersionSet;

  Compaction(int level,
             int out_level,
             uint64_t target_file_size,
             uint64_t max_grandparent_overlap_bytes,
             int number_levels,
             Version* input_version,
             bool seek_compaction = false,
             bool enable_compression = true);

  int level_;
  int out_level_;  // levels to which output files are stored
  uint64_t max_output_file_size_;
  uint64_t maxGrandParentOverlapBytes_;
  Version* input_version_;
  VersionEdit* edit_;
  int number_levels_;

  bool seek_compaction_;
  bool enable_compression_;

  // Each compaction reads inputs from "level_" and "level_+1"
  std::vector<FileMetaData*> inputs_[2];      // The two sets of inputs

  // State used to check for number of overlapping grandparent files
  // (parent == level_ + 1, grandparent == level_ + 2)
  std::vector<FileMetaData*> grandparents_;
  size_t grandparent_index_;   // Index in grandparent_starts_
  bool seen_key_;              // Some output key has been seen
  uint64_t overlapped_bytes_;  // Bytes of overlap between current output
                               // and grandparent files
  int base_index_;    // index of the file in files_[level_]
  int parent_index_;  // index of some file with same range in files_[level_+1]
  double score_;      // score that was used to pick this compaction.

  // Is this compaction creating a file in the bottom most level?
  bool bottommost_level_;
  // Does this compaction include all sst files?
  bool is_full_compaction_;

  // level_ptrs_ holds indices into input_version_->levels_: our state
  // is that we are positioned at one of the file ranges for each
  // higher level than the ones involved in this compaction (i.e. for
  // all L >= level_ + 2).
  std::vector<size_t> level_ptrs_;

  // mark (or clear) all files that are being compacted
  void MarkFilesBeingCompacted(bool);

  // Initialize whether this compaction is producing files at the
  // bottommost level
  void SetupBottomMostLevel(bool isManual);

  // In case of compaction error, reset the nextIndex that is used
  // to pick up the next file to be compacted from files_by_size_
  void ResetNextCompactionIndex();
};

}  // namespace rocksdb