// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// The representation of a DBImpl consists of a set of Versions. The
// newest version is called "current". Older versions may be kept
// around to provide a consistent view to live iterators.
//
// Each Version keeps track of a set of Table files per level. The
// entire set of versions is maintained in a VersionSet.
//
// Version, VersionSet are thread-compatible, but require external
// synchronization on all accesses.
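//
// Illustrative layout (a sketch, not actual member names):
//
//   VersionSet
//    +-- Version "current"       file lists per level: L0, L1, ..., Ln
//    +-- Version (older)         kept alive while a live iterator uses it
//    +-- Version (older still)   freed once its last reference is dropped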

#pragma once

#include <atomic>
#include <deque>
#include <limits>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "db/dbformat.h"
#include "db/version_builder.h"
#include "db/version_edit.h"
#include "port/port.h"
#include "db/table_cache.h"
#include "db/compaction.h"
#include "db/compaction_picker.h"
#include "db/column_family.h"
#include "db/log_reader.h"
#include "db/file_indexer.h"
#include "db/write_controller.h"
#include "rocksdb/env.h"
#include "util/instrumented_mutex.h"

namespace rocksdb {

namespace log {
class Writer;
}

class Compaction;
class InternalIterator;
class LogBuffer;
class LookupKey;
class MemTable;
class Version;
class VersionSet;
class WriteBufferManager;
class MergeContext;
class ColumnFamilySet;
class TableCache;
class MergeIteratorBuilder;

// Return the smallest index i such that file_level.files[i]->largest >= key.
// Return file_level.num_files if there is no such file.
// REQUIRES: "file_level.files" contains a sorted list of
// non-overlapping files.
extern int FindFile(const InternalKeyComparator& icmp,
                    const LevelFilesBrief& file_level, const Slice& key);
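// Illustrative example of the contract above (hypothetical keys): if the
// files in file_level have largest keys "b", "d" and "f", then
// FindFile(icmp, file_level, "c") returns 1 (the file ending at "d" is the
// first whose largest key is >= "c"), while FindFile(icmp, file_level, "g")
// returns file_level.num_files because no file can contain "g".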

// Returns true iff some file in "files" overlaps the user key range
// [*smallest,*largest].
// smallest==nullptr represents a key smaller than all keys in the DB.
// largest==nullptr represents a key larger than all keys in the DB.
// REQUIRES: If disjoint_sorted_files, file_level.files[]
// contains disjoint ranges in sorted order.
extern bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
                                  bool disjoint_sorted_files,
                                  const LevelFilesBrief& file_level,
                                  const Slice* smallest_user_key,
                                  const Slice* largest_user_key);
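// Illustrative example of the contract above (hypothetical user keys): with
// sorted, disjoint files covering ["a","c"] and ["e","g"],
//   - the query range ["b","d"] overlaps the first file, so the result is
//     true;
//   - the query range ["h","i"] touches neither file, so the result is
//     false.
// Passing nullptr for smallest_user_key or largest_user_key makes the query
// range unbounded on that side.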

// Generate LevelFilesBrief from vector<FdWithKeyRange*>.
// Copies smallest_key and largest_key data into sequential memory.
// arena: Arena used to allocate the memory.
extern void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
                                      const std::vector<FileMetaData*>& files,
                                      Arena* arena);
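// Packing each file's metadata and key bytes into one sequential arena
// allocation improves cache locality on the read path, where Version::Get
// and FindFile scan these entries tightly.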

class VersionStorageInfo {
 public:
  VersionStorageInfo(const InternalKeyComparator* internal_comparator,
                     const Comparator* user_comparator, int num_levels,
                     CompactionStyle compaction_style,
                     VersionStorageInfo* src_vstorage);
  ~VersionStorageInfo();

  void Reserve(int level, size_t size) { files_[level].reserve(size); }

  void AddFile(int level, FileMetaData* f, Logger* info_log = nullptr);

  void SetFinalized();

  // Update num_non_empty_levels_.
  void UpdateNumNonEmptyLevels();

  void GenerateFileIndexer() {
    file_indexer_.UpdateIndex(&arena_, num_non_empty_levels_, files_);
  }

  // Update the accumulated stats from a file-meta.
  void UpdateAccumulatedStats(FileMetaData* file_meta);

  // Decrease the current stats from a to-be-deleted file-meta.
  void RemoveCurrentStats(FileMetaData* file_meta);

  void ComputeCompensatedSizes();

  // Updates internal structures that keep track of compaction scores.
  // We use compaction scores to figure out which compaction to do next.
  // REQUIRES: db_mutex held!!
  // TODO: find a better way to pass compaction_options_fifo.
  void ComputeCompactionScore(const MutableCFOptions& mutable_cf_options);
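  // Interpretation (see compaction_score_ below): a score >= 1 means the
  // level is due for compaction. For levels >= 1 the score is roughly the
  // level's total bytes divided by MaxBytesForLevel(level); e.g., with
  // hypothetical numbers, 300MB of data against a 256MB target scores ~1.17.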

  // Estimate estimated_compaction_needed_bytes_.
  void EstimateCompactionBytesNeeded(
      const MutableCFOptions& mutable_cf_options);

  // This computes files_marked_for_compaction_ and is called by
  // ComputeCompactionScore().
  void ComputeFilesMarkedForCompaction();
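  // Marked files are a lightweight, low-priority form of manual compaction:
  // they are picked up only when no automatic compaction needs the
  // background threads, so they do not interfere with keeping the LSM tree
  // clean.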

  // Generate level_files_brief_ from files_.
  void GenerateLevelFilesBrief();

  // Sort all files for this version based on their file size and
  // record results in files_by_compaction_pri_. The largest files are listed
  // first.
  void UpdateFilesByCompactionPri(const MutableCFOptions& mutable_cf_options);

  void GenerateLevel0NonOverlapping();
  bool level0_non_overlapping() const {
    return level0_non_overlapping_;
  }
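  // Tracking whether L0 files overlap lets the compaction picker consider a
  // trivial move out of L0 (re-linking files to the next level instead of
  // rewriting them), since non-overlapping input files are a precondition
  // for it.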

  int MaxInputLevel() const;

  // Return the level number that has the idx'th highest score.
  int CompactionScoreLevel(int idx) const { return compaction_level_[idx]; }

  // Return the idx'th highest score.
  double CompactionScore(int idx) const { return compaction_score_[idx]; }

  void GetOverlappingInputs(
      int level, const InternalKey* begin,  // nullptr means before all keys
      const InternalKey* end,               // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index = -1,        // index of overlap file
      int* file_index = nullptr,  // return index of overlap file
      bool expand_range = true)   // if set, returns files which overlap the
      const;                      // range and overlap each other. If false,
                                  // then just files intersecting the range
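  // Illustrative example (hypothetical L0 files, which may overlap each
  // other): with files covering ["a","d"], ["c","f"] and ["h","k"], a query
  // for ["b","b"] with expand_range=true returns the first two files, since
  // ["c","f"] overlaps ["a","d"] and the range is expanded to cover it; with
  // expand_range=false only ["a","d"] is returned.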

  void GetOverlappingInputsBinarySearch(
      int level,
      const Slice& begin,  // nullptr means before all keys
      const Slice& end,    // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      int hint_index,          // index of overlap file
      int* file_index) const;  // return index of overlap file

  void ExtendOverlappingInputs(
      int level,
      const Slice& begin,  // nullptr means before all keys
      const Slice& end,    // nullptr means after all keys
      std::vector<FileMetaData*>* inputs,
      unsigned int index) const;  // start extending from this index

  // Returns true iff some file in the specified level overlaps
  // some part of [*smallest_user_key,*largest_user_key].
  // smallest_user_key==NULL represents a key smaller than all keys in the DB.
  // largest_user_key==NULL represents a key larger than all keys in the DB.
  bool OverlapInLevel(int level, const Slice* smallest_user_key,
                      const Slice* largest_user_key);

  // Returns true iff the first or last file in "*inputs" shares an
  // overlapping user key with the file "just outside" of it (i.e.
  // just after the last file, or just before the first file).
  // REQUIRES: "*inputs" is a sorted list of non-overlapping files.
  bool HasOverlappingUserKey(const std::vector<FileMetaData*>* inputs,
                             int level);

  int num_levels() const { return num_levels_; }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  int num_non_empty_levels() const {
    assert(finalized_);
    return num_non_empty_levels_;
  }

  // REQUIRES: This version has been finalized
  // (i.e. CalculateBaseBytes() has been called).
  // This may or may not return the number of L0 files; the value is kept
  // backward compatible with universal compaction's historical behavior.
  int l0_delay_trigger_count() const { return l0_delay_trigger_count_; }

  void set_l0_delay_trigger_count(int v) { l0_delay_trigger_count_ = v; }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  int NumLevelFiles(int level) const {
    assert(finalized_);
    return static_cast<int>(files_[level].size());
  }

  // Return the combined file size of all files at the specified level.
  uint64_t NumLevelBytes(int level) const;

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  const std::vector<FileMetaData*>& LevelFiles(int level) const {
    return files_[level];
  }

  const rocksdb::LevelFilesBrief& LevelFilesBrief(int level) const {
    assert(level < static_cast<int>(level_files_brief_.size()));
    return level_files_brief_[level];
  }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  const std::vector<int>& FilesByCompactionPri(int level) const {
    assert(finalized_);
    return files_by_compaction_pri_[level];
  }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  // REQUIRES: DB mutex held during access
  const autovector<std::pair<int, FileMetaData*>>& FilesMarkedForCompaction()
      const {
    assert(finalized_);
    return files_marked_for_compaction_;
  }

  int base_level() const { return base_level_; }

  // REQUIRES: lock is held
  // Set the index that is used to offset into files_by_compaction_pri_ to
  // find the next compaction candidate file.
  void SetNextCompactionIndex(int level, int index) {
    next_file_to_compact_by_size_[level] = index;
  }

  // REQUIRES: lock is held
  int NextCompactionIndex(int level) const {
    return next_file_to_compact_by_size_[level];
  }

  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  const FileIndexer& file_indexer() const {
    assert(finalized_);
    return file_indexer_;
  }

  // Only the first few entries of files_by_compaction_pri_ are sorted.
  // There is no need to sort all the files because it is likely
  // that on a running system, we need to look at only the first
  // few largest files because a new version is created every few
  // seconds/minutes (because of concurrent compactions).
  static const size_t kNumberFilesToSort = 50;

  // Return a human-readable short (single-line) summary of the number
  // of files per level. Uses *scratch as backing store.
  struct LevelSummaryStorage {
    char buffer[1000];
  };
  struct FileSummaryStorage {
    char buffer[3000];
  };
  const char* LevelSummary(LevelSummaryStorage* scratch) const;

  // Return a human-readable short (single-line) summary of files
  // in a specified level. Uses *scratch as backing store.
  const char* LevelFileSummary(FileSummaryStorage* scratch, int level) const;

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t MaxNextLevelOverlappingBytes();

  // Return a human-readable string that describes this version's contents.
  std::string DebugString(bool hex = false) const;

  uint64_t GetAverageValueSize() const {
    if (accumulated_num_non_deletions_ == 0) {
      return 0;
    }
    assert(accumulated_raw_key_size_ + accumulated_raw_value_size_ > 0);
    assert(accumulated_file_size_ > 0);
    return accumulated_raw_value_size_ / accumulated_num_non_deletions_ *
           accumulated_file_size_ /
           (accumulated_raw_key_size_ + accumulated_raw_value_size_);
  }
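  // The estimate above can be read as
  //   (raw value bytes / #non-deletion entries)  -- average raw value size
  //   * (file bytes / raw key+value bytes)       -- on-disk size ratio
  // i.e. the average uncompressed value size scaled by how much the data
  // shrinks (or grows) once written out to table files.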

  uint64_t GetEstimatedActiveKeys() const;

  double GetEstimatedCompressionRatioAtLevel(int level) const;

  // Re-initializes the index that is used to offset into
  // files_by_compaction_pri_ to find the next compaction candidate file.
  void ResetNextCompactionIndex(int level) {
    next_file_to_compact_by_size_[level] = 0;
  }

  const InternalKeyComparator* InternalComparator() {
    return internal_comparator_;
  }

  // Returns maximum total bytes of data on a given level.
  uint64_t MaxBytesForLevel(int level) const;
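  // Illustrative example (hypothetical options, classic non-dynamic
  // leveling): with max_bytes_for_level_base = 256MB and
  // max_bytes_for_level_multiplier = 10, the targets are roughly L1 = 256MB,
  // L2 = 2.5GB, L3 = 25GB, and so on; with dynamic level bytes the targets
  // are derived from the actual size of the last level instead.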

  // Must be called after any change to MutableCFOptions.
  void CalculateBaseBytes(const ImmutableCFOptions& ioptions,
                          const MutableCFOptions& options);

  // Returns an estimate of the amount of live data in bytes.
  uint64_t EstimateLiveDataSize() const;

  uint64_t estimated_compaction_needed_bytes() const {
    return estimated_compaction_needed_bytes_;
  }

  void TEST_set_estimated_compaction_needed_bytes(uint64_t v) {
    estimated_compaction_needed_bytes_ = v;
  }

 private:
  const InternalKeyComparator* internal_comparator_;
  const Comparator* user_comparator_;
  int num_levels_;            // Number of levels
  int num_non_empty_levels_;  // Number of levels. Any level larger than it
                              // is guaranteed to be empty.

  // Per-level max bytes
  std::vector<uint64_t> level_max_bytes_;

  // Brief metadata of the files in each level.
  autovector<rocksdb::LevelFilesBrief> level_files_brief_;
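
  // file_indexer_ records, for each file, which files in the next level its
  // key range overlaps, so a lookup can narrow the binary search at level
  // L+1 to the few files that overlap the file it just probed at level L.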
  FileIndexer file_indexer_;
  Arena arena_;  // Used to allocate space for file_levels_

  CompactionStyle compaction_style_;

  // List of files per level, files in each level are arranged
  // in increasing order of keys
  std::vector<FileMetaData*>* files_;

  // Level that L0 data should be compacted to. All levels < base_level_
  // should be empty. -1 if it is not level-based compaction, in which case
  // the field is not applicable.
  int base_level_;

  // A list for the same set of files that are stored in files_,
  // but files in each level are now sorted based on file
  // size. The file with the largest size is at the front.
  // This vector stores the index of the file from files_.
  std::vector<std::vector<int>> files_by_compaction_pri_;
|
2014-10-27 23:49:46 +01:00
|
|
|
|
Allowing L0 -> L1 trivial move on sorted data
Summary:
This diff updates the logic of how we do trivial move, now trivial move can run on any number of files in input level as long as they are not overlapping
The conditions for trivial move have been updated
Introduced conditions:
- Trivial move cannot happen if we have a compaction filter (except if the compaction is not manual)
- Input level files cannot be overlapping
Removed conditions:
- Trivial move only run when the compaction is not manual
- Input level should can contain only 1 file
More context on what tests failed because of Trivial move
```
DBTest.CompactionsGenerateMultipleFiles
This test is expecting compaction on a file in L0 to generate multiple files in L1, this test will fail with trivial move because we end up with one file in L1
```
```
DBTest.NoSpaceCompactRange
This test expect compaction to fail when we force environment to report running out of space, of course this is not valid in trivial move situation
because trivial move does not need any extra space, and did not check for that
```
```
DBTest.DropWrites
Similar to DBTest.NoSpaceCompactRange
```
```
DBTest.DeleteObsoleteFilesPendingOutputs
This test expects a file in L2 to be deleted after it is moved to L3; this does not hold with trivial move because, although the file was moved, it is now used by L3
```
```
CuckooTableDBTest.CompactionIntoMultipleFiles
Same as DBTest.CompactionsGenerateMultipleFiles
```
This diff is based on work by @sdong https://reviews.facebook.net/D34149
Test Plan: make -j64 check
Reviewers: rven, sdong, igor
Reviewed By: igor
Subscribers: yhchiang, ott, march, dhruba, sdong
Differential Revision: https://reviews.facebook.net/D34797
2015-06-05 01:51:25 +02:00
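The eligibility rules above can be summarized in a small hedged sketch; the struct and function are illustrative only and elide the other checks (such as grandparent overlap) that the real code performs:
```
struct TrivialMoveInput {     // illustrative, not a RocksDB type
  bool is_manual_compaction;
  bool has_compaction_filter;
  bool input_files_overlap;   // do input-level files overlap each other?
};

bool CanTrivialMove(const TrivialMoveInput& in) {
  // A manual compaction with a compaction filter must feed every key
  // through the filter, so the data cannot simply be moved.
  if (in.is_manual_compaction && in.has_compaction_filter) {
    return false;
  }
  // Files moved down as-is must not overlap one another.
  return !in.input_files_overlap;
}
```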
|
|
|
// If true, files in L0 have keys with non-overlapping ranges
|
|
|
|
bool level0_non_overlapping_;
|
|
|
|
|
2015-09-22 02:16:31 +02:00
|
|
|
// An index into files_by_compaction_pri_ that specifies the first
|
2014-10-27 23:49:46 +01:00
|
|
|
// file that is not yet compacted
|
|
|
|
std::vector<int> next_file_to_compact_by_size_;
|
|
|
|
|
2015-09-22 02:16:31 +02:00
|
|
|
// Only the first few entries of files_by_compaction_pri_ are sorted.
|
2014-10-27 23:49:46 +01:00
|
|
|
// There is no need to sort all the files because it is likely
|
|
|
|
// that on a running system, we need to look at only the first
|
|
|
|
// few largest files because a new version is created every few
|
|
|
|
// seconds/minutes (because of concurrent compactions).
|
|
|
|
static const size_t number_of_files_to_sort_ = 50;
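Since only the first number_of_files_to_sort_ entries need to be in order, a partial sort is enough. A minimal sketch, reusing the illustrative FileMetaSketch from the earlier snippet:
```
#include <algorithm>
#include <vector>

// Order only the first `num_to_sort` entries of `index` by size,
// largest first; the tail is left in unspecified order.
void PartialSortBySize(std::vector<int>* index,
                       const std::vector<FileMetaSketch>& files,
                       size_t num_to_sort) {
  num_to_sort = std::min(num_to_sort, index->size());
  std::partial_sort(
      index->begin(), index->begin() + num_to_sort, index->end(),
      [&files](int a, int b) { return files[a].file_size > files[b].file_size; });
}
```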
|
|
|
|
|
Add experimental API MarkForCompaction()
Summary:
Some Mongo+Rocks datasets in Parse's environment are not doing compactions very frequently. During the quiet period (with no IO), we'd like to schedule compactions so that our reads become faster. Also, aggressively compacting during quiet periods helps when write bursts happen. In addition, we also want to compact files that contain deleted key ranges (like old oplog keys).
All of this is currently not possible with CompactRange() because it's single-threaded and blocks all other compactions from happening. Running CompactRange() risks blocking writes because we generate too many Level 0 files before the compaction is over. Stopping writes is very dangerous because writers hold transaction locks. We tried running manual compaction once on Mongo+Rocks and everything fell apart.
MarkForCompaction() solves all of those problems. It is a very lightweight manual compaction. It is lower priority than automatic compactions, which means it shouldn't interfere with the background processes keeping the LSM tree clean. However, if no automatic compactions need to run (or we have extra background threads available), we will start compacting files that are marked for compaction.
Test Plan: added a new unit test
Reviewers: yhchiang, rven, MarkCallaghan, sdong
Reviewed By: sdong
Subscribers: yoshinorim, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37083
2015-04-18 01:44:45 +02:00
|
|
|
// This vector contains the list of files marked for compaction and not
|
|
|
|
// currently being compacted. It is protected by the DB mutex and is calculated in
|
|
|
|
// ComputeCompactionScore()
|
|
|
|
autovector<std::pair<int, FileMetaData*>> files_marked_for_compaction_;
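A hedged sketch of how such a list can be rebuilt during a scoring pass; the stand-in struct below is illustrative and the real code tracks more state:
```
#include <utility>
#include <vector>

struct MarkedFileSketch {            // illustrative stand-in for FileMetaData
  bool marked_for_compaction = false;
  bool being_compacted = false;
};

// Collect (level, file) pairs that are marked but not already running.
std::vector<std::pair<int, MarkedFileSketch*>> CollectMarkedFiles(
    const std::vector<std::vector<MarkedFileSketch*>>& files_by_level) {
  std::vector<std::pair<int, MarkedFileSketch*>> marked;
  for (size_t level = 0; level < files_by_level.size(); ++level) {
    for (MarkedFileSketch* f : files_by_level[level]) {
      if (f->marked_for_compaction && !f->being_compacted) {
        marked.emplace_back(static_cast<int>(level), f);
      }
    }
  }
  return marked;
}
```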
|
|
|
|
|
2014-10-27 23:49:46 +01:00
|
|
|
// Level that should be compacted next and its compaction score.
|
|
|
|
// Score < 1 means compaction is not strictly needed. These fields
|
|
|
|
// are initialized by Finalize().
|
|
|
|
// The most critical level to be compacted is listed first
|
|
|
|
// These are used to pick the best compaction level
|
|
|
|
std::vector<double> compaction_score_;
|
|
|
|
std::vector<int> compaction_level_;
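A minimal sketch of keeping the most critical level first: pair each level with its score and sort descending by score (an illustrative helper, not the actual Finalize() logic):
```
#include <algorithm>
#include <utility>
#include <vector>

// Reorder the parallel score/level vectors so the highest score comes first.
void SortLevelsByScore(std::vector<double>* scores, std::vector<int>* levels) {
  std::vector<std::pair<double, int>> paired;
  for (size_t i = 0; i < scores->size(); ++i) {
    paired.emplace_back((*scores)[i], (*levels)[i]);
  }
  std::sort(paired.begin(), paired.end(),
            [](const std::pair<double, int>& a,
               const std::pair<double, int>& b) { return a.first > b.first; });
  for (size_t i = 0; i < paired.size(); ++i) {
    (*scores)[i] = paired[i].first;
    (*levels)[i] = paired[i].second;
  }
}
```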
|
2015-03-30 23:04:21 +02:00
|
|
|
int l0_delay_trigger_count_ = 0;  // Count used to trigger slowdown and stop
|
|
|
|
// based on the number of L0 files.
|
2014-10-27 23:49:46 +01:00
|
|
|
|
|
|
|
// the following are the sampled temporary stats.
|
|
|
|
// the current accumulated size of sampled files.
|
|
|
|
uint64_t accumulated_file_size_;
|
|
|
|
// the current accumulated size of all raw keys based on the sampled files.
|
|
|
|
uint64_t accumulated_raw_key_size_;
|
|
|
|
// the current accumulated size of all raw values based on the sampled files.
|
|
|
|
uint64_t accumulated_raw_value_size_;
|
|
|
|
// total number of non-deletion entries
|
|
|
|
uint64_t accumulated_num_non_deletions_;
|
|
|
|
// total number of deletion entries
|
|
|
|
uint64_t accumulated_num_deletions_;
|
2015-12-07 19:51:08 +01:00
|
|
|
// current number of non-deletion entries
|
|
|
|
uint64_t current_num_non_deletions_;
|
|
|
|
// current number of deletion entries
|
|
|
|
uint64_t current_num_deletions_;
|
|
|
|
// current number of file samples
|
|
|
|
uint64_t current_num_samples_;
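Because only a subset of files is sampled, version-wide figures have to be extrapolated from these counters. A hedged sketch of the idea (the exact formula RocksDB uses may differ):
```
#include <cstdint>

// Scale the sampled active-key count (non-deletions minus deletions)
// up to the whole version by the ratio of total files to sampled files.
uint64_t EstimateActiveKeys(uint64_t num_non_deletions,
                            uint64_t num_deletions,
                            uint64_t num_samples,
                            uint64_t num_files) {
  if (num_samples == 0) {
    return 0;  // nothing sampled yet; no basis for an estimate
  }
  uint64_t sampled_active =
      num_non_deletions > num_deletions ? num_non_deletions - num_deletions : 0;
  return sampled_active * num_files / num_samples;
}
```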
|
2015-08-14 06:42:20 +02:00
|
|
|
// Estimated bytes needed to be compacted until all levels' size is down to
|
|
|
|
// target sizes.
|
|
|
|
uint64_t estimated_compaction_needed_bytes_;
|
2014-10-27 23:49:46 +01:00
|
|
|
|
|
|
|
bool finalized_;
|
|
|
|
|
|
|
|
friend class Version;
|
|
|
|
friend class VersionSet;
|
|
|
|
// No copying allowed
|
|
|
|
VersionStorageInfo(const VersionStorageInfo&) = delete;
|
|
|
|
void operator=(const VersionStorageInfo&) = delete;
|
|
|
|
};
|
|
|
|
|
|
|
|
class Version {
|
|
|
|
public:
|
|
|
|
// Append to *iters a sequence of iterators that will
|
|
|
|
// yield the contents of this Version when merged together.
|
|
|
|
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
|
|
|
|
void AddIterators(const ReadOptions&, const EnvOptions& soptions,
|
|
|
|
MergeIteratorBuilder* merger_iter_builder);
|
|
|
|
|
|
|
|
// Lookup the value for key. If found, store it in *val and
|
|
|
|
// return OK. Else return a non-OK status.
|
Use SST files for Transaction conflict detection
Summary:
Currently, transactions can fail even if there is no actual write conflict. This is due to relying on only the memtables to check for write-conflicts. Users have to tune memtable settings to try to avoid this, but it's hard to figure out exactly how to tune these settings.
With this diff, TransactionDB will use both memtables and SST files to determine if there are any write conflicts. This relies on the fact that BlockBasedTable stores sequence numbers for all writes that happen after any open snapshot. Also, D50295 is needed to prevent SingleDelete from making writes disappear (the TODOs in this test code will be fixed once the other diff is approved and merged).
Note that Optimistic transactions will still rely on tuning memtable settings as we do not want to read from SST while on the write thread. Also, memtable settings can still be used to reduce how often TransactionDB needs to read SST files.
Test Plan: unit tests, db bench
Reviewers: rven, yhchiang, kradhakrishnan, IslamAbdelRahman, sdong
Reviewed By: sdong
Subscribers: dhruba, leveldb, yoshinorim
Differential Revision: https://reviews.facebook.net/D50475
2015-10-16 01:37:15 +02:00
|
|
|
// Uses *operands to store merge_operator operations to apply later.
|
|
|
|
//
|
|
|
|
// If the ReadOptions.read_tier is set to do a read-only fetch, then
|
|
|
|
// *value_found will be set to false if it cannot be determined whether
|
|
|
|
// this value exists without doing IO.
|
|
|
|
//
|
|
|
|
// If the key is Deleted, *status will be set to NotFound and
|
|
|
|
// *key_exists will be set to true.
|
|
|
|
// If no key was found, *status will be set to NotFound and
|
|
|
|
// *key_exists will be set to false.
|
|
|
|
// If seq is non-null, *seq will be set to the sequence number found
|
|
|
|
// for the key if a key was found.
|
|
|
|
//
|
2014-10-27 23:49:46 +01:00
|
|
|
// REQUIRES: lock is not held
|
|
|
|
void Get(const ReadOptions&, const LookupKey& key, std::string* val,
|
|
|
|
Status* status, MergeContext* merge_context,
|
Use SST files for Transaction conflict detection
Summary:
Currently, transactions can fail even if there is no actual write conflict. This is due to relying on only the memtables to check for write-conflicts. Users have to tune memtable settings to try to avoid this, but it's hard to figure out exactly how to tune these settings.
With this diff, TransactionDB will use both memtables and SST files to determine if there are any write conflicts. This relies on the fact that BlockBasedTable stores sequence numbers for all writes that happen after any open snapshot. Also, D50295 is needed to prevent SingleDelete from making writes disappear (the TODOs in this test code will be fixed once the other diff is approved and merged).
Note that Optimistic transactions will still rely on tuning memtable settings as we do not want to read from SST while on the write thread. Also, memtable settings can still be used to reduce how often TransactionDB needs to read SST files.
Test Plan: unit tests, db bench
Reviewers: rven, yhchiang, kradhakrishnan, IslamAbdelRahman, sdong
Reviewed By: sdong
Subscribers: dhruba, leveldb, yoshinorim
Differential Revision: https://reviews.facebook.net/D50475
2015-10-16 01:37:15 +02:00
|
|
|
bool* value_found = nullptr, bool* key_exists = nullptr,
|
|
|
|
SequenceNumber* seq = nullptr);
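A hedged usage sketch of the contract documented above; it assumes a saved Version* v plus already-constructed read_options, lookup_key, and merge_context, and only illustrates how the out-parameters are meant to be interpreted:
```
std::string value;
Status s;
bool value_found = false;
bool key_exists = false;
SequenceNumber seq = 0;
v->Get(read_options, lookup_key, &value, &s, &merge_context,
       &value_found, &key_exists, &seq);
if (s.ok()) {
  // `value` holds the result; `seq` is the sequence number it was found at.
} else if (s.IsNotFound() && key_exists) {
  // The key's most recent entry is a deletion.
} else if (s.IsNotFound()) {
  // No entry for the key was found at all.
}
```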
|
2014-10-27 23:49:46 +01:00
|
|
|
|
2015-02-05 01:04:51 +01:00
|
|
|
// Loads some stats information from files. Call without mutex held. It needs
|
|
|
|
// to be called before applying the version to the version set.
|
2015-08-04 22:48:16 +02:00
|
|
|
void PrepareApply(const MutableCFOptions& mutable_cf_options,
|
|
|
|
bool update_stats);
|
2014-10-27 23:49:46 +01:00
|
|
|
|
|
|
|
// Reference count management (so Versions do not disappear out from
|
|
|
|
// under live iterators)
|
|
|
|
void Ref();
|
|
|
|
// Decrease reference count. Delete the object if no reference left
|
|
|
|
// and return true. Otherwise, return false.
|
|
|
|
bool Unref();
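The same delete-on-last-unref contract can be shown on a self-contained toy class (a sketch only; Version's actual bookkeeping, initial refcount, and locking differ):
```
class RefCountedSketch {
 public:
  void Ref() { ++refs_; }
  // Drop one reference; delete the object and return true on the last one.
  bool Unref() {
    if (--refs_ == 0) {
      delete this;
      return true;
    }
    return false;
  }

 private:
  ~RefCountedSketch() = default;  // heap-only: destroyed via Unref()
  int refs_ = 1;                  // assume the creator holds one reference
};
```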
|
|
|
|
|
|
|
|
// Add all files listed in the current version to *live.
|
|
|
|
void AddLiveFiles(std::vector<FileDescriptor>* live);
|
|
|
|
|
|
|
|
// Return a human readable string that describes this version's contents.
|
|
|
|
std::string DebugString(bool hex = false) const;
|
|
|
|
|
|
|
|
// Returns the version number of this version
|
|
|
|
uint64_t GetVersionNumber() const { return version_number_; }
|
|
|
|
|
2014-06-25 00:37:06 +02:00
|
|
|
// REQUIRES: lock is held
|
|
|
|
// On success, "tp" will contains the table properties of the file
|
|
|
|
// specified in "file_meta". If the file name of "file_meta" is
|
|
|
|
// known ahead, passing it via a non-null "fname" can save a
|
|
|
|
// file-name conversion.
|
|
|
|
Status GetTableProperties(std::shared_ptr<const TableProperties>* tp,
|
|
|
|
const FileMetaData* file_meta,
|
2015-10-13 23:24:45 +02:00
|
|
|
const std::string* fname = nullptr) const;
|
2014-06-25 00:37:06 +02:00
|
|
|
|
2014-02-14 01:28:21 +01:00
|
|
|
// REQUIRES: lock is held
|
|
|
|
// On success, *props will be populated with all SSTables' table properties.
|
|
|
|
// The keys of `props` are the sst file names, the values of `props` are the
|
|
|
|
// tables' properties, represented as shared_ptr.
|
|
|
|
Status GetPropertiesOfAllTables(TablePropertiesCollection* props);
|
2015-08-25 21:03:54 +02:00
|
|
|
Status GetPropertiesOfAllTables(TablePropertiesCollection* props, int level);
|
2015-10-19 19:34:55 +02:00
|
|
|
Status GetPropertiesOfTablesInRange(const Range* range, std::size_t n,
|
2015-10-13 23:24:45 +02:00
|
|
|
TablePropertiesCollection* props) const;
|
2015-08-25 21:03:54 +02:00
|
|
|
|
|
|
|
// REQUIRES: lock is held
|
|
|
|
// On success, "tp" will contains the aggregated table property amoug
|
|
|
|
// the table properties of all sst files in this version.
|
|
|
|
Status GetAggregatedTableProperties(
|
|
|
|
std::shared_ptr<const TableProperties>* tp, int level = -1);
|
|
|
|
|
2014-10-27 23:49:46 +01:00
|
|
|
uint64_t GetEstimatedActiveKeys() {
|
2014-10-31 16:48:19 +01:00
|
|
|
return storage_info_.GetEstimatedActiveKeys();
|
2014-10-27 23:49:46 +01:00
|
|
|
}
|
2014-07-28 23:50:16 +02:00
|
|
|
|
2014-08-05 20:27:34 +02:00
|
|
|
size_t GetMemoryUsageByTableReaders();
|
|
|
|
|
2014-10-28 17:59:56 +01:00
|
|
|
ColumnFamilyData* cfd() const { return cfd_; }
|
|
|
|
|
2014-10-28 18:04:38 +01:00
|
|
|
// Return the next Version in the linked list. Used for debug only
|
|
|
|
Version* TEST_Next() const {
|
|
|
|
return next_;
|
|
|
|
}
|
|
|
|
|
2014-10-31 16:48:19 +01:00
|
|
|
VersionStorageInfo* storage_info() { return &storage_info_; }
|
|
|
|
|
|
|
|
VersionSet* version_set() { return vset_; }
|
2014-10-28 18:08:41 +01:00
|
|
|
|
CompactFiles, EventListener and GetDatabaseMetaData
Summary:
This diff adds three sets of APIs to RocksDB.
= GetColumnFamilyMetaData =
* This APIs allow users to obtain the current state of a RocksDB instance on one column family.
* See GetColumnFamilyMetaData in include/rocksdb/db.h
= EventListener =
* A virtual class that allows users to implement a set of
call-back functions which will be called when specific
events of a RocksDB instance happens.
* To register EventListener, simply insert an EventListener to ColumnFamilyOptions::listeners
= CompactFiles =
* CompactFiles API inputs a set of file numbers and an output level, and RocksDB
will try to compact those files into the specified level.
= Example =
* Example code can be found in example/compact_files_example.cc, which implements
a simple external compactor using EventListener, GetColumnFamilyMetaData, and
CompactFiles API.
Test Plan:
listener_test
compactor_test
example/compact_files_example
export ROCKSDB_TESTS=CompactFiles
db_test
export ROCKSDB_TESTS=MetaData
db_test
Reviewers: ljin, igor, rven, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D24705
2014-11-07 23:45:18 +01:00
|
|
|
void GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta);
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
private:
|
2015-03-03 19:59:36 +01:00
|
|
|
Env* env_;
|
2011-03-18 23:37:00 +01:00
|
|
|
friend class VersionSet;
|
2014-10-27 23:49:46 +01:00
|
|
|
|
2014-10-31 16:48:19 +01:00
|
|
|
const InternalKeyComparator* internal_comparator() const {
|
|
|
|
return storage_info_.internal_comparator_;
|
2014-10-27 23:49:46 +01:00
|
|
|
}
|
2014-10-31 16:48:19 +01:00
|
|
|
const Comparator* user_comparator() const {
|
|
|
|
return storage_info_.user_comparator_;
|
2014-10-27 23:49:46 +01:00
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
bool PrefixMayMatch(const ReadOptions& read_options,
|
|
|
|
InternalIterator* level_iter,
|
2014-04-25 21:22:23 +02:00
|
|
|
const Slice& internal_prefix) const;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
Skip bottom-level filter block caching when hit-optimized
Summary:
When Get() or NewIterator() trigger file loads, skip caching the filter block if
(1) optimize_filters_for_hits is set and (2) the file is on the bottommost
level. Also skip checking filters under the same conditions, which means that
for a preloaded file or a file that was trivially-moved to the bottom level, its
filter block will eventually expire from the cache.
- added parameters/instance variables in various places in order to propagate the config ("skip_filters") from version_set to block_based_table_reader
- in BlockBasedTable::Rep, this optimization prevents filter from being loaded when the file is opened simply by setting filter_policy = nullptr
- in BlockBasedTable::Get/BlockBasedTable::NewIterator, this optimization prevents filter from being used (even if it was loaded already) by setting filter = nullptr
Test Plan:
updated unit test:
$ ./db_test --gtest_filter=DBTest.OptimizeFiltersForHits
will also run 'make check'
Reviewers: sdong, igor, paultuckfield, anthony, rven, kradhakrishnan, IslamAbdelRahman, yhchiang
Reviewed By: yhchiang
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D51633
2015-12-23 19:15:07 +01:00
|
|
|
// Returns true if the filter blocks in the specified level will not be
|
|
|
|
// checked during read operations. In certain cases (trivial move or preload),
|
|
|
|
// the filter block may already be cached, but we still do not access it such
|
|
|
|
// that it eventually expires from the cache.
|
2016-02-01 23:58:46 +01:00
|
|
|
bool IsFilterSkipped(int level, bool is_file_last_in_level = false);
|
Skip bottom-level filter block caching when hit-optimized
Summary:
When Get() or NewIterator() trigger file loads, skip caching the filter block if
(1) optimize_filters_for_hits is set and (2) the file is on the bottommost
level. Also skip checking filters under the same conditions, which means that
for a preloaded file or a file that was trivially-moved to the bottom level, its
filter block will eventually expire from the cache.
- added parameters/instance variables in various places in order to propagate the config ("skip_filters") from version_set to block_based_table_reader
- in BlockBasedTable::Rep, this optimization prevents filter from being loaded when the file is opened simply by setting filter_policy = nullptr
- in BlockBasedTable::Get/BlockBasedTable::NewIterator, this optimization prevents filter from being used (even if it was loaded already) by setting filter = nullptr
Test Plan:
updated unit test:
$ ./db_test --gtest_filter=DBTest.OptimizeFiltersForHits
will also run 'make check'
Reviewers: sdong, igor, paultuckfield, anthony, rven, kradhakrishnan, IslamAbdelRahman, yhchiang
Reviewed By: yhchiang
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D51633
2015-12-23 19:15:07 +01:00
|
|
|
|
2014-10-17 23:58:30 +02:00
|
|
|
// The helper function of UpdateAccumulatedStats, which may fill the missing
|
2014-06-25 00:37:06 +02:00
|
|
|
// fields of file_meta from its associated TableProperties.
|
|
|
|
// Returns true if it does initialize FileMetaData.
|
|
|
|
bool MaybeInitializeFileMetaData(FileMetaData* file_meta);
|
|
|
|
|
2014-10-17 23:58:30 +02:00
|
|
|
// Update the accumulated stats associated with the current version.
|
|
|
|
// These accumulated stats will be used in compaction.
|
2015-08-04 22:48:16 +02:00
|
|
|
void UpdateAccumulatedStats(bool update_stats);
|
2014-06-25 00:37:06 +02:00
|
|
|
|
2014-01-16 01:23:36 +01:00
|
|
|
// Sort all files for this version based on their file size and
|
2015-09-22 02:16:31 +02:00
|
|
|
// record results in files_by_compaction_pri_. The largest files are listed
|
|
|
|
// first.
|
|
|
|
void UpdateFilesByCompactionPri();
|
2014-01-16 01:23:36 +01:00
|
|
|
|
2014-02-01 00:30:27 +01:00
|
|
|
ColumnFamilyData* cfd_; // ColumnFamilyData to which this Version belongs
|
2014-10-27 23:49:46 +01:00
|
|
|
Logger* info_log_;
|
|
|
|
Statistics* db_statistics_;
|
2014-04-17 23:07:05 +02:00
|
|
|
TableCache* table_cache_;
|
|
|
|
const MergeOperator* merge_operator_;
|
create compressed_levels_ in Version, allocate its space using arena. Make Version::Get, Version::FindFile faster
Summary:
Define CompressedFileMetaData that just contains fd, smallest_slice, largest_slice. Create compressed_levels_ in Version; the space is allocated using an arena.
This increases file metadata locality and speeds up "Get" and "FindFile".
A benchmark with in-memory tmpfs shows a 4% improvement under "random read" and a 2% improvement under "read while writing".
benchmark command:
./db_bench --db=/mnt/db/rocksdb --num_levels=6 --key_size=20 --prefix_size=20 --keys_per_prefix=0 --value_size=100 --block_size=4096 --cache_size=17179869184 --cache_numshardbits=6 --compression_type=none --compression_ratio=1 --min_level_to_compress=-1 --disable_seek_compaction=1 --hard_rate_limit=2 --write_buffer_size=134217728 --max_write_buffer_number=2 --level0_file_num_compaction_trigger=8 --target_file_size_base=33554432 --max_bytes_for_level_base=1073741824 --disable_wal=0 --sync=0 --disable_data_sync=1 --verify_checksum=1 --delete_obsolete_files_period_micros=314572800 --max_grandparent_overlap_factor=10 --max_background_compactions=4 --max_background_flushes=0 --level0_slowdown_writes_trigger=16 --level0_stop_writes_trigger=24 --statistics=0 --stats_per_interval=0 --stats_interval=1048576 --histogram=0 --use_plain_table=1 --open_files=-1 --mmap_read=1 --mmap_write=0 --memtablerep=prefix_hash --bloom_bits=10 --bloom_locality=1 --perf_level=0 --benchmarks=readwhilewriting,readwhilewriting,readwhilewriting --use_existing_db=1 --num=52428800 --threads=1 --writes_per_second=81920
Read Random:
From 1.8363 ms/op, improved to 1.7587 ms/op.
Read while writing:
From 2.985 ms/op, improved to 2.924 ms/op.
Test Plan:
make all check
Reviewers: ljin, haobo, yhchiang, sdong
Reviewed By: sdong
Subscribers: dhruba, igor
Differential Revision: https://reviews.facebook.net/D19419
2014-07-10 07:14:39 +02:00
|
|
|
|
2014-10-31 16:48:19 +01:00
|
|
|
VersionStorageInfo storage_info_;
|
2011-03-18 23:37:00 +01:00
|
|
|
VersionSet* vset_; // VersionSet to which this Version belongs
|
|
|
|
Version* next_; // Next version in linked list
|
2011-05-21 04:17:43 +02:00
|
|
|
Version* prev_; // Previous version in linked list
|
2011-03-18 23:37:00 +01:00
|
|
|
int refs_; // Number of live refs to this version
|
|
|
|
|
2012-10-19 23:00:53 +02:00
|
|
|
// A version number that uniquely represents this version. This is
|
|
|
|
// used for debugging and logging purposes only.
|
|
|
|
uint64_t version_number_;
|
|
|
|
|
2014-02-01 00:30:27 +01:00
|
|
|
Version(ColumnFamilyData* cfd, VersionSet* vset, uint64_t version_number = 0);
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-10-28 17:59:56 +01:00
|
|
|
~Version();
|
2012-11-01 06:01:57 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// No copying allowed
|
|
|
|
Version(const Version&);
|
|
|
|
void operator=(const Version&);
|
|
|
|
};
|
|
|
|
|
|
|
|
class VersionSet {
|
|
|
|
public:
|
2014-09-09 00:25:01 +02:00
|
|
|
VersionSet(const std::string& dbname, const DBOptions* db_options,
|
|
|
|
const EnvOptions& env_options, Cache* table_cache,
|
2016-06-21 03:01:03 +02:00
|
|
|
WriteBufferManager* write_buffer_manager,
|
|
|
|
WriteController* write_controller);
|
2011-03-18 23:37:00 +01:00
|
|
|
~VersionSet();
|
|
|
|
|
|
|
|
// Apply *edit to the current version to form a new descriptor that
|
|
|
|
// is both saved to persistent state and installed as the new
|
2011-09-01 21:08:02 +02:00
|
|
|
// current version. Will release *mu while actually writing to the file.
|
2014-02-28 23:05:11 +01:00
|
|
|
// column_family_options has to be set if the edit is a column family add
|
2011-09-01 21:08:02 +02:00
|
|
|
// REQUIRES: *mu is held on entry.
|
|
|
|
// REQUIRES: no other thread concurrently calls LogAndApply()
|
2014-10-27 23:49:46 +01:00
|
|
|
Status LogAndApply(
|
|
|
|
ColumnFamilyData* column_family_data,
|
|
|
|
const MutableCFOptions& mutable_cf_options, VersionEdit* edit,
|
2015-02-05 06:39:45 +01:00
|
|
|
InstrumentedMutex* mu, Directory* db_directory = nullptr,
|
2014-10-27 23:49:46 +01:00
|
|
|
bool new_descriptor_log = false,
|
2016-07-06 03:09:59 +02:00
|
|
|
const ColumnFamilyOptions* column_family_options = nullptr) {
|
|
|
|
autovector<VersionEdit*> edit_list;
|
|
|
|
edit_list.push_back(edit);
|
|
|
|
return LogAndApply(column_family_data, mutable_cf_options, edit_list, mu,
|
|
|
|
db_directory, new_descriptor_log, column_family_options);
|
|
|
|
}
|
|
|
|
// The batch version. If edit_list.size() > 1, caller must ensure that
|
|
|
|
// no edit in the list is a column family add or drop
|
|
|
|
Status LogAndApply(
|
|
|
|
ColumnFamilyData* column_family_data,
|
|
|
|
const MutableCFOptions& mutable_cf_options,
|
|
|
|
const autovector<VersionEdit*>& edit_list, InstrumentedMutex* mu,
|
|
|
|
Directory* db_directory = nullptr, bool new_descriptor_log = false,
|
2014-10-27 23:49:46 +01:00
|
|
|
const ColumnFamilyOptions* column_family_options = nullptr);
|
2014-01-11 00:12:34 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// Recover the last saved descriptor from persistent storage.
|
2014-04-09 18:56:17 +02:00
|
|
|
// If read_only == true, Recover() will not complain if some column families
|
|
|
|
// are not opened
|
|
|
|
Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
|
|
|
|
bool read_only = false);
|
2014-01-22 20:44:53 +01:00
|
|
|
|
|
|
|
// Reads a manifest file and returns a list of column families in
|
|
|
|
// column_families.
|
|
|
|
static Status ListColumnFamilies(std::vector<std::string>* column_families,
|
|
|
|
const std::string& dbname, Env* env);
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-04-15 22:39:26 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
2012-10-31 19:47:18 +01:00
|
|
|
// Try to reduce the number of levels. This call is valid when
|
|
|
|
// only one level between the new max level and the old
|
|
|
|
// max level contains files.
|
Make VersionSet::ReduceNumberOfLevels() static
Summary:
A lot of our code implicitly assumes number_levels to be static. ReduceNumberOfLevels() breaks that assumption. For example, after calling ReduceNumberOfLevels(), DBImpl::NumberLevels() will be different from VersionSet::NumberLevels(). This is dangerous. Thankfully, it's not in public headers and is only used from the LDB cmd tool. The LDB tool only uses it statically, i.e. it never calls it with a running DB instance. With this diff, we make it explicitly static. This way, we can assume number_levels to be immutable and not break an assumption that a lot of our code relies upon. The LDB tool can still use the method.
Also, I removed the method from a separate file since it breaks filename completion. version_se<TAB> now completes to "version_set." instead of "version_set" (without the dot). I don't see a big reason that the function should be in a different file.
Test Plan: reduce_levels_test
Reviewers: dhruba, haobo, kailiu, sdong
Reviewed By: kailiu
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15303
2014-01-24 23:57:04 +01:00
|
|
|
// The call is static, since number of levels is immutable during
|
|
|
|
// the lifetime of a RocksDB instance. It reduces number of levels
|
|
|
|
// in a DB by applying changes to manifest.
|
2012-10-31 19:47:18 +01:00
|
|
|
// For example, a db currently has 7 levels [0-6], and a call to
|
|
|
|
// reduce to 5 [0-4] can only be executed when only one level
|
|
|
|
// among [4-6] contains files.
|
Make VersionSet::ReduceNumberOfLevels() static
Summary:
A lot of our code implicitly assumes number_levels to be static. ReduceNumberOfLevels() breaks that assumption. For example, after calling ReduceNumberOfLevels(), DBImpl::NumberLevels() will be different from VersionSet::NumberLevels(). This is dangerous. Thankfully, it's not in public headers and is only used from the LDB cmd tool. The LDB tool only uses it statically, i.e. it never calls it with a running DB instance. With this diff, we make it explicitly static. This way, we can assume number_levels to be immutable and not break an assumption that a lot of our code relies upon. The LDB tool can still use the method.
Also, I removed the method from a separate file since it breaks filename completion. version_se<TAB> now completes to "version_set." instead of "version_set" (without the dot). I don't see a big reason that the function should be in a different file.
Test Plan: reduce_levels_test
Reviewers: dhruba, haobo, kailiu, sdong
Reviewed By: kailiu
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15303
2014-01-24 23:57:04 +01:00
|
|
|
static Status ReduceNumberOfLevels(const std::string& dbname,
|
|
|
|
const Options* options,
|
2014-09-09 00:25:01 +02:00
|
|
|
const EnvOptions& env_options,
|
Make VersionSet::ReduceNumberOfLevels() static
Summary:
A lot of our code implicitly assumes number_levels to be static. ReduceNumberOfLevels() breaks that assumption. For example, after calling ReduceNumberOfLevels(), DBImpl::NumberLevels() will be different from VersionSet::NumberLevels(). This is dangerous. Thankfully, it's not in public headers and is only used from the LDB cmd tool. The LDB tool only uses it statically, i.e. it never calls it with a running DB instance. With this diff, we make it explicitly static. This way, we can assume number_levels to be immutable and not break an assumption that a lot of our code relies upon. The LDB tool can still use the method.
Also, I removed the method from a separate file since it breaks filename completion. version_se<TAB> now completes to "version_set." instead of "version_set" (without the dot). I don't see a big reason that the function should be in a different file.
Test Plan: reduce_levels_test
Reviewers: dhruba, haobo, kailiu, sdong
Reviewed By: kailiu
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15303
2014-01-24 23:57:04 +01:00
|
|
|
int new_levels);
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2014-04-15 22:39:26 +02:00
|
|
|
// printf contents (for debugging)
|
|
|
|
Status DumpManifest(Options& options, std::string& manifestFileName,
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 19:07:40 +02:00
|
|
|
bool verbose, bool hex = false, bool json = false);
|
2014-04-15 22:39:26 +02:00
|
|
|
|
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// Return the current manifest file number
|
2014-11-04 02:45:55 +01:00
|
|
|
uint64_t manifest_file_number() const { return manifest_file_number_; }
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2016-06-10 04:03:10 +02:00
|
|
|
uint64_t options_file_number() const { return options_file_number_; }
|
|
|
|
|
2014-11-04 02:45:55 +01:00
|
|
|
uint64_t pending_manifest_file_number() const {
|
2014-03-18 05:50:15 +01:00
|
|
|
return pending_manifest_file_number_;
|
|
|
|
}
|
|
|
|
|
2014-11-08 00:44:12 +01:00
|
|
|
uint64_t current_next_file_number() const { return next_file_number_.load(); }
|
2014-11-07 20:50:34 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// Allocate and return a new file number
|
2014-11-11 15:58:47 +01:00
|
|
|
uint64_t NewFileNumber() { return next_file_number_.fetch_add(1); }
|
2012-08-27 08:45:35 +02:00
|
|
|
|
2011-04-12 21:38:58 +02:00
|
|
|
// Return the last sequence number.
|
2013-12-20 18:57:58 +01:00
|
|
|
uint64_t LastSequence() const {
|
|
|
|
return last_sequence_.load(std::memory_order_acquire);
|
|
|
|
}
|
2011-04-12 21:38:58 +02:00
|
|
|
|
|
|
|
// Set the last sequence number to s.
|
|
|
|
void SetLastSequence(uint64_t s) {
|
|
|
|
assert(s >= last_sequence_);
|
2013-12-20 18:57:58 +01:00
|
|
|
last_sequence_.store(s, std::memory_order_release);
|
2011-04-12 21:38:58 +02:00
|
|
|
}
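The release store above pairs with the acquire load in LastSequence(); a minimal standalone sketch of why that pairing matters:
```
#include <atomic>
#include <cstdint>

std::atomic<uint64_t> seq{0};
uint64_t payload = 0;  // plain data published before the sequence bump

void Publisher() {
  payload = 42;                             // write the data first
  seq.store(1, std::memory_order_release);  // then publish the sequence
}

bool Observer() {
  if (seq.load(std::memory_order_acquire) == 1) {
    // Acquire/release pairing guarantees the payload write is visible here.
    return payload == 42;
  }
  return false;  // nothing published yet
}
```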
|
|
|
|
|
2011-09-01 21:08:02 +02:00
|
|
|
// Mark the specified file number as used.
|
2014-11-08 00:44:12 +01:00
|
|
|
// REQUIRED: this is only called during single-threaded recovery
|
|
|
|
void MarkFileNumberUsedDuringRecovery(uint64_t number);
|
2011-09-01 21:08:02 +02:00
|
|
|
|
2011-04-12 21:38:58 +02:00
|
|
|
// Return the log file number for the log file that is currently
|
|
|
|
// being compacted, or zero if there is no such log file.
|
2014-11-04 02:45:55 +01:00
|
|
|
uint64_t prev_log_number() const { return prev_log_number_; }
|
2011-04-12 21:38:58 +02:00
|
|
|
|
2014-01-28 20:05:04 +01:00
|
|
|
// Returns the minimum log number such that all
|
|
|
|
// log numbers less than or equal to it can be deleted
|
|
|
|
uint64_t MinLogNumber() const {
|
2014-02-25 19:38:04 +01:00
|
|
|
uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
|
2014-01-28 20:05:04 +01:00
|
|
|
for (auto cfd : *column_family_set_) {
|
2015-07-02 23:27:00 +02:00
|
|
|
// It's safe to ignore dropped column families here:
|
|
|
|
// cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
|
|
|
|
if (min_log_num > cfd->GetLogNumber() && !cfd->IsDropped()) {
|
2014-01-29 22:28:50 +01:00
|
|
|
min_log_num = cfd->GetLogNumber();
|
2014-01-28 20:05:04 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return min_log_num;
|
|
|
|
}
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// Create an iterator that reads over the compaction inputs for "*c".
|
|
|
|
// The caller should delete the iterator when no longer needed.
|
2016-03-25 03:36:39 +01:00
|
|
|
InternalIterator* MakeInputIterator(const Compaction* c);
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
// Add all files listed in any live version to *live.
|
2014-07-02 18:54:20 +02:00
|
|
|
void AddLiveFiles(std::vector<FileDescriptor>* live_list);
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2015-04-30 00:36:21 +02:00
|
|
|
// Return the approximate size of data to be scanned for range [start, end)
|
2015-09-10 22:50:00 +02:00
|
|
|
// in levels [start_level, end_level). If end_level == -1 it will search
|
|
|
|
// through all non-empty levels
|
|
|
|
uint64_t ApproximateSize(Version* v, const Slice& start, const Slice& end,
|
|
|
|
int start_level = 0, int end_level = -1);
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2012-09-24 23:01:01 +02:00
|
|
|
// Return the size of the current manifest file
|
2014-11-04 02:45:55 +01:00
|
|
|
uint64_t manifest_file_size() const { return manifest_file_size_; }
|
2012-10-19 23:00:53 +02:00
|
|
|
|
|
|
|
// verify that the files that we started with for a compaction
|
|
|
|
// still exist in the current version and in the same original level.
|
|
|
|
// This ensures that a concurrent compaction did not erroneously
|
|
|
|
// pick the same files to compact.
|
|
|
|
bool VerifyCompactionFileConsistency(Compaction* c);
|
|
|
|
|
2014-01-27 23:33:50 +01:00
|
|
|
Status GetMetadataForFile(uint64_t number, int* filelevel,
|
2014-02-07 00:42:16 +01:00
|
|
|
FileMetaData** metadata, ColumnFamilyData** cfd);
|
2013-08-22 23:32:53 +02:00
|
|
|
|
2015-10-07 02:46:22 +02:00
|
|
|
// This function doesn't support leveldb SST filenames
|
CompactFiles, EventListener and GetDatabaseMetaData
Summary:
This diff adds three sets of APIs to RocksDB.
= GetColumnFamilyMetaData =
* This APIs allow users to obtain the current state of a RocksDB instance on one column family.
* See GetColumnFamilyMetaData in include/rocksdb/db.h
= EventListener =
* A virtual class that allows users to implement a set of
call-back functions which will be called when specific
events of a RocksDB instance happens.
* To register EventListener, simply insert an EventListener to ColumnFamilyOptions::listeners
= CompactFiles =
* CompactFiles API inputs a set of file numbers and an output level, and RocksDB
will try to compact those files into the specified level.
= Example =
* Example code can be found in example/compact_files_example.cc, which implements
a simple external compactor using EventListener, GetColumnFamilyMetaData, and
CompactFiles API.
Test Plan:
listener_test
compactor_test
example/compact_files_example
export ROCKSDB_TESTS=CompactFiles
db_test
export ROCKSDB_TESTS=MetaData
db_test
Reviewers: ljin, igor, rven, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D24705
2014-11-07 23:45:18 +01:00
|
|
|
void GetLiveFilesMetaData(std::vector<LiveFileMetaData> *metadata);
|
2013-08-22 23:32:53 +02:00
|
|
|
|
2015-02-10 02:38:32 +01:00
|
|
|
void GetObsoleteFiles(std::vector<FileMetaData*>* files,
|
2016-03-11 03:16:21 +01:00
|
|
|
std::vector<std::string>* manifest_filenames,
|
2015-02-10 02:38:32 +01:00
|
|
|
uint64_t min_pending_output);
|
2013-11-09 00:23:46 +01:00
|
|
|
|
2014-01-22 20:44:53 +01:00
|
|
|
ColumnFamilySet* GetColumnFamilySet() { return column_family_set_.get(); }
|
2014-11-04 02:45:55 +01:00
|
|
|
const EnvOptions& env_options() { return env_options_; }
|
2014-01-02 18:08:12 +01:00
|
|
|
|
2015-02-12 02:10:43 +01:00
|
|
|
static uint64_t GetNumLiveVersions(Version* dummy_versions);
|
|
|
|
|
2015-08-20 20:47:19 +02:00
|
|
|
static uint64_t GetTotalSstFilesSize(Version* dummy_versions);
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
private:
|
2012-10-19 23:00:53 +02:00
|
|
|
struct ManifestWriter;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
friend class Version;
|
CompactFiles, EventListener and GetDatabaseMetaData
Summary:
This diff adds three sets of APIs to RocksDB.
= GetColumnFamilyMetaData =
* This APIs allow users to obtain the current state of a RocksDB instance on one column family.
* See GetColumnFamilyMetaData in include/rocksdb/db.h
= EventListener =
* A virtual class that allows users to implement a set of
call-back functions which will be called when specific
events of a RocksDB instance happens.
* To register EventListener, simply insert an EventListener to ColumnFamilyOptions::listeners
= CompactFiles =
* CompactFiles API inputs a set of file numbers and an output level, and RocksDB
will try to compact those files into the specified level.
= Example =
* Example code can be found in example/compact_files_example.cc, which implements
a simple external compactor using EventListener, GetColumnFamilyMetaData, and
CompactFiles API.
Test Plan:
listener_test
compactor_test
example/compact_files_example
export ROCKSDB_TESTS=CompactFiles
db_test
export ROCKSDB_TESTS=MetaData
db_test
Reviewers: ljin, igor, rven, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D24705
2014-11-07 23:45:18 +01:00
|
|
|
friend class DBImpl;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2014-01-22 20:44:53 +01:00
|
|
|
struct LogReporter : public log::Reader::Reporter {
|
|
|
|
Status* status;
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual void Corruption(size_t bytes, const Status& s) override {
|
2014-01-22 20:44:53 +01:00
|
|
|
if (this->status->ok()) *this->status = s;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2015-04-30 00:36:21 +02:00
|
|
|
// ApproximateSize helper
|
|
|
|
uint64_t ApproximateSizeLevel0(Version* v, const LevelFilesBrief& files_brief,
|
|
|
|
const Slice& start, const Slice& end);
|
|
|
|
|
|
|
|
uint64_t ApproximateSize(Version* v, const FdWithKeyRange& f,
|
|
|
|
const Slice& key);
|
|
|
|
|
2011-09-01 21:08:02 +02:00
|
|
|
// Save current contents to *log
|
|
|
|
Status WriteSnapshot(log::Writer* log);
|
|
|
|
|
2014-01-11 00:12:34 +01:00
|
|
|
void AppendVersion(ColumnFamilyData* column_family_data, Version* v);
|
2011-05-21 04:17:43 +02:00
|
|
|
|
2014-09-09 00:25:01 +02:00
|
|
|
ColumnFamilyData* CreateColumnFamily(const ColumnFamilyOptions& cf_options,
|
2014-02-28 23:05:11 +01:00
|
|
|
VersionEdit* edit);
|
|
|
|
|
2014-01-22 20:44:53 +01:00
|
|
|
std::unique_ptr<ColumnFamilySet> column_family_set_;
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
Env* const env_;
|
|
|
|
const std::string dbname_;
|
2014-09-09 00:25:01 +02:00
|
|
|
const DBOptions* const db_options_;
|
2014-11-08 00:44:12 +01:00
|
|
|
std::atomic<uint64_t> next_file_number_;
|
2011-03-18 23:37:00 +01:00
|
|
|
uint64_t manifest_file_number_;
|
2016-06-10 04:03:10 +02:00
|
|
|
uint64_t options_file_number_;
|
2014-03-18 05:50:15 +01:00
|
|
|
uint64_t pending_manifest_file_number_;
|
2013-12-20 18:57:58 +01:00
|
|
|
std::atomic<uint64_t> last_sequence_;
|
2011-04-12 21:38:58 +02:00
|
|
|
uint64_t prev_log_number_; // 0 or backing store for memtable being compacted
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
// Opened lazily
|
2013-01-20 11:07:13 +01:00
|
|
|
unique_ptr<log::Writer> descriptor_log_;
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2012-10-19 23:00:53 +02:00
|
|
|
// generates an increasing version number for every new version
|
|
|
|
uint64_t current_version_number_;
|
|
|
|
|
|
|
|
// Queue of writers to the manifest file
|
|
|
|
std::deque<ManifestWriter*> manifest_writers_;
|
|
|
|
|
2014-01-16 01:15:43 +01:00
|
|
|
// Current size of manifest file
|
2014-01-11 00:12:34 +01:00
|
|
|
uint64_t manifest_file_size_;
|
2013-01-11 02:18:50 +01:00
|
|
|
|
2013-11-09 00:23:46 +01:00
|
|
|
std::vector<FileMetaData*> obsolete_files_;
|
2016-03-11 03:16:21 +01:00
|
|
|
std::vector<std::string> obsolete_manifests_;
|
2013-11-09 00:23:46 +01:00
|
|
|
|
2014-09-09 00:25:01 +02:00
|
|
|
// env options for all reads and writes except compactions
|
|
|
|
const EnvOptions& env_options_;
|
2013-03-15 01:00:04 +01:00
|
|
|
|
2014-09-09 00:25:01 +02:00
|
|
|
// env options used for compactions. This is a copy of
|
|
|
|
// env_options_ but with readaheads set to readahead_compactions_.
|
|
|
|
const EnvOptions env_options_compactions_;
|
2013-03-15 01:00:04 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// No copying allowed
|
|
|
|
VersionSet(const VersionSet&);
|
|
|
|
void operator=(const VersionSet&);
|
2012-10-19 23:00:53 +02:00
|
|
|
|
2014-03-13 02:09:03 +01:00
|
|
|
void LogAndApplyCFHelper(VersionEdit* edit);
|
2014-10-31 16:48:19 +01:00
|
|
|
void LogAndApplyHelper(ColumnFamilyData* cfd, VersionBuilder* b, Version* v,
|
2015-02-05 06:39:45 +01:00
|
|
|
VersionEdit* edit, InstrumentedMutex* mu);
|
2011-03-18 23:37:00 +01:00
|
|
|
};
|
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
} // namespace rocksdb
|