// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <atomic>
#include <deque>
#include <set>
#include <vector>

#include "db/dbformat.h"
#include "db/log_writer.h"
#include "db/snapshot.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/transaction_log.h"
#include "port/port.h"
#include "util/stats_logger.h"
#include "memtablelist.h"

namespace rocksdb {

class MemTable;
class TableCache;
class Version;
class VersionEdit;
class VersionSet;

class DBImpl : public DB {
 public:
  DBImpl(const Options& options, const std::string& dbname);
  virtual ~DBImpl();

  // Implementations of the DB interface
  virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
  virtual Status Merge(const WriteOptions&, const Slice& key,
                       const Slice& value);
  virtual Status Delete(const WriteOptions&, const Slice& key);
  virtual Status Write(const WriteOptions& options, WriteBatch* updates);
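
  // Example (illustrative sketch): applying several updates atomically with a
  // WriteBatch through the public DB interface. Assumes a DB* named "db"
  // opened via DB::Open; the keys and values are placeholders.
  //
  //   WriteBatch batch;
  //   batch.Put("key1", "value1");
  //   batch.Delete("key2");
  //   Status s = db->Write(WriteOptions(), &batch);
  //   assert(s.ok());
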
  virtual Status Get(const ReadOptions& options,
                     const Slice& key,
                     std::string* value);
  virtual std::vector<Status> MultiGet(const ReadOptions& options,
                                       const std::vector<Slice>& keys,
                                       std::vector<std::string>* values);
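
  // Example (illustrative sketch): reading a single key and a batch of keys.
  // Assumes a DB* named "db"; the keys are placeholders.
  //
  //   std::string value;
  //   Status s = db->Get(ReadOptions(), "key1", &value);
  //
  //   std::vector<Slice> keys;
  //   keys.push_back("key1");
  //   keys.push_back("key2");
  //   std::vector<std::string> values;
  //   std::vector<Status> statuses = db->MultiGet(ReadOptions(), keys, &values);
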
  // Returns false if the key definitely does not exist in the database and
  // true if it may. If value_found is not passed in as nullptr and the value
  // is found in memory, the value is also returned in *value. On return,
  // *value_found is set to true if the value was found and to false otherwise.
  virtual bool KeyMayExist(const ReadOptions& options,
                           const Slice& key,
                           std::string* value,
                           bool* value_found = nullptr);
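
  // Example (illustrative sketch): probing for a key without requiring a read
  // from disk. Assumes a DB* named "db"; the key is a placeholder.
  //
  //   std::string value;
  //   bool value_found = false;
  //   if (db->KeyMayExist(ReadOptions(), "key1", &value, &value_found)) {
  //     // The key may exist; "value" is valid only if value_found is true.
  //   } else {
  //     // The key definitely does not exist.
  //   }
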
  virtual Iterator* NewIterator(const ReadOptions&);
  virtual const Snapshot* GetSnapshot();
  virtual void ReleaseSnapshot(const Snapshot* snapshot);
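
  // Example (illustrative sketch): iterating over a consistent snapshot of the
  // database. Assumes a DB* named "db".
  //
  //   const Snapshot* snapshot = db->GetSnapshot();
  //   ReadOptions read_options;
  //   read_options.snapshot = snapshot;
  //   Iterator* iter = db->NewIterator(read_options);
  //   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
  //     // iter->key() and iter->value() reflect the state at the snapshot.
  //   }
  //   delete iter;
  //   db->ReleaseSnapshot(snapshot);
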
  virtual bool GetProperty(const Slice& property, std::string* value);
  virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes);
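
  // Example (illustrative sketch): querying a DB property and the approximate
  // size of a key range. Assumes a DB* named "db"; the key range is a
  // placeholder.
  //
  //   std::string stats;
  //   if (db->GetProperty("rocksdb.stats", &stats)) {
  //     // "stats" holds a human-readable statistics dump.
  //   }
  //
  //   Range ranges[1];
  //   ranges[0] = Range("a", "z");
  //   uint64_t sizes[1];
  //   db->GetApproximateSizes(ranges, 1, sizes);
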
  virtual void CompactRange(const Slice* begin, const Slice* end,
                            bool reduce_level = false, int target_level = -1);
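
  // Example (illustrative sketch): manually compacting the entire key space
  // and moving the result to a lower level. Assumes a DB* named "db";
  // nullptr for begin/end means the beginning/end of the key range.
  //
  //   db->CompactRange(nullptr, nullptr, /*reduce_level=*/true);
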
  virtual int NumberLevels();
  virtual int MaxMemCompactionLevel();
  virtual int Level0StopWriteTrigger();
  virtual Status Flush(const FlushOptions& options);
  virtual Status DisableFileDeletions();
  virtual Status EnableFileDeletions();
  virtual Status GetLiveFiles(std::vector<std::string>&,
                              uint64_t* manifest_file_size,
                              bool flush_memtable = true);
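
  // Example (illustrative sketch): capturing a consistent file list for a
  // backup. Assumes a DB* named "db"; one possible sequence is to pause file
  // deletions, copy the listed files, then re-enable deletions.
  //
  //   db->DisableFileDeletions();
  //   std::vector<std::string> files;
  //   uint64_t manifest_file_size;
  //   Status s = db->GetLiveFiles(files, &manifest_file_size);
  //   // ... copy the returned files; the manifest only up to
  //   // manifest_file_size bytes ...
  //   db->EnableFileDeletions();
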
  virtual Status GetSortedWalFiles(VectorLogPtr& files);
  virtual Status DeleteWalFiles(const VectorLogPtr& files);
  virtual SequenceNumber GetLatestSequenceNumber();
  virtual Status GetUpdatesSince(SequenceNumber seq_number,
                                 unique_ptr<TransactionLogIterator>* iter);
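
  // Example (illustrative sketch): tailing the transaction log from a saved
  // sequence number, assuming the TransactionLogIterator interface declared in
  // rocksdb/transaction_log.h (Valid(), Next(), GetBatch()). Assumes a DB*
  // named "db"; "since" is a placeholder for a previously saved sequence
  // number.
  //
  //   SequenceNumber since = 0;
  //   unique_ptr<TransactionLogIterator> iter;
  //   Status s = db->GetUpdatesSince(since, &iter);
  //   if (s.ok()) {
  //     for (; iter->Valid(); iter->Next()) {
  //       BatchResult batch = iter->GetBatch();
  //       // batch.sequence and batch.writeBatchPtr describe one write batch.
  //     }
  //   }
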
  virtual Status DeleteFile(std::string name);

  virtual void GetLiveFilesMetaData(
      std::vector<LiveFileMetaData> *metadata);

  // Extra methods (for testing) that are not in the public DB interface

  // Compact any files in the named level that overlap [*begin, *end]
  void TEST_CompactRange(int level, const Slice* begin, const Slice* end);

  // Force current memtable contents to be flushed.
  Status TEST_FlushMemTable();

  // Wait for the current memtable flush to finish.
  Status TEST_WaitForFlushMemTable();

  // Wait for any running compaction to finish.
  Status TEST_WaitForCompact();

  // Return an internal iterator over the current state of the database.
  // The keys of this iterator are internal keys (see format.h).
  // The returned iterator should be deleted when no longer needed.
  Iterator* TEST_NewInternalIterator();

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t TEST_MaxNextLevelOverlappingBytes();

  // Simulate a DB crash: tear down the database without a clean shutdown.
  void TEST_Destroy_DBImpl();

  // Return the current manifest file number.
  uint64_t TEST_Current_Manifest_FileNo();

  // Trigger a background purge of obsolete WAL files. Only for testing.
  void TEST_PurgeObsoleteteWAL();

  // Get the total level-0 file size. Only for testing.
  uint64_t TEST_GetLevel0TotalSize() { return versions_->NumLevelBytes(0); }

 protected:
  Env* const env_;
  const std::string dbname_;
  unique_ptr<VersionSet> versions_;
  const InternalKeyComparator internal_comparator_;
  const Options options_;  // options_.comparator == &internal_comparator_

  const Comparator* user_comparator() const {
    return internal_comparator_.user_comparator();
  }

  MemTable* GetMemTable() {
    return mem_;
  }

  Iterator* NewInternalIterator(const ReadOptions&,
                                SequenceNumber* latest_snapshot);

 private:
  friend class DB;
  struct CompactionState;
  struct Writer;
  struct DeletionState;

  Status NewDB();

  // Recover the descriptor from persistent storage. May do a significant
  // amount of work to recover recently logged updates. Any changes to
  // be made to the descriptor are added to *edit.
  Status Recover(VersionEdit* edit, MemTable* external_table = nullptr,
                 bool error_if_log_file_exist = false);

  void MaybeIgnoreError(Status* s) const;

  const Status CreateArchivalDirectory();

  // Delete any unneeded files and stale in-memory entries.
  void DeleteObsoleteFiles();

  // Flush the in-memory write buffer to storage. Switches to a new
  // log-file/memtable and writes a new descriptor iff successful.
  Status FlushMemTableToOutputFile(bool* madeProgress = nullptr);

  Status RecoverLogFile(uint64_t log_number,
                        VersionEdit* edit,
                        SequenceNumber* max_sequence,
                        MemTable* external_table);

  // The following two methods are used to flush a memtable to
  // storage. The first one is used at database recovery time (when the
  // database is opened) and is heavyweight because it holds the mutex
  // for the entire period. The second method, WriteLevel0Table, supports
  // flushing memtables to storage concurrently.
  Status WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit);
  Status WriteLevel0Table(std::vector<MemTable*>& mems, VersionEdit* edit,
                          uint64_t* filenumber);

  uint64_t SlowdownAmount(int n, int top, int bottom);
  Status MakeRoomForWrite(bool force /* compact even if there is room? */);
  WriteBatch* BuildBatchGroup(Writer** last_writer);

  // Force current memtable contents to be flushed.
  Status FlushMemTable(const FlushOptions& options);

  // Wait for the memtable to be flushed.
  Status WaitForFlushMemTable();

  void MaybeScheduleLogDBDeployStats();
  static void BGLogDBDeployStats(void* db);
  void LogDBDeployStats();

  void MaybeScheduleFlushOrCompaction();
  static void BGWorkCompaction(void* db);
  static void BGWorkFlush(void* db);
  void BackgroundCallCompaction();
  void BackgroundCallFlush();
  Status BackgroundCompaction(bool* madeProgress,
                              DeletionState& deletion_state);
  Status BackgroundFlush(bool* madeProgress);
  void CleanupCompaction(CompactionState* compact);
  Status DoCompactionWork(CompactionState* compact);

  Status OpenCompactionOutputFile(CompactionState* compact);
  Status FinishCompactionOutputFile(CompactionState* compact, Iterator* input);
  Status InstallCompactionResults(CompactionState* compact);
  void AllocateCompactionOutputFileNumbers(CompactionState* compact);
  void ReleaseCompactionUnusedFileNumbers(CompactionState* compact);

  // Returns the list of live files in 'live' and the list
  // of all files in the filesystem in 'allfiles'.
  void FindObsoleteFiles(DeletionState& deletion_state);

  // Diffs the files listed in filenames against the set of live files; files
  // that are not live may be removed. If a removed file is an sst file, its
  // file number is returned in files_to_evict.
  void PurgeObsoleteFiles(DeletionState& deletion_state);

  // Removes the files listed in files_to_evict from the table_cache
  void EvictObsoleteFiles(DeletionState& deletion_state);

  Status DeleteLogFile(uint64_t number);

  void PurgeObsoleteWALFiles();

  Status AppendSortedWalsOfType(const std::string& path,
                                VectorLogPtr& log_files,
                                WalFileType type);

  // Requires: all_logs should be sorted with earliest log file first.
  // Retains all log files in all_logs which contain updates with sequence
  // numbers greater than or equal to the requested SequenceNumber.
  Status RetainProbableWalFiles(VectorLogPtr& all_logs,
                                const SequenceNumber target);

  // Return true if the WAL file of the given type and number exists and is
  // empty.
  bool CheckWalFileExistsAndEmpty(const WalFileType type,
                                  const uint64_t number);

  Status ReadFirstRecord(const WalFileType type, const uint64_t number,
                         WriteBatch* const result);

  Status ReadFirstLine(const std::string& fname, WriteBatch* const batch);

  void PrintStatistics();

  // Dump rocksdb.stats to LOG
  void MaybeDumpStats();

  // Return the minimum empty level that could hold the total data in the
  // input level. Return the input level if no such level can be found.
  int FindMinimumEmptyLevelFitting(int level);

  // Move the files in the input level to the target level.
  // If target_level < 0, automatically calculate the minimum level that could
  // hold the data set.
  void ReFitLevel(int level, int target_level = -1);

  // Constant after construction
  const InternalFilterPolicy internal_filter_policy_;
  bool owns_info_log_;

  // table_cache_ provides its own synchronization
  unique_ptr<TableCache> table_cache_;

  // Lock over the persistent DB state. Non-nullptr iff successfully acquired.
  FileLock* db_lock_;

  // State below is protected by mutex_
  port::Mutex mutex_;
  port::AtomicPointer shutting_down_;
  port::CondVar bg_cv_;  // Signalled when background work finishes
  std::shared_ptr<MemTableRepFactory> mem_rep_factory_;
  MemTable* mem_;
  MemTableList imm_;     // Memtables that are not changing
  uint64_t logfile_number_;
  unique_ptr<log::Writer> log_;

  std::string host_name_;

  // Queue of writers.
  std::deque<Writer*> writers_;
  WriteBatch tmp_batch_;

  SnapshotList snapshots_;

  // Set of table files to protect from deletion because they are
  // part of ongoing compactions.
  std::set<uint64_t> pending_outputs_;

  // Count of background compactions that have been scheduled or are running.
  int bg_compaction_scheduled_;

  // number of background memtable flush jobs, submitted to the HIGH pool
  int bg_flush_scheduled_;

  // Is a background stats-logging job currently scheduled?
  bool bg_logstats_scheduled_;

  // Information for a manual compaction
  struct ManualCompaction {
    int level;
    bool done;
    bool in_progress;           // compaction request being processed?
    const InternalKey* begin;   // nullptr means beginning of key range
    const InternalKey* end;     // nullptr means end of key range
    InternalKey tmp_storage;    // Used to keep track of compaction progress
  };
  ManualCompaction* manual_compaction_;

  // Have we encountered a background error in paranoid mode?
  Status bg_error_;

  std::unique_ptr<StatsLogger> logger_;

  int64_t volatile last_log_ts;

  // Whether deletion of obsolete files is currently disabled.
  bool disable_delete_obsolete_files_;

  // last time when DeleteObsoleteFiles was invoked
  uint64_t delete_obsolete_files_last_run_;

  // last time when PurgeObsoleteWALFiles ran.
  uint64_t purge_wal_files_last_run_;

  // last time stats were dumped to LOG
  std::atomic<uint64_t> last_stats_dump_time_microsec_;

  // These count the number of microseconds for which MakeRoomForWrite stalls.
  uint64_t stall_level0_slowdown_;
  uint64_t stall_memtable_compaction_;
  uint64_t stall_level0_num_files_;
  std::vector<uint64_t> stall_leveln_slowdown_;
  uint64_t stall_level0_slowdown_count_;
  uint64_t stall_memtable_compaction_count_;
  uint64_t stall_level0_num_files_count_;
  std::vector<uint64_t> stall_leveln_slowdown_count_;

  // Time at which this instance was started.
  const uint64_t started_at_;

  bool flush_on_destroy_;  // Used when disableWAL is true.

  // Per level compaction stats. stats_[level] stores the stats for
  // compactions that produced data for the specified "level".
  struct CompactionStats {
    uint64_t micros;

    // Bytes read from level N during compaction between levels N and N+1
    int64_t bytes_readn;

    // Bytes read from level N+1 during compaction between levels N and N+1
    int64_t bytes_readnp1;

    // Total bytes written during compaction between levels N and N+1
    int64_t bytes_written;

    // Files read from level N during compaction between levels N and N+1
    int files_in_leveln;

    // Files read from level N+1 during compaction between levels N and N+1
    int files_in_levelnp1;

    // Files written during compaction between levels N and N+1
    int files_out_levelnp1;

    // Number of compactions done
    int count;

    CompactionStats() : micros(0), bytes_readn(0), bytes_readnp1(0),
                        bytes_written(0), files_in_leveln(0),
                        files_in_levelnp1(0), files_out_levelnp1(0),
                        count(0) { }

    void Add(const CompactionStats& c) {
      this->micros += c.micros;
      this->bytes_readn += c.bytes_readn;
      this->bytes_readnp1 += c.bytes_readnp1;
      this->bytes_written += c.bytes_written;
      this->files_in_leveln += c.files_in_leveln;
      this->files_in_levelnp1 += c.files_in_levelnp1;
      this->files_out_levelnp1 += c.files_out_levelnp1;
      this->count += 1;
    }
  };

  std::vector<CompactionStats> stats_;

  // Used to compute per-interval statistics
  struct StatsSnapshot {
    uint64_t bytes_read_;
    uint64_t bytes_written_;
    uint64_t bytes_new_;
    double seconds_up_;

    StatsSnapshot() : bytes_read_(0), bytes_written_(0),
                      bytes_new_(0), seconds_up_(0) {}
  };

  StatsSnapshot last_stats_;

  static const int KEEP_LOG_FILE_NUM = 1000;
  std::string db_absolute_path_;

  // Count of the number of consecutive delayed writes.
  int delayed_writes_;

  // Stores the last flushed sequence number.
  // Used by the transaction log iterator.
  SequenceNumber last_flushed_sequence_;

  // The options to access storage files
  const EnvOptions storage_options_;

  // A value of true temporarily disables scheduling of background work
  bool bg_work_gate_closed_;

  // Guard against multiple concurrent refitting
  bool refitting_level_;

  // No copying allowed
  DBImpl(const DBImpl&);
  void operator=(const DBImpl&);

  // Dump the delayed_writes_ count to the log file and reset the counter.
  void DelayLoggingAndReset();

  // Return the earliest snapshot where seqno is visible.
  // Store the snapshot right before that, if any, in prev_snapshot.
  inline SequenceNumber findEarliestVisibleSnapshot(
    SequenceNumber in,
    std::vector<SequenceNumber>& snapshots,
    SequenceNumber* prev_snapshot);

  // Function that Get and KeyMayExist call with no_io true or false.
  // Note: 'value_found' from KeyMayExist propagates here.
  Status GetImpl(const ReadOptions& options,
                 const Slice& key,
                 std::string* value,
                 bool* value_found = nullptr);
};

// Sanitize db options. The caller should delete result.info_log if
// it is not equal to src.info_log.
extern Options SanitizeOptions(const std::string& db,
                               const InternalKeyComparator* icmp,
                               const InternalFilterPolicy* ipolicy,
                               const Options& src);

} // namespace rocksdb