2016-02-10 00:12:00 +01:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-16 01:03:42 +02:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2014-10-29 01:52:32 +01:00
|
|
|
#pragma once
|
2015-09-02 22:58:22 +02:00
|
|
|
|
2014-10-29 01:52:32 +01:00
|
|
|
#include <algorithm>
|
2014-10-29 02:10:55 +01:00
|
|
|
#include <atomic>
|
2014-10-29 01:52:32 +01:00
|
|
|
#include <map>
|
2015-09-02 22:58:22 +02:00
|
|
|
#include <memory>
|
|
|
|
#include <set>
|
2014-10-29 01:52:32 +01:00
|
|
|
#include <string>
|
2015-09-02 22:58:22 +02:00
|
|
|
#include <utility>
|
2014-10-29 01:52:32 +01:00
|
|
|
|
2020-02-11 00:42:46 +01:00
|
|
|
#include "db/version_edit.h"
|
2015-09-02 22:58:22 +02:00
|
|
|
#include "port/port.h"
|
2015-08-08 06:59:51 +02:00
|
|
|
#include "rocksdb/comparator.h"
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
Summary:
In the current code base, we use Status to get and store the returned status from the call. Specifically, for IO related functions, the current Status cannot reflect the IO Error details such as error scope, error retryable attribute, and others. With the implementation of https://github.com/facebook/rocksdb/issues/5761, we have the new Wrapper for IO, which returns IOStatus instead of Status. However, the IOStatus is purged at the lower level of write path and transferred to Status.
The first job of this PR is to pass the IOStatus to the write path (flush, WAL write, and Compaction). The second job is to identify the Retryable IO Error as HardError, and set the bg_error_ as HardError. In this case, the DB Instance becomes read only. User is informed of the Status and need to take actions to deal with it (e.g., call db->Resume()).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6487
Test Plan: Added the testing case to error_handler_fs_test. Pass make asan_check
Reviewed By: anand1976
Differential Revision: D20685017
Pulled By: zhichao-cao
fbshipit-source-id: ff85f042896243abcd6ef37877834e26f36b6eb0
2020-03-28 00:03:05 +01:00
|
|
|
#include "rocksdb/io_status.h"
|
2014-10-29 01:52:32 +01:00
|
|
|
#include "rocksdb/table.h"
|
2015-10-13 00:06:38 +02:00
|
|
|
#include "table/internal_iterator.h"
|
2014-10-29 01:52:32 +01:00
|
|
|
#include "table/table_builder.h"
|
2015-09-02 22:58:22 +02:00
|
|
|
#include "table/table_reader.h"
|
2019-05-30 20:21:38 +02:00
|
|
|
#include "test_util/testharness.h"
|
|
|
|
#include "test_util/testutil.h"
|
2019-05-31 02:39:43 +02:00
|
|
|
#include "util/kv_map.h"
|
|
|
|
#include "util/mutexlock.h"
|
2014-10-29 01:52:32 +01:00
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2014-11-14 20:35:48 +01:00
|
|
|
namespace mock {
|
2014-10-29 01:52:32 +01:00
|
|
|
|
2015-09-02 22:58:22 +02:00
|
|
|
// Builds the in-memory key/value map that represents the contents of one
// mock SST file. Defaults to an empty file; callers may pass an
// initializer list of <key, value> pairs to pre-populate it.
stl_wrappers::KVMap MakeMockFile(
    std::initializer_list<std::pair<const std::string, std::string>> l = {});
|
2014-10-29 01:52:32 +01:00
|
|
|
|
|
|
|
// In-memory "file system" shared by the mock table classes: maps a numeric
// file id to the KVMap holding that file's contents.
struct MockTableFileSystem {
  // Guards `files` (e.g. MockTableBuilder::Finish() locks it before
  // inserting a newly finished table).
  port::Mutex mutex;
  std::map<uint32_t, stl_wrappers::KVMap> files;
};
|
|
|
|
|
|
|
|
// TableReader backed by an in-memory stl_wrappers::KVMap instead of an
// on-disk SST file. Only holds a reference to the map, so the map must
// outlive the reader.
class MockTableReader : public TableReader {
 public:
  explicit MockTableReader(const stl_wrappers::KVMap& table) : table_(table) {}

  // Returns an iterator over the backing map. The remaining parameters are
  // accepted for interface compatibility with TableReader.
  InternalIterator* NewIterator(const ReadOptions&,
                                const SliceTransform* prefix_extractor,
                                Arena* arena, bool skip_filters,
                                TableReaderCaller caller,
                                size_t compaction_readahead_size = 0,
                                bool allow_unprepared_value = false) override;

  Status Get(const ReadOptions& readOptions, const Slice& key,
             GetContext* get_context, const SliceTransform* prefix_extractor,
             bool skip_filters = false) override;

  // File offsets are meaningless for an in-memory table; always report 0.
  uint64_t ApproximateOffsetOf(const Slice& /*key*/,
                               TableReaderCaller /*caller*/) override {
    return 0;
  }

  // Same as above: no physical layout, so every range has size 0.
  uint64_t ApproximateSize(const Slice& /*start*/, const Slice& /*end*/,
                           TableReaderCaller /*caller*/) override {
    return 0;
  }

  size_t ApproximateMemoryUsage() const override { return 0; }

  // No compaction-specific setup is needed for a mock table.
  void SetupForCompaction() override {}

  std::shared_ptr<const TableProperties> GetTableProperties() const override;

  ~MockTableReader() {}

 private:
  const stl_wrappers::KVMap& table_;  // not owned
};
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
class MockTableIterator : public InternalIterator {
|
2014-10-29 01:52:32 +01:00
|
|
|
public:
|
2015-09-02 22:58:22 +02:00
|
|
|
explicit MockTableIterator(const stl_wrappers::KVMap& table) : table_(table) {
|
2014-10-29 01:52:32 +01:00
|
|
|
itr_ = table_.end();
|
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
bool Valid() const override { return itr_ != table_.end(); }
|
2014-10-29 01:52:32 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void SeekToFirst() override { itr_ = table_.begin(); }
|
2014-10-29 01:52:32 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void SeekToLast() override {
|
2014-10-29 01:52:32 +01:00
|
|
|
itr_ = table_.end();
|
|
|
|
--itr_;
|
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void Seek(const Slice& target) override {
|
2014-10-29 01:52:32 +01:00
|
|
|
std::string str_target(target.data(), target.size());
|
|
|
|
itr_ = table_.lower_bound(str_target);
|
|
|
|
}
|
|
|
|
|
2016-09-28 03:20:57 +02:00
|
|
|
void SeekForPrev(const Slice& target) override {
|
|
|
|
std::string str_target(target.data(), target.size());
|
|
|
|
itr_ = table_.upper_bound(str_target);
|
|
|
|
Prev();
|
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void Next() override { ++itr_; }
|
2014-10-29 01:52:32 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
void Prev() override {
|
2014-10-29 01:52:32 +01:00
|
|
|
if (itr_ == table_.begin()) {
|
|
|
|
itr_ = table_.end();
|
|
|
|
} else {
|
|
|
|
--itr_;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
Slice key() const override { return Slice(itr_->first); }
|
2014-10-29 01:52:32 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
Slice value() const override { return Slice(itr_->second); }
|
2014-10-29 01:52:32 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
Status status() const override { return Status::OK(); }
|
2014-10-29 01:52:32 +01:00
|
|
|
|
|
|
|
private:
|
2015-09-02 22:58:22 +02:00
|
|
|
const stl_wrappers::KVMap& table_;
|
|
|
|
stl_wrappers::KVMap::const_iterator itr_;
|
2014-10-29 01:52:32 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
class MockTableBuilder : public TableBuilder {
|
|
|
|
public:
|
2015-04-14 10:55:19 +02:00
|
|
|
MockTableBuilder(uint32_t id, MockTableFileSystem* file_system)
|
2015-09-02 22:58:22 +02:00
|
|
|
: id_(id), file_system_(file_system) {
|
|
|
|
table_ = MakeMockFile({});
|
|
|
|
}
|
2014-10-29 01:52:32 +01:00
|
|
|
|
|
|
|
// REQUIRES: Either Finish() or Abandon() has been called.
|
|
|
|
~MockTableBuilder() {}
|
|
|
|
|
|
|
|
// Add key,value to the table being constructed.
|
|
|
|
// REQUIRES: key is after any previously added key according to comparator.
|
|
|
|
// REQUIRES: Finish(), Abandon() have not been called
|
|
|
|
void Add(const Slice& key, const Slice& value) override {
|
|
|
|
table_.insert({key.ToString(), value.ToString()});
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return non-ok iff some error has been detected.
|
|
|
|
Status status() const override { return Status::OK(); }
|
|
|
|
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
Summary:
In the current code base, we use Status to get and store the returned status from the call. Specifically, for IO related functions, the current Status cannot reflect the IO Error details such as error scope, error retryable attribute, and others. With the implementation of https://github.com/facebook/rocksdb/issues/5761, we have the new Wrapper for IO, which returns IOStatus instead of Status. However, the IOStatus is purged at the lower level of write path and transferred to Status.
The first job of this PR is to pass the IOStatus to the write path (flush, WAL write, and Compaction). The second job is to identify the Retryable IO Error as HardError, and set the bg_error_ as HardError. In this case, the DB Instance becomes read only. User is informed of the Status and need to take actions to deal with it (e.g., call db->Resume()).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6487
Test Plan: Added the testing case to error_handler_fs_test. Pass make asan_check
Reviewed By: anand1976
Differential Revision: D20685017
Pulled By: zhichao-cao
fbshipit-source-id: ff85f042896243abcd6ef37877834e26f36b6eb0
2020-03-28 00:03:05 +01:00
|
|
|
// Return non-ok iff some error happens during IO.
|
|
|
|
IOStatus io_status() const override { return IOStatus::OK(); }
|
|
|
|
|
2014-10-29 01:52:32 +01:00
|
|
|
Status Finish() override {
|
|
|
|
MutexLock lock_guard(&file_system_->mutex);
|
|
|
|
file_system_->files.insert({id_, table_});
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
void Abandon() override {}
|
|
|
|
|
|
|
|
uint64_t NumEntries() const override { return table_.size(); }
|
|
|
|
|
|
|
|
uint64_t FileSize() const override { return table_.size(); }
|
|
|
|
|
Add more table properties to EventLogger
Summary:
Example output:
{"time_micros": 1431463794310521, "job": 353, "event": "table_file_creation", "file_number": 387, "file_size": 86937, "table_info": {"data_size": "81801", "index_size": "9751", "filter_size": "0", "raw_key_size": "23448", "raw_average_key_size": "24.000000", "raw_value_size": "990571", "raw_average_value_size": "1013.890481", "num_data_blocks": "245", "num_entries": "977", "filter_policy_name": "", "kDeletedKeys": "0"}}
Also fixed a bug where BuildTable() in recovery was passing Env::IOHigh argument into paranoid_checks_file parameter.
Test Plan: make check + check out the output in the log
Reviewers: sdong, rven, yhchiang
Reviewed By: yhchiang
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D38343
2015-05-13 00:53:55 +02:00
|
|
|
TableProperties GetTableProperties() const override {
|
|
|
|
return TableProperties();
|
|
|
|
}
|
|
|
|
|
2020-02-11 00:42:46 +01:00
|
|
|
// Get file checksum
|
2020-03-30 00:57:02 +02:00
|
|
|
std::string GetFileChecksum() const override { return kUnknownFileChecksum; }
|
2020-02-11 00:42:46 +01:00
|
|
|
// Get file checksum function name
|
|
|
|
const char* GetFileChecksumFuncName() const override {
|
2020-06-08 06:54:54 +02:00
|
|
|
return kUnknownFileChecksumFuncName;
|
2020-02-11 00:42:46 +01:00
|
|
|
}
|
|
|
|
|
2014-10-29 01:52:32 +01:00
|
|
|
private:
|
|
|
|
uint32_t id_;
|
|
|
|
MockTableFileSystem* file_system_;
|
2015-09-02 22:58:22 +02:00
|
|
|
stl_wrappers::KVMap table_;
|
2014-10-29 01:52:32 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
class MockTableFactory : public TableFactory {
|
|
|
|
public:
|
|
|
|
MockTableFactory();
|
|
|
|
const char* Name() const override { return "MockTable"; }
|
2020-06-29 23:51:57 +02:00
|
|
|
using TableFactory::NewTableReader;
|
2016-07-20 20:23:31 +02:00
|
|
|
Status NewTableReader(
|
2020-06-29 23:51:57 +02:00
|
|
|
const ReadOptions& ro, const TableReaderOptions& table_reader_options,
|
2018-11-09 20:17:34 +01:00
|
|
|
std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
|
|
|
|
std::unique_ptr<TableReader>* table_reader,
|
2016-07-20 20:23:31 +02:00
|
|
|
bool prefetch_index_and_filter_in_cache = true) const override;
|
A new call back to TablePropertiesCollector to allow users know the entry is add, delete or merge
Summary:
Currently users have no idea a key is add, delete or merge from TablePropertiesCollector call back. Add a new function to add it.
Also refactor the codes so that
(1) make table property collector and internal table property collector two separate data structures with the later one now exposed
(2) table builders only receive internal table properties
Test Plan: Add cases in table_properties_collector_test to cover both of old and new ways of using TablePropertiesCollector.
Reviewers: yhchiang, igor.sugak, rven, igor
Reviewed By: rven, igor
Subscribers: meyering, yoshinorim, maykov, leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D35373
2015-04-06 19:04:30 +02:00
|
|
|
TableBuilder* NewTableBuilder(
|
|
|
|
const TableBuilderOptions& table_builder_options,
|
2015-10-09 01:57:35 +02:00
|
|
|
uint32_t column_familly_id, WritableFileWriter* file) const override;
|
2014-10-29 01:52:32 +01:00
|
|
|
|
2014-11-14 20:35:48 +01:00
|
|
|
// This function will directly create mock table instead of going through
|
2015-09-02 22:58:22 +02:00
|
|
|
// MockTableBuilder. file_contents has to have a format of <internal_key,
|
|
|
|
// value>. Those key-value pairs will then be inserted into the mock table.
|
2014-11-14 20:35:48 +01:00
|
|
|
Status CreateMockTable(Env* env, const std::string& fname,
|
2015-09-02 22:58:22 +02:00
|
|
|
stl_wrappers::KVMap file_contents);
|
2014-11-14 20:35:48 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual Status SanitizeOptions(
|
2018-03-05 22:08:17 +01:00
|
|
|
const DBOptions& /*db_opts*/,
|
|
|
|
const ColumnFamilyOptions& /*cf_opts*/) const override {
|
2014-10-29 01:52:32 +01:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual std::string GetPrintableTableOptions() const override {
|
|
|
|
return std::string();
|
|
|
|
}
|
|
|
|
|
|
|
|
// This function will assert that only a single file exists and that the
|
|
|
|
// contents are equal to file_contents
|
2015-09-02 22:58:22 +02:00
|
|
|
void AssertSingleFile(const stl_wrappers::KVMap& file_contents);
|
|
|
|
void AssertLatestFile(const stl_wrappers::KVMap& file_contents);
|
2014-10-29 01:52:32 +01:00
|
|
|
|
|
|
|
private:
|
2015-10-28 18:53:14 +01:00
|
|
|
uint32_t GetAndWriteNextID(WritableFileWriter* file) const;
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
uint32_t GetIDFromFile(RandomAccessFileReader* file) const;
|
2014-10-29 01:52:32 +01:00
|
|
|
|
|
|
|
mutable MockTableFileSystem file_system_;
|
|
|
|
mutable std::atomic<uint32_t> next_id_;
|
|
|
|
};
|
|
|
|
|
2014-11-14 20:35:48 +01:00
|
|
|
} // namespace mock
|
2020-02-20 21:07:53 +01:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|