run make format for PR 3838 (#3954)
Summary: PR https://github.com/facebook/rocksdb/pull/3838 made some changes that trigger lint warnings. Run `make format` to fix the formatting, as suggested by siying. Also piggyback two changes: 1) fix the singleton destruction order for the Windows and POSIX Env; 2) fix two clang warnings.

Closes https://github.com/facebook/rocksdb/pull/3954

Differential Revision: D8272041

Pulled By: miasantreble

fbshipit-source-id: 7c4fd12bd17aac13534520de0c733328aa3c6c9f
This commit is contained in:
parent 812c7371d3
commit f1592a06c2
@@ -260,7 +260,6 @@ class ComparatorDBTest
DB* db_;
Options last_options_;
std::unique_ptr<const Comparator> comparator_guard;
uint32_t format_;

public:
ComparatorDBTest() : env_(Env::Default()), db_(nullptr) {
env/env_posix.cc
@@ -49,6 +49,7 @@
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "util/coding.h"
+#include "util/compression_context_cache.h"
#include "util/logging.h"
#include "util/random.h"
#include "util/string_util.h"
@@ -1057,6 +1058,7 @@ Env* Env::Default() {
// the destructor of static PosixEnv will go first, then the
// the singletons of ThreadLocalPtr.
ThreadLocalPtr::InitSingletons();
+CompressionContextCache::InitSingleton();
INIT_SYNC_POINT_SINGLETONS();
static PosixEnv default_env;
return &default_env;
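(Aside, not part of the diff: the InitSingletons()/InitSingleton() calls above run before the function-local `static PosixEnv default_env` because C++ destroys static-storage objects in reverse order of construction, so singletons constructed first are destroyed last and are still alive when the Env's destructor runs at process exit. A minimal, hedged sketch of that idiom follows; HelperSingleton and DefaultEnv are made-up names, not RocksDB classes.)

// Sketch: force a helper singleton to be constructed before the static
// object that still needs it during its own destruction.
#include <cstdio>

struct HelperSingleton {
  static HelperSingleton* Instance() {
    static HelperSingleton instance;  // constructed on first use
    return &instance;
  }
  static void InitSingleton() { Instance(); }
  ~HelperSingleton() { std::puts("helper destroyed"); }
};

struct DefaultEnv {
  ~DefaultEnv() {
    // Still uses the helper while shutting down; safe only because the
    // helper was constructed earlier and is therefore destroyed later.
    HelperSingleton::Instance();
    std::puts("env destroyed");
  }
};

DefaultEnv* GetDefaultEnv() {
  HelperSingleton::InitSingleton();  // construct the helper first
  static DefaultEnv default_env;     // destroyed before the helper at exit
  return &default_env;
}

int main() { GetDefaultEnv(); }  // prints "env destroyed", then "helper destroyed"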
@@ -12,6 +12,7 @@
#include <rocksdb/env.h>
#include "port/win/env_win.h"
+#include "util/compression_context_cache.h"
#include "util/sync_point.h"
#include "util/thread_local.h"

namespace rocksdb {
@@ -32,9 +33,9 @@ Env* Env::Default() {
using namespace port;
ThreadLocalPtr::InitSingletons();
CompressionContextCache::InitSingleton();
INIT_SYNC_POINT_SINGLETONS();
std::call_once(winenv_once_flag, []() { envptr = new WinEnv(); });
return envptr;
}

}
@@ -26,7 +26,7 @@ void JemallocDeallocateForZSTD(void* /* opaque */, void* address) {
je_free(address);
}
ZSTD_customMem GetJeZstdAllocationOverrides() {
-return { JemallocAllocateForZSTD, JemallocDeallocateForZSTD, nullptr };
+return {JemallocAllocateForZSTD, JemallocDeallocateForZSTD, nullptr};
}
} // namespace port
} // namespace rocksdb
@@ -63,4 +63,3 @@ void operator delete[](void* p) {
je_free(p);
}
}
@@ -103,8 +103,7 @@ bool GoodCompressionRatio(size_t compressed_size, size_t raw_size) {
} // namespace

// format_version is the block format as defined in include/rocksdb/table.h
-Slice CompressBlock(const Slice& raw,
-const CompressionContext& compression_ctx,
+Slice CompressBlock(const Slice& raw, const CompressionContext& compression_ctx,
CompressionType* type, uint32_t format_version,
std::string* compressed_output) {
*type = compression_ctx.type();
@@ -262,8 +261,8 @@ struct BlockBasedTableBuilder::Rep {

std::string last_key;
// Compression dictionary or nullptr
-const std::string* compression_dict;
-CompressionContext compression_ctx;
+const std::string* compression_dict;
+CompressionContext compression_ctx;
std::unique_ptr<UncompressionContext> verify_ctx;
TableProperties props;

@@ -343,15 +342,14 @@ struct BlockBasedTableBuilder::Rep {
_moptions.prefix_extractor != nullptr));
if (table_options.verify_compression) {
verify_ctx.reset(new UncompressionContext(UncompressionContext::NoCache(),
-compression_ctx.type()));
+compression_ctx.type()));
}
}

Rep(const Rep&) = delete;
Rep& operator=(const Rep&) = delete;

-~Rep() {
-}
+~Rep() {}
};

BlockBasedTableBuilder::BlockBasedTableBuilder(
@@ -513,9 +511,9 @@ void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
}
}

-block_contents = CompressBlock(raw_block_contents, r->compression_ctx,
-&type, r->table_options.format_version,
-&r->compressed_output);
+block_contents =
+CompressBlock(raw_block_contents, r->compression_ctx, &type,
+r->table_options.format_version, &r->compressed_output);

// Some of the compression algorithms are known to be unreliable. If
// the verify_compression flag is set then try to de-compress the
@@ -523,10 +521,9 @@ void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
if (type != kNoCompression && r->table_options.verify_compression) {
// Retrieve the uncompressed contents into a new buffer
BlockContents contents;
-Status stat = UncompressBlockContentsForCompressionType(*r->verify_ctx,
-block_contents.data(), block_contents.size(), &contents,
-r->table_options.format_version,
-r->ioptions);
+Status stat = UncompressBlockContentsForCompressionType(
+*r->verify_ctx, block_contents.data(), block_contents.size(),
+&contents, r->table_options.format_version, r->ioptions);

if (stat.ok()) {
bool compressed_ok = contents.data.compare(raw_block_contents) == 0;
@@ -759,7 +756,8 @@ Status BlockBasedTableBuilder::Finish() {
r->props.merge_operator_name = r->ioptions.merge_operator != nullptr
? r->ioptions.merge_operator->Name()
: "nullptr";
-r->props.compression_name = CompressionTypeToString(r->compression_ctx.type());
+r->props.compression_name =
+CompressionTypeToString(r->compression_ctx.type());
r->props.prefix_extractor_name =
r->moptions.prefix_extractor != nullptr
? r->moptions.prefix_extractor->Name()
@@ -122,8 +122,7 @@ class BlockBasedTableBuilder : public TableBuilder {
const uint64_t kCompressionSizeLimit = std::numeric_limits<int>::max();
};

-Slice CompressBlock(const Slice& raw,
-const CompressionContext& compression_ctx,
+Slice CompressBlock(const Slice& raw, const CompressionContext& compression_ctx,
CompressionType* type, uint32_t format_version,
std::string* compressed_output);

@@ -1103,12 +1103,10 @@ Status BlockBasedTable::GetDataBlockFromCache(
// Retrieve the uncompressed contents into a new buffer
BlockContents contents;
UncompressionContext uncompresssion_ctx(compressed_block->compression_type(),
-compression_dict);
-s = UncompressBlockContents(uncompresssion_ctx,
-compressed_block->data(),
+compression_dict);
+s = UncompressBlockContents(uncompresssion_ctx, compressed_block->data(),
compressed_block->size(), &contents,
-format_version,
-ioptions);
+format_version, ioptions);

// Insert uncompressed block into block cache
if (s.ok()) {
@@ -1183,10 +1181,11 @@ Status BlockBasedTable::PutDataBlockToCache(
BlockContents contents;
Statistics* statistics = ioptions.statistics;
if (raw_block->compression_type() != kNoCompression) {
-UncompressionContext uncompression_ctx(raw_block->compression_type(), compression_dict);
+UncompressionContext uncompression_ctx(raw_block->compression_type(),
+compression_dict);
s = UncompressBlockContents(uncompression_ctx, raw_block->data(),
-raw_block->size(), &contents,
-format_version, ioptions);
+raw_block->size(), &contents, format_version,
+ioptions);
}
if (!s.ok()) {
delete raw_block;
@@ -226,10 +226,9 @@ Status BlockFetcher::ReadBlockContents() {
if (do_uncompress_ && compression_type != kNoCompression) {
// compressed page, uncompress, update cache
UncompressionContext uncompression_ctx(compression_type, compression_dict_);
-status_ = UncompressBlockContents(uncompression_ctx,
-slice_.data(), block_size_, contents_,
-footer_.version(),
-ioptions_);
+status_ =
+UncompressBlockContents(uncompression_ctx, slice_.data(), block_size_,
+contents_, footer_.version(), ioptions_);
} else {
GetBlockContents();
}
@@ -264,12 +264,14 @@ Status ReadFooterFromFile(RandomAccessFileReader* file,
return Status::OK();
}

-Status UncompressBlockContentsForCompressionType(const UncompressionContext& uncompression_ctx,
-const char* data, size_t n, BlockContents* contents,
-uint32_t format_version, const ImmutableCFOptions &ioptions) {
+Status UncompressBlockContentsForCompressionType(
+const UncompressionContext& uncompression_ctx, const char* data, size_t n,
+BlockContents* contents, uint32_t format_version,
+const ImmutableCFOptions& ioptions) {
std::unique_ptr<char[]> ubuf;

-assert(uncompression_ctx.type() != kNoCompression && "Invalid compression type");
+assert(uncompression_ctx.type() != kNoCompression &&
+"Invalid compression type");

StopWatchNano timer(ioptions.env,
ShouldReportDetailedTime(ioptions.env, ioptions.statistics));
@@ -290,8 +292,8 @@ Status UncompressBlockContentsForCompressionType(const UncompressionContext& unc
break;
}
case kZlibCompression:
-ubuf.reset(Zlib_Uncompress(uncompression_ctx,
-data, n, &decompress_size,
+ubuf.reset(Zlib_Uncompress(
+uncompression_ctx, data, n, &decompress_size,
GetCompressFormatForVersion(kZlibCompression, format_version)));
if (!ubuf) {
static char zlib_corrupt_msg[] =
@@ -314,8 +316,8 @@ Status UncompressBlockContentsForCompressionType(const UncompressionContext& unc
BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
break;
case kLZ4Compression:
-ubuf.reset(LZ4_Uncompress(uncompression_ctx,
-data, n, &decompress_size,
+ubuf.reset(LZ4_Uncompress(
+uncompression_ctx, data, n, &decompress_size,
GetCompressFormatForVersion(kLZ4Compression, format_version)));
if (!ubuf) {
static char lz4_corrupt_msg[] =
@@ -326,8 +328,8 @@ Status UncompressBlockContentsForCompressionType(const UncompressionContext& unc
BlockContents(std::move(ubuf), decompress_size, true, kNoCompression);
break;
case kLZ4HCCompression:
-ubuf.reset(LZ4_Uncompress(uncompression_ctx,
-data, n, &decompress_size,
+ubuf.reset(LZ4_Uncompress(
+uncompression_ctx, data, n, &decompress_size,
GetCompressFormatForVersion(kLZ4HCCompression, format_version)));
if (!ubuf) {
static char lz4hc_corrupt_msg[] =
@@ -382,12 +384,11 @@ Status UncompressBlockContentsForCompressionType(const UncompressionContext& unc
Status UncompressBlockContents(const UncompressionContext& uncompression_ctx,
const char* data, size_t n,
BlockContents* contents, uint32_t format_version,
-const ImmutableCFOptions &ioptions) {
+const ImmutableCFOptions& ioptions) {
assert(data[n] != kNoCompression);
assert(data[n] == uncompression_ctx.type());
return UncompressBlockContentsForCompressionType(
-uncompression_ctx, data, n, contents,
-format_version, ioptions);
+uncompression_ctx, data, n, contents, format_version, ioptions);
}

} // namespace rocksdb
@@ -228,19 +228,17 @@ extern Status ReadBlockContents(
// free this buffer.
// For description of compress_format_version and possible values, see
// util/compression.h
-extern Status UncompressBlockContents(const UncompressionContext& uncompression_ctx,
-const char* data, size_t n,
-BlockContents* contents,
-uint32_t compress_format_version,
-const ImmutableCFOptions &ioptions);
+extern Status UncompressBlockContents(
+const UncompressionContext& uncompression_ctx, const char* data, size_t n,
+BlockContents* contents, uint32_t compress_format_version,
+const ImmutableCFOptions& ioptions);

// This is an extension to UncompressBlockContents that accepts
// a specific compression type. This is used by un-wrapped blocks
// with no compression header.
extern Status UncompressBlockContentsForCompressionType(
-const UncompressionContext& uncompression_ctx,
-const char* data, size_t n, BlockContents* contents,
-uint32_t compress_format_version,
+const UncompressionContext& uncompression_ctx, const char* data, size_t n,
+BlockContents* contents, uint32_t compress_format_version,
const ImmutableCFOptions& ioptions);

// Implementation details follow. Clients should ignore,
@@ -1947,36 +1947,36 @@ class Benchmark {
}

inline bool CompressSlice(const CompressionContext& compression_ctx,
-const Slice& input, std::string* compressed) {
+const Slice& input, std::string* compressed) {
bool ok = true;
switch (FLAGS_compression_type_e) {
case rocksdb::kSnappyCompression:
-ok = Snappy_Compress(compression_ctx, input.data(),
-input.size(), compressed);
+ok = Snappy_Compress(compression_ctx, input.data(), input.size(),
+compressed);
break;
case rocksdb::kZlibCompression:
-ok = Zlib_Compress(compression_ctx, 2, input.data(),
-input.size(), compressed);
+ok = Zlib_Compress(compression_ctx, 2, input.data(), input.size(),
+compressed);
break;
case rocksdb::kBZip2Compression:
-ok = BZip2_Compress(compression_ctx, 2, input.data(),
-input.size(), compressed);
+ok = BZip2_Compress(compression_ctx, 2, input.data(), input.size(),
+compressed);
break;
case rocksdb::kLZ4Compression:
-ok = LZ4_Compress(compression_ctx, 2, input.data(),
-input.size(), compressed);
+ok = LZ4_Compress(compression_ctx, 2, input.data(), input.size(),
+compressed);
break;
case rocksdb::kLZ4HCCompression:
-ok = LZ4HC_Compress(compression_ctx, 2, input.data(),
-input.size(), compressed);
+ok = LZ4HC_Compress(compression_ctx, 2, input.data(), input.size(),
+compressed);
break;
case rocksdb::kXpressCompression:
ok = XPRESS_Compress(input.data(),
input.size(), compressed);
break;
case rocksdb::kZSTD:
-ok = ZSTD_Compress(compression_ctx, input.data(),
-input.size(), compressed);
+ok = ZSTD_Compress(compression_ctx, input.data(), input.size(),
+compressed);
break;
default:
ok = false;
@@ -2058,8 +2058,10 @@ class Benchmark {
const int len = FLAGS_block_size;
std::string input_str(len, 'y');
std::string compressed;
-CompressionContext compression_ctx(FLAGS_compression_type_e, Options().compression_opts);
-bool result = CompressSlice(compression_ctx, Slice(input_str), &compressed);
+CompressionContext compression_ctx(FLAGS_compression_type_e,
+Options().compression_opts);
+bool result =
+CompressSlice(compression_ctx, Slice(input_str), &compressed);

if (!result) {
fprintf(stdout, "WARNING: %s compression is not enabled\n",
@@ -2851,8 +2853,8 @@ void VerifyDBFromDB(std::string& truth_db_name) {
int64_t produced = 0;
bool ok = true;
std::string compressed;
-CompressionContext compression_ctx(FLAGS_compression_type_e,
-Options().compression_opts);
+CompressionContext compression_ctx(FLAGS_compression_type_e,
+Options().compression_opts);

// Compress 1G
while (ok && bytes < int64_t(1) << 30) {
@@ -2881,7 +2883,7 @@ void VerifyDBFromDB(std::string& truth_db_name) {

UncompressionContext uncompression_ctx(FLAGS_compression_type_e);
CompressionContext compression_ctx(FLAGS_compression_type_e,
-Options().compression_opts);
+Options().compression_opts);

bool ok = CompressSlice(compression_ctx, input, &compressed);
int64_t bytes = 0;
@@ -2904,8 +2906,7 @@ void VerifyDBFromDB(std::string& truth_db_name) {
}
case rocksdb::kZlibCompression:
uncompressed = Zlib_Uncompress(uncompression_ctx, compressed.data(),
-compressed.size(),
-&decompress_size, 2);
+compressed.size(), &decompress_size, 2);
ok = uncompressed != nullptr;
break;
case rocksdb::kBZip2Compression:
@@ -2915,14 +2916,12 @@ void VerifyDBFromDB(std::string& truth_db_name) {
break;
case rocksdb::kLZ4Compression:
uncompressed = LZ4_Uncompress(uncompression_ctx, compressed.data(),
-compressed.size(),
-&decompress_size, 2);
+compressed.size(), &decompress_size, 2);
ok = uncompressed != nullptr;
break;
case rocksdb::kLZ4HCCompression:
uncompressed = LZ4_Uncompress(uncompression_ctx, compressed.data(),
-compressed.size(),
-&decompress_size, 2);
+compressed.size(), &decompress_size, 2);
ok = uncompressed != nullptr;
break;
case rocksdb::kXpressCompression:
@@ -2932,8 +2931,7 @@ void VerifyDBFromDB(std::string& truth_db_name) {
break;
case rocksdb::kZSTD:
uncompressed = ZSTD_Uncompress(uncompression_ctx, compressed.data(),
-compressed.size(),
-&decompress_size);
+compressed.size(), &decompress_size);
ok = uncompressed != nullptr;
break;
default:
@@ -219,7 +219,8 @@ DEFINE_int32(level0_stop_writes_trigger,
rocksdb::Options().level0_stop_writes_trigger,
"Number of files in level-0 that will trigger put stop.");

-DEFINE_int32(block_size, rocksdb::BlockBasedTableOptions().block_size,
+DEFINE_int32(block_size,
+static_cast<int32_t>(rocksdb::BlockBasedTableOptions().block_size),
"Number of bytes in a block.");

DEFINE_int32(
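(Aside, not part of the diff: the static_cast added above appears to be one of the two clang warning fixes mentioned in the summary. BlockBasedTableOptions::block_size is a size_t, and using it directly as the default of a 32-bit integer flag draws clang's implicit 64-to-32-bit truncation warning, e.g. under -Wshorten-64-to-32. A small hedged illustration follows; TableOptions and DefaultBlockSizeFlag are made-up names.)

// Sketch: explicit narrowing cast silences the implicit-truncation warning.
#include <cstddef>
#include <cstdint>

struct TableOptions {
  size_t block_size = 4 * 1024;  // 64-bit on most platforms
};

int32_t DefaultBlockSizeFlag() {
  // return TableOptions().block_size;                       // would warn: implicit 64 -> 32 truncation
  return static_cast<int32_t>(TableOptions().block_size);    // explicit cast, no warning
}

int main() { return DefaultBlockSizeFlag() > 0 ? 0 : 1; }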
@@ -56,34 +56,32 @@ ZSTD_customMem GetJeZstdAllocationOverrides();
// If, in the future we have more than one native context to
// cache we can arrange this as a tuple
class ZSTDUncompressCachedData {
-public:
+public:
using ZSTDNativeContext = ZSTD_DCtx*;
-ZSTDUncompressCachedData() {}
+ZSTDUncompressCachedData() {}
// Init from cache
ZSTDUncompressCachedData(const ZSTDUncompressCachedData& o) = delete;
ZSTDUncompressCachedData& operator=(const ZSTDUncompressCachedData&) = delete;
-ZSTDUncompressCachedData(ZSTDUncompressCachedData&& o) ROCKSDB_NOEXCEPT :
-ZSTDUncompressCachedData() {
+ZSTDUncompressCachedData(ZSTDUncompressCachedData&& o) ROCKSDB_NOEXCEPT
+: ZSTDUncompressCachedData() {
*this = std::move(o);
}
-ZSTDUncompressCachedData& operator=(ZSTDUncompressCachedData&& o) ROCKSDB_NOEXCEPT {
+ZSTDUncompressCachedData& operator=(ZSTDUncompressCachedData&& o)
+ROCKSDB_NOEXCEPT {
assert(zstd_ctx_ == nullptr);
-std::swap(zstd_ctx_,o.zstd_ctx_);
-std::swap(cache_idx_,o.cache_idx_);
+std::swap(zstd_ctx_, o.zstd_ctx_);
+std::swap(cache_idx_, o.cache_idx_);
return *this;
}
-ZSTDNativeContext Get() const {
-return zstd_ctx_;
-}
-int64_t GetCacheIndex() const {
-return cache_idx_;
-}
+ZSTDNativeContext Get() const { return zstd_ctx_; }
+int64_t GetCacheIndex() const { return cache_idx_; }
void CreateIfNeeded() {
if (zstd_ctx_ == nullptr) {
#ifdef ROCKSDB_ZSTD_CUSTOM_MEM
-zstd_ctx_ = ZSTD_createDCtx_advanced(port::GetJeZstdAllocationOverrides());
+zstd_ctx_ =
+ZSTD_createDCtx_advanced(port::GetJeZstdAllocationOverrides());
#else // ROCKSDB_ZSTD_CUSTOM_MEM
-zstd_ctx_ = ZSTD_createDCtx();
+zstd_ctx_ = ZSTD_createDCtx();
#endif // ROCKSDB_ZSTD_CUSTOM_MEM
cache_idx_ = -1;
}
@@ -97,31 +95,30 @@ public:
ZSTD_freeDCtx(zstd_ctx_);
}
}
-private:
-ZSTDNativeContext zstd_ctx_ = nullptr;
-int64_t cache_idx_ = -1; // -1 means this instance owns the context
+
+private:
+ZSTDNativeContext zstd_ctx_ = nullptr;
+int64_t cache_idx_ = -1; // -1 means this instance owns the context
};
-#endif // (ZSTD_VERSION_NUMBER >= 500)
+#endif // (ZSTD_VERSION_NUMBER >= 500)
} // namespace rocksdb
#endif // ZSTD

#if !(defined ZSTD) || !(ZSTD_VERSION_NUMBER >= 500)
namespace rocksdb {
class ZSTDUncompressCachedData {
-void* padding; // unused
-public:
+void* padding; // unused
+public:
using ZSTDNativeContext = void*;
ZSTDUncompressCachedData() {}
ZSTDUncompressCachedData(const ZSTDUncompressCachedData&) {}
ZSTDUncompressCachedData& operator=(const ZSTDUncompressCachedData&) = delete;
-ZSTDUncompressCachedData(ZSTDUncompressCachedData&&) ROCKSDB_NOEXCEPT = default;
-ZSTDUncompressCachedData& operator=(ZSTDUncompressCachedData&&) ROCKSDB_NOEXCEPT = default;
-ZSTDNativeContext Get() const {
-return nullptr;
-}
-int64_t GetCacheIndex() const {
-return -1;
-}
+ZSTDUncompressCachedData(ZSTDUncompressCachedData&&)
+ROCKSDB_NOEXCEPT = default;
+ZSTDUncompressCachedData& operator=(ZSTDUncompressCachedData&&)
+ROCKSDB_NOEXCEPT = default;
+ZSTDNativeContext Get() const { return nullptr; }
+int64_t GetCacheIndex() const { return -1; }
void CreateIfNeeded() {}
void InitFromCache(const ZSTDUncompressCachedData&, int64_t) {}
};
@@ -136,18 +133,19 @@ namespace rocksdb {

// Instantiate this class and pass it to the uncompression API below
class CompressionContext {
-private:
-const CompressionType type_;
+private:
+const CompressionType type_;
const CompressionOptions opts_;
-Slice dict_;
+Slice dict_;
#if defined(ZSTD) && (ZSTD_VERSION_NUMBER >= 500)
-ZSTD_CCtx* zstd_ctx_ = nullptr;
+ZSTD_CCtx* zstd_ctx_ = nullptr;
void CreateNativeContext() {
if (type_ == kZSTD) {
#ifdef ROCKSDB_ZSTD_CUSTOM_MEM
-zstd_ctx_ = ZSTD_createCCtx_advanced(port::GetJeZstdAllocationOverrides());
+zstd_ctx_ =
+ZSTD_createCCtx_advanced(port::GetJeZstdAllocationOverrides());
#else // ROCKSDB_ZSTD_CUSTOM_MEM
-zstd_ctx_ = ZSTD_createCCtx();
+zstd_ctx_ = ZSTD_createCCtx();
#endif // ROCKSDB_ZSTD_CUSTOM_MEM
}
}
@@ -156,79 +154,63 @@ private:
ZSTD_freeCCtx(zstd_ctx_);
}
}
-public:
+
+public:
// callable inside ZSTD_Compress
-ZSTD_CCtx * ZSTDPreallocCtx() const {
+ZSTD_CCtx* ZSTDPreallocCtx() const {
assert(type_ == kZSTD);
return zstd_ctx_;
}
-#else // ZSTD && (ZSTD_VERSION_NUMBER >= 500)
-private:
+#else // ZSTD && (ZSTD_VERSION_NUMBER >= 500)
+private:
void CreateNativeContext() {}
void DestroyNativeContext() {}
-#endif //ZSTD && (ZSTD_VERSION_NUMBER >= 500)
-public:
-explicit CompressionContext(CompressionType comp_type) :
-type_(comp_type) {
+#endif // ZSTD && (ZSTD_VERSION_NUMBER >= 500)
+public:
+explicit CompressionContext(CompressionType comp_type) : type_(comp_type) {
CreateNativeContext();
}
-CompressionContext(CompressionType comp_type,
-const CompressionOptions& opts,
-const Slice& comp_dict = Slice()) :
-type_(comp_type),
-opts_(opts),
-dict_(comp_dict) {
+CompressionContext(CompressionType comp_type, const CompressionOptions& opts,
+const Slice& comp_dict = Slice())
+: type_(comp_type), opts_(opts), dict_(comp_dict) {
CreateNativeContext();
}
-~CompressionContext() {
-DestroyNativeContext();
-}
+~CompressionContext() { DestroyNativeContext(); }
CompressionContext(const CompressionContext&) = delete;
CompressionContext& operator=(const CompressionContext&) = delete;

-const CompressionOptions& options() const {
-return opts_;
-}
-CompressionType type() const {
-return type_;
-}
-const Slice& dict() const {
-return dict_;
-}
-Slice& dict() {
-return dict_;
-}
+const CompressionOptions& options() const { return opts_; }
+CompressionType type() const { return type_; }
+const Slice& dict() const { return dict_; }
+Slice& dict() { return dict_; }
};

// Instantiate this class and pass it to the uncompression API below
class UncompressionContext {
-private:
-CompressionType type_;
-Slice dict_;
+private:
+CompressionType type_;
+Slice dict_;
CompressionContextCache* ctx_cache_ = nullptr;
ZSTDUncompressCachedData uncomp_cached_data_;
-public:
+
+public:
struct NoCache {};
// Do not use context cache, used by TableBuilder
-UncompressionContext(NoCache, CompressionType comp_type) :
-type_(comp_type) {
-}
-explicit UncompressionContext(CompressionType comp_type) :
-UncompressionContext(comp_type, Slice()) {
-}
-UncompressionContext(CompressionType comp_type, const Slice& comp_dict) :
-type_(comp_type), dict_(comp_dict) {
+UncompressionContext(NoCache, CompressionType comp_type) : type_(comp_type) {}
+explicit UncompressionContext(CompressionType comp_type)
+: UncompressionContext(comp_type, Slice()) {}
+UncompressionContext(CompressionType comp_type, const Slice& comp_dict)
+: type_(comp_type), dict_(comp_dict) {
if (type_ == kZSTD) {
ctx_cache_ = CompressionContextCache::Instance();
uncomp_cached_data_ = ctx_cache_->GetCachedZSTDUncompressData();
}
}
~UncompressionContext() {
-if (type_ == kZSTD &&
-uncomp_cached_data_.GetCacheIndex() != -1) {
+if (type_ == kZSTD && uncomp_cached_data_.GetCacheIndex() != -1) {
assert(ctx_cache_ != nullptr);
ctx_cache_->ReturnCachedZSTDUncompressData(
-uncomp_cached_data_.GetCacheIndex());
+uncomp_cached_data_.GetCacheIndex());
}
}
UncompressionContext(const UncompressionContext&) = delete;
@@ -237,15 +219,9 @@ public:
ZSTDUncompressCachedData::ZSTDNativeContext GetZSTDContext() const {
return uncomp_cached_data_.Get();
}
-CompressionType type() const {
-return type_;
-}
-const Slice& dict() const {
-return dict_;
-}
-Slice& dict() {
-return dict_;
-}
+CompressionType type() const { return type_; }
+const Slice& dict() const { return dict_; }
+Slice& dict() { return dict_; }
};

inline bool Snappy_Supported() {
@@ -471,9 +447,9 @@ inline bool Zlib_Compress(const CompressionContext& ctx,

if (ctx.dict().size()) {
// Initialize the compression library's dictionary
-st = deflateSetDictionary(
-&_stream, reinterpret_cast<const Bytef*>(ctx.dict().data()),
-static_cast<unsigned int>(ctx.dict().size()));
+st = deflateSetDictionary(&_stream,
+reinterpret_cast<const Bytef*>(ctx.dict().data()),
+static_cast<unsigned int>(ctx.dict().size()));
if (st != Z_OK) {
deflateEnd(&_stream);
return false;
@@ -516,8 +492,8 @@ inline bool Zlib_Compress(const CompressionContext& ctx,
// header in varint32 format
// @param compression_dict Data for presetting the compression library's
// dictionary.
-inline char* Zlib_Uncompress(const UncompressionContext& ctx, const char* input_data,
-size_t input_length,
+inline char* Zlib_Uncompress(const UncompressionContext& ctx,
+const char* input_data, size_t input_length,
int* decompress_size,
uint32_t compress_format_version,
int windowBits = -14) {
@@ -551,9 +527,9 @@ inline char* Zlib_Uncompress(const UncompressionContext& ctx, const char* input_

if (ctx.dict().size()) {
// Initialize the compression library's dictionary
-st = inflateSetDictionary(
-&_stream, reinterpret_cast<const Bytef*>(ctx.dict().data()),
-static_cast<unsigned int>(ctx.dict().size()));
+st = inflateSetDictionary(&_stream,
+reinterpret_cast<const Bytef*>(ctx.dict().data()),
+static_cast<unsigned int>(ctx.dict().size()));
if (st != Z_OK) {
return nullptr;
}
@@ -835,8 +811,8 @@ inline bool LZ4_Compress(const CompressionContext& ctx,
// header in varint32 format
// @param compression_dict Data for presetting the compression library's
// dictionary.
-inline char* LZ4_Uncompress(const UncompressionContext& ctx, const char* input_data,
-size_t input_length,
+inline char* LZ4_Uncompress(const UncompressionContext& ctx,
+const char* input_data, size_t input_length,
int* decompress_size,
uint32_t compress_format_version) {
#ifdef LZ4
@@ -1026,9 +1002,9 @@ inline bool ZSTD_Compress(const CompressionContext& ctx, const char* input,
#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+
ZSTD_CCtx* context = ctx.ZSTDPreallocCtx();
assert(context != nullptr);
-outlen = ZSTD_compress_usingDict(
-context, &(*output)[output_header_len], compressBound, input, length,
-ctx.dict().data(), ctx.dict().size(), level);
+outlen = ZSTD_compress_usingDict(context, &(*output)[output_header_len],
+compressBound, input, length,
+ctx.dict().data(), ctx.dict().size(), level);
#else // up to v0.4.x
outlen = ZSTD_compress(&(*output)[output_header_len], compressBound, input,
length, level);
@@ -1049,8 +1025,8 @@ inline bool ZSTD_Compress(const CompressionContext& ctx, const char* input,

// @param compression_dict Data for presetting the compression library's
// dictionary.
-inline char* ZSTD_Uncompress(const UncompressionContext& ctx, const char* input_data,
-size_t input_length,
+inline char* ZSTD_Uncompress(const UncompressionContext& ctx,
+const char* input_data, size_t input_length,
int* decompress_size) {
#ifdef ZSTD
uint32_t output_len = 0;
@@ -1065,8 +1041,8 @@ inline char* ZSTD_Uncompress(const UncompressionContext& ctx, const char* input_
ZSTD_DCtx* context = ctx.GetZSTDContext();
assert(context != nullptr);
actual_output_length = ZSTD_decompress_usingDict(
-context, output, output_len, input_data, input_length,
-ctx.dict().data(), ctx.dict().size());
+context, output, output_len, input_data, input_length, ctx.dict().data(),
+ctx.dict().size());
#else // up to v0.4.x
actual_output_length =
ZSTD_decompress(output, output_len, input_data, input_length);
@@ -26,13 +26,13 @@ struct ZSTDCachedData {
// We choose to cache the below structure instead of a ptr
// because we want to avoid a) native types leak b) make
// cache use transparent for the user
-ZSTDUncompressCachedData uncomp_cached_data_;
-std::atomic<void*> zstd_uncomp_sentinel_;
+ZSTDUncompressCachedData uncomp_cached_data_;
+std::atomic<void*> zstd_uncomp_sentinel_;

-char padding[(CACHE_LINE_SIZE -
-(sizeof(ZSTDUncompressCachedData) +
-sizeof(std::atomic<void*>)) %
-CACHE_LINE_SIZE)]; // unused padding field
+char
+padding[(CACHE_LINE_SIZE -
+(sizeof(ZSTDUncompressCachedData) + sizeof(std::atomic<void*>)) %
+CACHE_LINE_SIZE)]; // unused padding field

ZSTDCachedData() : zstd_uncomp_sentinel_(&uncomp_cached_data_) {}
ZSTDCachedData(const ZSTDCachedData&) = delete;
@@ -41,7 +41,8 @@ struct ZSTDCachedData {
ZSTDUncompressCachedData GetUncompressData(int64_t idx) {
ZSTDUncompressCachedData result;
void* expected = &uncomp_cached_data_;
-if (zstd_uncomp_sentinel_.compare_exchange_strong(expected, SentinelValue)) {
+if (zstd_uncomp_sentinel_.compare_exchange_strong(expected,
+SentinelValue)) {
uncomp_cached_data_.CreateIfNeeded();
result.InitFromCache(uncomp_cached_data_, idx);
} else {
@@ -60,15 +61,15 @@ struct ZSTDCachedData {
}
}
};
-static_assert(sizeof(ZSTDCachedData) % CACHE_LINE_SIZE == 0, "Expected CACHE_LINE_SIZE alignment");
-} // compression_cache
+static_assert(sizeof(ZSTDCachedData) % CACHE_LINE_SIZE == 0,
+"Expected CACHE_LINE_SIZE alignment");
+} // namespace compression_cache

using namespace compression_cache;

class CompressionContextCache::Rep {
-public:
-Rep() {
-}
+public:
+Rep() {}
ZSTDUncompressCachedData GetZSTDUncompressData() {
auto p = per_core_uncompr_.AccessElementAndIndex();
int64_t idx = static_cast<int64_t>(p.second);
@@ -79,24 +80,22 @@ public:
auto* cn = per_core_uncompr_.AccessAtCore(static_cast<size_t>(idx));
cn->ReturnUncompressData();
}
-private:
+
+private:
CoreLocalArray<ZSTDCachedData> per_core_uncompr_;
};

-CompressionContextCache::CompressionContextCache() :
-rep_(new Rep()) {
-}
+CompressionContextCache::CompressionContextCache() : rep_(new Rep()) {}

CompressionContextCache* CompressionContextCache::Instance() {
static CompressionContextCache instance;
return &instance;
}

-void CompressionContextCache::InitSingleton() {
-Instance();
-}
+void CompressionContextCache::InitSingleton() { Instance(); }

-ZSTDUncompressCachedData CompressionContextCache::GetCachedZSTDUncompressData() {
+ZSTDUncompressCachedData
+CompressionContextCache::GetCachedZSTDUncompressData() {
return rep_->GetZSTDUncompressData();
}

@@ -104,8 +103,6 @@ void CompressionContextCache::ReturnCachedZSTDUncompressData(int64_t idx) {
rep_->ReturnZSTDUncompressData(idx);
}

-CompressionContextCache::~CompressionContextCache() {
-delete rep_;
-}
+CompressionContextCache::~CompressionContextCache() { delete rep_; }

-}
+} // namespace rocksdb
@@ -11,9 +11,9 @@
// This helps with Random Read latencies and reduces CPU utilization
// Caching is implemented using CoreLocal facility. Compression/Uncompression
// instances are cached on a per core basis using CoreLocalArray. A borrowed
-// instance is atomically replaced with a sentinel value for the time of being used.
-// If it turns out that another thread is already makes use of the instance we still
-// create one on the heap which is later is destroyed.
+// instance is atomically replaced with a sentinel value for the time of being
+// used. If it turns out that another thread is already makes use of the
+// instance we still create one on the heap which is later is destroyed.

#pragma once

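(Aside, not part of the diff: the header comment above describes the borrow/return protocol that the ZSTDCachedData hunks earlier implement with compare_exchange_strong. The following is a simplified, hedged sketch of that idea, not the RocksDB implementation; Ctx and Slot are made-up names, and a null pointer stands in for the sentinel value.)

// Sketch: per-core slot caches a reusable context; borrowing swaps in a
// sentinel with a CAS, and a losing thread falls back to a heap instance.
#include <atomic>
#include <cstdio>

struct Ctx { /* reusable decompression state */ };

struct Slot {
  Ctx cached;
  std::atomic<void*> sentinel{&cached};

  Ctx* Borrow(bool* owned) {
    void* expected = &cached;
    if (sentinel.compare_exchange_strong(expected, nullptr)) {
      *owned = false;           // got the cached instance
      return &cached;
    }
    *owned = true;              // slot busy: use a temporary heap instance
    return new Ctx();
  }

  void Return(Ctx* c, bool owned) {
    if (owned) {
      delete c;                 // temporary instance, just free it
    } else {
      sentinel.store(&cached);  // put the cached instance back
    }
  }
};

int main() {
  Slot slot;
  bool owned = false;
  Ctx* c = slot.Borrow(&owned);
  std::printf("borrowed %s instance\n", owned ? "heap" : "cached");
  slot.Return(c, owned);
}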
@@ -23,7 +23,7 @@ namespace rocksdb {
class ZSTDUncompressCachedData;

class CompressionContextCache {
-public:
+public:
// Singleton
static CompressionContextCache* Instance();
static void InitSingleton();
@@ -33,13 +33,13 @@ public:
ZSTDUncompressCachedData GetCachedZSTDUncompressData();
void ReturnCachedZSTDUncompressData(int64_t idx);

-private:
+private:
// Singleton
-CompressionContextCache();
+CompressionContextCache();
~CompressionContextCache();

class Rep;
Rep* rep_;
};

-}
+} // namespace rocksdb
@@ -1122,10 +1122,8 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry,
BLOB_DB_DECOMPRESSION_MICROS);
UncompressionContext uncompression_ctx(bfile->compression());
s = UncompressBlockContentsForCompressionType(
-uncompression_ctx,
-blob_value.data(), blob_value.size(), &contents,
-kBlockBasedTableVersionFormat,
-*(cfh->cfd()->ioptions()));
+uncompression_ctx, blob_value.data(), blob_value.size(), &contents,
+kBlockBasedTableVersionFormat, *(cfh->cfd()->ioptions()));
}
value->PinSelf(contents.data);
}
@@ -207,10 +207,8 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
BlockContents contents;
UncompressionContext uncompression_ctx(compression);
s = UncompressBlockContentsForCompressionType(
-uncompression_ctx,
-slice.data() + key_size, value_size, &contents,
-2 /*compress_format_version*/,
-ImmutableCFOptions(Options()));
+uncompression_ctx, slice.data() + key_size, value_size, &contents,
+2 /*compress_format_version*/, ImmutableCFOptions(Options()));
if (!s.ok()) {
return s;
}
@@ -173,8 +173,7 @@ void ColumnAwareEncodingReader::DecodeBlocksFromRowFormat(
(CompressionType)slice_final_with_bit[slice_final_with_bit.size() - 1];
if (type != kNoCompression) {
UncompressionContext uncompression_ctx(type);
-UncompressBlockContents(uncompression_ctx,
-slice_final_with_bit.c_str(),
+UncompressBlockContents(uncompression_ctx, slice_final_with_bit.c_str(),
slice_final_with_bit.size() - 1, &contents,
format_version, ioptions);
decoded_content = std::string(contents.data.data(), contents.data.size());
@@ -247,9 +246,8 @@ void CompressDataBlock(const std::string& output_content, Slice* slice_final,
CompressionType* type, std::string* compressed_output) {
CompressionContext compression_ctx(*type);
uint32_t format_version = 2; // hard-coded version
-*slice_final =
-CompressBlock(output_content, compression_ctx, type, format_version,
-compressed_output);
+*slice_final = CompressBlock(output_content, compression_ctx, type,
+format_version, compressed_output);
}

} // namespace