2016-02-10 00:12:00 +01:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-16 01:03:42 +02:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2015-01-09 21:57:11 +01:00
|
|
|
//
|
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
//
|
|
|
|
#pragma once
|
|
|
|
|
2015-01-15 01:24:24 +01:00
|
|
|
#include <algorithm>
|
|
|
|
#include <limits>
|
2019-01-24 03:11:08 +01:00
|
|
|
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
|
|
|
|
#ifdef OS_FREEBSD
|
|
|
|
#include <malloc_np.h>
|
|
|
|
#else // OS_FREEBSD
|
|
|
|
#include <malloc.h>
|
|
|
|
#endif // OS_FREEBSD
|
|
|
|
#endif // ROCKSDB_MALLOC_USABLE_SIZE
|
2015-08-28 00:40:42 +02:00
|
|
|
#include <string>
|
2015-01-15 01:24:24 +01:00
|
|
|
|
2019-05-31 02:39:43 +02:00
|
|
|
#include "memory/memory_allocator.h"
|
2015-01-09 21:57:11 +01:00
|
|
|
#include "rocksdb/options.h"
|
2018-10-03 02:21:54 +02:00
|
|
|
#include "rocksdb/table.h"
|
2020-08-13 03:24:27 +02:00
|
|
|
#include "test_util/sync_point.h"
|
2015-01-15 01:24:24 +01:00
|
|
|
#include "util/coding.h"
|
2018-06-04 21:04:52 +02:00
|
|
|
#include "util/compression_context_cache.h"
|
2019-04-02 23:48:52 +02:00
|
|
|
#include "util/string_util.h"
|
2015-01-09 21:57:11 +01:00
|
|
|
|
|
|
|
#ifdef SNAPPY
|
|
|
|
#include <snappy.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef ZLIB
|
|
|
|
#include <zlib.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef BZIP2
|
|
|
|
#include <bzlib.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if defined(LZ4)
|
|
|
|
#include <lz4.h>
|
|
|
|
#include <lz4hc.h>
|
|
|
|
#endif
|
|
|
|
|
2015-08-28 00:40:42 +02:00
|
|
|
#if defined(ZSTD)
|
|
|
|
#include <zstd.h>
|
2018-08-23 03:22:10 +02:00
|
|
|
#if ZSTD_VERSION_NUMBER >= 10103 // v1.1.3+
|
2017-11-03 06:46:13 +01:00
|
|
|
#include <zdict.h>
|
2018-08-23 03:22:10 +02:00
|
|
|
#endif // ZSTD_VERSION_NUMBER >= 10103
|
2020-02-20 21:07:53 +01:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2018-06-04 21:04:52 +02:00
|
|
|
// Need this for the context allocation override
|
|
|
|
// On windows we need to do this explicitly
|
|
|
|
#if (ZSTD_VERSION_NUMBER >= 500)
|
|
|
|
#if defined(ROCKSDB_JEMALLOC) && defined(OS_WIN) && \
|
|
|
|
defined(ZSTD_STATIC_LINKING_ONLY)
|
|
|
|
#define ROCKSDB_ZSTD_CUSTOM_MEM
|
|
|
|
namespace port {
|
|
|
|
ZSTD_customMem GetJeZstdAllocationOverrides();
|
|
|
|
} // namespace port
|
|
|
|
#endif // defined(ROCKSDB_JEMALLOC) && defined(OS_WIN) &&
|
|
|
|
// defined(ZSTD_STATIC_LINKING_ONLY)
|
|
|
|
|
2019-01-24 03:11:08 +01:00
|
|
|
// We require `ZSTD_sizeof_DDict` and `ZSTD_createDDict_byReference` to use
|
|
|
|
// `ZSTD_DDict`. The former was introduced in v1.0.0 and the latter was
|
|
|
|
// introduced in v1.1.3. But an important bug fix for `ZSTD_sizeof_DDict` came
|
|
|
|
// in v1.1.4, so that is the version we require. As of today's latest version
|
|
|
|
// (v1.3.8), they are both still in the experimental API, which means they are
|
|
|
|
// only exported when the compiler flag `ZSTD_STATIC_LINKING_ONLY` is set.
|
|
|
|
#if defined(ZSTD_STATIC_LINKING_ONLY) && ZSTD_VERSION_NUMBER >= 10104
|
|
|
|
#define ROCKSDB_ZSTD_DDICT
|
|
|
|
#endif // defined(ZSTD_STATIC_LINKING_ONLY) && ZSTD_VERSION_NUMBER >= 10104
|
|
|
|
|
2018-06-04 21:04:52 +02:00
|
|
|
// Cached data represents a portion that can be re-used
|
|
|
|
// If, in the future we have more than one native context to
|
|
|
|
// cache we can arrange this as a tuple
|
|
|
|
class ZSTDUncompressCachedData {
 public:
  using ZSTDNativeContext = ZSTD_DCtx*;

  ZSTDUncompressCachedData() {}

  // Copying is disabled: at most one instance may own the native context.
  // Sharing a context with the cache is done explicitly via InitFromCache.
  ZSTDUncompressCachedData(const ZSTDUncompressCachedData& o) = delete;
  ZSTDUncompressCachedData& operator=(const ZSTDUncompressCachedData&) = delete;

  // Move transfers both the native context and the cache index; the
  // moved-from object is left empty (nullptr context, index -1).
  ZSTDUncompressCachedData(ZSTDUncompressCachedData&& o) ROCKSDB_NOEXCEPT
      : ZSTDUncompressCachedData() {
    *this = std::move(o);
  }
  ZSTDUncompressCachedData& operator=(ZSTDUncompressCachedData&& o)
      ROCKSDB_NOEXCEPT {
    // Only an empty instance may be move-assigned into; swapping would
    // otherwise hand our context to `o` and change ownership unexpectedly.
    assert(zstd_ctx_ == nullptr);
    std::swap(zstd_ctx_, o.zstd_ctx_);
    std::swap(cache_idx_, o.cache_idx_);
    return *this;
  }

  ZSTDNativeContext Get() const { return zstd_ctx_; }
  int64_t GetCacheIndex() const { return cache_idx_; }

  // Lazily allocates the native decompression context. A context created
  // here (as opposed to InitFromCache) is owned by this instance
  // (cache_idx_ == -1) and freed in the destructor.
  void CreateIfNeeded() {
    if (zstd_ctx_ == nullptr) {
#ifdef ROCKSDB_ZSTD_CUSTOM_MEM
      zstd_ctx_ =
          ZSTD_createDCtx_advanced(port::GetJeZstdAllocationOverrides());
#else  // ROCKSDB_ZSTD_CUSTOM_MEM
      zstd_ctx_ = ZSTD_createDCtx();
#endif  // ROCKSDB_ZSTD_CUSTOM_MEM
      cache_idx_ = -1;
    }
  }

  // Borrow a context owned by the cache; a non-negative idx records which
  // cache slot it must be returned to, and prevents the destructor from
  // freeing it.
  void InitFromCache(const ZSTDUncompressCachedData& o, int64_t idx) {
    zstd_ctx_ = o.zstd_ctx_;
    cache_idx_ = idx;
  }

  ~ZSTDUncompressCachedData() {
    // Free only if we own the context (not borrowed from the cache).
    if (zstd_ctx_ != nullptr && cache_idx_ == -1) {
      ZSTD_freeDCtx(zstd_ctx_);
    }
  }

 private:
  ZSTDNativeContext zstd_ctx_ = nullptr;
  int64_t cache_idx_ = -1;  // -1 means this instance owns the context
};
|
2018-06-05 21:51:05 +02:00
|
|
|
#endif // (ZSTD_VERSION_NUMBER >= 500)
|
2020-02-20 21:07:53 +01:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2017-11-03 06:46:13 +01:00
|
|
|
#endif // ZSTD
|
2015-08-28 00:40:42 +02:00
|
|
|
|
2018-06-04 21:04:52 +02:00
|
|
|
#if !(defined ZSTD) || !(ZSTD_VERSION_NUMBER >= 500)
|
2020-02-20 21:07:53 +01:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2018-06-04 21:04:52 +02:00
|
|
|
class ZSTDUncompressCachedData {
|
2018-06-05 21:51:05 +02:00
|
|
|
void* padding; // unused
|
|
|
|
public:
|
2018-06-04 21:04:52 +02:00
|
|
|
using ZSTDNativeContext = void*;
|
|
|
|
ZSTDUncompressCachedData() {}
|
|
|
|
ZSTDUncompressCachedData(const ZSTDUncompressCachedData&) {}
|
|
|
|
ZSTDUncompressCachedData& operator=(const ZSTDUncompressCachedData&) = delete;
|
2018-06-05 21:51:05 +02:00
|
|
|
ZSTDUncompressCachedData(ZSTDUncompressCachedData&&)
|
|
|
|
ROCKSDB_NOEXCEPT = default;
|
|
|
|
ZSTDUncompressCachedData& operator=(ZSTDUncompressCachedData&&)
|
|
|
|
ROCKSDB_NOEXCEPT = default;
|
|
|
|
ZSTDNativeContext Get() const { return nullptr; }
|
|
|
|
int64_t GetCacheIndex() const { return -1; }
|
2018-06-04 21:04:52 +02:00
|
|
|
void CreateIfNeeded() {}
|
|
|
|
void InitFromCache(const ZSTDUncompressCachedData&, int64_t) {}
|
2018-07-13 23:07:53 +02:00
|
|
|
private:
|
|
|
|
void ignore_padding__() { padding = nullptr; }
|
2018-06-04 21:04:52 +02:00
|
|
|
};
|
2020-02-20 21:07:53 +01:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2018-06-04 21:04:52 +02:00
|
|
|
#endif
|
|
|
|
|
2016-04-20 07:54:24 +02:00
|
|
|
#if defined(XPRESS)
|
|
|
|
#include "port/xpress.h"
|
|
|
|
#endif
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2015-01-09 21:57:11 +01:00
|
|
|
|
2019-01-19 04:10:17 +01:00
|
|
|
// Holds dictionary and related data, like ZSTD's digested compression
|
|
|
|
// dictionary.
|
|
|
|
struct CompressionDict {
#if ZSTD_VERSION_NUMBER >= 700
  // Digested (preprocessed) form of dict_, built once so repeated ZSTD
  // compressions can skip re-processing the raw dictionary.
  ZSTD_CDict* zstd_cdict_ = nullptr;
#endif  // ZSTD_VERSION_NUMBER >= 700
  // Raw dictionary bytes; always kept so non-ZSTD algorithms (or a failed
  // digest) can still use the dictionary directly.
  std::string dict_;

 public:
#if ZSTD_VERSION_NUMBER >= 700
  CompressionDict(std::string dict, CompressionType type, int level) {
#else  // ZSTD_VERSION_NUMBER >= 700
  CompressionDict(std::string dict, CompressionType /*type*/, int /*level*/) {
#endif  // ZSTD_VERSION_NUMBER >= 700
    dict_ = std::move(dict);
#if ZSTD_VERSION_NUMBER >= 700
    zstd_cdict_ = nullptr;
    if (!dict_.empty() && (type == kZSTD || type == kZSTDNotFinalCompression)) {
      if (level == CompressionOptions::kDefaultCompressionLevel) {
        // 3 is the value of ZSTD_CLEVEL_DEFAULT (not exposed publicly), see
        // https://github.com/facebook/zstd/issues/1148
        level = 3;
      }
      // Should be safe (but slower) if below call fails as we'll use the
      // raw dictionary to compress.
      zstd_cdict_ = ZSTD_createCDict(dict_.data(), dict_.size(), level);
      assert(zstd_cdict_ != nullptr);
    }
#endif  // ZSTD_VERSION_NUMBER >= 700
  }

  ~CompressionDict() {
#if ZSTD_VERSION_NUMBER >= 700
    size_t res = 0;
    if (zstd_cdict_ != nullptr) {
      res = ZSTD_freeCDict(zstd_cdict_);
    }
    assert(res == 0);  // Last I checked they can't fail
    (void)res;         // prevent unused var warning
#endif  // ZSTD_VERSION_NUMBER >= 700
  }

#if ZSTD_VERSION_NUMBER >= 700
  // May be nullptr if no digested dictionary was built (empty dict,
  // non-ZSTD type, or ZSTD_createCDict failure).
  const ZSTD_CDict* GetDigestedZstdCDict() const { return zstd_cdict_; }
#endif  // ZSTD_VERSION_NUMBER >= 700

  Slice GetRawDict() const { return dict_; }

  // Shared sentinel for "no dictionary".
  static const CompressionDict& GetEmptyDict() {
    static CompressionDict empty_dict{};
    return empty_dict;
  }

  CompressionDict() = default;
  // Disable copy/move
  CompressionDict(const CompressionDict&) = delete;
  CompressionDict& operator=(const CompressionDict&) = delete;
  CompressionDict(CompressionDict&&) = delete;
  CompressionDict& operator=(CompressionDict&&) = delete;
};
|
|
|
|
|
|
|
|
// Holds dictionary and related data, like ZSTD's digested uncompression
|
|
|
|
// dictionary.
|
2019-08-23 17:25:52 +02:00
|
|
|
struct UncompressionDict {
|
|
|
|
// Block containing the data for the compression dictionary in case the
|
|
|
|
// constructor that takes a string parameter is used.
|
2019-07-24 00:57:43 +02:00
|
|
|
std::string dict_;
|
|
|
|
|
2019-08-23 17:25:52 +02:00
|
|
|
// Block containing the data for the compression dictionary in case the
|
|
|
|
// constructor that takes a Slice parameter is used and the passed in
|
|
|
|
// CacheAllocationPtr is not nullptr.
|
|
|
|
CacheAllocationPtr allocation_;
|
|
|
|
|
|
|
|
// Slice pointing to the compression dictionary data. Can point to
|
|
|
|
// dict_, allocation_, or some other memory location, depending on how
|
|
|
|
// the object was constructed.
|
2019-07-24 00:57:43 +02:00
|
|
|
Slice slice_;
|
|
|
|
|
2019-01-24 03:11:08 +01:00
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
2019-07-24 00:57:43 +02:00
|
|
|
// Processed version of the contents of slice_ for ZSTD compression.
|
|
|
|
ZSTD_DDict* zstd_ddict_ = nullptr;
|
2019-01-24 03:11:08 +01:00
|
|
|
#endif // ROCKSDB_ZSTD_DDICT
|
|
|
|
|
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
2019-08-23 17:25:52 +02:00
|
|
|
UncompressionDict(std::string dict, bool using_zstd)
|
2019-01-24 03:11:08 +01:00
|
|
|
#else // ROCKSDB_ZSTD_DDICT
|
2019-08-23 17:25:52 +02:00
|
|
|
UncompressionDict(std::string dict, bool /* using_zstd */)
|
2019-01-24 03:11:08 +01:00
|
|
|
#endif // ROCKSDB_ZSTD_DDICT
|
2019-08-23 17:25:52 +02:00
|
|
|
: dict_(std::move(dict)), slice_(dict_) {
|
2019-01-24 03:11:08 +01:00
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
2019-07-24 00:57:43 +02:00
|
|
|
if (!slice_.empty() && using_zstd) {
|
|
|
|
zstd_ddict_ = ZSTD_createDDict_byReference(slice_.data(), slice_.size());
|
2019-01-19 04:10:17 +01:00
|
|
|
assert(zstd_ddict_ != nullptr);
|
|
|
|
}
|
2019-01-24 03:11:08 +01:00
|
|
|
#endif // ROCKSDB_ZSTD_DDICT
|
2019-01-19 04:10:17 +01:00
|
|
|
}
|
|
|
|
|
2019-08-23 17:25:52 +02:00
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
|
|
|
UncompressionDict(Slice slice, CacheAllocationPtr&& allocation,
|
|
|
|
bool using_zstd)
|
|
|
|
#else // ROCKSDB_ZSTD_DDICT
|
|
|
|
UncompressionDict(Slice slice, CacheAllocationPtr&& allocation,
|
|
|
|
bool /* using_zstd */)
|
|
|
|
#endif // ROCKSDB_ZSTD_DDICT
|
|
|
|
: allocation_(std::move(allocation)), slice_(std::move(slice)) {
|
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
|
|
|
if (!slice_.empty() && using_zstd) {
|
|
|
|
zstd_ddict_ = ZSTD_createDDict_byReference(slice_.data(), slice_.size());
|
|
|
|
assert(zstd_ddict_ != nullptr);
|
|
|
|
}
|
|
|
|
#endif // ROCKSDB_ZSTD_DDICT
|
2019-07-24 00:57:43 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
UncompressionDict(UncompressionDict&& rhs)
|
|
|
|
: dict_(std::move(rhs.dict_)),
|
2019-08-23 17:25:52 +02:00
|
|
|
allocation_(std::move(rhs.allocation_)),
|
2019-07-24 00:57:43 +02:00
|
|
|
slice_(std::move(rhs.slice_))
|
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
|
|
|
,
|
|
|
|
zstd_ddict_(rhs.zstd_ddict_)
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
|
|
|
rhs.zstd_ddict_ = nullptr;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2019-01-19 04:10:17 +01:00
|
|
|
~UncompressionDict() {
|
2019-01-24 03:11:08 +01:00
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
2019-01-19 04:10:17 +01:00
|
|
|
size_t res = 0;
|
|
|
|
if (zstd_ddict_ != nullptr) {
|
|
|
|
res = ZSTD_freeDDict(zstd_ddict_);
|
|
|
|
}
|
|
|
|
assert(res == 0); // Last I checked they can't fail
|
|
|
|
(void)res; // prevent unused var warning
|
2019-01-24 03:11:08 +01:00
|
|
|
#endif // ROCKSDB_ZSTD_DDICT
|
2019-01-19 04:10:17 +01:00
|
|
|
}
|
|
|
|
|
2019-07-24 00:57:43 +02:00
|
|
|
UncompressionDict& operator=(UncompressionDict&& rhs) {
|
|
|
|
if (this == &rhs) {
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
dict_ = std::move(rhs.dict_);
|
2019-08-23 17:25:52 +02:00
|
|
|
allocation_ = std::move(rhs.allocation_);
|
2019-07-24 00:57:43 +02:00
|
|
|
slice_ = std::move(rhs.slice_);
|
|
|
|
|
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
|
|
|
zstd_ddict_ = rhs.zstd_ddict_;
|
|
|
|
rhs.zstd_ddict_ = nullptr;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
2019-08-23 17:25:52 +02:00
|
|
|
// The object is self-contained if the string constructor is used, or the
|
|
|
|
// Slice constructor is invoked with a non-null allocation. Otherwise, it
|
|
|
|
// is the caller's responsibility to ensure that the underlying storage
|
|
|
|
// outlives this object.
|
|
|
|
bool own_bytes() const { return !dict_.empty() || allocation_; }
|
|
|
|
|
2019-07-24 00:57:43 +02:00
|
|
|
const Slice& GetRawDict() const { return slice_; }
|
|
|
|
|
2019-01-24 03:11:08 +01:00
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
2019-01-30 01:23:21 +01:00
|
|
|
const ZSTD_DDict* GetDigestedZstdDDict() const { return zstd_ddict_; }
|
2019-01-24 03:11:08 +01:00
|
|
|
#endif // ROCKSDB_ZSTD_DDICT
|
2019-01-19 04:10:17 +01:00
|
|
|
|
|
|
|
static const UncompressionDict& GetEmptyDict() {
|
|
|
|
static UncompressionDict empty_dict{};
|
|
|
|
return empty_dict;
|
|
|
|
}
|
|
|
|
|
2019-07-24 00:57:43 +02:00
|
|
|
size_t ApproximateMemoryUsage() const {
|
2019-08-23 17:25:52 +02:00
|
|
|
size_t usage = sizeof(struct UncompressionDict);
|
|
|
|
usage += dict_.size();
|
|
|
|
if (allocation_) {
|
|
|
|
auto allocator = allocation_.get_deleter().allocator;
|
|
|
|
if (allocator) {
|
|
|
|
usage += allocator->UsableSize(allocation_.get(), slice_.size());
|
|
|
|
} else {
|
|
|
|
usage += slice_.size();
|
|
|
|
}
|
|
|
|
}
|
2019-01-24 03:11:08 +01:00
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
|
|
|
usage += ZSTD_sizeof_DDict(zstd_ddict_);
|
|
|
|
#endif // ROCKSDB_ZSTD_DDICT
|
|
|
|
return usage;
|
|
|
|
}
|
|
|
|
|
2019-01-19 04:10:17 +01:00
|
|
|
UncompressionDict() = default;
|
2019-07-24 00:57:43 +02:00
|
|
|
// Disable copy
|
2019-01-19 04:10:17 +01:00
|
|
|
UncompressionDict(const CompressionDict&) = delete;
|
|
|
|
UncompressionDict& operator=(const CompressionDict&) = delete;
|
|
|
|
};
|
|
|
|
|
2018-06-04 21:04:52 +02:00
|
|
|
// Owns the per-operation native compression context (currently only ZSTD
// has one). RAII: the context is created in the constructor and freed in
// the destructor.
class CompressionContext {
 private:
#if defined(ZSTD) && (ZSTD_VERSION_NUMBER >= 500)
  ZSTD_CCtx* zstd_ctx_ = nullptr;
  // Allocates the ZSTD compression context, but only for ZSTD compression
  // types; other algorithms need no native context.
  void CreateNativeContext(CompressionType type) {
    if (type == kZSTD || type == kZSTDNotFinalCompression) {
#ifdef ROCKSDB_ZSTD_CUSTOM_MEM
      zstd_ctx_ =
          ZSTD_createCCtx_advanced(port::GetJeZstdAllocationOverrides());
#else  // ROCKSDB_ZSTD_CUSTOM_MEM
      zstd_ctx_ = ZSTD_createCCtx();
#endif  // ROCKSDB_ZSTD_CUSTOM_MEM
    }
  }
  void DestroyNativeContext() {
    if (zstd_ctx_ != nullptr) {
      ZSTD_freeCCtx(zstd_ctx_);
    }
  }

 public:
  // callable inside ZSTD_Compress
  ZSTD_CCtx* ZSTDPreallocCtx() const {
    assert(zstd_ctx_ != nullptr);
    return zstd_ctx_;
  }

#else  // ZSTD && (ZSTD_VERSION_NUMBER >= 500)
 private:
  // No-op fallbacks when ZSTD is unavailable.
  void CreateNativeContext(CompressionType /* type */) {}
  void DestroyNativeContext() {}
#endif  // ZSTD && (ZSTD_VERSION_NUMBER >= 500)
 public:
  explicit CompressionContext(CompressionType type) {
    CreateNativeContext(type);
  }
  ~CompressionContext() { DestroyNativeContext(); }
  // Non-copyable: owns the native context.
  CompressionContext(const CompressionContext&) = delete;
  CompressionContext& operator=(const CompressionContext&) = delete;
};
|
|
|
|
|
|
|
|
class CompressionInfo {
|
|
|
|
const CompressionOptions& opts_;
|
|
|
|
const CompressionContext& context_;
|
|
|
|
const CompressionDict& dict_;
|
|
|
|
const CompressionType type_;
|
2019-03-18 20:07:35 +01:00
|
|
|
const uint64_t sample_for_compression_;
|
2019-01-19 04:10:17 +01:00
|
|
|
|
|
|
|
public:
|
|
|
|
CompressionInfo(const CompressionOptions& _opts,
|
|
|
|
const CompressionContext& _context,
|
2019-03-18 20:07:35 +01:00
|
|
|
const CompressionDict& _dict, CompressionType _type,
|
|
|
|
uint64_t _sample_for_compression)
|
|
|
|
: opts_(_opts),
|
|
|
|
context_(_context),
|
|
|
|
dict_(_dict),
|
|
|
|
type_(_type),
|
|
|
|
sample_for_compression_(_sample_for_compression) {}
|
2018-06-04 21:04:52 +02:00
|
|
|
|
2018-06-05 21:51:05 +02:00
|
|
|
const CompressionOptions& options() const { return opts_; }
|
2019-01-19 04:10:17 +01:00
|
|
|
const CompressionContext& context() const { return context_; }
|
|
|
|
const CompressionDict& dict() const { return dict_; }
|
2018-06-05 21:51:05 +02:00
|
|
|
CompressionType type() const { return type_; }
|
2019-03-18 20:07:35 +01:00
|
|
|
uint64_t SampleForCompression() const { return sample_for_compression_; }
|
2018-06-04 21:04:52 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
// Borrows a cached ZSTD decompression context for the lifetime of this
// object and hands it back to the global cache on destruction.
class UncompressionContext {
 private:
  CompressionContextCache* ctx_cache_ = nullptr;
  ZSTDUncompressCachedData uncomp_cached_data_;

 public:
  explicit UncompressionContext(CompressionType type) {
    // Only ZSTD has a cached native context; other types need none.
    if (type == kZSTD || type == kZSTDNotFinalCompression) {
      ctx_cache_ = CompressionContextCache::Instance();
      uncomp_cached_data_ = ctx_cache_->GetCachedZSTDUncompressData();
    }
  }
  ~UncompressionContext() {
    // A non-negative cache index means the context is owned by the cache
    // and must be returned; index -1 means nothing was borrowed.
    if (uncomp_cached_data_.GetCacheIndex() != -1) {
      assert(ctx_cache_ != nullptr);
      ctx_cache_->ReturnCachedZSTDUncompressData(
          uncomp_cached_data_.GetCacheIndex());
    }
  }
  UncompressionContext(const UncompressionContext&) = delete;
  UncompressionContext& operator=(const UncompressionContext&) = delete;

  // May return nullptr for non-ZSTD types.
  ZSTDUncompressCachedData::ZSTDNativeContext GetZSTDContext() const {
    return uncomp_cached_data_.Get();
  }
};
|
|
|
|
|
|
|
|
class UncompressionInfo {
|
|
|
|
const UncompressionContext& context_;
|
|
|
|
const UncompressionDict& dict_;
|
|
|
|
const CompressionType type_;
|
|
|
|
|
|
|
|
public:
|
|
|
|
UncompressionInfo(const UncompressionContext& _context,
|
|
|
|
const UncompressionDict& _dict, CompressionType _type)
|
|
|
|
: context_(_context), dict_(_dict), type_(_type) {}
|
|
|
|
|
|
|
|
const UncompressionContext& context() const { return context_; }
|
|
|
|
const UncompressionDict& dict() const { return dict_; }
|
2018-06-05 21:51:05 +02:00
|
|
|
CompressionType type() const { return type_; }
|
2018-06-04 21:04:52 +02:00
|
|
|
};
|
|
|
|
|
2015-04-06 21:50:44 +02:00
|
|
|
// True iff this binary was built with Snappy support.
inline bool Snappy_Supported() {
#ifdef SNAPPY
  constexpr bool kAvailable = true;
#else
  constexpr bool kAvailable = false;
#endif
  return kAvailable;
}
|
|
|
|
|
|
|
|
// True iff this binary was built with zlib support.
inline bool Zlib_Supported() {
#ifdef ZLIB
  constexpr bool kAvailable = true;
#else
  constexpr bool kAvailable = false;
#endif
  return kAvailable;
}
|
|
|
|
|
|
|
|
// True iff this binary was built with bzip2 support.
inline bool BZip2_Supported() {
#ifdef BZIP2
  constexpr bool kAvailable = true;
#else
  constexpr bool kAvailable = false;
#endif
  return kAvailable;
}
|
|
|
|
|
|
|
|
// True iff this binary was built with LZ4 support (covers LZ4HC as well).
inline bool LZ4_Supported() {
#ifdef LZ4
  constexpr bool kAvailable = true;
#else
  constexpr bool kAvailable = false;
#endif
  return kAvailable;
}
|
|
|
|
|
2016-04-20 07:54:24 +02:00
|
|
|
// True iff this binary was built with Windows XPRESS support.
inline bool XPRESS_Supported() {
#ifdef XPRESS
  constexpr bool kAvailable = true;
#else
  constexpr bool kAvailable = false;
#endif
  return kAvailable;
}
|
|
|
|
|
2015-08-28 00:40:42 +02:00
|
|
|
// True iff the linked zstd library can produce the finalized ZSTD format.
inline bool ZSTD_Supported() {
#ifdef ZSTD
  // ZSTD format is finalized since version 0.8.0.
  constexpr unsigned kFirstFinalizedVersion = 800;
  return ZSTD_versionNumber() >= kFirstFinalizedVersion;
#else
  return false;
#endif
}
|
|
|
|
|
|
|
|
// True iff any zstd library is linked, regardless of its version (i.e. the
// pre-finalization format can be used).
inline bool ZSTDNotFinal_Supported() {
#ifdef ZSTD
  constexpr bool kAvailable = true;
#else
  constexpr bool kAvailable = false;
#endif
  return kAvailable;
}
|
|
|
|
|
2015-06-18 23:55:05 +02:00
|
|
|
// Returns whether the given compression algorithm is usable in this build
// (kNoCompression is always usable). Asserts and returns false for
// unrecognized enum values.
inline bool CompressionTypeSupported(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return true;
    case kSnappyCompression:
      return Snappy_Supported();
    case kZlibCompression:
      return Zlib_Supported();
    case kBZip2Compression:
      return BZip2_Supported();
    case kLZ4Compression:
    case kLZ4HCCompression:
      // Both variants ship in the same LZ4 library.
      return LZ4_Supported();
    case kXpressCompression:
      return XPRESS_Supported();
    case kZSTDNotFinalCompression:
      return ZSTDNotFinal_Supported();
    case kZSTD:
      return ZSTD_Supported();
    default:
      assert(false);
      return false;
  }
}
|
|
|
|
|
|
|
|
// Maps a CompressionType to its human-readable name. Asserts and returns an
// empty string for unrecognized enum values.
inline std::string CompressionTypeToString(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression: return "NoCompression";
    case kSnappyCompression: return "Snappy";
    case kZlibCompression: return "Zlib";
    case kBZip2Compression: return "BZip2";
    case kLZ4Compression: return "LZ4";
    case kLZ4HCCompression: return "LZ4HC";
    case kXpressCompression: return "Xpress";
    case kZSTD: return "ZSTD";
    case kZSTDNotFinalCompression: return "ZSTDNotFinal";
    case kDisableCompressionOption: return "DisableOption";
    default:
      assert(false);
      return "";
  }
}
|
|
|
|
|
2019-04-02 23:48:52 +02:00
|
|
|
inline std::string CompressionOptionsToString(
|
|
|
|
CompressionOptions& compression_options) {
|
|
|
|
std::string result;
|
|
|
|
result.reserve(512);
|
|
|
|
result.append("window_bits=")
|
|
|
|
.append(ToString(compression_options.window_bits))
|
|
|
|
.append("; ");
|
|
|
|
result.append("level=")
|
|
|
|
.append(ToString(compression_options.level))
|
|
|
|
.append("; ");
|
|
|
|
result.append("strategy=")
|
|
|
|
.append(ToString(compression_options.strategy))
|
|
|
|
.append("; ");
|
|
|
|
result.append("max_dict_bytes=")
|
|
|
|
.append(ToString(compression_options.max_dict_bytes))
|
|
|
|
.append("; ");
|
|
|
|
result.append("zstd_max_train_bytes=")
|
|
|
|
.append(ToString(compression_options.zstd_max_train_bytes))
|
|
|
|
.append("; ");
|
|
|
|
result.append("enabled=")
|
|
|
|
.append(ToString(compression_options.enabled))
|
|
|
|
.append("; ");
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2015-01-15 01:24:24 +01:00
|
|
|
// compress_format_version can have two values:
|
|
|
|
// 1 -- decompressed sizes for BZip2 and Zlib are not included in the compressed
|
|
|
|
// block. Also, decompressed sizes for LZ4 are encoded in platform-dependent
|
|
|
|
// way.
|
|
|
|
// 2 -- Zlib, BZip2 and LZ4 encode decompressed size as Varint32 just before the
|
|
|
|
// start of compressed block. Snappy format is the same as version 1.
|
|
|
|
|
2019-01-19 04:10:17 +01:00
|
|
|
// Compresses [input, input + length) with Snappy into *output, replacing
// any previous contents. Returns true on success; returns false when
// RocksDB was built without Snappy support. Snappy never fails to
// compress, so no other failure mode exists here.
inline bool Snappy_Compress(const CompressionInfo& /*info*/, const char* input,
                            size_t length, ::std::string* output) {
#ifdef SNAPPY
  // Reserve the worst-case output size up front, then shrink to fit.
  const size_t max_compressed_len = snappy::MaxCompressedLength(length);
  output->resize(max_compressed_len);
  size_t compressed_len = 0;
  snappy::RawCompress(input, length, &(*output)[0], &compressed_len);
  output->resize(compressed_len);
  return true;
#else
  (void)input;
  (void)length;
  (void)output;
  return false;
#endif
}
|
|
|
|
|
|
|
|
// Reads the decompressed size recorded in a Snappy-compressed buffer into
// *result without decompressing it. Returns false when the buffer header
// is malformed or when Snappy support is not compiled in.
inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
                                         size_t* result) {
#ifdef SNAPPY
  return snappy::GetUncompressedLength(input, length, result);
#else
  // Silence unused-parameter warnings in no-Snappy builds.
  (void)input, (void)length, (void)result;
  return false;
#endif
}
|
|
|
|
|
2018-04-13 02:55:14 +02:00
|
|
|
// Decompresses the Snappy-compressed buffer [input, input + length) into
// `output`, which must already be sized via Snappy_GetUncompressedLength().
// Returns false on corrupt input or when Snappy support is not compiled in.
inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
#ifdef SNAPPY
  return snappy::RawUncompress(input, length, output);
#else
  // Silence unused-parameter warnings in no-Snappy builds.
  (void)input, (void)length, (void)output;
  return false;
#endif
}
|
|
|
|
|
2015-01-15 01:24:24 +01:00
|
|
|
namespace compression {
|
|
|
|
// returns size
|
|
|
|
inline size_t PutDecompressedSizeInfo(std::string* output, uint32_t length) {
|
|
|
|
PutVarint32(output, length);
|
|
|
|
return output->size();
|
|
|
|
}
|
|
|
|
|
|
|
|
inline bool GetDecompressedSizeInfo(const char** input_data,
|
|
|
|
size_t* input_length,
|
|
|
|
uint32_t* output_len) {
|
|
|
|
auto new_input_data =
|
|
|
|
GetVarint32Ptr(*input_data, *input_data + *input_length, output_len);
|
|
|
|
if (new_input_data == nullptr) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
*input_length -= (new_input_data - *input_data);
|
|
|
|
*input_data = new_input_data;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
} // namespace compression
|
|
|
|
|
|
|
|
// compress_format_version == 1 -- decompressed size is not included in the
|
|
|
|
// block header
|
|
|
|
// compress_format_version == 2 -- decompressed size is included in the block
|
|
|
|
// header in varint32 format
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
// @param compression_dict Data for presetting the compression library's
|
|
|
|
// dictionary.
|
2019-01-19 04:10:17 +01:00
|
|
|
// Compresses [input, input + length) with zlib into *output.
//
// compress_format_version == 1: no size header is written.
// compress_format_version == 2: the decompressed size is prepended to
// *output as a varint32 (see compression::PutDecompressedSizeInfo).
//
// Compression level / window_bits / strategy come from info.options(); a
// preset dictionary from info.dict() is loaded if non-empty. Returns true
// on success. Returns false when the input exceeds 4GB, when zlib
// initialization fails, when the "compressed" result would not be smaller
// than the input, or when zlib support is not compiled in.
inline bool Zlib_Compress(const CompressionInfo& info,
                          uint32_t compress_format_version, const char* input,
                          size_t length, ::std::string* output) {
#ifdef ZLIB
  if (length > std::numeric_limits<uint32_t>::max()) {
    // Can't compress more than 4GB
    return false;
  }

  size_t output_header_len = 0;
  if (compress_format_version == 2) {
    // Format v2: record the decompressed size so the reader can allocate
    // the exact output buffer up front.
    output_header_len = compression::PutDecompressedSizeInfo(
        output, static_cast<uint32_t>(length));
  }
  // Resize output to be the plain data length.
  // This may not be big enough if the compression actually expands data.
  output->resize(output_header_len + length);

  // The memLevel parameter specifies how much memory should be allocated for
  // the internal compression state.
  // memLevel=1 uses minimum memory but is slow and reduces compression ratio.
  // memLevel=9 uses maximum memory for optimal speed.
  // The default value is 8. See zconf.h for more details.
  static const int memLevel = 8;
  int level;
  if (info.options().level == CompressionOptions::kDefaultCompressionLevel) {
    // Let zlib pick its built-in default level.
    level = Z_DEFAULT_COMPRESSION;
  } else {
    level = info.options().level;
  }
  z_stream _stream;
  memset(&_stream, 0, sizeof(z_stream));
  int st = deflateInit2(&_stream, level, Z_DEFLATED, info.options().window_bits,
                        memLevel, info.options().strategy);
  if (st != Z_OK) {
    return false;
  }

  Slice compression_dict = info.dict().GetRawDict();
  if (compression_dict.size()) {
    // Initialize the compression library's dictionary
    st = deflateSetDictionary(
        &_stream, reinterpret_cast<const Bytef*>(compression_dict.data()),
        static_cast<unsigned int>(compression_dict.size()));
    if (st != Z_OK) {
      // Release zlib's internal state before bailing out.
      deflateEnd(&_stream);
      return false;
    }
  }

  // Compress the input, and put compressed data in output.
  _stream.next_in = (Bytef*)input;
  _stream.avail_in = static_cast<unsigned int>(length);

  // Initialize the output size.
  _stream.avail_out = static_cast<unsigned int>(length);
  _stream.next_out = reinterpret_cast<Bytef*>(&(*output)[output_header_len]);

  bool compressed = false;
  st = deflate(&_stream, Z_FINISH);
  if (st == Z_STREAM_END) {
    // Entire input was consumed; trim the unused tail of the buffer.
    compressed = true;
    output->resize(output->size() - _stream.avail_out);
  }
  // The only return value we really care about is Z_STREAM_END.
  // Z_OK means insufficient output space. This means the compression is
  // bigger than decompressed size. Just fail the compression in that case.

  deflateEnd(&_stream);
  return compressed;
#else
  (void)info;
  (void)compress_format_version;
  (void)input;
  (void)length;
  (void)output;
  return false;
#endif
}
|
|
|
|
|
2015-01-15 01:24:24 +01:00
|
|
|
// compress_format_version == 1 -- decompressed size is not included in the
|
|
|
|
// block header
|
|
|
|
// compress_format_version == 2 -- decompressed size is included in the block
|
|
|
|
// header in varint32 format
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
// @param compression_dict Data for presetting the compression library's
|
|
|
|
// dictionary.
|
2018-10-03 02:21:54 +02:00
|
|
|
// Decompresses a zlib-compressed block [input_data, input_data +
// input_length) into a freshly allocated buffer and stores the number of
// decompressed bytes in *decompress_size.
//
// compress_format_version == 2: the decompressed size is read from the
// varint32 header at the front of the block, so the output buffer is sized
// exactly. Otherwise the size is estimated at 5x the input (rounded up to a
// page) and grown on demand.
//
// A preset dictionary from info.dict() is loaded if non-empty. `windowBits`
// follows the inflateInit2 convention (negative for raw deflate; see below).
// Returns nullptr on any failure, or when zlib support is not compiled in.
inline CacheAllocationPtr Zlib_Uncompress(
    const UncompressionInfo& info, const char* input_data, size_t input_length,
    int* decompress_size, uint32_t compress_format_version,
    MemoryAllocator* allocator = nullptr, int windowBits = -14) {
#ifdef ZLIB
  uint32_t output_len = 0;
  if (compress_format_version == 2) {
    if (!compression::GetDecompressedSizeInfo(&input_data, &input_length,
                                              &output_len)) {
      return nullptr;
    }
  } else {
    // Assume the decompressed data size will 5x of compressed size, but round
    // to the page size
    size_t proposed_output_len = ((input_length * 5) & (~(4096 - 1))) + 4096;
    output_len = static_cast<uint32_t>(
        std::min(proposed_output_len,
                 static_cast<size_t>(std::numeric_limits<uint32_t>::max())));
  }

  z_stream _stream;
  memset(&_stream, 0, sizeof(z_stream));

  // For raw inflate, the windowBits should be -8..-15.
  // If windowBits is bigger than zero, it will use either zlib
  // header or gzip header. Adding 32 to it will do automatic detection.
  int st =
      inflateInit2(&_stream, windowBits > 0 ? windowBits + 32 : windowBits);
  if (st != Z_OK) {
    return nullptr;
  }

  const Slice& compression_dict = info.dict().GetRawDict();
  if (compression_dict.size()) {
    // Initialize the compression library's dictionary
    st = inflateSetDictionary(
        &_stream, reinterpret_cast<const Bytef*>(compression_dict.data()),
        static_cast<unsigned int>(compression_dict.size()));
    if (st != Z_OK) {
      // Release zlib's internal state before bailing out; previously this
      // path returned without inflateEnd() and leaked the inflate state.
      inflateEnd(&_stream);
      return nullptr;
    }
  }

  _stream.next_in = (Bytef*)input_data;
  _stream.avail_in = static_cast<unsigned int>(input_length);

  auto output = AllocateBlock(output_len, allocator);

  _stream.next_out = (Bytef*)output.get();
  _stream.avail_out = static_cast<unsigned int>(output_len);

  bool done = false;
  while (!done) {
    st = inflate(&_stream, Z_SYNC_FLUSH);
    switch (st) {
      case Z_STREAM_END:
        done = true;
        break;
      case Z_OK: {
        // No output space. Increase the output space by 20%.
        // We should never run out of output space if
        // compress_format_version == 2
        assert(compress_format_version != 2);
        size_t old_sz = output_len;
        uint32_t output_len_delta = output_len / 5;
        // Grow by at least 10 bytes so progress is always made.
        output_len += output_len_delta < 10 ? 10 : output_len_delta;
        auto tmp = AllocateBlock(output_len, allocator);
        memcpy(tmp.get(), output.get(), old_sz);
        output = std::move(tmp);

        // Set more output.
        _stream.next_out = (Bytef*)(output.get() + old_sz);
        _stream.avail_out = static_cast<unsigned int>(output_len - old_sz);
        break;
      }
      case Z_BUF_ERROR:
      default:
        inflateEnd(&_stream);
        return nullptr;
    }
  }

  // If we encoded decompressed block size, we should have no bytes left
  assert(compress_format_version != 2 || _stream.avail_out == 0);
  *decompress_size = static_cast<int>(output_len - _stream.avail_out);
  inflateEnd(&_stream);
  return output;
#else
  (void)info;
  (void)input_data;
  (void)input_length;
  (void)decompress_size;
  (void)compress_format_version;
  (void)allocator;
  (void)windowBits;
  return nullptr;
#endif
}
|
|
|
|
|
2015-01-15 01:24:24 +01:00
|
|
|
// compress_format_version == 1 -- decompressed size is not included in the
|
|
|
|
// block header
|
|
|
|
// compress_format_version == 2 -- decompressed size is included in the block
|
|
|
|
// header in varint32 format
|
2019-01-19 04:10:17 +01:00
|
|
|
// Compresses [input, input + length) with BZip2 into *output. For format
// version 2 the decompressed size is first prepended to *output as a
// varint32. Returns true on success. Returns false when the input exceeds
// 4GB, when the "compressed" result would not be smaller than the input,
// or when BZip2 support is not compiled in. The CompressionInfo parameter
// is unused: bzlib has no preset-dictionary or tunable-level support here.
inline bool BZip2_Compress(const CompressionInfo& /*info*/,
                           uint32_t compress_format_version, const char* input,
                           size_t length, ::std::string* output) {
#ifdef BZIP2
  if (length > std::numeric_limits<uint32_t>::max()) {
    // Can't compress more than 4GB
    return false;
  }
  size_t output_header_len = 0;
  if (compress_format_version == 2) {
    // Format v2: record the decompressed size so the reader can allocate
    // the exact output buffer up front.
    output_header_len = compression::PutDecompressedSizeInfo(
        output, static_cast<uint32_t>(length));
  }
  // Resize output to be the plain data length.
  // This may not be big enough if the compression actually expands data.
  output->resize(output_header_len + length);

  bz_stream _stream;
  memset(&_stream, 0, sizeof(bz_stream));

  // Block size 1 is 100K.
  // 0 is for silent.
  // 30 is the default workFactor
  int st = BZ2_bzCompressInit(&_stream, 1, 0, 30);
  if (st != BZ_OK) {
    return false;
  }

  // Compress the input, and put compressed data in output.
  _stream.next_in = (char*)input;
  _stream.avail_in = static_cast<unsigned int>(length);

  // Initialize the output size.
  _stream.avail_out = static_cast<unsigned int>(length);
  _stream.next_out = reinterpret_cast<char*>(&(*output)[output_header_len]);

  bool compressed = false;
  st = BZ2_bzCompress(&_stream, BZ_FINISH);
  if (st == BZ_STREAM_END) {
    // Entire input was consumed; trim the unused tail of the buffer.
    compressed = true;
    output->resize(output->size() - _stream.avail_out);
  }
  // The only return value we really care about is BZ_STREAM_END.
  // BZ_FINISH_OK means insufficient output space. This means the compression
  // is bigger than decompressed size. Just fail the compression in that case.

  BZ2_bzCompressEnd(&_stream);
  return compressed;
#else
  (void)compress_format_version;
  (void)input;
  (void)length;
  (void)output;
  return false;
#endif
}
|
|
|
|
|
2015-01-15 01:24:24 +01:00
|
|
|
// compress_format_version == 1 -- decompressed size is not included in the
|
|
|
|
// block header
|
|
|
|
// compress_format_version == 2 -- decompressed size is included in the block
|
|
|
|
// header in varint32 format
|
2018-10-03 02:21:54 +02:00
|
|
|
// Decompresses a BZip2-compressed block [input_data, input_data +
// input_length) into a freshly allocated buffer and stores the number of
// decompressed bytes in *decompress_size.
//
// compress_format_version == 2: the decompressed size is read from the
// varint32 header at the front of the block, so the output buffer is sized
// exactly. Otherwise the size is estimated at 5x the input (rounded up to a
// page) and grown on demand.
//
// Returns nullptr on any failure, or when BZip2 support is not compiled in.
inline CacheAllocationPtr BZip2_Uncompress(
    const char* input_data, size_t input_length, int* decompress_size,
    uint32_t compress_format_version, MemoryAllocator* allocator = nullptr) {
#ifdef BZIP2
  uint32_t output_len = 0;
  if (compress_format_version == 2) {
    if (!compression::GetDecompressedSizeInfo(&input_data, &input_length,
                                              &output_len)) {
      return nullptr;
    }
  } else {
    // Assume the decompressed data size will 5x of compressed size, but round
    // to the next page size
    size_t proposed_output_len = ((input_length * 5) & (~(4096 - 1))) + 4096;
    output_len = static_cast<uint32_t>(
        std::min(proposed_output_len,
                 static_cast<size_t>(std::numeric_limits<uint32_t>::max())));
  }

  bz_stream _stream;
  memset(&_stream, 0, sizeof(bz_stream));

  int st = BZ2_bzDecompressInit(&_stream, 0, 0);
  if (st != BZ_OK) {
    return nullptr;
  }

  _stream.next_in = (char*)input_data;
  _stream.avail_in = static_cast<unsigned int>(input_length);

  auto output = AllocateBlock(output_len, allocator);

  _stream.next_out = (char*)output.get();
  _stream.avail_out = static_cast<unsigned int>(output_len);

  bool done = false;
  while (!done) {
    st = BZ2_bzDecompress(&_stream);
    switch (st) {
      case BZ_STREAM_END:
        done = true;
        break;
      case BZ_OK: {
        // No output space. Increase the output space by 20%.
        // We should never run out of output space if
        // compress_format_version == 2
        assert(compress_format_version != 2);
        uint32_t old_sz = output_len;
        // Grow with integer arithmetic and a minimum bump of 10 bytes,
        // matching Zlib_Uncompress. The previous `output_len * 1.2`
        // silently round-tripped through double and could fail to grow the
        // buffer at all for small values, re-entering this case forever.
        uint32_t output_len_delta = output_len / 5;
        output_len += output_len_delta < 10 ? 10 : output_len_delta;
        auto tmp = AllocateBlock(output_len, allocator);
        memcpy(tmp.get(), output.get(), old_sz);
        output = std::move(tmp);

        // Set more output.
        _stream.next_out = (char*)(output.get() + old_sz);
        _stream.avail_out = static_cast<unsigned int>(output_len - old_sz);
        break;
      }
      default:
        BZ2_bzDecompressEnd(&_stream);
        return nullptr;
    }
  }

  // If we encoded decompressed block size, we should have no bytes left
  assert(compress_format_version != 2 || _stream.avail_out == 0);
  *decompress_size = static_cast<int>(output_len - _stream.avail_out);
  BZ2_bzDecompressEnd(&_stream);
  return output;
#else
  (void)input_data;
  (void)input_length;
  (void)decompress_size;
  (void)compress_format_version;
  (void)allocator;
  return nullptr;
#endif
}
|
|
|
|
|
2015-01-15 01:24:24 +01:00
|
|
|
// compress_format_version == 1 -- decompressed size is included in the
|
|
|
|
// block header using memcpy, which makes database non-portable)
|
|
|
|
// compress_format_version == 2 -- decompressed size is included in the block
|
|
|
|
// header in varint32 format
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
// @param compression_dict Data for presetting the compression library's
|
|
|
|
// dictionary.
|
2019-01-19 04:10:17 +01:00
|
|
|
inline bool LZ4_Compress(const CompressionInfo& info,
|
2015-01-15 01:24:24 +01:00
|
|
|
uint32_t compress_format_version, const char* input,
|
2018-06-04 21:04:52 +02:00
|
|
|
size_t length, ::std::string* output) {
|
2015-01-09 21:57:11 +01:00
|
|
|
#ifdef LZ4
|
2015-01-15 01:24:24 +01:00
|
|
|
if (length > std::numeric_limits<uint32_t>::max()) {
|
|
|
|
// Can't compress more than 4GB
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t output_header_len = 0;
|
|
|
|
if (compress_format_version == 2) {
|
|
|
|
// new encoding, using varint32 to store size information
|
|
|
|
output_header_len = compression::PutDecompressedSizeInfo(
|
|
|
|
output, static_cast<uint32_t>(length));
|
|
|
|
} else {
|
|
|
|
// legacy encoding, which is not really portable (depends on big/little
|
|
|
|
// endianness)
|
|
|
|
output_header_len = 8;
|
|
|
|
output->resize(output_header_len);
|
|
|
|
char* p = const_cast<char*>(output->c_str());
|
|
|
|
memcpy(p, &length, sizeof(length));
|
|
|
|
}
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
int compress_bound = LZ4_compressBound(static_cast<int>(length));
|
|
|
|
output->resize(static_cast<size_t>(output_header_len + compress_bound));
|
|
|
|
|
|
|
|
int outlen;
|
|
|
|
#if LZ4_VERSION_NUMBER >= 10400 // r124+
|
|
|
|
LZ4_stream_t* stream = LZ4_createStream();
|
2019-01-19 04:10:17 +01:00
|
|
|
Slice compression_dict = info.dict().GetRawDict();
|
|
|
|
if (compression_dict.size()) {
|
|
|
|
LZ4_loadDict(stream, compression_dict.data(),
|
|
|
|
static_cast<int>(compression_dict.size()));
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
}
|
2016-11-21 21:17:06 +01:00
|
|
|
#if LZ4_VERSION_NUMBER >= 10700 // r129+
|
2018-04-13 02:55:14 +02:00
|
|
|
outlen =
|
|
|
|
LZ4_compress_fast_continue(stream, input, &(*output)[output_header_len],
|
|
|
|
static_cast<int>(length), compress_bound, 1);
|
2016-11-21 21:17:06 +01:00
|
|
|
#else // up to r128
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
outlen = LZ4_compress_limitedOutput_continue(
|
|
|
|
stream, input, &(*output)[output_header_len], static_cast<int>(length),
|
|
|
|
compress_bound);
|
2016-11-21 21:17:06 +01:00
|
|
|
#endif
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
LZ4_freeStream(stream);
|
|
|
|
#else // up to r123
|
|
|
|
outlen = LZ4_compress_limitedOutput(input, &(*output)[output_header_len],
|
|
|
|
static_cast<int>(length), compress_bound);
|
|
|
|
#endif // LZ4_VERSION_NUMBER >= 10400
|
2015-01-15 01:24:24 +01:00
|
|
|
|
2015-01-09 21:57:11 +01:00
|
|
|
if (outlen == 0) {
|
|
|
|
return false;
|
|
|
|
}
|
2015-01-15 01:24:24 +01:00
|
|
|
output->resize(static_cast<size_t>(output_header_len + outlen));
|
2015-01-09 21:57:11 +01:00
|
|
|
return true;
|
2018-01-31 21:04:52 +01:00
|
|
|
#else // LZ4
|
2019-01-19 04:10:17 +01:00
|
|
|
(void)info;
|
2018-04-13 02:55:14 +02:00
|
|
|
(void)compress_format_version;
|
|
|
|
(void)input;
|
|
|
|
(void)length;
|
|
|
|
(void)output;
|
2015-01-09 21:57:11 +01:00
|
|
|
return false;
|
2018-01-31 21:04:52 +01:00
|
|
|
#endif
|
2015-01-09 21:57:11 +01:00
|
|
|
}
|
|
|
|
|
2015-01-15 01:24:24 +01:00
|
|
|
// compress_format_version == 1 -- decompressed size is included in the
|
|
|
|
// block header using memcpy, which makes database non-portable)
|
|
|
|
// compress_format_version == 2 -- decompressed size is included in the block
|
|
|
|
// header in varint32 format
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
// @param compression_dict Data for presetting the compression library's
|
|
|
|
// dictionary.
|
2019-01-19 04:10:17 +01:00
|
|
|
inline CacheAllocationPtr LZ4_Uncompress(const UncompressionInfo& info,
|
2018-10-03 02:21:54 +02:00
|
|
|
const char* input_data,
|
|
|
|
size_t input_length,
|
|
|
|
int* decompress_size,
|
|
|
|
uint32_t compress_format_version,
|
2018-10-26 23:27:09 +02:00
|
|
|
MemoryAllocator* allocator = nullptr) {
|
2015-01-09 21:57:11 +01:00
|
|
|
#ifdef LZ4
|
2015-01-15 01:24:24 +01:00
|
|
|
uint32_t output_len = 0;
|
|
|
|
if (compress_format_version == 2) {
|
|
|
|
// new encoding, using varint32 to store size information
|
|
|
|
if (!compression::GetDecompressedSizeInfo(&input_data, &input_length,
|
|
|
|
&output_len)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// legacy encoding, which is not really portable (depends on big/little
|
|
|
|
// endianness)
|
|
|
|
if (input_length < 8) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
memcpy(&output_len, input_data, sizeof(output_len));
|
|
|
|
input_length -= 8;
|
|
|
|
input_data += 8;
|
2015-01-09 21:57:11 +01:00
|
|
|
}
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
|
2018-10-03 02:21:54 +02:00
|
|
|
auto output = AllocateBlock(output_len, allocator);
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
#if LZ4_VERSION_NUMBER >= 10400 // r124+
|
|
|
|
LZ4_streamDecode_t* stream = LZ4_createStreamDecode();
|
2019-07-24 00:57:43 +02:00
|
|
|
const Slice& compression_dict = info.dict().GetRawDict();
|
2019-01-19 04:10:17 +01:00
|
|
|
if (compression_dict.size()) {
|
|
|
|
LZ4_setStreamDecode(stream, compression_dict.data(),
|
|
|
|
static_cast<int>(compression_dict.size()));
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
}
|
|
|
|
*decompress_size = LZ4_decompress_safe_continue(
|
2018-10-03 02:21:54 +02:00
|
|
|
stream, input_data, output.get(), static_cast<int>(input_length),
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
static_cast<int>(output_len));
|
|
|
|
LZ4_freeStreamDecode(stream);
|
|
|
|
#else // up to r123
|
2018-10-03 02:21:54 +02:00
|
|
|
*decompress_size = LZ4_decompress_safe(input_data, output.get(),
|
|
|
|
static_cast<int>(input_length),
|
|
|
|
static_cast<int>(output_len));
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
#endif // LZ4_VERSION_NUMBER >= 10400
|
|
|
|
|
2015-01-09 21:57:11 +01:00
|
|
|
if (*decompress_size < 0) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2015-01-15 01:24:24 +01:00
|
|
|
assert(*decompress_size == static_cast<int>(output_len));
|
2015-01-09 21:57:11 +01:00
|
|
|
return output;
|
2018-01-31 21:04:52 +01:00
|
|
|
#else // LZ4
|
2019-01-19 04:10:17 +01:00
|
|
|
(void)info;
|
2018-04-13 02:55:14 +02:00
|
|
|
(void)input_data;
|
|
|
|
(void)input_length;
|
|
|
|
(void)decompress_size;
|
|
|
|
(void)compress_format_version;
|
2018-10-03 02:21:54 +02:00
|
|
|
(void)allocator;
|
2015-01-09 21:57:11 +01:00
|
|
|
return nullptr;
|
2018-01-31 21:04:52 +01:00
|
|
|
#endif
|
2015-01-09 21:57:11 +01:00
|
|
|
}
|
|
|
|
|
2015-01-15 01:24:24 +01:00
|
|
|
// compress_format_version == 1 -- decompressed size is included in the
|
|
|
|
// block header using memcpy, which makes database non-portable)
|
|
|
|
// compress_format_version == 2 -- decompressed size is included in the block
|
|
|
|
// header in varint32 format
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
// @param compression_dict Data for presetting the compression library's
|
|
|
|
// dictionary.
|
2019-01-19 04:10:17 +01:00
|
|
|
inline bool LZ4HC_Compress(const CompressionInfo& info,
|
2015-01-15 01:24:24 +01:00
|
|
|
uint32_t compress_format_version, const char* input,
|
2018-06-04 21:04:52 +02:00
|
|
|
size_t length, ::std::string* output) {
|
2015-01-09 21:57:11 +01:00
|
|
|
#ifdef LZ4
|
2015-01-15 01:24:24 +01:00
|
|
|
if (length > std::numeric_limits<uint32_t>::max()) {
|
|
|
|
// Can't compress more than 4GB
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t output_header_len = 0;
|
|
|
|
if (compress_format_version == 2) {
|
|
|
|
// new encoding, using varint32 to store size information
|
|
|
|
output_header_len = compression::PutDecompressedSizeInfo(
|
|
|
|
output, static_cast<uint32_t>(length));
|
|
|
|
} else {
|
|
|
|
// legacy encoding, which is not really portable (depends on big/little
|
|
|
|
// endianness)
|
|
|
|
output_header_len = 8;
|
|
|
|
output->resize(output_header_len);
|
|
|
|
char* p = const_cast<char*>(output->c_str());
|
|
|
|
memcpy(p, &length, sizeof(length));
|
|
|
|
}
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
int compress_bound = LZ4_compressBound(static_cast<int>(length));
|
|
|
|
output->resize(static_cast<size_t>(output_header_len + compress_bound));
|
2015-01-15 01:24:24 +01:00
|
|
|
|
2015-01-09 21:57:11 +01:00
|
|
|
int outlen;
|
2018-05-24 03:33:00 +02:00
|
|
|
int level;
|
2019-01-19 04:10:17 +01:00
|
|
|
if (info.options().level == CompressionOptions::kDefaultCompressionLevel) {
|
2018-05-24 03:33:00 +02:00
|
|
|
level = 0; // lz4hc.h says any value < 1 will be sanitized to default
|
|
|
|
} else {
|
2019-01-19 04:10:17 +01:00
|
|
|
level = info.options().level;
|
2018-05-24 03:33:00 +02:00
|
|
|
}
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
#if LZ4_VERSION_NUMBER >= 10400 // r124+
|
|
|
|
LZ4_streamHC_t* stream = LZ4_createStreamHC();
|
2018-05-24 03:33:00 +02:00
|
|
|
LZ4_resetStreamHC(stream, level);
|
2019-01-19 04:10:17 +01:00
|
|
|
Slice compression_dict = info.dict().GetRawDict();
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
const char* compression_dict_data =
|
2019-01-19 04:10:17 +01:00
|
|
|
compression_dict.size() > 0 ? compression_dict.data() : nullptr;
|
|
|
|
size_t compression_dict_size = compression_dict.size();
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
LZ4_loadDictHC(stream, compression_dict_data,
|
|
|
|
static_cast<int>(compression_dict_size));
|
|
|
|
|
|
|
|
#if LZ4_VERSION_NUMBER >= 10700 // r129+
|
|
|
|
outlen =
|
|
|
|
LZ4_compress_HC_continue(stream, input, &(*output)[output_header_len],
|
|
|
|
static_cast<int>(length), compress_bound);
|
|
|
|
#else // r124-r128
|
|
|
|
outlen = LZ4_compressHC_limitedOutput_continue(
|
|
|
|
stream, input, &(*output)[output_header_len], static_cast<int>(length),
|
|
|
|
compress_bound);
|
|
|
|
#endif // LZ4_VERSION_NUMBER >= 10700
|
|
|
|
LZ4_freeStreamHC(stream);
|
|
|
|
|
|
|
|
#elif LZ4_VERSION_MAJOR // r113-r123
|
2015-01-15 01:24:24 +01:00
|
|
|
outlen = LZ4_compressHC2_limitedOutput(input, &(*output)[output_header_len],
|
|
|
|
static_cast<int>(length),
|
2018-05-24 03:33:00 +02:00
|
|
|
compress_bound, level);
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
#else // up to r112
|
2015-01-15 01:24:24 +01:00
|
|
|
outlen =
|
|
|
|
LZ4_compressHC_limitedOutput(input, &(*output)[output_header_len],
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
static_cast<int>(length), compress_bound);
|
|
|
|
#endif // LZ4_VERSION_NUMBER >= 10400
|
|
|
|
|
2015-01-09 21:57:11 +01:00
|
|
|
if (outlen == 0) {
|
|
|
|
return false;
|
|
|
|
}
|
2015-01-15 01:24:24 +01:00
|
|
|
output->resize(static_cast<size_t>(output_header_len + outlen));
|
2015-01-09 21:57:11 +01:00
|
|
|
return true;
|
2018-01-31 21:04:52 +01:00
|
|
|
#else // LZ4
|
2019-01-19 04:10:17 +01:00
|
|
|
(void)info;
|
2018-04-13 02:55:14 +02:00
|
|
|
(void)compress_format_version;
|
|
|
|
(void)input;
|
|
|
|
(void)length;
|
|
|
|
(void)output;
|
2015-01-09 21:57:11 +01:00
|
|
|
return false;
|
2018-01-31 21:04:52 +01:00
|
|
|
#endif
|
2015-01-09 21:57:11 +01:00
|
|
|
}
|
2015-01-15 01:24:24 +01:00
|
|
|
|
2016-04-20 07:54:24 +02:00
|
|
|
#ifdef XPRESS
// Compresses `input` (`length` bytes) into `*output` using the Windows
// XPRESS codec. Returns true iff compression succeeded.
inline bool XPRESS_Compress(const char* input, size_t length,
                            std::string* output) {
  // Thin forwarding wrapper; the real work happens in the port layer.
  return port::xpress::Compress(input, length, output);
}
#else
// XPRESS is not compiled into this build; always reports failure.
inline bool XPRESS_Compress(const char* input, size_t length,
                            std::string* output) {
  (void)input;
  (void)length;
  (void)output;
  return false;
}
#endif
|
2016-04-20 07:54:24 +02:00
|
|
|
|
|
|
|
#ifdef XPRESS
// Decompresses an XPRESS-compressed buffer. On success returns a newly
// allocated output buffer and stores its length in `*decompress_size`;
// returns nullptr on failure.
inline char* XPRESS_Uncompress(const char* input_data, size_t input_length,
                               int* decompress_size) {
  // Thin forwarding wrapper; the real work happens in the port layer.
  return port::xpress::Decompress(input_data, input_length, decompress_size);
}
#else
// XPRESS is not compiled into this build; always reports failure.
inline char* XPRESS_Uncompress(const char* input_data, size_t input_length,
                               int* decompress_size) {
  (void)input_data;
  (void)input_length;
  (void)decompress_size;
  return nullptr;
}
#endif
|
2016-04-20 07:54:24 +02:00
|
|
|
|
2019-01-19 04:10:17 +01:00
|
|
|
// Compresses `input` (`length` bytes) with ZSTD into `*output`, prefixing the
// result with the varint-encoded decompressed size (PutDecompressedSizeInfo).
// Uses the digested dictionary from `info` when available (zstd v0.7.0+),
// otherwise the raw dictionary bytes. Returns true on success; returns false
// when ZSTD is not compiled in, `length` exceeds 4GB, or compression fails.
inline bool ZSTD_Compress(const CompressionInfo& info, const char* input,
                          size_t length, ::std::string* output) {
#ifdef ZSTD
  if (length > std::numeric_limits<uint32_t>::max()) {
    // Can't compress more than 4GB
    return false;
  }

  size_t output_header_len = compression::PutDecompressedSizeInfo(
      output, static_cast<uint32_t>(length));

  size_t compressBound = ZSTD_compressBound(length);
  output->resize(static_cast<size_t>(output_header_len + compressBound));
  size_t outlen = 0;
  int level;
  if (info.options().level == CompressionOptions::kDefaultCompressionLevel) {
    // 3 is the value of ZSTD_CLEVEL_DEFAULT (not exposed publicly), see
    // https://github.com/facebook/zstd/issues/1148
    level = 3;
  } else {
    level = info.options().level;
  }
#if ZSTD_VERSION_NUMBER >= 500  // v0.5.0+
  ZSTD_CCtx* context = info.context().ZSTDPreallocCtx();
  assert(context != nullptr);
#if ZSTD_VERSION_NUMBER >= 700  // v0.7.0+
  if (info.dict().GetDigestedZstdCDict() != nullptr) {
    outlen = ZSTD_compress_usingCDict(context, &(*output)[output_header_len],
                                      compressBound, input, length,
                                      info.dict().GetDigestedZstdCDict());
  }
#endif  // ZSTD_VERSION_NUMBER >= 700
  // Fall back to the raw dictionary (possibly empty) when the digested
  // dictionary was unavailable or its compression attempt failed.
  if (outlen == 0 || ZSTD_isError(outlen)) {
    outlen = ZSTD_compress_usingDict(context, &(*output)[output_header_len],
                                     compressBound, input, length,
                                     info.dict().GetRawDict().data(),
                                     info.dict().GetRawDict().size(), level);
  }
#else  // up to v0.4.x
  outlen = ZSTD_compress(&(*output)[output_header_len], compressBound, input,
                         length, level);
#endif  // ZSTD_VERSION_NUMBER >= 500
  // ZSTD reports failure via special (nonzero) size_t error codes; checking
  // `outlen == 0` alone would let an error code through and corrupt the
  // resize below with a bogus length.
  if (outlen == 0 || ZSTD_isError(outlen)) {
    return false;
  }
  output->resize(output_header_len + outlen);
  return true;
#else  // ZSTD
  (void)info;
  (void)input;
  (void)length;
  (void)output;
  return false;
#endif
}
|
|
|
|
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
// @param compression_dict Data for presetting the compression library's
|
|
|
|
// dictionary.
|
2018-10-26 23:27:09 +02:00
|
|
|
inline CacheAllocationPtr ZSTD_Uncompress(
|
2019-01-19 04:10:17 +01:00
|
|
|
const UncompressionInfo& info, const char* input_data, size_t input_length,
|
|
|
|
int* decompress_size, MemoryAllocator* allocator = nullptr) {
|
2015-08-28 00:40:42 +02:00
|
|
|
#ifdef ZSTD
|
|
|
|
uint32_t output_len = 0;
|
|
|
|
if (!compression::GetDecompressedSizeInfo(&input_data, &input_length,
|
|
|
|
&output_len)) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
2018-10-03 02:21:54 +02:00
|
|
|
auto output = AllocateBlock(output_len, allocator);
|
2019-01-19 04:10:17 +01:00
|
|
|
size_t actual_output_length = 0;
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+
|
2019-01-19 04:10:17 +01:00
|
|
|
ZSTD_DCtx* context = info.context().GetZSTDContext();
|
2018-06-04 21:04:52 +02:00
|
|
|
assert(context != nullptr);
|
2019-01-24 03:11:08 +01:00
|
|
|
#ifdef ROCKSDB_ZSTD_DDICT
|
2019-01-19 04:10:17 +01:00
|
|
|
if (info.dict().GetDigestedZstdDDict() != nullptr) {
|
|
|
|
actual_output_length = ZSTD_decompress_usingDDict(
|
|
|
|
context, output.get(), output_len, input_data, input_length,
|
|
|
|
info.dict().GetDigestedZstdDDict());
|
|
|
|
}
|
2019-01-24 03:11:08 +01:00
|
|
|
#endif // ROCKSDB_ZSTD_DDICT
|
2019-01-19 04:10:17 +01:00
|
|
|
if (actual_output_length == 0) {
|
|
|
|
actual_output_length = ZSTD_decompress_usingDict(
|
|
|
|
context, output.get(), output_len, input_data, input_length,
|
|
|
|
info.dict().GetRawDict().data(), info.dict().GetRawDict().size());
|
|
|
|
}
|
2018-04-13 02:55:14 +02:00
|
|
|
#else // up to v0.4.x
|
2019-01-19 04:10:17 +01:00
|
|
|
(void)info;
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
actual_output_length =
|
2018-10-03 02:21:54 +02:00
|
|
|
ZSTD_decompress(output.get(), output_len, input_data, input_length);
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
#endif // ZSTD_VERSION_NUMBER >= 500
|
2015-08-28 00:40:42 +02:00
|
|
|
assert(actual_output_length == output_len);
|
|
|
|
*decompress_size = static_cast<int>(actual_output_length);
|
|
|
|
return output;
|
2018-04-13 02:55:14 +02:00
|
|
|
#else // ZSTD
|
2019-01-19 04:10:17 +01:00
|
|
|
(void)info;
|
2018-04-13 02:55:14 +02:00
|
|
|
(void)input_data;
|
|
|
|
(void)input_length;
|
|
|
|
(void)decompress_size;
|
2018-10-03 02:21:54 +02:00
|
|
|
(void)allocator;
|
2015-08-28 00:40:42 +02:00
|
|
|
return nullptr;
|
2018-01-31 21:04:52 +01:00
|
|
|
#endif
|
2015-08-28 00:40:42 +02:00
|
|
|
}
|
|
|
|
|
2019-01-19 04:10:17 +01:00
|
|
|
// Returns true iff this build (and the linked libzstd) can train ZSTD
// compression dictionaries.
inline bool ZSTD_TrainDictionarySupported() {
#ifdef ZSTD
  // The ZDICT trainer has shipped with static libzstd since v0.6.1 but was
  // only exported from the shared library starting with v1.1.3, so the
  // feature is gated on v1.1.3+.
  return ZSTD_versionNumber() >= 10103;
#else
  return false;
#endif
}
|
|
|
|
|
2017-11-03 06:46:13 +01:00
|
|
|
// Trains a ZSTD compression dictionary from the concatenated `samples`,
// whose individual lengths are listed in `sample_lens`. Returns at most
// `max_dict_bytes` of dictionary data, or an empty string when training is
// unsupported in this build, no samples were given, or the trainer fails.
inline std::string ZSTD_TrainDictionary(const std::string& samples,
                                        const std::vector<size_t>& sample_lens,
                                        size_t max_dict_bytes) {
  // The ZDICT trainer ships with static libzstd since v0.6.1 but was only
  // exported from the shared library in v1.1.3; require v1.1.3+ for now.
#if ZSTD_VERSION_NUMBER >= 10103  // v1.1.3+
  assert(samples.empty() == sample_lens.empty());
  if (samples.empty()) {
    return "";
  }
  std::string trained(max_dict_bytes, '\0');
  size_t trained_len = ZDICT_trainFromBuffer(
      &trained[0], max_dict_bytes, &samples[0], &sample_lens[0],
      static_cast<unsigned>(sample_lens.size()));
  if (ZDICT_isError(trained_len)) {
    return "";
  }
  assert(trained_len <= max_dict_bytes);
  trained.resize(trained_len);
  return trained;
#else   // up to v1.1.2
  assert(false);
  (void)samples;
  (void)sample_lens;
  (void)max_dict_bytes;
  return "";
#endif  // ZSTD_VERSION_NUMBER >= 10103
}
|
|
|
|
|
|
|
|
// Convenience overload: treats `samples` as a sequence of fixed-size samples
// of (1 << sample_len_shift) bytes each, dropping any partial sample at the
// end, and delegates to the variable-length trainer.
inline std::string ZSTD_TrainDictionary(const std::string& samples,
                                        size_t sample_len_shift,
                                        size_t max_dict_bytes) {
  // Dictionary training is only enabled on zstd v1.1.3+ (marked stable and
  // exported from the shared library).
#if ZSTD_VERSION_NUMBER >= 10103  // v1.1.3+
  const size_t kSampleLen = size_t(1) << sample_len_shift;
  // The shift acts as integer division by the sample size, which discards a
  // trailing partial sample.
  const size_t num_samples = samples.size() >> sample_len_shift;
  std::vector<size_t> lens(num_samples, kSampleLen);
  return ZSTD_TrainDictionary(samples, lens, max_dict_bytes);
#else   // up to v1.1.2
  assert(false);
  (void)samples;
  (void)sample_len_shift;
  (void)max_dict_bytes;
  return "";
#endif  // ZSTD_VERSION_NUMBER >= 10103
}
|
|
|
|
|
2020-08-13 03:24:27 +02:00
|
|
|
inline bool CompressData(const Slice& raw,
|
|
|
|
const CompressionInfo& compression_info,
|
|
|
|
uint32_t compress_format_version,
|
|
|
|
std::string* compressed_output) {
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
// Will return compressed block contents if (1) the compression method is
|
|
|
|
// supported in this platform and (2) the compression rate is "good enough".
|
|
|
|
switch (compression_info.type()) {
|
|
|
|
case kSnappyCompression:
|
|
|
|
ret = Snappy_Compress(compression_info, raw.data(), raw.size(),
|
|
|
|
compressed_output);
|
|
|
|
break;
|
|
|
|
case kZlibCompression:
|
|
|
|
ret = Zlib_Compress(compression_info, compress_format_version, raw.data(),
|
|
|
|
raw.size(), compressed_output);
|
|
|
|
break;
|
|
|
|
case kBZip2Compression:
|
|
|
|
ret = BZip2_Compress(compression_info, compress_format_version,
|
|
|
|
raw.data(), raw.size(), compressed_output);
|
|
|
|
break;
|
|
|
|
case kLZ4Compression:
|
|
|
|
ret = LZ4_Compress(compression_info, compress_format_version, raw.data(),
|
|
|
|
raw.size(), compressed_output);
|
|
|
|
break;
|
|
|
|
case kLZ4HCCompression:
|
|
|
|
ret = LZ4HC_Compress(compression_info, compress_format_version,
|
|
|
|
raw.data(), raw.size(), compressed_output);
|
|
|
|
break;
|
|
|
|
case kXpressCompression:
|
|
|
|
ret = XPRESS_Compress(raw.data(), raw.size(), compressed_output);
|
|
|
|
break;
|
|
|
|
case kZSTD:
|
|
|
|
case kZSTDNotFinalCompression:
|
|
|
|
ret = ZSTD_Compress(compression_info, raw.data(), raw.size(),
|
|
|
|
compressed_output);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
// Do not recognize this compression type
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_SYNC_POINT_CALLBACK("CompressData:TamperWithReturnValue",
|
|
|
|
static_cast<void*>(&ret));
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|