2012-12-05 22:11:48 +01:00
|
|
|
/*
|
|
|
|
* Copyright 2012 The Netty Project
|
|
|
|
*
|
|
|
|
* The Netty Project licenses this file to you under the Apache License,
|
|
|
|
* version 2.0 (the "License"); you may not use this file except in compliance
|
|
|
|
* with the License. You may obtain a copy of the License at:
|
|
|
|
*
|
2020-10-23 14:44:18 +02:00
|
|
|
* https://www.apache.org/licenses/LICENSE-2.0
|
2012-12-05 22:11:48 +01:00
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
|
|
|
*/
|
|
|
|
|
2021-09-17 16:28:14 +02:00
|
|
|
package io.net5.buffer;
|
2012-12-05 22:11:48 +01:00
|
|
|
|
2021-09-17 16:28:14 +02:00
|
|
|
import static io.net5.util.internal.ObjectUtil.checkPositiveOrZero;
|
2019-01-31 09:06:59 +01:00
|
|
|
|
2021-09-17 16:28:14 +02:00
|
|
|
import io.net5.util.NettyRuntime;
|
|
|
|
import io.net5.util.concurrent.EventExecutor;
|
|
|
|
import io.net5.util.concurrent.FastThreadLocal;
|
|
|
|
import io.net5.util.concurrent.FastThreadLocalThread;
|
|
|
|
import io.net5.util.internal.PlatformDependent;
|
|
|
|
import io.net5.util.internal.StringUtil;
|
|
|
|
import io.net5.util.internal.SystemPropertyUtil;
|
|
|
|
import io.net5.util.internal.ThreadExecutorMap;
|
|
|
|
import io.net5.util.internal.logging.InternalLogger;
|
|
|
|
import io.net5.util.internal.logging.InternalLoggerFactory;
|
2012-12-05 22:11:48 +01:00
|
|
|
|
|
|
|
import java.nio.ByteBuffer;
|
2015-05-13 17:15:06 +02:00
|
|
|
import java.util.ArrayList;
|
|
|
|
import java.util.Collections;
|
|
|
|
import java.util.List;
|
2019-03-22 11:08:37 +01:00
|
|
|
import java.util.concurrent.TimeUnit;
|
2012-12-05 22:11:48 +01:00
|
|
|
|
2017-03-02 08:50:47 +01:00
|
|
|
public class PooledByteBufAllocator extends AbstractByteBufAllocator implements ByteBufAllocatorMetricProvider {
|
2012-12-05 22:11:48 +01:00
|
|
|
|
2013-04-03 05:08:01 +02:00
|
|
|
    private static final InternalLogger logger = InternalLoggerFactory.getInstance(PooledByteBufAllocator.class);

    // Defaults resolved exactly once in the static initializer below, mostly from
    // "io.net5.allocator.*" system properties.
    private static final int DEFAULT_NUM_HEAP_ARENA;
    private static final int DEFAULT_NUM_DIRECT_ARENA;

    private static final int DEFAULT_PAGE_SIZE;
    private static final int DEFAULT_MAX_ORDER; // 8192 << 11 = 16 MiB per chunk

    // Thread-cache tuning defaults (consumed when per-thread caches are built).
    private static final int DEFAULT_SMALL_CACHE_SIZE;
    private static final int DEFAULT_NORMAL_CACHE_SIZE;
    static final int DEFAULT_MAX_CACHED_BUFFER_CAPACITY;
    private static final int DEFAULT_CACHE_TRIM_INTERVAL;
    private static final long DEFAULT_CACHE_TRIM_INTERVAL_MILLIS;
    private static final boolean DEFAULT_USE_CACHE_FOR_ALL_THREADS;
    private static final int DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT;
    static final int DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK;

    // Hard limits: a page may not be smaller than 4 KiB, and a chunk may not exceed
    // (Integer.MAX_VALUE + 1) / 2 = 1 GiB.
    private static final int MIN_PAGE_SIZE = 4096;
    private static final int MAX_CHUNK_SIZE = (int) (((long) Integer.MAX_VALUE + 1) / 2);

    // Runnable that simply delegates to trimCurrentThreadCache().
    private final Runnable trimTask = this::trimCurrentThreadCache;
|
|
|
|
|
2013-04-03 05:08:01 +02:00
|
|
|
    static {
        // Resolve the requested alignment and page size; both are validated together,
        // and both fall back to their defaults (0 / 8192) if validation fails.
        int defaultAlignment = SystemPropertyUtil.getInt(
                "io.net5.allocator.directMemoryCacheAlignment", 0);
        int defaultPageSize = SystemPropertyUtil.getInt("io.net5.allocator.pageSize", 8192);
        Throwable pageSizeFallbackCause = null;
        try {
            validateAndCalculatePageShifts(defaultPageSize, defaultAlignment);
        } catch (Throwable t) {
            pageSizeFallbackCause = t;
            defaultPageSize = 8192;
            defaultAlignment = 0;
        }
        DEFAULT_PAGE_SIZE = defaultPageSize;
        DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT = defaultAlignment;

        // Resolve maxOrder; falls back to 11 if the resulting chunk size is invalid.
        int defaultMaxOrder = SystemPropertyUtil.getInt("io.net5.allocator.maxOrder", 11);
        Throwable maxOrderFallbackCause = null;
        try {
            validateAndCalculateChunkSize(DEFAULT_PAGE_SIZE, defaultMaxOrder);
        } catch (Throwable t) {
            maxOrderFallbackCause = t;
            defaultMaxOrder = 11;
        }
        DEFAULT_MAX_ORDER = defaultMaxOrder;

        // Determine reasonable default for nHeapArena and nDirectArena.
        // Assuming each arena has 3 chunks, the pool should not consume more than 50% of max memory.
        final Runtime runtime = Runtime.getRuntime();

        /*
         * We use 2 * available processors by default to reduce contention as we use 2 * available processors for the
         * number of EventLoops in NIO and EPOLL as well. If we choose a smaller number we will run into hot spots as
         * allocation and de-allocation needs to be synchronized on the PoolArena.
         *
         * See https://github.com/netty/netty/issues/3888.
         */
        final int defaultMinNumArena = NettyRuntime.availableProcessors() * 2;
        final int defaultChunkSize = DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER;
        DEFAULT_NUM_HEAP_ARENA = Math.max(0,
                SystemPropertyUtil.getInt(
                        "io.net5.allocator.numHeapArenas",
                        (int) Math.min(
                                defaultMinNumArena,
                                runtime.maxMemory() / defaultChunkSize / 2 / 3)));
        DEFAULT_NUM_DIRECT_ARENA = Math.max(0,
                SystemPropertyUtil.getInt(
                        "io.net5.allocator.numDirectArenas",
                        (int) Math.min(
                                defaultMinNumArena,
                                PlatformDependent.maxDirectMemory() / defaultChunkSize / 2 / 3)));

        // cache sizes
        DEFAULT_SMALL_CACHE_SIZE = SystemPropertyUtil.getInt("io.net5.allocator.smallCacheSize", 256);
        DEFAULT_NORMAL_CACHE_SIZE = SystemPropertyUtil.getInt("io.net5.allocator.normalCacheSize", 64);

        // 32 kb is the default maximum capacity of the cached buffer. Similar to what is explained in
        // 'Scalable memory allocation using jemalloc'
        DEFAULT_MAX_CACHED_BUFFER_CAPACITY = SystemPropertyUtil.getInt(
                "io.net5.allocator.maxCachedBufferCapacity", 32 * 1024);

        // the number of threshold of allocations when cached entries will be freed up if not frequently used
        DEFAULT_CACHE_TRIM_INTERVAL = SystemPropertyUtil.getInt(
                "io.net5.allocator.cacheTrimInterval", 8192);

        // 0 means no time-based trimming by default.
        DEFAULT_CACHE_TRIM_INTERVAL_MILLIS = SystemPropertyUtil.getLong(
                "io.net5.allocator.cacheTrimIntervalMillis", 0);

        DEFAULT_USE_CACHE_FOR_ALL_THREADS = SystemPropertyUtil.getBoolean(
                "io.net5.allocator.useCacheForAllThreads", false);

        // Use 1023 by default as we use an ArrayDeque as backing storage which will then allocate an internal array
        // of 1024 elements. Otherwise we would allocate 2048 and only use 1024 which is wasteful.
        DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK = SystemPropertyUtil.getInt(
                "io.net5.allocator.maxCachedByteBuffersPerChunk", 1023);

        // Log the effective configuration; fallback causes (if any) are included.
        if (logger.isDebugEnabled()) {
            logger.debug("-Dio.net5.allocator.numHeapArenas: {}", DEFAULT_NUM_HEAP_ARENA);
            logger.debug("-Dio.net5.allocator.numDirectArenas: {}", DEFAULT_NUM_DIRECT_ARENA);
            if (pageSizeFallbackCause == null) {
                logger.debug("-Dio.net5.allocator.pageSize: {}", DEFAULT_PAGE_SIZE);
            } else {
                logger.debug("-Dio.net5.allocator.pageSize: {}", DEFAULT_PAGE_SIZE, pageSizeFallbackCause);
            }
            if (maxOrderFallbackCause == null) {
                logger.debug("-Dio.net5.allocator.maxOrder: {}", DEFAULT_MAX_ORDER);
            } else {
                logger.debug("-Dio.net5.allocator.maxOrder: {}", DEFAULT_MAX_ORDER, maxOrderFallbackCause);
            }
            logger.debug("-Dio.net5.allocator.chunkSize: {}", DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER);
            logger.debug("-Dio.net5.allocator.smallCacheSize: {}", DEFAULT_SMALL_CACHE_SIZE);
            logger.debug("-Dio.net5.allocator.normalCacheSize: {}", DEFAULT_NORMAL_CACHE_SIZE);
            logger.debug("-Dio.net5.allocator.maxCachedBufferCapacity: {}", DEFAULT_MAX_CACHED_BUFFER_CAPACITY);
            logger.debug("-Dio.net5.allocator.cacheTrimInterval: {}", DEFAULT_CACHE_TRIM_INTERVAL);
            logger.debug("-Dio.net5.allocator.cacheTrimIntervalMillis: {}", DEFAULT_CACHE_TRIM_INTERVAL_MILLIS);
            logger.debug("-Dio.net5.allocator.useCacheForAllThreads: {}", DEFAULT_USE_CACHE_FOR_ALL_THREADS);
            logger.debug("-Dio.net5.allocator.maxCachedByteBuffersPerChunk: {}",
                    DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK);
        }
    }
|
|
|
|
|
2013-03-05 09:55:24 +01:00
|
|
|
    /**
     * Shared default allocator instance. Direct buffers are preferred only when
     * {@link PlatformDependent#directBufferPreferred()} reports they should be.
     */
    public static final PooledByteBufAllocator DEFAULT =
            new PooledByteBufAllocator(PlatformDependent.directBufferPreferred());
|
2012-12-05 22:11:48 +01:00
|
|
|
|
|
|
|
    // Arena arrays; null when the corresponding arena count passed to the constructor is 0.
    private final PoolArena<byte[]>[] heapArenas;
    private final PoolArena<ByteBuffer>[] directArenas;
    // Cache sizes as supplied to the constructor.
    private final int smallCacheSize;
    private final int normalCacheSize;
    // Unmodifiable metric views over the arena arrays (empty list when arenas are disabled).
    private final List<PoolArenaMetric> heapArenaMetrics;
    private final List<PoolArenaMetric> directArenaMetrics;
    private final PoolThreadLocalCache threadCache;
    // Effective chunk size, computed from (possibly alignment-adjusted) pageSize << maxOrder.
    private final int chunkSize;
    private final PooledByteBufAllocatorMetric metric;
|
2012-12-05 22:11:48 +01:00
|
|
|
|
|
|
|
    /**
     * Creates an allocator with all default settings; heap buffers are preferred
     * ({@code preferDirect == false}).
     */
    public PooledByteBufAllocator() {
        this(false);
    }
|
|
|
|
|
2016-11-22 16:18:23 +01:00
|
|
|
    /**
     * Creates an allocator with the default arena counts, page size and max order.
     *
     * @param preferDirect {@code true} if direct buffers should be preferred over heap buffers
     */
    @SuppressWarnings("deprecation")
    public PooledByteBufAllocator(boolean preferDirect) {
        this(preferDirect, DEFAULT_NUM_HEAP_ARENA, DEFAULT_NUM_DIRECT_ARENA, DEFAULT_PAGE_SIZE, DEFAULT_MAX_ORDER);
    }
|
|
|
|
|
2016-11-22 16:18:23 +01:00
|
|
|
    /**
     * Creates an allocator that prefers heap buffers ({@code preferDirect == false}).
     *
     * @param nHeapArena   number of heap arenas (0 means no heap arenas are created)
     * @param nDirectArena number of direct arenas (0 means no direct arenas are created)
     * @param pageSize     page size in bytes
     * @param maxOrder     pages per chunk as a power of two ({@code chunkSize = pageSize << maxOrder})
     */
    @SuppressWarnings("deprecation")
    public PooledByteBufAllocator(int nHeapArena, int nDirectArena, int pageSize, int maxOrder) {
        this(false, nHeapArena, nDirectArena, pageSize, maxOrder);
    }
|
|
|
|
|
2016-11-22 16:18:23 +01:00
|
|
|
    /**
     * Creates an allocator with default cache sizes.
     *
     * @deprecated use
     * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, boolean)}
     */
    @Deprecated
    public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder) {
        // The tinyCacheSize argument (0) is ignored by the delegate; the tiny size class no longer exists.
        this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder,
                0, DEFAULT_SMALL_CACHE_SIZE, DEFAULT_NORMAL_CACHE_SIZE);
    }
|
2012-12-19 09:35:32 +01:00
|
|
|
|
2016-11-22 16:18:23 +01:00
|
|
|
    /**
     * Creates an allocator with default cache-for-all-threads and alignment settings.
     * The {@code tinyCacheSize} parameter is ignored (tiny size class removed).
     *
     * @deprecated use
     * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, boolean)}
     */
    @Deprecated
    public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder,
                                  int tinyCacheSize, int smallCacheSize, int normalCacheSize) {
        this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder, smallCacheSize,
                normalCacheSize, DEFAULT_USE_CACHE_FOR_ALL_THREADS, DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT);
    }
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
    /**
     * The {@code tinyCacheSize} parameter is ignored (tiny size class removed).
     *
     * @deprecated use
     * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, boolean)}
     */
    @Deprecated
    public PooledByteBufAllocator(boolean preferDirect, int nHeapArena,
                                  int nDirectArena, int pageSize, int maxOrder, int tinyCacheSize,
                                  int smallCacheSize, int normalCacheSize,
                                  boolean useCacheForAllThreads) {
        this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder,
                smallCacheSize, normalCacheSize,
                useCacheForAllThreads);
    }
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
    /**
     * Creates an allocator with the default direct-memory cache alignment.
     *
     * @param preferDirect          {@code true} if direct buffers should be preferred over heap buffers
     * @param nHeapArena            number of heap arenas (0 means no heap arenas are created)
     * @param nDirectArena          number of direct arenas (0 means no direct arenas are created)
     * @param pageSize              page size in bytes
     * @param maxOrder              pages per chunk as a power of two ({@code chunkSize = pageSize << maxOrder})
     * @param smallCacheSize        cache size for 'small' sized allocations
     * @param normalCacheSize       cache size for 'normal' sized allocations
     * @param useCacheForAllThreads forwarded to the per-thread cache; enables caching for all threads
     */
    public PooledByteBufAllocator(boolean preferDirect, int nHeapArena,
                                  int nDirectArena, int pageSize, int maxOrder,
                                  int smallCacheSize, int normalCacheSize,
                                  boolean useCacheForAllThreads) {
        this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder,
                smallCacheSize, normalCacheSize,
                useCacheForAllThreads, DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT);
    }
|
|
|
|
|
|
|
|
    /**
     * The {@code tinyCacheSize} parameter is ignored (tiny size class removed).
     *
     * @deprecated use
     * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, boolean, int)}
     */
    @Deprecated
    public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder,
                                  int tinyCacheSize, int smallCacheSize, int normalCacheSize,
                                  boolean useCacheForAllThreads, int directMemoryCacheAlignment) {
        this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder,
                smallCacheSize, normalCacheSize,
                useCacheForAllThreads, directMemoryCacheAlignment);
    }
|
|
|
|
|
|
|
|
    /**
     * Primary constructor; all other constructors delegate here.
     *
     * @param preferDirect               {@code true} if direct buffers should be preferred over heap buffers
     * @param nHeapArena                 number of heap arenas (0 means no heap arenas are created)
     * @param nDirectArena               number of direct arenas (0 means no direct arenas are created)
     * @param pageSize                   page size in bytes; bumped up to the next whole multiple of
     *                                   {@code directMemoryCacheAlignment} when alignment is enabled
     * @param maxOrder                   pages per chunk as a power of two ({@code chunkSize = pageSize << maxOrder})
     * @param smallCacheSize             cache size for 'small' sized allocations
     * @param normalCacheSize            cache size for 'normal' sized allocations
     * @param useCacheForAllThreads      forwarded to the per-thread cache; enables caching for all threads
     * @param directMemoryCacheAlignment direct-buffer alignment; 0 disables alignment, otherwise it must be a
     *                                   power of two and platform alignment support must be available
     * @throws UnsupportedOperationException if alignment is requested but neither Unsafe nor
     *                                       {@code ByteBuffer.alignSlice()} is available
     * @throws IllegalArgumentException      if an argument is negative, or the alignment is unsupported or
     *                                       not a power of two
     */
    public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder,
                                  int smallCacheSize, int normalCacheSize,
                                  boolean useCacheForAllThreads, int directMemoryCacheAlignment) {
        super(preferDirect);
        threadCache = new PoolThreadLocalCache(useCacheForAllThreads);
        this.smallCacheSize = smallCacheSize;
        this.normalCacheSize = normalCacheSize;

        // NOTE(review): pageSize is aligned here, before the power-of-two validation of
        // directMemoryCacheAlignment further below runs.
        if (directMemoryCacheAlignment != 0) {
            if (!PlatformDependent.hasAlignDirectByteBuffer()) {
                throw new UnsupportedOperationException("Buffer alignment is not supported. " +
                        "Either Unsafe or ByteBuffer.alignSlice() must be available.");
            }

            // Ensure page size is a whole multiple of the alignment, or bump it to the next whole multiple.
            pageSize = (int) PlatformDependent.align(pageSize, directMemoryCacheAlignment);
        }

        chunkSize = validateAndCalculateChunkSize(pageSize, maxOrder);

        checkPositiveOrZero(nHeapArena, "nHeapArena");
        checkPositiveOrZero(nDirectArena, "nDirectArena");

        checkPositiveOrZero(directMemoryCacheAlignment, "directMemoryCacheAlignment");
        if (directMemoryCacheAlignment > 0 && !isDirectMemoryCacheAlignmentSupported()) {
            throw new IllegalArgumentException("directMemoryCacheAlignment is not supported");
        }

        // Power-of-two check via the lowest-set-bit trick; 0 passes (alignment disabled).
        if ((directMemoryCacheAlignment & -directMemoryCacheAlignment) != directMemoryCacheAlignment) {
            throw new IllegalArgumentException("directMemoryCacheAlignment: "
                    + directMemoryCacheAlignment + " (expected: power of two)");
        }

        int pageShifts = validateAndCalculatePageShifts(pageSize, directMemoryCacheAlignment);

        // Build the heap arenas plus unmodifiable metric views, or disable heap pooling entirely.
        if (nHeapArena > 0) {
            heapArenas = newArenaArray(nHeapArena);
            List<PoolArenaMetric> metrics = new ArrayList<>(heapArenas.length);
            for (int i = 0; i < heapArenas.length; i ++) {
                PoolArena.HeapArena arena = new PoolArena.HeapArena(this,
                        pageSize, pageShifts, chunkSize,
                        directMemoryCacheAlignment);
                heapArenas[i] = arena;
                metrics.add(arena);
            }
            heapArenaMetrics = Collections.unmodifiableList(metrics);
        } else {
            heapArenas = null;
            heapArenaMetrics = Collections.emptyList();
        }

        // Same for the direct arenas.
        if (nDirectArena > 0) {
            directArenas = newArenaArray(nDirectArena);
            List<PoolArenaMetric> metrics = new ArrayList<>(directArenas.length);
            for (int i = 0; i < directArenas.length; i ++) {
                PoolArena.DirectArena arena = new PoolArena.DirectArena(
                        this, pageSize, pageShifts, chunkSize, directMemoryCacheAlignment);
                directArenas[i] = arena;
                metrics.add(arena);
            }
            directArenaMetrics = Collections.unmodifiableList(metrics);
        } else {
            directArenas = null;
            directArenaMetrics = Collections.emptyList();
        }
        metric = new PooledByteBufAllocatorMetric(this);
    }
|
|
|
|
|
|
|
|
@SuppressWarnings("unchecked")
|
|
|
|
private static <T> PoolArena<T>[] newArenaArray(int size) {
|
|
|
|
return new PoolArena[size];
|
|
|
|
}
|
|
|
|
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
private static int validateAndCalculatePageShifts(int pageSize, int alignment) {
|
2012-12-05 22:11:48 +01:00
|
|
|
if (pageSize < MIN_PAGE_SIZE) {
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: " + MIN_PAGE_SIZE + ')');
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2014-03-17 16:32:39 +01:00
|
|
|
if ((pageSize & pageSize - 1) != 0) {
|
|
|
|
throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: power of 2)");
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
2014-03-17 16:32:39 +01:00
|
|
|
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
if (pageSize < alignment) {
|
|
|
|
throw new IllegalArgumentException("Alignment cannot be greater than page size. " +
|
|
|
|
"Alignment: " + alignment + ", page size: " + pageSize + '.');
|
|
|
|
}
|
|
|
|
|
2014-03-17 16:32:39 +01:00
|
|
|
// Logarithm base 2. At this point we know that pageSize is a power of two.
|
|
|
|
return Integer.SIZE - 1 - Integer.numberOfLeadingZeros(pageSize);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
private static int validateAndCalculateChunkSize(int pageSize, int maxOrder) {
|
|
|
|
if (maxOrder > 14) {
|
|
|
|
throw new IllegalArgumentException("maxOrder: " + maxOrder + " (expected: 0-14)");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the resulting chunkSize does not overflow.
|
|
|
|
int chunkSize = pageSize;
|
|
|
|
for (int i = maxOrder; i > 0; i --) {
|
|
|
|
if (chunkSize > MAX_CHUNK_SIZE / 2) {
|
|
|
|
throw new IllegalArgumentException(String.format(
|
|
|
|
"pageSize (%d) << maxOrder (%d) must not exceed %d", pageSize, maxOrder, MAX_CHUNK_SIZE));
|
|
|
|
}
|
|
|
|
chunkSize <<= 1;
|
|
|
|
}
|
|
|
|
return chunkSize;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
protected ByteBuf newHeapBuffer(int initialCapacity, int maxCapacity) {
|
|
|
|
PoolThreadCache cache = threadCache.get();
|
2013-04-27 01:55:16 +02:00
|
|
|
PoolArena<byte[]> heapArena = cache.heapArena;
|
2013-12-04 11:03:32 +01:00
|
|
|
|
2017-02-15 13:19:31 +01:00
|
|
|
final ByteBuf buf;
|
2013-04-27 01:55:16 +02:00
|
|
|
if (heapArena != null) {
|
2013-12-04 11:03:32 +01:00
|
|
|
buf = heapArena.allocate(cache, initialCapacity, maxCapacity);
|
2013-04-27 01:55:16 +02:00
|
|
|
} else {
|
2017-02-15 13:19:31 +01:00
|
|
|
buf = PlatformDependent.hasUnsafe() ?
|
|
|
|
new UnpooledUnsafeHeapByteBuf(this, initialCapacity, maxCapacity) :
|
|
|
|
new UnpooledHeapByteBuf(this, initialCapacity, maxCapacity);
|
2013-04-27 01:55:16 +02:00
|
|
|
}
|
2013-12-04 11:03:32 +01:00
|
|
|
|
|
|
|
return toLeakAwareBuffer(buf);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
protected ByteBuf newDirectBuffer(int initialCapacity, int maxCapacity) {
|
|
|
|
PoolThreadCache cache = threadCache.get();
|
2013-04-27 01:55:16 +02:00
|
|
|
PoolArena<ByteBuffer> directArena = cache.directArena;
|
2013-12-04 11:03:32 +01:00
|
|
|
|
2017-02-15 13:19:31 +01:00
|
|
|
final ByteBuf buf;
|
2013-04-27 01:55:16 +02:00
|
|
|
if (directArena != null) {
|
2013-12-04 11:03:32 +01:00
|
|
|
buf = directArena.allocate(cache, initialCapacity, maxCapacity);
|
2013-04-27 01:55:16 +02:00
|
|
|
} else {
|
2017-02-15 13:19:31 +01:00
|
|
|
buf = PlatformDependent.hasUnsafe() ?
|
|
|
|
UnsafeByteBufUtil.newUnsafeDirectByteBuf(this, initialCapacity, maxCapacity) :
|
|
|
|
new UnpooledDirectByteBuf(this, initialCapacity, maxCapacity);
|
2013-04-27 01:55:16 +02:00
|
|
|
}
|
2013-12-04 11:03:32 +01:00
|
|
|
|
|
|
|
return toLeakAwareBuffer(buf);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2015-11-07 23:31:45 +01:00
|
|
|
/**
 * Default number of heap arenas - System Property: io.net5.allocator.numHeapArenas - default 2 * cores
 *
 * @return the default heap arena count applied to new allocators.
 */
public static int defaultNumHeapArena() {
    return DEFAULT_NUM_HEAP_ARENA;
}
|
|
|
|
|
|
|
|
/**
 * Default number of direct arenas - System Property: io.net5.allocator.numDirectArenas - default 2 * cores
 *
 * @return the default direct arena count applied to new allocators.
 */
public static int defaultNumDirectArena() {
    return DEFAULT_NUM_DIRECT_ARENA;
}
|
|
|
|
|
|
|
|
/**
 * Default buffer page size - System Property: io.net5.allocator.pageSize - default 8192
 *
 * @return the default page size in bytes.
 */
public static int defaultPageSize() {
    return DEFAULT_PAGE_SIZE;
}
|
|
|
|
|
|
|
|
/**
 * Default maximum order - System Property: io.net5.allocator.maxOrder - default 11
 *
 * @return the default maximum order (chunkSize = pageSize << maxOrder).
 */
public static int defaultMaxOrder() {
    return DEFAULT_MAX_ORDER;
}
|
|
|
|
|
2017-09-20 23:57:28 +02:00
|
|
|
/**
 * Default thread caching behavior - System Property: io.net5.allocator.useCacheForAllThreads - default true
 *
 * @return {@code true} if thread-local caches are created for all threads by default.
 */
public static boolean defaultUseCacheForAllThreads() {
    return DEFAULT_USE_CACHE_FOR_ALL_THREADS;
}
|
|
|
|
|
|
|
|
/**
 * Default prefer direct - System Property: io.net5.noPreferDirect - default false
 *
 * @return {@code true} if direct buffers are preferred over heap buffers by default.
 */
public static boolean defaultPreferDirect() {
    return PlatformDependent.directBufferPreferred();
}
|
|
|
|
|
2015-11-07 23:31:45 +01:00
|
|
|
/**
 * Default tiny cache size - default 0
 *
 * @return always {@code 0}, as the tiny size class no longer exists.
 * @deprecated Tiny caches have been merged into small caches.
 */
@Deprecated
public static int defaultTinyCacheSize() {
    return 0;
}
|
|
|
|
|
|
|
|
/**
 * Default small cache size - System Property: io.net5.allocator.smallCacheSize - default 256
 *
 * @return the default number of cached small allocations per thread cache.
 */
public static int defaultSmallCacheSize() {
    return DEFAULT_SMALL_CACHE_SIZE;
}
|
|
|
|
|
|
|
|
/**
 * Default normal cache size - System Property: io.net5.allocator.normalCacheSize - default 64
 *
 * @return the default number of cached normal allocations per thread cache.
 */
public static int defaultNormalCacheSize() {
    return DEFAULT_NORMAL_CACHE_SIZE;
}
|
|
|
|
|
2017-02-13 07:42:22 +01:00
|
|
|
/**
 * Return {@code true} if direct memory cache alignment is supported, {@code false} otherwise.
 */
public static boolean isDirectMemoryCacheAlignmentSupported() {
    // NOTE(review): alignment support is tied to Unsafe availability here,
    // presumably because aligning requires raw address access — confirm.
    return PlatformDependent.hasUnsafe();
}
|
|
|
|
|
2013-08-16 21:53:47 +02:00
|
|
|
@Override
public boolean isDirectBufferPooled() {
    // Direct pooling is active only when direct arenas were created for this allocator.
    return directArenas != null;
}
|
|
|
|
|
2014-03-01 15:47:03 +01:00
|
|
|
/**
 * Returns {@code true} if the calling {@link Thread} has a {@link ThreadLocal} cache for the allocated
 * buffers.
 */
@Deprecated
public boolean hasThreadLocalCache() {
    // Only checks for an existing cache for the calling thread.
    return threadCache.isSet();
}
|
|
|
|
|
|
|
|
/**
 * Free all cached buffers for the calling {@link Thread}.
 */
@Deprecated
public void freeThreadLocalCache() {
    // Removal triggers PoolThreadLocalCache.onRemoval, which frees the cached buffers.
    threadCache.remove();
}
|
|
|
|
|
2014-06-09 02:18:46 +02:00
|
|
|
/**
 * Per-thread cache holder. Lazily creates a {@link PoolThreadCache} for each thread that
 * allocates from this allocator, binding it to the least used heap and direct arenas.
 */
final class PoolThreadLocalCache extends FastThreadLocal<PoolThreadCache> {
    // Whether caches are created for every thread, or only for FastThreadLocalThreads.
    private final boolean useCacheForAllThreads;

    PoolThreadLocalCache(boolean useCacheForAllThreads) {
        this.useCacheForAllThreads = useCacheForAllThreads;
    }

    @Override
    protected synchronized PoolThreadCache initialValue() {
        // Spread threads across arenas: bind to the arenas with the fewest thread caches.
        final PoolArena<byte[]> heapArena = leastUsedArena(heapArenas);
        final PoolArena<ByteBuffer> directArena = leastUsedArena(directArenas);

        final Thread current = Thread.currentThread();
        if (useCacheForAllThreads || current instanceof FastThreadLocalThread) {
            final PoolThreadCache cache = new PoolThreadCache(
                    heapArena, directArena, smallCacheSize, normalCacheSize,
                    DEFAULT_MAX_CACHED_BUFFER_CAPACITY, DEFAULT_CACHE_TRIM_INTERVAL);

            // Schedule periodic trimming on the thread's executor (if any), so rarely
            // used cached memory is given back to the arenas over time.
            if (DEFAULT_CACHE_TRIM_INTERVAL_MILLIS > 0) {
                final EventExecutor executor = ThreadExecutorMap.currentExecutor();
                if (executor != null) {
                    executor.scheduleAtFixedRate(trimTask, DEFAULT_CACHE_TRIM_INTERVAL_MILLIS,
                            DEFAULT_CACHE_TRIM_INTERVAL_MILLIS, TimeUnit.MILLISECONDS);
                }
            }
            return cache;
        }
        // No caching so just use 0 as sizes.
        return new PoolThreadCache(heapArena, directArena, 0, 0, 0, 0);
    }

    @Override
    protected void onRemoval(PoolThreadCache threadCache) {
        // Release all cached buffers when the owning thread's value is removed.
        threadCache.free(false);
    }

    /**
     * Returns the arena with the fewest thread caches currently bound to it,
     * or {@code null} if no arenas are available.
     */
    private <T> PoolArena<T> leastUsedArena(PoolArena<T>[] arenas) {
        if (arenas == null || arenas.length == 0) {
            return null;
        }

        PoolArena<T> minArena = arenas[0];
        for (int i = 1; i < arenas.length; i++) {
            PoolArena<T> arena = arenas[i];
            if (arena.numThreadCaches.get() < minArena.numThreadCaches.get()) {
                minArena = arena;
            }
        }

        return minArena;
    }
}
|
|
|
|
|
2017-03-02 08:50:47 +01:00
|
|
|
/**
 * Returns the {@link PooledByteBufAllocatorMetric} that exposes this allocator's statistics.
 */
@Override
public PooledByteBufAllocatorMetric metric() {
    return metric;
}
|
|
|
|
|
2015-05-13 17:15:06 +02:00
|
|
|
/**
 * Return the number of heap arenas.
 *
 * @deprecated use {@link PooledByteBufAllocatorMetric#numHeapArenas()}.
 */
@Deprecated
public int numHeapArenas() {
    return heapArenaMetrics.size();
}
|
|
|
|
|
|
|
|
/**
 * Return the number of direct arenas.
 *
 * @deprecated use {@link PooledByteBufAllocatorMetric#numDirectArenas()}.
 */
@Deprecated
public int numDirectArenas() {
    return directArenaMetrics.size();
}
|
|
|
|
|
|
|
|
/**
 * Return a {@link List} of all heap {@link PoolArenaMetric}s that are provided by this pool.
 *
 * @deprecated use {@link PooledByteBufAllocatorMetric#heapArenas()}.
 */
@Deprecated
public List<PoolArenaMetric> heapArenas() {
    return heapArenaMetrics;
}
|
|
|
|
|
|
|
|
/**
 * Return a {@link List} of all direct {@link PoolArenaMetric}s that are provided by this pool.
 *
 * @deprecated use {@link PooledByteBufAllocatorMetric#directArenas()}.
 */
@Deprecated
public List<PoolArenaMetric> directArenas() {
    return directArenaMetrics;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Return the number of thread local caches used by this {@link PooledByteBufAllocator}.
|
2017-03-02 08:50:47 +01:00
|
|
|
*
|
|
|
|
* @deprecated use {@link PooledByteBufAllocatorMetric#numThreadLocalCaches()}.
|
2015-05-13 17:15:06 +02:00
|
|
|
*/
|
2017-03-02 08:50:47 +01:00
|
|
|
@Deprecated
|
2015-05-13 17:15:06 +02:00
|
|
|
public int numThreadLocalCaches() {
|
2016-03-14 17:25:43 +01:00
|
|
|
PoolArena<?>[] arenas = heapArenas != null ? heapArenas : directArenas;
|
|
|
|
if (arenas == null) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int total = 0;
|
2017-02-04 00:08:46 +01:00
|
|
|
for (PoolArena<?> arena : arenas) {
|
|
|
|
total += arena.numThreadCaches.get();
|
2016-03-14 17:25:43 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return total;
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Return the size of the tiny cache.
 *
 * @deprecated use {@link PooledByteBufAllocatorMetric#tinyCacheSize()}.
 */
@Deprecated
public int tinyCacheSize() {
    // Tiny caches were merged into small caches, so this always reports 0 now.
    return 0;
}
|
|
|
|
|
|
|
|
/**
 * Return the size of the small cache.
 *
 * @deprecated use {@link PooledByteBufAllocatorMetric#smallCacheSize()}.
 */
@Deprecated
public int smallCacheSize() {
    return smallCacheSize;
}
|
|
|
|
|
|
|
|
/**
 * Return the size of the normal cache.
 *
 * @deprecated use {@link PooledByteBufAllocatorMetric#normalCacheSize()}.
 */
@Deprecated
public int normalCacheSize() {
    return normalCacheSize;
}
|
|
|
|
|
2017-02-03 20:07:39 +01:00
|
|
|
/**
 * Return the chunk size for an arena.
 *
 * @deprecated use {@link PooledByteBufAllocatorMetric#chunkSize()}.
 */
@Deprecated
public final int chunkSize() {
    return chunkSize;
}
|
|
|
|
|
2017-03-02 08:50:47 +01:00
|
|
|
/**
 * Total active bytes across all heap arenas, or {@code -1} if heap pooling is disabled.
 */
final long usedHeapMemory() {
    return usedMemory(heapArenas);
}
|
|
|
|
|
2017-03-02 08:50:47 +01:00
|
|
|
/**
 * Total active bytes across all direct arenas, or {@code -1} if direct pooling is disabled.
 */
final long usedDirectMemory() {
    return usedMemory(directArenas);
}
|
|
|
|
|
2018-10-05 13:06:44 +02:00
|
|
|
private static long usedMemory(PoolArena<?>[] arenas) {
|
2017-02-24 20:06:17 +01:00
|
|
|
if (arenas == null) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
long used = 0;
|
|
|
|
for (PoolArena<?> arena : arenas) {
|
|
|
|
used += arena.numActiveBytes();
|
|
|
|
if (used < 0) {
|
|
|
|
return Long.MAX_VALUE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return used;
|
|
|
|
}
|
|
|
|
|
2021-09-15 16:22:57 +02:00
|
|
|
/**
 * Returns the number of bytes of heap memory that is currently pinned to heap buffers allocated by a
 * {@link ByteBufAllocator}, or {@code -1} if unknown.
 * A buffer can pin more memory than its {@linkplain ByteBuf#capacity() capacity} might indicate,
 * due to implementation details of the allocator.
 */
public final long pinnedHeapMemory() {
    return pinnedMemory(heapArenas);
}
|
|
|
|
|
|
|
|
/**
 * Returns the number of bytes of direct memory that is currently pinned to direct buffers allocated by a
 * {@link ByteBufAllocator}, or {@code -1} if unknown.
 * A buffer can pin more memory than its {@linkplain ByteBuf#capacity() capacity} might indicate,
 * due to implementation details of the allocator.
 */
public final long pinnedDirectMemory() {
    return pinnedMemory(directArenas);
}
|
|
|
|
|
|
|
|
private static long pinnedMemory(PoolArena<?>[] arenas) {
|
|
|
|
if (arenas == null) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
long used = 0;
|
|
|
|
for (PoolArena<?> arena : arenas) {
|
|
|
|
used += arena.numPinnedBytes();
|
|
|
|
if (used < 0) {
|
|
|
|
return Long.MAX_VALUE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return used;
|
|
|
|
}
|
|
|
|
|
2015-05-25 21:00:24 +02:00
|
|
|
/**
 * Returns the thread-local {@link PoolThreadCache} for the calling thread; never {@code null}.
 */
final PoolThreadCache threadCache() {
    PoolThreadCache cache = threadCache.get();
    assert cache != null;
    return cache;
}
|
|
|
|
|
2019-03-22 11:08:37 +01:00
|
|
|
/**
|
|
|
|
* Trim thread local cache for the current {@link Thread}, which will give back any cached memory that was not
|
|
|
|
* allocated frequently since the last trim operation.
|
|
|
|
*
|
|
|
|
* Returns {@code true} if a cache for the current {@link Thread} exists and so was trimmed, false otherwise.
|
|
|
|
*/
|
|
|
|
public boolean trimCurrentThreadCache() {
|
|
|
|
PoolThreadCache cache = threadCache.getIfExists();
|
|
|
|
if (cache != null) {
|
|
|
|
cache.trim();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-04-06 09:25:22 +02:00
|
|
|
/**
|
|
|
|
* Returns the status of the allocator (which contains all metrics) as string. Be aware this may be expensive
|
|
|
|
* and so should not called too frequently.
|
|
|
|
*/
|
|
|
|
public String dumpStats() {
|
|
|
|
int heapArenasLen = heapArenas == null ? 0 : heapArenas.length;
|
|
|
|
StringBuilder buf = new StringBuilder(512)
|
|
|
|
.append(heapArenasLen)
|
|
|
|
.append(" heap arena(s):")
|
|
|
|
.append(StringUtil.NEWLINE);
|
|
|
|
if (heapArenasLen > 0) {
|
|
|
|
for (PoolArena<byte[]> a: heapArenas) {
|
|
|
|
buf.append(a);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int directArenasLen = directArenas == null ? 0 : directArenas.length;
|
|
|
|
|
|
|
|
buf.append(directArenasLen)
|
|
|
|
.append(" direct arena(s):")
|
|
|
|
.append(StringUtil.NEWLINE);
|
|
|
|
if (directArenasLen > 0) {
|
|
|
|
for (PoolArena<ByteBuffer> a: directArenas) {
|
|
|
|
buf.append(a);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return buf.toString();
|
|
|
|
}
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|