2012-12-05 22:11:48 +01:00
|
|
|
/*
|
|
|
|
* Copyright 2012 The Netty Project
|
|
|
|
*
|
|
|
|
* The Netty Project licenses this file to you under the Apache License,
|
|
|
|
* version 2.0 (the "License"); you may not use this file except in compliance
|
|
|
|
* with the License. You may obtain a copy of the License at:
|
|
|
|
*
|
2020-10-23 14:44:18 +02:00
|
|
|
* https://www.apache.org/licenses/LICENSE-2.0
|
2012-12-05 22:11:48 +01:00
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
|
|
|
*/
|
|
|
|
|
2021-09-17 16:28:14 +02:00
|
|
|
package io.net5.buffer;
|
2012-12-05 22:11:48 +01:00
|
|
|
|
2021-09-17 16:28:14 +02:00
|
|
|
import io.net5.util.internal.PlatformDependent;
|
|
|
|
import io.net5.util.internal.StringUtil;
|
2012-12-05 22:11:48 +01:00
|
|
|
|
|
|
|
import java.nio.ByteBuffer;
|
2015-05-13 17:15:06 +02:00
|
|
|
import java.util.ArrayList;
|
|
|
|
import java.util.Collections;
|
|
|
|
import java.util.List;
|
2016-03-14 17:25:43 +01:00
|
|
|
import java.util.concurrent.atomic.AtomicInteger;
|
2019-01-22 13:53:28 +01:00
|
|
|
import java.util.concurrent.atomic.LongAdder;
|
2012-12-05 22:11:48 +01:00
|
|
|
|
2021-09-17 16:28:14 +02:00
|
|
|
import static io.net5.buffer.PoolChunk.isSubpage;
|
2016-04-08 11:56:32 +02:00
|
|
|
import static java.lang.Math.max;
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
abstract class PoolArena<T> extends SizeClasses implements PoolArenaMetric {
|
2015-10-09 21:03:03 +02:00
|
|
|
    // True when sun.misc.Unsafe is usable on this platform (queried once at class load).
    static final boolean HAS_UNSAFE = PlatformDependent.hasUnsafe();

    // Size classes tracked per-arena. Only Small (subpage) and Normal (page-run) remain;
    // huge allocations bypass the pools entirely (see allocateHuge).
    enum SizeClass {
        Small,
        Normal
    }

    // Allocator that owns this arena.
    final PooledByteBufAllocator parent;

    // Number of small subpage pool heads (mirrors nSubpages from SizeClasses).
    final int numSmallSubpagePools;
    // Requested direct-memory alignment; 0 means no alignment constraint.
    final int directMemoryCacheAlignment;
    // One circular doubly linked list of PoolSubpage per small size index; [i] is the sentinel head.
    private final PoolSubpage<T>[] smallSubpagePools;

    // Chunk lists bucketed by usage percentage; chunks migrate between them as their
    // occupancy changes. Linkage is established in the constructor.
    private final PoolChunkList<T> q050;
    private final PoolChunkList<T> q025;
    private final PoolChunkList<T> q000;
    private final PoolChunkList<T> qInit;
    private final PoolChunkList<T> q075;
    private final PoolChunkList<T> q100;

    // Unmodifiable view over the chunk lists, exposed via PoolArenaMetric.
    private final List<PoolChunkListMetric> chunkListMetrics;

    // Metrics for allocations and deallocations
    private long allocationsNormal;
    // We need to use the LongAdder here as this is not guarded via synchronized block.
    private final LongAdder allocationsSmall = new LongAdder();
    private final LongAdder allocationsHuge = new LongAdder();
    private final LongAdder activeBytesHuge = new LongAdder();

    // Guarded by synchronized (this).
    private long deallocationsSmall;
    private long deallocationsNormal;

    // We need to use the LongAdder here as this is not guarded via synchronized block.
    private final LongAdder deallocationsHuge = new LongAdder();

    // Number of thread caches backed by this arena.
    final AtomicInteger numThreadCaches = new AtomicInteger();

    // TODO: Test if adding padding helps under contention
    //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
|
|
|
|
|
2017-01-29 22:26:40 +01:00
|
|
|
protected PoolArena(PooledByteBufAllocator parent, int pageSize,
|
2020-07-15 21:33:27 +02:00
|
|
|
int pageShifts, int chunkSize, int cacheAlignment) {
|
|
|
|
super(pageSize, pageShifts, chunkSize, cacheAlignment);
|
2012-12-05 22:11:48 +01:00
|
|
|
this.parent = parent;
|
2017-02-27 21:09:07 +01:00
|
|
|
directMemoryCacheAlignment = cacheAlignment;
|
2012-12-05 22:11:48 +01:00
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
numSmallSubpagePools = nSubpages;
|
2014-03-01 15:47:03 +01:00
|
|
|
smallSubpagePools = newSubpagePoolArray(numSmallSubpagePools);
|
2012-12-05 22:11:48 +01:00
|
|
|
for (int i = 0; i < smallSubpagePools.length; i ++) {
|
2020-07-15 21:33:27 +02:00
|
|
|
smallSubpagePools[i] = newSubpagePoolHead();
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2019-01-22 13:53:28 +01:00
|
|
|
q100 = new PoolChunkList<>(this, null, 100, Integer.MAX_VALUE, chunkSize);
|
|
|
|
q075 = new PoolChunkList<>(this, q100, 75, 100, chunkSize);
|
|
|
|
q050 = new PoolChunkList<>(this, q075, 50, 100, chunkSize);
|
|
|
|
q025 = new PoolChunkList<>(this, q050, 25, 75, chunkSize);
|
|
|
|
q000 = new PoolChunkList<>(this, q025, 1, 50, chunkSize);
|
|
|
|
qInit = new PoolChunkList<>(this, q000, Integer.MIN_VALUE, 25, chunkSize);
|
2012-12-05 22:11:48 +01:00
|
|
|
|
2015-05-25 21:00:24 +02:00
|
|
|
q100.prevList(q075);
|
|
|
|
q075.prevList(q050);
|
|
|
|
q050.prevList(q025);
|
|
|
|
q025.prevList(q000);
|
|
|
|
q000.prevList(null);
|
|
|
|
qInit.prevList(qInit);
|
2015-05-13 17:15:06 +02:00
|
|
|
|
2019-01-22 13:53:28 +01:00
|
|
|
List<PoolChunkListMetric> metrics = new ArrayList<>(6);
|
2015-05-13 17:15:06 +02:00
|
|
|
metrics.add(qInit);
|
|
|
|
metrics.add(q000);
|
|
|
|
metrics.add(q025);
|
|
|
|
metrics.add(q050);
|
|
|
|
metrics.add(q075);
|
|
|
|
metrics.add(q100);
|
|
|
|
chunkListMetrics = Collections.unmodifiableList(metrics);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
private PoolSubpage<T> newSubpagePoolHead() {
|
|
|
|
PoolSubpage<T> head = new PoolSubpage<>();
|
2013-03-05 15:55:41 +01:00
|
|
|
head.prev = head;
|
|
|
|
head.next = head;
|
|
|
|
return head;
|
|
|
|
}
|
|
|
|
|
2012-12-05 22:11:48 +01:00
|
|
|
@SuppressWarnings("unchecked")
|
2013-03-05 15:55:41 +01:00
|
|
|
private PoolSubpage<T>[] newSubpagePoolArray(int size) {
|
|
|
|
return new PoolSubpage[size];
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2014-03-01 15:47:03 +01:00
|
|
|
    /**
     * Returns {@code true} if this arena manages direct (off-heap) memory,
     * {@code false} for heap memory.
     */
    abstract boolean isDirect();
|
|
|
|
|
2012-12-19 08:48:53 +01:00
|
|
|
PooledByteBuf<T> allocate(PoolThreadCache cache, int reqCapacity, int maxCapacity) {
|
2012-12-05 22:11:48 +01:00
|
|
|
PooledByteBuf<T> buf = newByteBuf(maxCapacity);
|
2012-12-19 08:48:53 +01:00
|
|
|
allocate(cache, buf, reqCapacity);
|
2012-12-05 22:11:48 +01:00
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
private void allocate(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity) {
|
|
|
|
final int sizeIdx = size2SizeIdx(reqCapacity);
|
2014-03-01 15:47:03 +01:00
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
if (sizeIdx <= smallMaxSizeIdx) {
|
|
|
|
tcacheAllocateSmall(cache, buf, reqCapacity, sizeIdx);
|
|
|
|
} else if (sizeIdx < nSizes) {
|
|
|
|
tcacheAllocateNormal(cache, buf, reqCapacity, sizeIdx);
|
|
|
|
} else {
|
|
|
|
int normCapacity = directMemoryCacheAlignment > 0
|
|
|
|
? normalizeSize(reqCapacity) : reqCapacity;
|
|
|
|
// Huge allocations are never served via the cache so just call allocateHuge
|
|
|
|
allocateHuge(buf, normCapacity);
|
2014-03-01 15:47:03 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
private void tcacheAllocateSmall(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity,
|
|
|
|
final int sizeIdx) {
|
2014-03-01 15:47:03 +01:00
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
if (cache.allocateSmall(this, buf, reqCapacity, sizeIdx)) {
|
|
|
|
// was able to allocate out of the cache so move on
|
|
|
|
return;
|
|
|
|
}
|
2014-03-01 15:47:03 +01:00
|
|
|
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
/*
|
2020-07-15 21:33:27 +02:00
|
|
|
* Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
|
|
|
|
* {@link PoolChunk#free(long)} may modify the doubly linked list as well.
|
|
|
|
*/
|
|
|
|
final PoolSubpage<T> head = smallSubpagePools[sizeIdx];
|
2020-07-16 19:40:40 +02:00
|
|
|
final boolean needsNormalAllocation;
|
2020-07-15 21:33:27 +02:00
|
|
|
synchronized (head) {
|
|
|
|
final PoolSubpage<T> s = head.next;
|
2020-07-16 19:40:40 +02:00
|
|
|
needsNormalAllocation = s == head;
|
|
|
|
if (!needsNormalAllocation) {
|
2020-07-15 21:33:27 +02:00
|
|
|
assert s.doNotDestroy && s.elemSize == sizeIdx2size(sizeIdx);
|
|
|
|
long handle = s.allocate();
|
|
|
|
assert handle >= 0;
|
|
|
|
s.chunk.initBufWithSubpage(buf, null, handle, reqCapacity, cache);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
2020-07-15 21:33:27 +02:00
|
|
|
}
|
2012-12-05 22:11:48 +01:00
|
|
|
|
2020-07-16 19:40:40 +02:00
|
|
|
if (needsNormalAllocation) {
|
|
|
|
synchronized (this) {
|
|
|
|
allocateNormal(buf, reqCapacity, sizeIdx, cache);
|
|
|
|
}
|
2020-07-15 21:33:27 +02:00
|
|
|
}
|
2020-07-16 19:40:40 +02:00
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
incSmallAllocation();
|
|
|
|
}
|
2017-01-27 09:28:19 +01:00
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
private void tcacheAllocateNormal(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity,
|
|
|
|
final int sizeIdx) {
|
|
|
|
if (cache.allocateNormal(this, buf, reqCapacity, sizeIdx)) {
|
|
|
|
// was able to allocate out of the cache so move on
|
2015-05-20 07:27:55 +02:00
|
|
|
return;
|
2015-05-13 13:50:22 +02:00
|
|
|
}
|
2020-07-15 21:33:27 +02:00
|
|
|
synchronized (this) {
|
|
|
|
allocateNormal(buf, reqCapacity, sizeIdx, cache);
|
|
|
|
++allocationsNormal;
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-29 11:29:46 +01:00
|
|
|
// Method must be called inside synchronized(this) { ... } block
|
2020-07-15 21:33:27 +02:00
|
|
|
private void allocateNormal(PooledByteBuf<T> buf, int reqCapacity, int sizeIdx, PoolThreadCache threadCache) {
|
|
|
|
if (q050.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
|
|
|
|
q025.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
|
|
|
|
q000.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
|
|
|
|
qInit.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
|
|
|
|
q075.allocate(buf, reqCapacity, sizeIdx, threadCache)) {
|
2012-12-05 22:11:48 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a new chunk.
|
2020-07-15 21:33:27 +02:00
|
|
|
PoolChunk<T> c = newChunk(pageSize, nPSizes, pageShifts, chunkSize);
|
|
|
|
boolean success = c.allocate(buf, reqCapacity, sizeIdx, threadCache);
|
2018-12-04 15:26:05 +01:00
|
|
|
assert success;
|
2012-12-05 22:11:48 +01:00
|
|
|
qInit.add(c);
|
|
|
|
}
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
private void incSmallAllocation() {
|
|
|
|
allocationsSmall.increment();
|
2017-01-27 09:28:19 +01:00
|
|
|
}
|
|
|
|
|
2012-12-19 09:35:32 +01:00
|
|
|
private void allocateHuge(PooledByteBuf<T> buf, int reqCapacity) {
|
2016-04-06 15:12:37 +02:00
|
|
|
PoolChunk<T> chunk = newUnpooledChunk(reqCapacity);
|
|
|
|
activeBytesHuge.add(chunk.chunkSize());
|
|
|
|
buf.initUnpooled(chunk, reqCapacity);
|
2016-04-05 11:46:25 +02:00
|
|
|
allocationsHuge.increment();
|
2012-12-19 09:35:32 +01:00
|
|
|
}
|
|
|
|
|
2018-12-04 15:26:05 +01:00
|
|
|
void free(PoolChunk<T> chunk, ByteBuffer nioBuffer, long handle, int normCapacity, PoolThreadCache cache) {
|
2012-12-19 09:35:32 +01:00
|
|
|
if (chunk.unpooled) {
|
2016-04-06 15:12:37 +02:00
|
|
|
int size = chunk.chunkSize();
|
2012-12-19 09:35:32 +01:00
|
|
|
destroyChunk(chunk);
|
2016-04-06 15:12:37 +02:00
|
|
|
activeBytesHuge.add(-size);
|
2016-05-05 20:54:08 +02:00
|
|
|
deallocationsHuge.increment();
|
2012-12-19 09:35:32 +01:00
|
|
|
} else {
|
2020-07-15 21:33:27 +02:00
|
|
|
SizeClass sizeClass = sizeClass(handle);
|
2018-12-04 15:26:05 +01:00
|
|
|
if (cache != null && cache.add(this, chunk, nioBuffer, handle, normCapacity, sizeClass)) {
|
2015-05-19 15:03:29 +02:00
|
|
|
// cached so not free it.
|
|
|
|
return;
|
2014-03-01 15:47:03 +01:00
|
|
|
}
|
2015-05-19 15:03:29 +02:00
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
freeChunk(chunk, handle, normCapacity, sizeClass, nioBuffer, false);
|
2015-05-25 21:00:24 +02:00
|
|
|
}
|
|
|
|
}
|
2014-09-02 07:15:58 +02:00
|
|
|
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
private static SizeClass sizeClass(long handle) {
|
2020-07-15 21:33:27 +02:00
|
|
|
return isSubpage(handle) ? SizeClass.Small : SizeClass.Normal;
|
2015-05-25 21:00:24 +02:00
|
|
|
}
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
void freeChunk(PoolChunk<T> chunk, long handle, int normCapacity, SizeClass sizeClass, ByteBuffer nioBuffer,
|
|
|
|
boolean finalizer) {
|
2015-05-25 21:00:24 +02:00
|
|
|
final boolean destroyChunk;
|
|
|
|
synchronized (this) {
|
2019-03-22 12:16:21 +01:00
|
|
|
// We only call this if freeChunk is not called because of the PoolThreadCache finalizer as otherwise this
|
|
|
|
// may fail due lazy class-loading in for example tomcat.
|
|
|
|
if (!finalizer) {
|
|
|
|
switch (sizeClass) {
|
|
|
|
case Normal:
|
|
|
|
++deallocationsNormal;
|
|
|
|
break;
|
|
|
|
case Small:
|
|
|
|
++deallocationsSmall;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
throw new Error();
|
|
|
|
}
|
2013-11-30 19:48:40 +01:00
|
|
|
}
|
2020-07-15 21:33:27 +02:00
|
|
|
destroyChunk = !chunk.parent.free(chunk, handle, normCapacity, nioBuffer);
|
2015-05-25 21:00:24 +02:00
|
|
|
}
|
|
|
|
if (destroyChunk) {
|
|
|
|
// destroyChunk not need to be called while holding the synchronized lock.
|
|
|
|
destroyChunk(chunk);
|
2012-12-19 09:35:32 +01:00
|
|
|
}
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
PoolSubpage<T> findSubpagePoolHead(int sizeIdx) {
|
|
|
|
return smallSubpagePools[sizeIdx];
|
2017-01-29 22:26:40 +01:00
|
|
|
}
|
|
|
|
|
2012-12-05 22:11:48 +01:00
|
|
|
void reallocate(PooledByteBuf<T> buf, int newCapacity, boolean freeOldMemory) {
|
2019-08-16 08:18:09 +02:00
|
|
|
assert newCapacity >= 0 && newCapacity <= buf.maxCapacity();
|
2012-12-05 22:11:48 +01:00
|
|
|
|
|
|
|
int oldCapacity = buf.length;
|
|
|
|
if (oldCapacity == newCapacity) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
PoolChunk<T> oldChunk = buf.chunk;
|
2018-12-04 15:26:05 +01:00
|
|
|
ByteBuffer oldNioBuffer = buf.tmpNioBuf;
|
2012-12-05 22:11:48 +01:00
|
|
|
long oldHandle = buf.handle;
|
|
|
|
T oldMemory = buf.memory;
|
|
|
|
int oldOffset = buf.offset;
|
2014-03-01 15:47:03 +01:00
|
|
|
int oldMaxLength = buf.maxLength;
|
2012-12-05 22:11:48 +01:00
|
|
|
|
2019-08-16 08:18:09 +02:00
|
|
|
// This does not touch buf's reader/writer indices
|
2015-05-25 21:00:24 +02:00
|
|
|
allocate(parent.threadCache(), buf, newCapacity);
|
2019-08-16 08:18:09 +02:00
|
|
|
int bytesToCopy;
|
2012-12-05 22:11:48 +01:00
|
|
|
if (newCapacity > oldCapacity) {
|
2019-08-16 08:18:09 +02:00
|
|
|
bytesToCopy = oldCapacity;
|
|
|
|
} else {
|
|
|
|
buf.trimIndicesToCapacity(newCapacity);
|
|
|
|
bytesToCopy = newCapacity;
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
2019-11-16 20:26:02 +01:00
|
|
|
memoryCopy(oldMemory, oldOffset, buf, bytesToCopy);
|
2012-12-05 22:11:48 +01:00
|
|
|
if (freeOldMemory) {
|
2018-12-04 15:26:05 +01:00
|
|
|
free(oldChunk, oldNioBuffer, oldHandle, oldMaxLength, buf.cache);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-14 17:25:43 +01:00
|
|
|
@Override
|
|
|
|
public int numThreadCaches() {
|
|
|
|
return numThreadCaches.get();
|
|
|
|
}
|
|
|
|
|
2015-05-13 17:15:06 +02:00
|
|
|
@Override
|
|
|
|
public int numTinySubpages() {
|
2020-07-15 21:33:27 +02:00
|
|
|
return 0;
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public int numSmallSubpages() {
|
|
|
|
return smallSubpagePools.length;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public int numChunkLists() {
|
|
|
|
return chunkListMetrics.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public List<PoolSubpageMetric> tinySubpages() {
|
2020-07-15 21:33:27 +02:00
|
|
|
return Collections.emptyList();
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public List<PoolSubpageMetric> smallSubpages() {
|
|
|
|
return subPageMetricList(smallSubpagePools);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public List<PoolChunkListMetric> chunkLists() {
|
|
|
|
return chunkListMetrics;
|
|
|
|
}
|
|
|
|
|
|
|
|
private static List<PoolSubpageMetric> subPageMetricList(PoolSubpage<?>[] pages) {
|
2019-01-22 13:53:28 +01:00
|
|
|
List<PoolSubpageMetric> metrics = new ArrayList<>();
|
2017-02-04 00:08:46 +01:00
|
|
|
for (PoolSubpage<?> head : pages) {
|
2015-05-13 17:15:06 +02:00
|
|
|
if (head.next == head) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
PoolSubpage<?> s = head.next;
|
2019-01-23 14:01:48 +01:00
|
|
|
do {
|
2015-05-13 17:15:06 +02:00
|
|
|
metrics.add(s);
|
|
|
|
s = s.next;
|
2019-01-23 14:01:48 +01:00
|
|
|
} while (s != head);
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
return metrics;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numAllocations() {
|
2016-03-14 09:09:24 +01:00
|
|
|
final long allocsNormal;
|
|
|
|
synchronized (this) {
|
|
|
|
allocsNormal = allocationsNormal;
|
|
|
|
}
|
2020-07-15 21:33:27 +02:00
|
|
|
|
|
|
|
return allocationsSmall.longValue() + allocsNormal + allocationsHuge.longValue();
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numTinyAllocations() {
|
2020-07-15 21:33:27 +02:00
|
|
|
return 0;
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numSmallAllocations() {
|
2019-01-22 13:53:28 +01:00
|
|
|
return allocationsSmall.longValue();
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2016-03-14 09:09:24 +01:00
|
|
|
public synchronized long numNormalAllocations() {
|
2015-05-13 17:15:06 +02:00
|
|
|
return allocationsNormal;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numDeallocations() {
|
2016-03-14 09:09:24 +01:00
|
|
|
final long deallocs;
|
|
|
|
synchronized (this) {
|
2020-07-15 21:33:27 +02:00
|
|
|
deallocs = deallocationsSmall + deallocationsNormal;
|
2016-03-14 09:09:24 +01:00
|
|
|
}
|
2019-01-22 13:53:28 +01:00
|
|
|
return deallocs + deallocationsHuge.longValue();
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2020-07-15 21:33:27 +02:00
|
|
|
public long numTinyDeallocations() {
|
|
|
|
return 0;
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2016-03-14 09:09:24 +01:00
|
|
|
public synchronized long numSmallDeallocations() {
|
2015-05-13 17:15:06 +02:00
|
|
|
return deallocationsSmall;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2016-03-14 09:09:24 +01:00
|
|
|
public synchronized long numNormalDeallocations() {
|
2015-05-13 17:15:06 +02:00
|
|
|
return deallocationsNormal;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numHugeAllocations() {
|
2019-01-22 13:53:28 +01:00
|
|
|
return allocationsHuge.longValue();
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numHugeDeallocations() {
|
2019-01-22 13:53:28 +01:00
|
|
|
return deallocationsHuge.longValue();
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2016-03-14 09:09:24 +01:00
|
|
|
public long numActiveAllocations() {
|
2020-07-15 21:33:27 +02:00
|
|
|
|
|
|
|
long val = allocationsSmall.longValue() + allocationsHuge.longValue()
|
2019-01-22 13:53:28 +01:00
|
|
|
- deallocationsHuge.longValue();
|
2016-03-14 09:09:24 +01:00
|
|
|
synchronized (this) {
|
2020-07-15 21:33:27 +02:00
|
|
|
val += allocationsNormal - (deallocationsSmall + deallocationsNormal);
|
2016-03-14 09:09:24 +01:00
|
|
|
}
|
2016-04-08 11:56:32 +02:00
|
|
|
return max(val, 0);
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numActiveTinyAllocations() {
|
2020-07-15 21:33:27 +02:00
|
|
|
return 0;
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numActiveSmallAllocations() {
|
2016-04-08 11:56:32 +02:00
|
|
|
return max(numSmallAllocations() - numSmallDeallocations(), 0);
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numActiveNormalAllocations() {
|
2016-03-14 09:09:24 +01:00
|
|
|
final long val;
|
|
|
|
synchronized (this) {
|
|
|
|
val = allocationsNormal - deallocationsNormal;
|
|
|
|
}
|
2016-04-08 11:56:32 +02:00
|
|
|
return max(val, 0);
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public long numActiveHugeAllocations() {
|
2016-04-08 11:56:32 +02:00
|
|
|
return max(numHugeAllocations() - numHugeDeallocations(), 0);
|
2015-05-13 17:15:06 +02:00
|
|
|
}
|
|
|
|
|
2016-04-06 15:12:37 +02:00
|
|
|
@Override
|
|
|
|
public long numActiveBytes() {
|
2019-01-22 13:53:28 +01:00
|
|
|
long val = activeBytesHuge.longValue();
|
2016-04-06 15:12:37 +02:00
|
|
|
synchronized (this) {
|
|
|
|
for (int i = 0; i < chunkListMetrics.size(); i++) {
|
|
|
|
for (PoolChunkMetric m: chunkListMetrics.get(i)) {
|
|
|
|
val += m.chunkSize();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return max(0, val);
|
|
|
|
}
|
|
|
|
|
2021-09-15 16:22:57 +02:00
|
|
|
/**
|
|
|
|
* Return the number of bytes that are currently pinned to buffer instances, by the arena. The pinned memory is not
|
|
|
|
* accessible for use by any other allocation, until the buffers using have all been released.
|
|
|
|
*/
|
|
|
|
public long numPinnedBytes() {
|
|
|
|
long val = activeBytesHuge.longValue(); // Huge chunks are exact-sized for the buffers they were allocated to.
|
|
|
|
synchronized (this) {
|
|
|
|
for (int i = 0; i < chunkListMetrics.size(); i++) {
|
|
|
|
for (PoolChunkMetric m: chunkListMetrics.get(i)) {
|
|
|
|
val += ((PoolChunk<?>) m).pinnedBytes();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return max(0, val);
|
|
|
|
}
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
    /** Creates a new pooled chunk for this arena's memory type. */
    protected abstract PoolChunk<T> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize);
    /** Creates a one-off, unpooled chunk of exactly {@code capacity} bytes for a huge allocation. */
    protected abstract PoolChunk<T> newUnpooledChunk(int capacity);
    /** Creates an uninitialized buffer instance of the arena's memory type. */
    protected abstract PooledByteBuf<T> newByteBuf(int maxCapacity);
    /** Copies {@code length} bytes from {@code src} at {@code srcOffset} into {@code dst}'s memory. */
    protected abstract void memoryCopy(T src, int srcOffset, PooledByteBuf<T> dst, int length);
    /** Releases the memory backing {@code chunk}. */
    protected abstract void destroyChunk(PoolChunk<T> chunk);
|
|
|
|
|
2015-05-13 17:15:06 +02:00
|
|
|
@Override
|
2012-12-05 22:11:48 +01:00
|
|
|
public synchronized String toString() {
|
2014-11-08 23:46:30 +01:00
|
|
|
StringBuilder buf = new StringBuilder()
|
|
|
|
.append("Chunk(s) at 0~25%:")
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append(qInit)
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append("Chunk(s) at 0~50%:")
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append(q000)
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append("Chunk(s) at 25~75%:")
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append(q025)
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append("Chunk(s) at 50~100%:")
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append(q050)
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append("Chunk(s) at 75~100%:")
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append(q075)
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append("Chunk(s) at 100%:")
|
|
|
|
.append(StringUtil.NEWLINE)
|
|
|
|
.append(q100)
|
|
|
|
.append(StringUtil.NEWLINE)
|
2020-07-15 21:33:27 +02:00
|
|
|
.append("small subpages:");
|
2016-07-10 12:54:30 +02:00
|
|
|
appendPoolSubPages(buf, smallSubpagePools);
|
|
|
|
buf.append(StringUtil.NEWLINE);
|
|
|
|
|
|
|
|
return buf.toString();
|
|
|
|
}
|
|
|
|
|
|
|
|
private static void appendPoolSubPages(StringBuilder buf, PoolSubpage<?>[] subpages) {
|
|
|
|
for (int i = 0; i < subpages.length; i ++) {
|
|
|
|
PoolSubpage<?> head = subpages[i];
|
2013-03-05 15:55:41 +01:00
|
|
|
if (head.next == head) {
|
2012-12-05 22:11:48 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2014-11-08 23:46:30 +01:00
|
|
|
buf.append(StringUtil.NEWLINE)
|
2016-07-10 12:54:30 +02:00
|
|
|
.append(i)
|
|
|
|
.append(": ");
|
|
|
|
PoolSubpage<?> s = head.next;
|
2019-01-23 14:01:48 +01:00
|
|
|
do {
|
2013-03-05 15:55:41 +01:00
|
|
|
buf.append(s);
|
|
|
|
s = s.next;
|
2019-01-23 14:01:48 +01:00
|
|
|
} while (s != head);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-17 04:43:51 +02:00
|
|
|
@Override
|
|
|
|
protected final void finalize() throws Throwable {
|
|
|
|
try {
|
|
|
|
super.finalize();
|
|
|
|
} finally {
|
|
|
|
destroyPoolSubPages(smallSubpagePools);
|
|
|
|
destroyPoolChunkLists(qInit, q000, q025, q050, q075, q100);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private static void destroyPoolSubPages(PoolSubpage<?>[] pages) {
|
|
|
|
for (PoolSubpage<?> page : pages) {
|
|
|
|
page.destroy();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private void destroyPoolChunkLists(PoolChunkList<T>... chunkLists) {
|
|
|
|
for (PoolChunkList<T> chunkList: chunkLists) {
|
|
|
|
chunkList.destroy(this);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-05 22:11:48 +01:00
|
|
|
static final class HeapArena extends PoolArena<byte[]> {
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
HeapArena(PooledByteBufAllocator parent, int pageSize, int pageShifts,
|
|
|
|
int chunkSize, int directMemoryCacheAlignment) {
|
|
|
|
super(parent, pageSize, pageShifts, chunkSize,
|
|
|
|
directMemoryCacheAlignment);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2017-03-26 04:06:31 +02:00
|
|
|
private static byte[] newByteArray(int size) {
|
|
|
|
return PlatformDependent.allocateUninitializedArray(size);
|
|
|
|
}
|
|
|
|
|
2014-03-01 15:47:03 +01:00
|
|
|
@Override
|
|
|
|
boolean isDirect() {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-12-05 22:11:48 +01:00
|
|
|
@Override
|
2020-07-15 21:33:27 +02:00
|
|
|
protected PoolChunk<byte[]> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) {
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
return new PoolChunk<>(
|
|
|
|
this, null, newByteArray(chunkSize), pageSize, pageShifts, chunkSize, maxPageIdx);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2012-12-19 09:35:32 +01:00
|
|
|
@Override
|
|
|
|
protected PoolChunk<byte[]> newUnpooledChunk(int capacity) {
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
return new PoolChunk<>(this, null, newByteArray(capacity), capacity);
|
2012-12-19 09:35:32 +01:00
|
|
|
}
|
|
|
|
|
2012-12-05 22:11:48 +01:00
|
|
|
@Override
protected void destroyChunk(PoolChunk<byte[]> chunk) {
    // Heap chunks are backed by ordinary byte[] arrays, so there is nothing to
    // release explicitly; once the chunk becomes unreachable the garbage
    // collector reclaims the array. Rely on GC.
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
protected PooledByteBuf<byte[]> newByteBuf(int maxCapacity) {
|
2015-10-09 21:03:03 +02:00
|
|
|
return HAS_UNSAFE ? PooledUnsafeHeapByteBuf.newUnsafeInstance(maxCapacity)
|
|
|
|
: PooledHeapByteBuf.newInstance(maxCapacity);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2019-11-16 20:26:02 +01:00
|
|
|
protected void memoryCopy(byte[] src, int srcOffset, PooledByteBuf<byte[]> dst, int length) {
|
2012-12-05 22:11:48 +01:00
|
|
|
if (length == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-11-16 20:26:02 +01:00
|
|
|
System.arraycopy(src, srcOffset, dst.memory, dst.offset, length);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static final class DirectArena extends PoolArena<ByteBuffer> {
|
|
|
|
|
2020-07-15 21:33:27 +02:00
|
|
|
DirectArena(PooledByteBufAllocator parent, int pageSize, int pageShifts,
|
|
|
|
int chunkSize, int directMemoryCacheAlignment) {
|
|
|
|
super(parent, pageSize, pageShifts, chunkSize,
|
|
|
|
directMemoryCacheAlignment);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2014-03-01 15:47:03 +01:00
|
|
|
@Override
|
|
|
|
boolean isDirect() {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-12-05 22:11:48 +01:00
|
|
|
@Override
|
2020-07-15 21:33:27 +02:00
|
|
|
protected PoolChunk<ByteBuffer> newChunk(int pageSize, int maxPageIdx,
|
|
|
|
int pageShifts, int chunkSize) {
|
2017-01-29 22:26:40 +01:00
|
|
|
if (directMemoryCacheAlignment == 0) {
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
ByteBuffer memory = allocateDirect(chunkSize);
|
|
|
|
return new PoolChunk<>(this, memory, memory, pageSize, pageShifts,
|
|
|
|
chunkSize, maxPageIdx);
|
2017-01-29 22:26:40 +01:00
|
|
|
}
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
|
|
|
|
final ByteBuffer base = allocateDirect(chunkSize + directMemoryCacheAlignment);
|
|
|
|
final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, directMemoryCacheAlignment);
|
|
|
|
return new PoolChunk<>(this, base, memory, pageSize,
|
|
|
|
pageShifts, chunkSize, maxPageIdx);
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
2012-12-19 09:35:32 +01:00
|
|
|
@Override
|
|
|
|
protected PoolChunk<ByteBuffer> newUnpooledChunk(int capacity) {
|
2017-01-29 22:26:40 +01:00
|
|
|
if (directMemoryCacheAlignment == 0) {
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
ByteBuffer memory = allocateDirect(capacity);
|
|
|
|
return new PoolChunk<>(this, memory, memory, capacity);
|
2017-01-29 22:26:40 +01:00
|
|
|
}
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
|
|
|
|
final ByteBuffer base = allocateDirect(capacity + directMemoryCacheAlignment);
|
|
|
|
final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, directMemoryCacheAlignment);
|
|
|
|
return new PoolChunk<>(this, base, memory, capacity);
|
2016-05-23 11:59:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
private static ByteBuffer allocateDirect(int capacity) {
|
|
|
|
return PlatformDependent.useDirectBufferNoCleaner() ?
|
|
|
|
PlatformDependent.allocateDirectNoCleaner(capacity) : ByteBuffer.allocateDirect(capacity);
|
2012-12-19 09:35:32 +01:00
|
|
|
}
|
|
|
|
|
2012-12-05 22:11:48 +01:00
|
|
|
@Override
|
|
|
|
protected void destroyChunk(PoolChunk<ByteBuffer> chunk) {
|
2016-05-23 11:59:55 +02:00
|
|
|
if (PlatformDependent.useDirectBufferNoCleaner()) {
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
PlatformDependent.freeDirectNoCleaner((ByteBuffer) chunk.base);
|
2016-05-23 11:59:55 +02:00
|
|
|
} else {
|
Fix alignment handling for pooled direct buffers (#11106)
Motivation:
Alignment handling was broken, and basically turned into a fixed offset into each allocation address regardless of its initial value, instead of ensuring that the allocated address is either aligned or bumped to the nearest alignment offset.
The brokenness of the alignment handling extended so far, that overlapping ByteBuf instances could even be created, as was seen in #11101.
Modification:
Instead of fixing the per-allocation pointer bump, we now ensure that 1) the minimum page size is a whole multiple of the alignment, and 2) the reference memory for each chunk is bumped to the nearest aligned address, and finally 3) ensured that the reservations are whole multiples of the alignment, thus ensuring that the next allocation automatically occurs from an aligned address.
Incidentally, (3) above comes for free because the reservations are in whole pages, and in (1) we ensured that pages are sized in whole multiples of the alignment.
In order to ensure that the memory for a chunk is aligned, we introduce some new PlatformDependent infrastructure.
The PlatformDependent.alignDirectBuffer will produce a slice of the given buffer, and the slice will have an address that is aligned.
This method is plainly available on ByteBuffer in Java 9 onwards, but for pre-9 we have to use Unsafe, which means it can fail and might not be available on all platforms.
Attempts to create a PooledByteBufAllocator that uses alignment, when this is not supported, will throw an exception.
Luckily, I think use of aligned allocations are rare.
Result:
Aligned pooled byte bufs now work correctly, and never have any overlap.
Fixes #11101
2021-03-23 17:07:06 +01:00
|
|
|
PlatformDependent.freeDirectBuffer((ByteBuffer) chunk.base);
|
2016-05-23 11:59:55 +02:00
|
|
|
}
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
protected PooledByteBuf<ByteBuffer> newByteBuf(int maxCapacity) {
|
2013-03-05 07:25:25 +01:00
|
|
|
if (HAS_UNSAFE) {
|
2013-06-10 12:52:56 +02:00
|
|
|
return PooledUnsafeDirectByteBuf.newInstance(maxCapacity);
|
2013-01-10 10:27:16 +01:00
|
|
|
} else {
|
2013-06-10 12:52:56 +02:00
|
|
|
return PooledDirectByteBuf.newInstance(maxCapacity);
|
2013-01-10 10:27:16 +01:00
|
|
|
}
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2019-11-16 20:26:02 +01:00
|
|
|
protected void memoryCopy(ByteBuffer src, int srcOffset, PooledByteBuf<ByteBuffer> dstBuf, int length) {
|
2012-12-05 22:11:48 +01:00
|
|
|
if (length == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-03-05 07:25:25 +01:00
|
|
|
if (HAS_UNSAFE) {
|
2013-01-10 10:27:16 +01:00
|
|
|
PlatformDependent.copyMemory(
|
|
|
|
PlatformDependent.directBufferAddress(src) + srcOffset,
|
2019-11-16 20:26:02 +01:00
|
|
|
PlatformDependent.directBufferAddress(dstBuf.memory) + dstBuf.offset, length);
|
2013-01-10 10:27:16 +01:00
|
|
|
} else {
|
|
|
|
// We must duplicate the NIO buffers because they may be accessed by other Netty buffers.
|
|
|
|
src = src.duplicate();
|
2019-11-16 20:26:02 +01:00
|
|
|
ByteBuffer dst = dstBuf.internalNioBuffer();
|
2013-01-10 10:27:16 +01:00
|
|
|
src.position(srcOffset).limit(srcOffset + length);
|
2019-11-16 20:26:02 +01:00
|
|
|
dst.position(dstBuf.offset);
|
2013-01-10 10:27:16 +01:00
|
|
|
dst.put(src);
|
|
|
|
}
|
2012-12-05 22:11:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|