netty5/buffer/src/main/java/io/net5/buffer/PooledByteBuf.java

/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.net5.buffer;
import io.net5.util.internal.ObjectPool.Handle;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ScatteringByteChannel;
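/**
* Base class for pooled {@link ByteBuf} implementations. Each instance wraps a region of a
* {@link PoolChunk}'s backing memory, described by {@code offset}, {@code length} and
* {@code maxLength}, and the wrapper itself is reused through the recycler {@link Handle}
* it was constructed with.
*/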
abstract class PooledByteBuf<T> extends AbstractReferenceCountedByteBuf {
private final Handle<PooledByteBuf<T>> recyclerHandle;
protected PoolChunk<T> chunk;
protected long handle;
protected T memory;
protected int offset;
protected int length;
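// The underlying allocation may be larger than the requested capacity; maxLength is the size up
// to which capacity(int) can grow this buffer in place without asking the arena to reallocate.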
int maxLength;
PoolThreadCache cache;
ByteBuffer tmpNioBuf;
private ByteBufAllocator allocator;
@SuppressWarnings("unchecked")
protected PooledByteBuf(Handle<? extends PooledByteBuf<T>> recyclerHandle, int maxCapacity) {
super(maxCapacity);
this.recyclerHandle = (Handle<PooledByteBuf<T>>) recyclerHandle;
}
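// Initializes this buffer with a region of a pooled chunk; handle identifies the reservation
// inside the chunk and cache is the thread-local cache consulted when the region is freed.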
void init(PoolChunk<T> chunk, ByteBuffer nioBuffer,
long handle, int offset, int length, int maxLength, PoolThreadCache cache) {
init0(chunk, nioBuffer, handle, offset, length, maxLength, cache);
}
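// Variant for buffers backed by an unpooled (one-off) chunk: the region starts at offset 0 and
// maxLength equals length, so any capacity change in capacity(int) goes through reallocation.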
void initUnpooled(PoolChunk<T> chunk, int length) {
init0(chunk, null, 0, 0, length, length, null);
}
private void init0(PoolChunk<T> chunk, ByteBuffer nioBuffer,
long handle, int offset, int length, int maxLength, PoolThreadCache cache) {
assert handle >= 0;
assert chunk != null;
this.chunk = chunk;
memory = chunk.memory;
tmpNioBuf = nioBuffer;
allocator = chunk.arena.parent;
this.cache = cache;
this.handle = handle;
this.offset = offset;
this.length = length;
this.maxLength = maxLength;
}
/**
* Method must be called before this buffer is reused by the {@link PooledByteBufAllocator}.
*/
final void reuse(int maxCapacity) {
maxCapacity(maxCapacity);
resetRefCnt();
setIndex0(0, 0);
}
@Override
public final int capacity() {
return length;
}
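// Bytes that can still be written without forcing a reallocation: the buffer may grow in place
// up to min(maxLength, maxCapacity()), see capacity(int).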
@Override
public int maxFastWritableBytes() {
return Math.min(maxLength, maxCapacity()) - writerIndex;
}
@Override
public final ByteBuf capacity(int newCapacity) {
if (newCapacity == length) {
ensureAccessible();
return this;
}
checkNewCapacity(newCapacity);
if (!chunk.unpooled) {
// If the requested capacity does not require reallocation, just update the length of the memory.
if (newCapacity > length) {
if (newCapacity <= maxLength) {
length = newCapacity;
return this;
}
} else if (newCapacity > maxLength >>> 1 &&
(maxLength > 512 || newCapacity > maxLength - 16)) {
// here newCapacity < length
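// Keep the current memory when more than half of the underlying allocation stays in use; for
// small allocations (maxLength <= 512) additionally require that fewer than 16 bytes would be
// left unused. Otherwise fall through and reallocate into a smaller region.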
length = newCapacity;
trimIndicesToCapacity(newCapacity);
return this;
}
}
// Reallocation required.
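// The arena allocates a new memory region, copies the existing contents over and, because the
// last argument is true, frees the old region.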
chunk.arena.reallocate(this, newCapacity, true);
return this;
}
@Override
public final ByteBufAllocator alloc() {
return allocator;
}
@Override
public final ByteOrder order() {
return ByteOrder.BIG_ENDIAN;
}
@Override
public final ByteBuf unwrap() {
return null;
}
@Override
public final ByteBuf retainedDuplicate() {
return PooledDuplicatedByteBuf.newInstance(this, this, readerIndex(), writerIndex());
}
@Override
public final ByteBuf retainedSlice() {
final int index = readerIndex();
return retainedSlice(index, writerIndex() - index);
}
@Override
public final ByteBuf retainedSlice(int index, int length) {
return PooledSlicedByteBuf.newInstance(this, this, index, length);
}
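// Lazily creates and caches a ByteBuffer view over the whole backing memory; later calls reuse
// the cached view after clear()-ing it (position 0, limit = capacity).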
protected final ByteBuffer internalNioBuffer() {
ByteBuffer tmpNioBuf = this.tmpNioBuf;
if (tmpNioBuf == null) {
this.tmpNioBuf = tmpNioBuf = newInternalNioBuffer(memory);
} else {
tmpNioBuf.clear();
}
return tmpNioBuf;
}
protected abstract ByteBuffer newInternalNioBuffer(T memory);
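// Invoked when the reference count drops to zero: hands the memory region back to the arena
// (which may retain it in the thread-local cache) and recycles this wrapper object.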
@Override
protected final void deallocate() {
if (handle >= 0) {
final long handle = this.handle;
this.handle = -1;
memory = null;
chunk.arena.free(chunk, tmpNioBuf, handle, maxLength, cache);
tmpNioBuf = null;
chunk = null;
recycle();
}
}
private void recycle() {
recyclerHandle.recycle(this);
}
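// Translates an index of this buffer into an absolute index within the shared chunk memory.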
protected final int idx(int index) {
return offset + index;
}
final ByteBuffer _internalNioBuffer(int index, int length, boolean duplicate) {
index = idx(index);
ByteBuffer buffer = duplicate ? newInternalNioBuffer(memory) : internalNioBuffer();
buffer.limit(index + length).position(index);
return buffer;
}
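// Unlike internalNioBuffer(int, int), this always builds a fresh ByteBuffer, so the result does
// not alias the shared temporary buffer and can be handed out independently.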
ByteBuffer duplicateInternalNioBuffer(int index, int length) {
checkIndex(index, length);
return _internalNioBuffer(index, length, true);
}
@Override
public final ByteBuffer internalNioBuffer(int index, int length) {
checkIndex(index, length);
return _internalNioBuffer(index, length, false);
}
@Override
public final int nioBufferCount() {
return 1;
}
@Override
public final ByteBuffer nioBuffer(int index, int length) {
return duplicateInternalNioBuffer(index, length).slice();
}
@Override
public final ByteBuffer[] nioBuffers(int index, int length) {
return new ByteBuffer[] { nioBuffer(index, length) };
}
@Override
public final boolean isContiguous() {
return true;
}
@Override
public final int getBytes(int index, GatheringByteChannel out, int length) throws IOException {
return out.write(duplicateInternalNioBuffer(index, length));
}
@Override
public final int readBytes(GatheringByteChannel out, int length) throws IOException {
checkReadableBytes(length);
int readBytes = out.write(_internalNioBuffer(readerIndex, length, false));
readerIndex += readBytes;
return readBytes;
}
@Override
public final int getBytes(int index, FileChannel out, long position, int length) throws IOException {
return out.write(duplicateInternalNioBuffer(index, length), position);
}
@Override
public final int readBytes(FileChannel out, long position, int length) throws IOException {
checkReadableBytes(length);
int readBytes = out.write(_internalNioBuffer(readerIndex, length, false), position);
readerIndex += readBytes;
return readBytes;
}
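// When the source channel is already closed, report -1 (nothing transferred) instead of
// propagating the ClosedChannelException.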
@Override
public final int setBytes(int index, ScatteringByteChannel in, int length) throws IOException {
try {
return in.read(internalNioBuffer(index, length));
} catch (ClosedChannelException ignored) {
return -1;
}
}
@Override
public final int setBytes(int index, FileChannel in, long position, int length) throws IOException {
try {
return in.read(internalNioBuffer(index, length), position);
} catch (ClosedChannelException ignored) {
return -1;
}
}
}