netty5/transport/src/main/java/io/netty/channel/DefaultChannelHandlerContext.java

/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.ChannelHandler.Skip;
import io.netty.util.Attribute;
import io.netty.util.AttributeKey;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.ResourceLeakHint;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
import java.net.SocketAddress;
import java.util.WeakHashMap;
final class DefaultChannelHandlerContext implements ChannelHandlerContext, ResourceLeakHint {
    // This class keeps an int field 'skipFlags', each bit of which tells whether the corresponding
    // handler method is annotated with @Skip. 'skipFlags' is computed at runtime via the reflection
    // API and cached per handler type.
    // The following constants signify which bit of 'skipFlags' corresponds to which handler method:
static final int MASK_HANDLER_ADDED = 1;
static final int MASK_HANDLER_REMOVED = 1 << 1;
private static final int MASK_EXCEPTION_CAUGHT = 1 << 2;
private static final int MASK_CHANNEL_REGISTERED = 1 << 3;
private static final int MASK_CHANNEL_UNREGISTERED = 1 << 4;
private static final int MASK_CHANNEL_ACTIVE = 1 << 5;
private static final int MASK_CHANNEL_INACTIVE = 1 << 6;
private static final int MASK_CHANNEL_READ = 1 << 7;
private static final int MASK_CHANNEL_READ_COMPLETE = 1 << 8;
private static final int MASK_CHANNEL_WRITABILITY_CHANGED = 1 << 9;
private static final int MASK_USER_EVENT_TRIGGERED = 1 << 10;
private static final int MASK_BIND = 1 << 11;
private static final int MASK_CONNECT = 1 << 12;
private static final int MASK_DISCONNECT = 1 << 13;
private static final int MASK_CLOSE = 1 << 14;
private static final int MASK_DEREGISTER = 1 << 15;
private static final int MASK_READ = 1 << 16;
private static final int MASK_WRITE = 1 << 17;
private static final int MASK_FLUSH = 1 << 18;
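    // Illustrative sketch (not part of this class): a handler is stepped over for an event when the
    // corresponding bit in its skipFlags is set. Assuming a base adapter class whose no-op callbacks
    // are annotated with @Skip, a handler that overrides only channelRead() keeps the inherited,
    // @Skip-annotated channelActive(), so findContextInbound(MASK_CHANNEL_ACTIVE) will not stop at
    // its context:
    //
    //     class LoggingHandler extends ChannelHandlerAdapter {
    //         @Override
    //         public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    //             System.out.println("read: " + msg);
    //             ctx.fireChannelRead(msg); // forward the message to the next handler
    //         }
    //     }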
    /**
     * Caches the result of the costly computation of {@link #skipFlags} in partitioned,
     * synchronized {@link WeakHashMap}s to reduce lock contention between threads.
     */
@SuppressWarnings("unchecked")
private static final WeakHashMap<Class<?>, Integer>[] skipFlagsCache =
new WeakHashMap[Runtime.getRuntime().availableProcessors()];
static {
for (int i = 0; i < skipFlagsCache.length; i ++) {
skipFlagsCache[i] = new WeakHashMap<Class<?>, Integer>();
}
}
    /**
     * Returns an integer bitset that tells which handler methods are annotated with {@link Skip}.
     * It gets the value from {@link #skipFlagsCache} if a handler of the same type was queried before.
     * Otherwise, it delegates to {@link #skipFlags0(Class)} to compute it.
     */
private static int skipFlags(ChannelHandler handler) {
WeakHashMap<Class<?>, Integer> cache =
skipFlagsCache[(int) (Thread.currentThread().getId() % skipFlagsCache.length)];
Class<? extends ChannelHandler> handlerType = handler.getClass();
int flagsVal;
synchronized (cache) {
Integer flags = cache.get(handlerType);
if (flags != null) {
flagsVal = flags;
} else {
flagsVal = skipFlags0(handlerType);
cache.put(handlerType, Integer.valueOf(flagsVal));
}
}
return flagsVal;
}
/**
* Determines the {@link #skipFlags} of the specified {@code handlerType} using the reflection API.
*/
private static int skipFlags0(Class<? extends ChannelHandler> handlerType) {
int flags = 0;
try {
if (handlerType.getMethod(
"handlerAdded", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_HANDLER_ADDED;
}
if (handlerType.getMethod(
"handlerRemoved", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_HANDLER_REMOVED;
}
if (handlerType.getMethod(
"exceptionCaught", ChannelHandlerContext.class, Throwable.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_EXCEPTION_CAUGHT;
}
if (handlerType.getMethod(
"channelRegistered", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_CHANNEL_REGISTERED;
}
if (handlerType.getMethod(
"channelUnregistered", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_CHANNEL_UNREGISTERED;
}
if (handlerType.getMethod(
"channelActive", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_CHANNEL_ACTIVE;
}
if (handlerType.getMethod(
"channelInactive", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_CHANNEL_INACTIVE;
}
if (handlerType.getMethod(
"channelRead", ChannelHandlerContext.class, Object.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_CHANNEL_READ;
}
if (handlerType.getMethod(
"channelReadComplete", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_CHANNEL_READ_COMPLETE;
}
if (handlerType.getMethod(
"channelWritabilityChanged", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_CHANNEL_WRITABILITY_CHANGED;
}
if (handlerType.getMethod(
"userEventTriggered", ChannelHandlerContext.class, Object.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_USER_EVENT_TRIGGERED;
}
if (handlerType.getMethod(
"bind", ChannelHandlerContext.class,
SocketAddress.class, ChannelPromise.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_BIND;
}
if (handlerType.getMethod(
"connect", ChannelHandlerContext.class, SocketAddress.class, SocketAddress.class,
ChannelPromise.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_CONNECT;
}
if (handlerType.getMethod(
"disconnect", ChannelHandlerContext.class, ChannelPromise.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_DISCONNECT;
}
if (handlerType.getMethod(
"close", ChannelHandlerContext.class, ChannelPromise.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_CLOSE;
}
if (handlerType.getMethod(
"deregister", ChannelHandlerContext.class, ChannelPromise.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_DEREGISTER;
}
if (handlerType.getMethod(
"read", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_READ;
}
if (handlerType.getMethod(
"write", ChannelHandlerContext.class,
Object.class, ChannelPromise.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_WRITE;
// flush() is skipped only when write() is also skipped to avoid the situation where
// flush() is handled by the event loop before write() in staged execution.
if (handlerType.getMethod(
"flush", ChannelHandlerContext.class).isAnnotationPresent(Skip.class)) {
flags |= MASK_FLUSH;
}
}
} catch (Exception e) {
// Should never reach here.
PlatformDependent.throwException(e);
}
return flags;
}
volatile DefaultChannelHandlerContext next;
volatile DefaultChannelHandlerContext prev;
private final AbstractChannel channel;
private final DefaultChannelPipeline pipeline;
private final String name;
private final ChannelHandler handler;
private boolean removed;
final int skipFlags;
    // Will be null if the handler should use the channel's default invoker; otherwise it is set to
    // the handler-specific invoker (typically backed by a child executor).
final ChannelHandlerInvoker invoker;
private ChannelFuture succeededFuture;
    // Lazily instantiated tasks used to trigger events to a handler that uses a different executor.
    // These need to be volatile as otherwise another thread may see a half-initialized instance.
    // See the JMM for more details.
volatile Runnable invokeChannelReadCompleteTask;
volatile Runnable invokeReadTask;
volatile Runnable invokeFlushTask;
volatile Runnable invokeChannelWritableStateChangedTask;
DefaultChannelHandlerContext(
DefaultChannelPipeline pipeline, ChannelHandlerInvoker invoker, String name, ChannelHandler handler) {
if (name == null) {
throw new NullPointerException("name");
}
if (handler == null) {
throw new NullPointerException("handler");
}
channel = pipeline.channel;
this.pipeline = pipeline;
this.name = name;
this.handler = handler;
this.invoker = invoker;
skipFlags = skipFlags(handler);
}
    /** Invocation initiated by {@link DefaultChannelPipeline#teardownAll()}. */
void teardown() {
EventExecutor executor = executor();
if (executor.inEventLoop()) {
teardown0();
} else {
executor.execute(new Runnable() {
@Override
public void run() {
teardown0();
}
});
}
}
private void teardown0() {
DefaultChannelHandlerContext prev = this.prev;
if (prev != null) {
synchronized (pipeline) {
pipeline.remove0(this);
}
prev.teardown();
}
}
@Override
public Channel channel() {
return channel;
}
@Override
public ChannelPipeline pipeline() {
return pipeline;
}
@Override
public ByteBufAllocator alloc() {
return channel().config().getAllocator();
}
@Override
public EventExecutor executor() {
return invoker().executor();
}
@Override
public ChannelHandlerInvoker invoker() {
if (invoker == null) {
return channel.unsafe().invoker();
}
return invoker;
}
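    // Usage sketch (hypothetical setup, not part of this class): the context falls back to the
    // channel's default invoker unless the handler was registered with its own executor, e.g. via
    // the ChannelPipeline.addLast(EventExecutorGroup, String, ChannelHandler) overload:
    //
    //     EventExecutorGroup blockingGroup = new DefaultEventExecutorGroup(16);
    //     ch.pipeline().addLast(blockingGroup, "blockingHandler", new BlockingHandler());
    //
    // For such a context, executor() returns an executor taken from 'blockingGroup' instead of the
    // channel's event loop, so blocking work in the handler does not stall the I/O thread.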
@Override
public ChannelHandler handler() {
return handler;
}
@Override
public String name() {
return name;
}
@Override
public <T> Attribute<T> attr(AttributeKey<T> key) {
return channel.attr(key);
}
@Override
public ChannelHandlerContext fireChannelRegistered() {
DefaultChannelHandlerContext next = findContextInbound(MASK_CHANNEL_REGISTERED);
next.invoker().invokeChannelRegistered(next);
return this;
}
@Override
public ChannelHandlerContext fireChannelUnregistered() {
DefaultChannelHandlerContext next = findContextInbound(MASK_CHANNEL_UNREGISTERED);
next.invoker().invokeChannelUnregistered(next);
return this;
}
@Override
public ChannelHandlerContext fireChannelActive() {
DefaultChannelHandlerContext next = findContextInbound(MASK_CHANNEL_ACTIVE);
next.invoker().invokeChannelActive(next);
return this;
}
@Override
public ChannelHandlerContext fireChannelInactive() {
DefaultChannelHandlerContext next = findContextInbound(MASK_CHANNEL_INACTIVE);
next.invoker().invokeChannelInactive(next);
return this;
}
@Override
public ChannelHandlerContext fireExceptionCaught(Throwable cause) {
DefaultChannelHandlerContext next = findContextInbound(MASK_EXCEPTION_CAUGHT);
next.invoker().invokeExceptionCaught(next, cause);
return this;
}
@Override
public ChannelHandlerContext fireUserEventTriggered(Object event) {
DefaultChannelHandlerContext next = findContextInbound(MASK_USER_EVENT_TRIGGERED);
next.invoker().invokeUserEventTriggered(next, event);
return this;
}
@Override
public ChannelHandlerContext fireChannelRead(Object msg) {
DefaultChannelHandlerContext next = findContextInbound(MASK_CHANNEL_READ);
ReferenceCountUtil.touch(msg, next);
next.invoker().invokeChannelRead(next, msg);
return this;
}
@Override
public ChannelHandlerContext fireChannelReadComplete() {
DefaultChannelHandlerContext next = findContextInbound(MASK_CHANNEL_READ_COMPLETE);
next.invoker().invokeChannelReadComplete(next);
return this;
}
@Override
public ChannelHandlerContext fireChannelWritabilityChanged() {
DefaultChannelHandlerContext next = findContextInbound(MASK_CHANNEL_WRITABILITY_CHANGED);
next.invoker().invokeChannelWritabilityChanged(next);
return this;
}
@Override
public ChannelFuture bind(SocketAddress localAddress) {
return bind(localAddress, newPromise());
}
@Override
public ChannelFuture connect(SocketAddress remoteAddress) {
return connect(remoteAddress, newPromise());
}
@Override
public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) {
return connect(remoteAddress, localAddress, newPromise());
}
@Override
public ChannelFuture disconnect() {
return disconnect(newPromise());
}
@Override
public ChannelFuture close() {
return close(newPromise());
}
@Override
public ChannelFuture deregister() {
return deregister(newPromise());
}
@Override
public ChannelFuture bind(final SocketAddress localAddress, final ChannelPromise promise) {
DefaultChannelHandlerContext next = findContextOutbound(MASK_BIND);
next.invoker().invokeBind(next, localAddress, promise);
return promise;
}
@Override
public ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) {
return connect(remoteAddress, null, promise);
}
@Override
public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) {
DefaultChannelHandlerContext next = findContextOutbound(MASK_CONNECT);
next.invoker().invokeConnect(next, remoteAddress, localAddress, promise);
return promise;
}
@Override
public ChannelFuture disconnect(ChannelPromise promise) {
if (!channel().metadata().hasDisconnect()) {
return close(promise);
}
DefaultChannelHandlerContext next = findContextOutbound(MASK_DISCONNECT);
next.invoker().invokeDisconnect(next, promise);
return promise;
}
@Override
public ChannelFuture close(ChannelPromise promise) {
DefaultChannelHandlerContext next = findContextOutbound(MASK_CLOSE);
next.invoker().invokeClose(next, promise);
return promise;
}
@Override
public ChannelFuture deregister(ChannelPromise promise) {
DefaultChannelHandlerContext next = findContextOutbound(MASK_DEREGISTER);
next.invoker().invokeDeregister(next, promise);
return promise;
}
@Override
public ChannelHandlerContext read() {
DefaultChannelHandlerContext next = findContextOutbound(MASK_READ);
next.invoker().invokeRead(next);
return this;
}
@Override
public ChannelFuture write(Object msg) {
return write(msg, newPromise());
}
@Override
public ChannelFuture write(Object msg, ChannelPromise promise) {
DefaultChannelHandlerContext next = findContextOutbound(MASK_WRITE);
ReferenceCountUtil.touch(msg, next);
next.invoker().invokeWrite(next, msg, promise);
return promise;
}
@Override
public ChannelHandlerContext flush() {
DefaultChannelHandlerContext next = findContextOutbound(MASK_FLUSH);
next.invoker().invokeFlush(next);
return this;
}
@Override
public ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) {
DefaultChannelHandlerContext next;
next = findContextOutbound(MASK_WRITE);
ReferenceCountUtil.touch(msg, next);
next.invoker().invokeWrite(next, msg, promise);
next = findContextOutbound(MASK_FLUSH);
next.invoker().invokeFlush(next);
return promise;
}
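    // Note (illustrative, not part of this class): outbound operations invoked on a context start at
    // the previous outbound handler and walk towards the head of the pipeline, skipping handlers
    // whose write()/flush() carry @Skip. Invoking the same operation on the channel starts at the
    // tail instead:
    //
    //     ctx.writeAndFlush(msg);           // traverses only the handlers placed before this one
    //     ctx.channel().writeAndFlush(msg); // traverses every outbound handler in the pipeline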
@Override
public ChannelFuture writeAndFlush(Object msg) {
return writeAndFlush(msg, newPromise());
}
@Override
public ChannelPromise newPromise() {
return new DefaultChannelPromise(channel(), executor());
}
@Override
public ChannelProgressivePromise newProgressivePromise() {
return new DefaultChannelProgressivePromise(channel(), executor());
}
@Override
public ChannelFuture newSucceededFuture() {
ChannelFuture succeededFuture = this.succeededFuture;
if (succeededFuture == null) {
this.succeededFuture = succeededFuture = new SucceededChannelFuture(channel(), executor());
}
return succeededFuture;
}
@Override
public ChannelFuture newFailedFuture(Throwable cause) {
return new FailedChannelFuture(channel(), executor(), cause);
}
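    // Walks towards the tail, starting at the next context, and returns the first context whose
    // handler does not skip the inbound method identified by the given mask.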
private DefaultChannelHandlerContext findContextInbound(int mask) {
DefaultChannelHandlerContext ctx = this;
do {
ctx = ctx.next;
} while ((ctx.skipFlags & mask) != 0);
return ctx;
}
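    // Walks towards the head, starting at the previous context, and returns the first context whose
    // handler does not skip the outbound method identified by the given mask.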
private DefaultChannelHandlerContext findContextOutbound(int mask) {
DefaultChannelHandlerContext ctx = this;
do {
ctx = ctx.prev;
} while ((ctx.skipFlags & mask) != 0);
return ctx;
}
@Override
public ChannelPromise voidPromise() {
return channel.voidPromise();
}
void setRemoved() {
removed = true;
}
@Override
public boolean isRemoved() {
return removed;
}
@Override
public String toHintString() {
return '\'' + name + "' will handle the message from this point.";
}
@Override
public String toString() {
return StringUtil.simpleClassName(ChannelHandlerContext.class) + '(' + name + ", " + channel + ')';
}
}