netty5/transport/src/main/java/io/netty/channel/ChannelInitializer.java

/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel;

import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/**
 * A special {@link ChannelInboundHandler} which offers an easy way to initialize a {@link Channel} once it has
 * been registered to its {@link EventLoop}.
 *
 * Implementations are most often used in the context of {@link Bootstrap#handler(ChannelHandler)},
 * {@link ServerBootstrap#handler(ChannelHandler)} and {@link ServerBootstrap#childHandler(ChannelHandler)} to
 * set up the {@link ChannelPipeline} of a {@link Channel}.
*
 * <pre>
 *
 * public class MyChannelInitializer extends {@link ChannelInitializer} {
 *     public void initChannel({@link Channel} channel) {
 *         channel.pipeline().addLast("myHandler", new MyHandler());
 *     }
 * }
 *
 * {@link ServerBootstrap} bootstrap = ...;
 * ...
 * bootstrap.childHandler(new MyChannelInitializer());
 * ...
 * </pre>
* Be aware that this class is marked as {@link Sharable} and so the implementation must be safe to be re-used.
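 * In practice that means a single initializer instance may be asked to initialize many {@link Channel}s, so any
 * handler it adds that is not itself {@link Sharable} should be created freshly inside {@code initChannel(...)}.
 * A sketch (the handler class names are illustrative, not part of Netty):
 * <pre>
 * public class MyChannelInitializer extends {@link ChannelInitializer} {
 *     private final MySharableHandler shared = new MySharableHandler(); // marked {@link Sharable}, one instance is fine
 *
 *     public void initChannel({@link Channel} channel) {
 *         channel.pipeline().addLast(shared);                  // safe: handler is sharable
 *         channel.pipeline().addLast(new MyStatefulHandler()); // stateful: new instance per channel
 *     }
 * }
 * </pre>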
*
* @param <C> A sub-type of {@link Channel}
*/
@Sharable
public abstract class ChannelInitializer<C extends Channel> extends ChannelInboundHandlerAdapter {
    private static final InternalLogger logger = InternalLoggerFactory.getInstance(ChannelInitializer.class);

    // We use a Set as a ChannelInitializer is usually shared between all Channels in a Bootstrap /
    // ServerBootstrap. This way we can reduce the memory usage compared to using Attributes.
    private final Set<ChannelHandlerContext> initMap = Collections.newSetFromMap(
            new ConcurrentHashMap<ChannelHandlerContext, Boolean>());

    /**
     * This method will be called once the {@link Channel} has been registered. After the method returns, this
     * instance will be removed from the {@link ChannelPipeline} of the {@link Channel}.
     *
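     * A typical implementation simply adds the application's handlers in pipeline order. A sketch (the codec
     * and handler class names are illustrative, not part of Netty):
     * <pre>
     * protected void initChannel({@link Channel} ch) {
     *     ch.pipeline().addLast(new MyFrameDecoder());    // split the inbound byte stream into frames
     *     ch.pipeline().addLast(new MyMessageCodec());    // convert frames to/from message objects
     *     ch.pipeline().addLast(new MyBusinessHandler()); // application logic, usually added last
     * }
     * </pre>
     *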
     * @param ch            the {@link Channel} which was registered.
     * @throws Exception    is thrown if an error occurs. In that case it will be handled by
     *                      {@link #exceptionCaught(ChannelHandlerContext, Throwable)} which will by default close
     *                      the {@link Channel}.
     */
    protected abstract void initChannel(C ch) throws Exception;

    @Override
    @SuppressWarnings("unchecked")
    public final void channelRegistered(ChannelHandlerContext ctx) throws Exception {
        // Normally this method will never be called as handlerAdded(...) should call initChannel(...) and remove
        // the handler.
        if (initChannel(ctx)) {
            // We called initChannel(...), so we now need to call pipeline.fireChannelRegistered() to ensure we
            // do not miss an event.
            ctx.pipeline().fireChannelRegistered();

            // We are done initializing the Channel, so remove all of its state now.
            removeState(ctx);
        } else {
            // initChannel(...) was called before, which is the expected behavior, so just forward the event.
            ctx.fireChannelRegistered();
        }
    }

    /**
     * Handle the {@link Throwable} by logging and closing the {@link Channel}. Sub-classes may override this.
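     * An override might, for example, report the failure to application-specific monitoring before closing the
     * {@link Channel}. A sketch ({@code MyMetrics} is illustrative, not part of Netty):
     * <pre>
     * public void exceptionCaught({@link ChannelHandlerContext} ctx, Throwable cause) {
     *     MyMetrics.countInitFailure(cause); // hypothetical monitoring hook
     *     ctx.close();
     * }
     * </pre>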
     */
    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        if (logger.isWarnEnabled()) {
            logger.warn("Failed to initialize a channel. Closing: " + ctx.channel(), cause);
        }
        ctx.close();
    }

    /**
     * {@inheritDoc} If you override this method, ensure you call super!
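     * A sketch of a correct override:
     * <pre>
     * public void handlerAdded({@link ChannelHandlerContext} ctx) throws Exception {
     *     // custom setup logic ...
     *     super.handlerAdded(ctx); // required so initChannel(...) still runs
     * }
     * </pre>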
     */
    @Override
    public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
        if (ctx.channel().isRegistered()) {
            // This should always be true with our current DefaultChannelPipeline implementation.
            // The good thing about calling initChannel(...) in handlerAdded(...) is that there will be no ordering
            // surprises if a ChannelInitializer adds another ChannelInitializer, because all handlers
            // will be added in the expected order.
            if (initChannel(ctx)) {
                // We are done initializing the Channel, so remove the initializer now.
                removeState(ctx);
            }
        }
    }

    @Override
    public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
        initMap.remove(ctx);
    }

    @SuppressWarnings("unchecked")
    private boolean initChannel(ChannelHandlerContext ctx) throws Exception {
        if (initMap.add(ctx)) { // Guard against re-entrance.
            try {
                initChannel((C) ctx.channel());
            } catch (Throwable cause) {
                // Explicitly call exceptionCaught(...) as we removed the handler before calling initChannel(...).
                // We do so to prevent multiple calls to initChannel(...).
                exceptionCaught(ctx, cause);
            } finally {
                ChannelPipeline pipeline = ctx.pipeline();
                if (pipeline.context(this) != null) {
                    pipeline.remove(this);
                }
            }
            return true;
        }
        return false;
    }

    private void removeState(final ChannelHandlerContext ctx) {
        // The removal may happen in an async fashion if the EventExecutor we use does something funky.
        if (ctx.isRemoved()) {
            initMap.remove(ctx);
        } else {
            // The context is not removed yet, which is most likely because a custom EventExecutor is used.
            // Let's schedule the removal on the EventExecutor to give it some more time to complete in case it
            // is offloaded.
            ctx.executor().execute(new Runnable() {
                @Override
                public void run() {
                    initMap.remove(ctx);
                }
            });
        }
    }
}