2008-08-08 00:37:18 +00:00
|
|
|
/*
|
2012-06-04 13:31:44 -07:00
|
|
|
* Copyright 2012 The Netty Project
|
2008-08-08 00:37:18 +00:00
|
|
|
*
|
2011-12-09 14:18:34 +09:00
|
|
|
* The Netty Project licenses this file to you under the Apache License,
|
|
|
|
* version 2.0 (the "License"); you may not use this file except in compliance
|
|
|
|
* with the License. You may obtain a copy of the License at:
|
2008-08-08 00:37:18 +00:00
|
|
|
*
|
2012-06-04 13:31:44 -07:00
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
2008-08-08 01:27:24 +00:00
|
|
|
*
|
2009-08-28 07:15:49 +00:00
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
2011-12-09 14:18:34 +09:00
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
2009-08-28 07:15:49 +00:00
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
2008-08-08 00:37:18 +00:00
|
|
|
*/
|
2011-12-09 12:38:59 +09:00
|
|
|
package io.netty.channel;
|
2008-08-08 00:37:18 +00:00
|
|
|
|
2013-02-07 23:58:21 +09:00
|
|
|
import io.netty.channel.Channel.Unsafe;
|
2013-06-27 10:39:39 +09:00
|
|
|
import io.netty.util.ReferenceCountUtil;
|
2013-03-05 21:41:19 +01:00
|
|
|
import io.netty.util.concurrent.EventExecutor;
|
|
|
|
import io.netty.util.concurrent.EventExecutorGroup;
|
2013-03-14 15:01:35 +09:00
|
|
|
import io.netty.util.internal.PlatformDependent;
|
2013-05-01 17:04:43 +09:00
|
|
|
import io.netty.util.internal.StringUtil;
|
2013-02-26 14:54:25 -08:00
|
|
|
import io.netty.util.internal.logging.InternalLogger;
|
|
|
|
import io.netty.util.internal.logging.InternalLoggerFactory;
|
2012-05-01 17:19:41 +09:00
|
|
|
|
|
|
|
import java.net.SocketAddress;
|
2011-08-02 08:43:10 +09:00
|
|
|
import java.util.ArrayList;
|
2008-08-08 00:37:18 +00:00
|
|
|
import java.util.HashMap;
|
2012-06-01 17:51:19 -07:00
|
|
|
import java.util.IdentityHashMap;
|
2013-02-08 07:10:46 +01:00
|
|
|
import java.util.Iterator;
|
2008-08-08 00:37:18 +00:00
|
|
|
import java.util.LinkedHashMap;
|
2011-08-02 08:43:10 +09:00
|
|
|
import java.util.List;
|
2008-08-08 00:37:18 +00:00
|
|
|
import java.util.Map;
|
|
|
|
import java.util.NoSuchElementException;
|
2013-03-07 12:43:16 +09:00
|
|
|
import java.util.WeakHashMap;
|
2013-03-14 15:01:35 +09:00
|
|
|
import java.util.concurrent.ExecutionException;
|
2012-06-05 11:21:44 +02:00
|
|
|
import java.util.concurrent.Future;
|
2008-08-08 00:37:18 +00:00
|
|
|
|
2008-09-02 07:13:20 +00:00
|
|
|
/**
|
2012-05-01 17:19:41 +09:00
|
|
|
* The default {@link ChannelPipeline} implementation. It is usually created
|
|
|
|
* by a {@link Channel} implementation when the {@link Channel} is created.
|
2008-09-02 07:13:20 +00:00
|
|
|
*/
|
2012-12-23 15:54:14 +01:00
|
|
|
final class DefaultChannelPipeline implements ChannelPipeline {
|
2008-08-08 00:37:18 +00:00
|
|
|
|
2008-08-09 15:05:53 +00:00
|
|
|
static final InternalLogger logger = InternalLoggerFactory.getInstance(DefaultChannelPipeline.class);

// Per-processor striped caches of generated handler names, keyed by handler class.
// Striping reduces lock contention in generateName(); WeakHashMap keys allow
// handler classes to be garbage-collected.
@SuppressWarnings("unchecked")
private static final WeakHashMap<Class<?>, String>[] nameCaches =
        new WeakHashMap[Runtime.getRuntime().availableProcessors()];

static {
    for (int i = 0; i < nameCaches.length; i ++) {
        nameCaches[i] = new WeakHashMap<Class<?>, String>();
    }
}
|
|
|
|
|
2013-08-05 14:58:16 +02:00
|
|
|
final AbstractChannel channel;
|
2012-06-01 18:34:19 -07:00
|
|
|
|
2012-06-07 14:52:33 +09:00
|
|
|
final DefaultChannelHandlerContext head;
|
2013-01-09 19:13:43 +09:00
|
|
|
final DefaultChannelHandlerContext tail;
|
2013-01-07 08:44:16 +01:00
|
|
|
|
2008-08-08 00:37:18 +00:00
|
|
|
private final Map<String, DefaultChannelHandlerContext> name2ctx =
|
|
|
|
new HashMap<String, DefaultChannelHandlerContext>(4);
|
2012-06-04 11:56:00 -07:00
|
|
|
|
2013-11-06 21:14:07 +09:00
|
|
|
final Map<EventExecutorGroup, ChannelHandlerInvoker> childInvokers =
|
|
|
|
new IdentityHashMap<EventExecutorGroup, ChannelHandlerInvoker>();
|
2012-06-06 23:02:47 +09:00
|
|
|
|
2013-08-05 14:58:16 +02:00
|
|
|
public DefaultChannelPipeline(AbstractChannel channel) {
|
2008-08-08 00:37:18 +00:00
|
|
|
if (channel == null) {
|
|
|
|
throw new NullPointerException("channel");
|
|
|
|
}
|
|
|
|
this.channel = channel;
|
2012-06-03 18:51:42 -07:00
|
|
|
|
2013-02-06 12:55:42 +09:00
|
|
|
TailHandler tailHandler = new TailHandler();
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
tail = new DefaultChannelHandlerContext(this, null, generateName(tailHandler), tailHandler);
|
2013-01-05 15:04:25 +09:00
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
HeadHandler headHandler = new HeadHandler(channel.unsafe());
|
|
|
|
head = new DefaultChannelHandlerContext(this, null, generateName(headHandler), headHandler);
|
2013-01-09 20:36:16 +09:00
|
|
|
|
|
|
|
head.next = tail;
|
2013-01-09 19:13:43 +09:00
|
|
|
tail.prev = head;
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-05-01 17:19:41 +09:00
|
|
|
public Channel channel() {
|
|
|
|
return channel;
|
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-06-01 17:51:19 -07:00
|
|
|
public ChannelPipeline addFirst(String name, ChannelHandler handler) {
|
2013-11-06 21:14:07 +09:00
|
|
|
return addFirst((ChannelHandlerInvoker) null, name, handler);
|
2012-06-01 17:51:19 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-11-06 21:14:07 +09:00
|
|
|
public ChannelPipeline addFirst(EventExecutorGroup group, String name, ChannelHandler handler) {
|
|
|
|
return addFirst(findInvoker(group), name, handler);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelPipeline addFirst(ChannelHandlerInvoker invoker, final String name, ChannelHandler handler) {
|
2012-11-12 14:55:05 -08:00
|
|
|
synchronized (this) {
|
|
|
|
checkDuplicateName(name);
|
2013-11-06 21:14:07 +09:00
|
|
|
|
|
|
|
DefaultChannelHandlerContext newCtx =
|
|
|
|
new DefaultChannelHandlerContext(this, invoker, name, handler);
|
|
|
|
|
2013-04-05 15:46:18 +02:00
|
|
|
addFirst0(name, newCtx);
|
2012-11-12 14:55:05 -08:00
|
|
|
}
|
2012-06-05 11:21:44 +02:00
|
|
|
|
2012-11-12 14:55:05 -08:00
|
|
|
return this;
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
2012-06-06 23:02:47 +09:00
|
|
|
|
2013-01-09 19:13:43 +09:00
|
|
|
private void addFirst0(String name, DefaultChannelHandlerContext newCtx) {
|
2013-04-24 19:02:34 +09:00
|
|
|
checkMultiplicity(newCtx);
|
|
|
|
|
2013-01-09 19:13:43 +09:00
|
|
|
DefaultChannelHandlerContext nextCtx = head.next;
|
|
|
|
newCtx.prev = head;
|
|
|
|
newCtx.next = nextCtx;
|
2013-01-09 20:34:22 +09:00
|
|
|
head.next = newCtx;
|
2013-01-09 19:13:43 +09:00
|
|
|
nextCtx.prev = newCtx;
|
2012-11-16 13:30:34 +08:00
|
|
|
|
2012-06-03 18:51:42 -07:00
|
|
|
name2ctx.put(name, newCtx);
|
2012-05-15 14:08:42 +09:00
|
|
|
|
2013-04-24 18:55:51 +09:00
|
|
|
callHandlerAdded(newCtx);
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
2012-06-04 11:56:00 -07:00
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-06-01 17:51:19 -07:00
|
|
|
public ChannelPipeline addLast(String name, ChannelHandler handler) {
|
2013-11-06 21:14:07 +09:00
|
|
|
return addLast((ChannelHandlerInvoker) null, name, handler);
|
2012-06-01 17:51:19 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-11-06 21:14:07 +09:00
|
|
|
public ChannelPipeline addLast(EventExecutorGroup group, String name, ChannelHandler handler) {
|
|
|
|
return addLast(findInvoker(group), name, handler);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelPipeline addLast(ChannelHandlerInvoker invoker, final String name, ChannelHandler handler) {
|
2012-11-12 14:55:05 -08:00
|
|
|
synchronized (this) {
|
|
|
|
checkDuplicateName(name);
|
2012-06-05 11:21:44 +02:00
|
|
|
|
2013-11-06 21:14:07 +09:00
|
|
|
DefaultChannelHandlerContext newCtx =
|
|
|
|
new DefaultChannelHandlerContext(this, invoker, name, handler);
|
|
|
|
|
2013-04-05 15:46:18 +02:00
|
|
|
addLast0(name, newCtx);
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
2012-06-06 23:02:47 +09:00
|
|
|
|
2012-11-12 14:55:05 -08:00
|
|
|
return this;
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
2012-06-04 11:56:00 -07:00
|
|
|
|
2013-04-24 19:02:34 +09:00
|
|
|
private void addLast0(final String name, DefaultChannelHandlerContext newCtx) {
|
|
|
|
checkMultiplicity(newCtx);
|
|
|
|
|
2013-01-09 19:13:43 +09:00
|
|
|
DefaultChannelHandlerContext prev = tail.prev;
|
|
|
|
newCtx.prev = prev;
|
|
|
|
newCtx.next = tail;
|
2013-01-09 20:34:22 +09:00
|
|
|
prev.next = newCtx;
|
2013-01-09 19:13:43 +09:00
|
|
|
tail.prev = newCtx;
|
2013-01-07 08:44:16 +01:00
|
|
|
|
2013-01-09 19:13:43 +09:00
|
|
|
name2ctx.put(name, newCtx);
|
2012-05-15 14:08:42 +09:00
|
|
|
|
2013-04-24 18:55:51 +09:00
|
|
|
callHandlerAdded(newCtx);
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-06-01 17:51:19 -07:00
|
|
|
public ChannelPipeline addBefore(String baseName, String name, ChannelHandler handler) {
|
2013-11-06 21:14:07 +09:00
|
|
|
return addBefore((ChannelHandlerInvoker) null, baseName, name, handler);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelPipeline addBefore(EventExecutorGroup group, String baseName, String name, ChannelHandler handler) {
|
|
|
|
return addBefore(findInvoker(group), baseName, name, handler);
|
2012-06-01 17:51:19 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2012-06-08 19:28:12 +09:00
|
|
|
public ChannelPipeline addBefore(
|
2013-11-06 21:14:07 +09:00
|
|
|
ChannelHandlerInvoker invoker, String baseName, final String name, ChannelHandler handler) {
|
2012-11-12 14:55:05 -08:00
|
|
|
synchronized (this) {
|
2013-04-05 15:46:18 +02:00
|
|
|
DefaultChannelHandlerContext ctx = getContextOrDie(baseName);
|
2013-11-06 21:14:07 +09:00
|
|
|
|
2012-11-12 14:55:05 -08:00
|
|
|
checkDuplicateName(name);
|
2013-11-06 21:14:07 +09:00
|
|
|
|
|
|
|
DefaultChannelHandlerContext newCtx =
|
|
|
|
new DefaultChannelHandlerContext(this, invoker, name, handler);
|
|
|
|
|
2013-04-05 15:46:18 +02:00
|
|
|
addBefore0(name, ctx, newCtx);
|
2012-11-12 14:55:05 -08:00
|
|
|
}
|
|
|
|
return this;
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
|
|
|
|
2012-06-04 11:56:00 -07:00
|
|
|
private void addBefore0(final String name, DefaultChannelHandlerContext ctx, DefaultChannelHandlerContext newCtx) {
|
2013-04-24 19:02:34 +09:00
|
|
|
checkMultiplicity(newCtx);
|
2013-01-09 20:34:22 +09:00
|
|
|
|
|
|
|
newCtx.prev = ctx.prev;
|
|
|
|
newCtx.next = ctx;
|
2012-06-03 18:51:42 -07:00
|
|
|
ctx.prev.next = newCtx;
|
|
|
|
ctx.prev = newCtx;
|
2013-04-24 19:02:34 +09:00
|
|
|
|
2012-06-03 18:51:42 -07:00
|
|
|
name2ctx.put(name, newCtx);
|
2012-05-15 14:08:42 +09:00
|
|
|
|
2013-04-24 18:55:51 +09:00
|
|
|
callHandlerAdded(newCtx);
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
2012-06-04 11:56:00 -07:00
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-06-01 17:51:19 -07:00
|
|
|
public ChannelPipeline addAfter(String baseName, String name, ChannelHandler handler) {
|
2013-11-06 21:14:07 +09:00
|
|
|
return addAfter((ChannelHandlerInvoker) null, baseName, name, handler);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelPipeline addAfter(EventExecutorGroup group, String baseName, String name, ChannelHandler handler) {
|
|
|
|
return addAfter(findInvoker(group), baseName, name, handler);
|
2012-06-01 17:51:19 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2012-06-08 19:28:12 +09:00
|
|
|
public ChannelPipeline addAfter(
|
2013-11-06 21:14:07 +09:00
|
|
|
ChannelHandlerInvoker invoker, String baseName, final String name, ChannelHandler handler) {
|
2012-11-12 14:55:05 -08:00
|
|
|
synchronized (this) {
|
2013-04-05 15:46:18 +02:00
|
|
|
DefaultChannelHandlerContext ctx = getContextOrDie(baseName);
|
2013-11-06 21:14:07 +09:00
|
|
|
|
2012-11-12 14:55:05 -08:00
|
|
|
checkDuplicateName(name);
|
2013-11-06 21:14:07 +09:00
|
|
|
|
|
|
|
DefaultChannelHandlerContext newCtx =
|
|
|
|
new DefaultChannelHandlerContext(this, invoker, name, handler);
|
2012-06-06 23:02:47 +09:00
|
|
|
|
2013-04-05 15:46:18 +02:00
|
|
|
addAfter0(name, ctx, newCtx);
|
2012-11-12 14:55:05 -08:00
|
|
|
}
|
2012-06-06 23:02:47 +09:00
|
|
|
|
2012-11-12 14:55:05 -08:00
|
|
|
return this;
|
2012-05-15 14:08:42 +09:00
|
|
|
}
|
|
|
|
|
2012-06-04 11:56:00 -07:00
|
|
|
private void addAfter0(final String name, DefaultChannelHandlerContext ctx, DefaultChannelHandlerContext newCtx) {
|
2012-06-04 20:34:09 +02:00
|
|
|
checkDuplicateName(name);
|
2013-04-24 19:02:34 +09:00
|
|
|
checkMultiplicity(newCtx);
|
2012-06-04 20:34:09 +02:00
|
|
|
|
2013-01-09 19:13:43 +09:00
|
|
|
newCtx.prev = ctx;
|
|
|
|
newCtx.next = ctx.next;
|
2012-06-04 20:34:09 +02:00
|
|
|
ctx.next.prev = newCtx;
|
|
|
|
ctx.next = newCtx;
|
2013-01-09 19:13:43 +09:00
|
|
|
|
2012-06-04 20:34:09 +02:00
|
|
|
name2ctx.put(name, newCtx);
|
|
|
|
|
2013-04-24 18:55:51 +09:00
|
|
|
callHandlerAdded(newCtx);
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
2012-06-04 11:56:00 -07:00
|
|
|
|
2012-05-15 14:08:42 +09:00
|
|
|
@Override
|
|
|
|
public ChannelPipeline addFirst(ChannelHandler... handlers) {
|
2013-11-06 21:14:07 +09:00
|
|
|
return addFirst((ChannelHandlerInvoker) null, handlers);
|
2012-06-01 17:51:19 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-11-06 21:14:07 +09:00
|
|
|
public ChannelPipeline addFirst(EventExecutorGroup group, ChannelHandler... handlers) {
|
|
|
|
return addFirst(findInvoker(group), handlers);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelPipeline addFirst(ChannelHandlerInvoker invoker, ChannelHandler... handlers) {
|
2012-05-15 14:08:42 +09:00
|
|
|
if (handlers == null) {
|
|
|
|
throw new NullPointerException("handlers");
|
|
|
|
}
|
2012-05-15 14:49:23 +09:00
|
|
|
if (handlers.length == 0 || handlers[0] == null) {
|
2012-05-15 14:08:42 +09:00
|
|
|
return this;
|
|
|
|
}
|
|
|
|
|
|
|
|
int size;
|
|
|
|
for (size = 1; size < handlers.length; size ++) {
|
|
|
|
if (handlers[size] == null) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int i = size - 1; i >= 0; i --) {
|
|
|
|
ChannelHandler h = handlers[i];
|
2013-11-06 21:14:07 +09:00
|
|
|
addFirst(invoker, generateName(h), h);
|
2012-05-15 14:08:42 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
return this;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelPipeline addLast(ChannelHandler... handlers) {
|
2013-11-06 21:14:07 +09:00
|
|
|
return addLast((ChannelHandlerInvoker) null, handlers);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelPipeline addLast(EventExecutorGroup group, ChannelHandler... handlers) {
|
|
|
|
return addLast(findInvoker(group), handlers);
|
2012-06-01 17:51:19 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-11-06 21:14:07 +09:00
|
|
|
public ChannelPipeline addLast(ChannelHandlerInvoker invoker, ChannelHandler... handlers) {
|
2012-05-15 14:08:42 +09:00
|
|
|
if (handlers == null) {
|
|
|
|
throw new NullPointerException("handlers");
|
|
|
|
}
|
|
|
|
|
|
|
|
for (ChannelHandler h: handlers) {
|
|
|
|
if (h == null) {
|
|
|
|
break;
|
|
|
|
}
|
2013-11-06 21:14:07 +09:00
|
|
|
addLast(invoker, generateName(h), h);
|
2012-05-15 14:08:42 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
return this;
|
|
|
|
}
|
|
|
|
|
2013-11-06 21:14:07 +09:00
|
|
|
private ChannelHandlerInvoker findInvoker(EventExecutorGroup group) {
|
|
|
|
if (group == null) {
|
|
|
|
return null;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Pin one of the child executors once and remember it so that the same child executor
|
|
|
|
// is used to fire events for the same channel.
|
|
|
|
ChannelHandlerInvoker invoker = childInvokers.get(group);
|
|
|
|
if (invoker == null) {
|
|
|
|
EventExecutor executor = group.next();
|
|
|
|
if (executor instanceof EventLoop) {
|
|
|
|
invoker = ((EventLoop) executor).asInvoker();
|
|
|
|
} else {
|
|
|
|
invoker = new DefaultChannelHandlerInvoker(executor);
|
|
|
|
}
|
|
|
|
childInvokers.put(group, invoker);
|
|
|
|
}
|
|
|
|
|
|
|
|
return invoker;
|
|
|
|
}
|
|
|
|
|
2013-11-22 19:34:27 +09:00
|
|
|
String generateName(ChannelHandler handler) {
|
2013-03-07 12:43:16 +09:00
|
|
|
WeakHashMap<Class<?>, String> cache = nameCaches[(int) (Thread.currentThread().getId() % nameCaches.length)];
|
|
|
|
Class<?> handlerType = handler.getClass();
|
|
|
|
String name;
|
|
|
|
synchronized (cache) {
|
|
|
|
name = cache.get(handlerType);
|
|
|
|
if (name == null) {
|
2013-05-01 17:04:43 +09:00
|
|
|
name = StringUtil.simpleClassName(handlerType) + "#0";
|
2013-03-07 12:43:16 +09:00
|
|
|
cache.put(handlerType, name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
synchronized (this) {
|
|
|
|
// It's not very likely for a user to put more than one handler of the same type, but make sure to avoid
|
|
|
|
// any name conflicts. Note that we don't cache the names generated here.
|
|
|
|
if (name2ctx.containsKey(name)) {
|
|
|
|
String baseName = name.substring(0, name.length() - 1); // Strip the trailing '0'.
|
|
|
|
for (int i = 1;; i ++) {
|
|
|
|
String newName = baseName + i;
|
|
|
|
if (!name2ctx.containsKey(newName)) {
|
|
|
|
name = newName;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return name;
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-12-14 17:06:31 +01:00
|
|
|
public ChannelPipeline remove(ChannelHandler handler) {
|
2013-04-13 18:19:33 +02:00
|
|
|
remove(getContextOrDie(handler));
|
2012-12-14 17:06:31 +01:00
|
|
|
return this;
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-06-05 11:21:44 +02:00
|
|
|
public ChannelHandler remove(String name) {
|
2013-04-13 18:19:33 +02:00
|
|
|
return remove(getContextOrDie(name)).handler();
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2012-09-21 22:33:11 +02:00
|
|
|
@SuppressWarnings("unchecked")
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-06-05 11:21:44 +02:00
|
|
|
public <T extends ChannelHandler> T remove(Class<T> handlerType) {
|
2013-04-13 18:19:33 +02:00
|
|
|
return (T) remove(getContextOrDie(handlerType)).handler();
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2013-04-13 18:19:33 +02:00
|
|
|
private DefaultChannelHandlerContext remove(final DefaultChannelHandlerContext ctx) {
|
2013-01-09 19:13:43 +09:00
|
|
|
assert ctx != head && ctx != tail;
|
|
|
|
|
2012-11-12 14:55:05 -08:00
|
|
|
DefaultChannelHandlerContext context;
|
2012-11-16 06:04:37 +09:00
|
|
|
Future<?> future;
|
2012-11-12 14:55:05 -08:00
|
|
|
|
|
|
|
synchronized (this) {
|
2013-01-09 19:13:43 +09:00
|
|
|
if (!ctx.channel().isRegistered() || ctx.executor().inEventLoop()) {
|
2013-06-14 10:47:31 +09:00
|
|
|
remove0(ctx);
|
2013-01-09 19:13:43 +09:00
|
|
|
return ctx;
|
2012-11-12 14:55:05 -08:00
|
|
|
} else {
|
2013-01-09 19:13:43 +09:00
|
|
|
future = ctx.executor().submit(new Runnable() {
|
|
|
|
@Override
|
|
|
|
public void run() {
|
|
|
|
synchronized (DefaultChannelPipeline.this) {
|
2013-06-14 10:47:31 +09:00
|
|
|
remove0(ctx);
|
2013-01-09 19:13:43 +09:00
|
|
|
}
|
|
|
|
}
|
|
|
|
});
|
|
|
|
context = ctx;
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
2012-11-12 14:55:05 -08:00
|
|
|
}
|
2012-06-06 23:02:47 +09:00
|
|
|
|
2012-11-12 14:55:05 -08:00
|
|
|
// Run the following 'waiting' code outside of the above synchronized block
|
|
|
|
// in order to avoid deadlock
|
2012-06-06 23:02:47 +09:00
|
|
|
|
2012-11-16 06:04:37 +09:00
|
|
|
waitForFuture(future);
|
2012-06-06 23:02:47 +09:00
|
|
|
|
2012-11-12 14:55:05 -08:00
|
|
|
return context;
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
2012-06-04 11:56:00 -07:00
|
|
|
|
2013-06-14 10:47:31 +09:00
|
|
|
void remove0(DefaultChannelHandlerContext ctx) {
|
2012-06-04 20:34:09 +02:00
|
|
|
DefaultChannelHandlerContext prev = ctx.prev;
|
|
|
|
DefaultChannelHandlerContext next = ctx.next;
|
|
|
|
prev.next = next;
|
|
|
|
next.prev = prev;
|
|
|
|
name2ctx.remove(ctx.name());
|
2013-06-14 10:47:31 +09:00
|
|
|
callHandlerRemoved(ctx);
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
2008-08-08 00:37:18 +00:00
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-06-05 11:21:44 +02:00
|
|
|
public ChannelHandler removeFirst() {
|
2013-01-09 19:13:43 +09:00
|
|
|
if (head.next == tail) {
|
2008-08-18 02:38:54 +00:00
|
|
|
throw new NoSuchElementException();
|
|
|
|
}
|
2013-04-13 18:19:33 +02:00
|
|
|
return remove(head.next).handler();
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-06-05 11:21:44 +02:00
|
|
|
public ChannelHandler removeLast() {
|
2013-01-09 19:13:43 +09:00
|
|
|
if (head.next == tail) {
|
|
|
|
throw new NoSuchElementException();
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
2013-04-13 18:19:33 +02:00
|
|
|
return remove(tail.prev).handler();
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-12-14 17:06:31 +01:00
|
|
|
public ChannelPipeline replace(ChannelHandler oldHandler, String newName, ChannelHandler newHandler) {
|
2013-04-13 18:19:33 +02:00
|
|
|
replace(getContextOrDie(oldHandler), newName, newHandler);
|
2012-12-14 17:06:31 +01:00
|
|
|
return this;
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-06-05 11:21:44 +02:00
|
|
|
public ChannelHandler replace(String oldName, String newName, ChannelHandler newHandler) {
|
2013-04-13 18:19:33 +02:00
|
|
|
return replace(getContextOrDie(oldName), newName, newHandler);
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2008-08-08 00:37:18 +00:00
|
|
|
@SuppressWarnings("unchecked")
|
2012-06-05 11:21:44 +02:00
|
|
|
public <T extends ChannelHandler> T replace(
|
2008-08-08 00:37:18 +00:00
|
|
|
Class<T> oldHandlerType, String newName, ChannelHandler newHandler) {
|
2013-04-13 18:19:33 +02:00
|
|
|
return (T) replace(getContextOrDie(oldHandlerType), newName, newHandler);
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2012-06-08 19:28:12 +09:00
|
|
|
private ChannelHandler replace(
|
2013-01-24 18:58:05 +01:00
|
|
|
final DefaultChannelHandlerContext ctx, final String newName,
|
2013-04-13 18:19:33 +02:00
|
|
|
ChannelHandler newHandler) {
|
2013-01-09 19:13:43 +09:00
|
|
|
|
|
|
|
assert ctx != head && ctx != tail;
|
|
|
|
|
2012-11-16 06:04:37 +09:00
|
|
|
Future<?> future;
|
2012-11-12 14:55:05 -08:00
|
|
|
synchronized (this) {
|
2013-01-09 19:13:43 +09:00
|
|
|
boolean sameName = ctx.name().equals(newName);
|
|
|
|
if (!sameName) {
|
|
|
|
checkDuplicateName(newName);
|
2013-01-07 08:44:16 +01:00
|
|
|
}
|
2012-06-05 11:21:44 +02:00
|
|
|
|
2013-01-09 19:13:43 +09:00
|
|
|
final DefaultChannelHandlerContext newCtx =
|
2013-11-06 21:14:07 +09:00
|
|
|
new DefaultChannelHandlerContext(this, ctx.invoker, newName, newHandler);
|
2012-06-05 11:21:44 +02:00
|
|
|
|
2013-01-09 19:13:43 +09:00
|
|
|
if (!newCtx.channel().isRegistered() || newCtx.executor().inEventLoop()) {
|
2013-04-13 18:19:33 +02:00
|
|
|
replace0(ctx, newName, newCtx);
|
2013-01-09 19:13:43 +09:00
|
|
|
return ctx.handler();
|
|
|
|
} else {
|
|
|
|
future = newCtx.executor().submit(new Runnable() {
|
|
|
|
@Override
|
|
|
|
public void run() {
|
|
|
|
synchronized (DefaultChannelPipeline.this) {
|
2013-04-13 18:19:33 +02:00
|
|
|
replace0(ctx, newName, newCtx);
|
2013-01-09 19:13:43 +09:00
|
|
|
}
|
|
|
|
}
|
|
|
|
});
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
2012-11-12 14:55:05 -08:00
|
|
|
}
|
2012-06-06 23:02:47 +09:00
|
|
|
|
2012-11-12 14:55:05 -08:00
|
|
|
// Run the following 'waiting' code outside of the above synchronized block
|
|
|
|
// in order to avoid deadlock
|
2012-06-05 11:21:44 +02:00
|
|
|
|
2012-11-16 06:04:37 +09:00
|
|
|
waitForFuture(future);
|
2012-11-12 14:55:05 -08:00
|
|
|
|
|
|
|
return ctx.handler();
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
2008-12-01 10:07:54 +00:00
|
|
|
|
2013-04-24 19:25:43 +09:00
|
|
|
private void replace0(DefaultChannelHandlerContext oldCtx, String newName,
|
2013-04-13 18:19:33 +02:00
|
|
|
DefaultChannelHandlerContext newCtx) {
|
2013-04-24 18:55:51 +09:00
|
|
|
checkMultiplicity(newCtx);
|
2008-12-01 10:07:54 +00:00
|
|
|
|
2013-04-24 19:25:43 +09:00
|
|
|
DefaultChannelHandlerContext prev = oldCtx.prev;
|
|
|
|
DefaultChannelHandlerContext next = oldCtx.next;
|
2013-01-09 20:34:22 +09:00
|
|
|
newCtx.prev = prev;
|
|
|
|
newCtx.next = next;
|
2013-04-24 19:25:43 +09:00
|
|
|
|
|
|
|
// Finish the replacement of oldCtx with newCtx in the linked list.
|
|
|
|
// Note that this doesn't mean events will be sent to the new handler immediately
|
|
|
|
// because we are currently at the event handler thread and no more than one handler methods can be invoked
|
|
|
|
// at the same time (we ensured that in replace().)
|
2012-06-04 20:34:09 +02:00
|
|
|
prev.next = newCtx;
|
|
|
|
next.prev = newCtx;
|
|
|
|
|
2013-04-24 19:25:43 +09:00
|
|
|
if (!oldCtx.name().equals(newName)) {
|
|
|
|
name2ctx.remove(oldCtx.name());
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
2012-06-04 20:34:09 +02:00
|
|
|
name2ctx.put(newName, newCtx);
|
2008-12-01 10:07:54 +00:00
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
// update the reference to the replacement so forward of buffered content will work correctly
|
|
|
|
oldCtx.prev = newCtx;
|
|
|
|
oldCtx.next = newCtx;
|
|
|
|
|
2013-04-24 19:25:43 +09:00
|
|
|
// Invoke newHandler.handlerAdded() first (i.e. before oldHandler.handlerRemoved() is invoked)
|
|
|
|
// because callHandlerRemoved() will trigger inboundBufferUpdated() or flush() on newHandler and those
|
|
|
|
// event handlers must be called after handlerAdded().
|
2013-04-24 18:55:51 +09:00
|
|
|
callHandlerAdded(newCtx);
|
2013-06-14 10:47:31 +09:00
|
|
|
callHandlerRemoved(oldCtx);
|
2012-06-04 20:34:09 +02:00
|
|
|
}
|
2012-06-04 11:56:00 -07:00
|
|
|
|
2013-04-24 18:55:51 +09:00
|
|
|
private static void checkMultiplicity(ChannelHandlerContext ctx) {
|
2012-05-31 14:54:48 -07:00
|
|
|
ChannelHandler handler = ctx.handler();
|
2013-02-06 12:55:42 +09:00
|
|
|
if (handler instanceof ChannelHandlerAdapter) {
|
|
|
|
ChannelHandlerAdapter h = (ChannelHandlerAdapter) handler;
|
2012-05-31 14:54:48 -07:00
|
|
|
if (!h.isSharable() && h.added) {
|
2012-12-21 17:10:36 +01:00
|
|
|
throw new ChannelPipelineException(
|
2013-04-24 18:55:51 +09:00
|
|
|
h.getClass().getName() +
|
2012-11-18 23:11:39 +13:00
|
|
|
" is not a @Sharable handler, so can't be added or removed multiple times.");
|
2012-05-31 14:54:48 -07:00
|
|
|
}
|
|
|
|
h.added = true;
|
|
|
|
}
|
2013-04-05 15:46:18 +02:00
|
|
|
}
|
|
|
|
|
2013-11-22 19:34:27 +09:00
|
|
|
private void callHandlerAdded(final DefaultChannelHandlerContext ctx) {
|
|
|
|
if ((ctx.skipFlags & DefaultChannelHandlerContext.MASK_HANDLER_ADDED) != 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-04-05 15:46:18 +02:00
|
|
|
if (ctx.channel().isRegistered() && !ctx.executor().inEventLoop()) {
|
|
|
|
ctx.executor().execute(new Runnable() {
|
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-04-24 18:55:51 +09:00
|
|
|
callHandlerAdded0(ctx);
|
2013-04-05 15:46:18 +02:00
|
|
|
}
|
|
|
|
});
|
|
|
|
return;
|
2008-12-01 10:07:54 +00:00
|
|
|
}
|
2013-04-24 18:55:51 +09:00
|
|
|
callHandlerAdded0(ctx);
|
2008-12-01 10:07:54 +00:00
|
|
|
}
|
|
|
|
|
2013-11-22 19:34:27 +09:00
|
|
|
private void callHandlerAdded0(final DefaultChannelHandlerContext ctx) {
|
2008-12-01 10:07:54 +00:00
|
|
|
try {
|
2013-04-05 15:46:18 +02:00
|
|
|
ctx.handler().handlerAdded(ctx);
|
2008-12-01 10:07:54 +00:00
|
|
|
} catch (Throwable t) {
|
|
|
|
boolean removed = false;
|
|
|
|
try {
|
2013-11-22 19:34:27 +09:00
|
|
|
remove(ctx);
|
2008-12-01 10:07:54 +00:00
|
|
|
removed = true;
|
|
|
|
} catch (Throwable t2) {
|
2012-02-17 10:37:41 +01:00
|
|
|
if (logger.isWarnEnabled()) {
|
2012-05-01 17:19:41 +09:00
|
|
|
logger.warn("Failed to remove a handler: " + ctx.name(), t2);
|
2012-02-17 10:37:41 +01:00
|
|
|
}
|
2008-12-01 10:07:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (removed) {
|
2013-04-05 15:46:18 +02:00
|
|
|
fireExceptionCaught(new ChannelPipelineException(
|
2012-05-01 17:19:41 +09:00
|
|
|
ctx.handler().getClass().getName() +
|
2013-04-24 18:55:51 +09:00
|
|
|
".handlerAdded() has thrown an exception; removed.", t));
|
2008-12-01 10:07:54 +00:00
|
|
|
} else {
|
2013-04-05 15:46:18 +02:00
|
|
|
fireExceptionCaught(new ChannelPipelineException(
|
2012-05-01 17:19:41 +09:00
|
|
|
ctx.handler().getClass().getName() +
|
2013-04-24 18:55:51 +09:00
|
|
|
".handlerAdded() has thrown an exception; also failed to remove.", t));
|
2008-12-01 10:07:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-14 10:47:31 +09:00
|
|
|
private void callHandlerRemoved(final DefaultChannelHandlerContext ctx) {
|
2013-11-22 19:34:27 +09:00
|
|
|
if ((ctx.skipFlags & DefaultChannelHandlerContext.MASK_HANDLER_REMOVED) != 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-08-01 09:54:07 +02:00
|
|
|
if (ctx.channel().isRegistered() && !ctx.executor().inEventLoop()) {
|
|
|
|
ctx.executor().execute(new Runnable() {
|
2013-04-05 15:46:18 +02:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-06-14 10:47:31 +09:00
|
|
|
callHandlerRemoved0(ctx);
|
2013-08-01 09:54:07 +02:00
|
|
|
}
|
2013-04-05 15:46:18 +02:00
|
|
|
});
|
2013-08-01 09:54:07 +02:00
|
|
|
return;
|
2008-12-01 10:07:54 +00:00
|
|
|
}
|
2013-06-14 10:47:31 +09:00
|
|
|
callHandlerRemoved0(ctx);
|
2008-12-01 10:07:54 +00:00
|
|
|
}
|
|
|
|
|
2013-06-14 10:47:31 +09:00
|
|
|
private void callHandlerRemoved0(final DefaultChannelHandlerContext ctx) {
|
2013-04-24 18:55:51 +09:00
|
|
|
// Notify the complete removal.
|
|
|
|
try {
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
ctx.handler().handlerRemoved(ctx);
|
2013-06-14 10:47:31 +09:00
|
|
|
ctx.setRemoved();
|
2008-12-01 10:07:54 +00:00
|
|
|
} catch (Throwable t) {
|
2013-04-05 15:46:18 +02:00
|
|
|
fireExceptionCaught(new ChannelPipelineException(
|
2013-04-24 18:57:14 +09:00
|
|
|
ctx.handler().getClass().getName() + ".handlerRemoved() has thrown an exception.", t));
|
2008-12-01 10:07:54 +00:00
|
|
|
}
|
2013-03-14 15:01:35 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Waits for a future to finish. If the task is interrupted, then the current thread will be interrupted.
|
|
|
|
* It is expected that the task performs any appropriate locking.
|
|
|
|
* <p>
|
|
|
|
* If the internal call throws a {@link Throwable}, but it is not an instance of {@link Error} or
|
|
|
|
* {@link RuntimeException}, then it is wrapped inside a {@link ChannelPipelineException} and that is
|
|
|
|
* thrown instead.</p>
|
|
|
|
*
|
|
|
|
* @param future wait for this future
|
|
|
|
* @see Future#get()
|
|
|
|
* @throws Error if the task threw this.
|
|
|
|
* @throws RuntimeException if the task threw this.
|
|
|
|
* @throws ChannelPipelineException with a {@link Throwable} as a cause, if the task threw another type of
|
|
|
|
* {@link Throwable}.
|
|
|
|
*/
|
|
|
|
private static void waitForFuture(Future<?> future) {
|
|
|
|
try {
|
|
|
|
future.get();
|
|
|
|
} catch (ExecutionException ex) {
|
|
|
|
// In the arbitrary case, we can throw Error, RuntimeException, and Exception
|
|
|
|
PlatformDependent.throwException(ex.getCause());
|
|
|
|
} catch (InterruptedException ex) {
|
|
|
|
// Interrupt the calling thread (note that this method is not called from the event loop)
|
|
|
|
Thread.currentThread().interrupt();
|
2013-01-14 21:49:01 +09:00
|
|
|
}
|
2008-12-01 10:07:54 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-08-28 13:03:41 +09:00
|
|
|
public ChannelHandler first() {
|
2013-05-27 15:45:34 +02:00
|
|
|
ChannelHandlerContext first = firstContext();
|
|
|
|
if (first == null) {
|
2008-08-18 02:27:11 +00:00
|
|
|
return null;
|
|
|
|
}
|
2012-06-03 18:51:42 -07:00
|
|
|
return first.handler();
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-08-28 13:03:41 +09:00
|
|
|
public ChannelHandlerContext firstContext() {
|
2013-05-27 15:45:34 +02:00
|
|
|
DefaultChannelHandlerContext first = head.next;
|
|
|
|
if (first == head) {
|
|
|
|
return null;
|
|
|
|
}
|
2012-08-28 13:03:41 +09:00
|
|
|
return head.next;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelHandler last() {
|
2013-01-09 19:13:43 +09:00
|
|
|
DefaultChannelHandlerContext last = tail.prev;
|
|
|
|
if (last == head) {
|
2008-08-18 02:27:11 +00:00
|
|
|
return null;
|
|
|
|
}
|
2012-06-03 18:51:42 -07:00
|
|
|
return last.handler();
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-08-28 13:03:41 +09:00
|
|
|
public ChannelHandlerContext lastContext() {
|
2013-01-09 19:13:43 +09:00
|
|
|
DefaultChannelHandlerContext last = tail.prev;
|
|
|
|
if (last == head) {
|
2012-08-28 13:03:41 +09:00
|
|
|
return null;
|
|
|
|
}
|
|
|
|
return last;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelHandler get(String name) {
|
|
|
|
ChannelHandlerContext ctx = context(name);
|
2008-08-08 00:37:18 +00:00
|
|
|
if (ctx == null) {
|
|
|
|
return null;
|
|
|
|
} else {
|
2012-05-01 17:19:41 +09:00
|
|
|
return ctx.handler();
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-09-21 22:33:11 +02:00
|
|
|
@SuppressWarnings("unchecked")
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-08-28 13:03:41 +09:00
|
|
|
public <T extends ChannelHandler> T get(Class<T> handlerType) {
|
2012-05-01 17:19:41 +09:00
|
|
|
ChannelHandlerContext ctx = context(handlerType);
|
2008-08-08 00:37:18 +00:00
|
|
|
if (ctx == null) {
|
|
|
|
return null;
|
|
|
|
} else {
|
2012-05-01 17:19:41 +09:00
|
|
|
return (T) ctx.handler();
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-08-28 13:03:41 +09:00
|
|
|
public ChannelHandlerContext context(String name) {
|
2008-08-08 00:37:18 +00:00
|
|
|
if (name == null) {
|
|
|
|
throw new NullPointerException("name");
|
|
|
|
}
|
2012-08-28 13:03:41 +09:00
|
|
|
|
|
|
|
synchronized (this) {
|
|
|
|
return name2ctx.get(name);
|
|
|
|
}
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-08-28 13:03:41 +09:00
|
|
|
public ChannelHandlerContext context(ChannelHandler handler) {
|
2008-08-08 00:37:18 +00:00
|
|
|
if (handler == null) {
|
|
|
|
throw new NullPointerException("handler");
|
|
|
|
}
|
2012-08-28 13:03:41 +09:00
|
|
|
|
|
|
|
DefaultChannelHandlerContext ctx = head.next;
|
2008-08-08 00:37:18 +00:00
|
|
|
for (;;) {
|
2013-01-07 08:44:16 +01:00
|
|
|
|
2012-08-28 13:03:41 +09:00
|
|
|
if (ctx == null) {
|
|
|
|
return null;
|
|
|
|
}
|
|
|
|
|
2012-05-01 17:19:41 +09:00
|
|
|
if (ctx.handler() == handler) {
|
2008-08-08 00:37:18 +00:00
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx = ctx.next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2012-08-28 13:03:41 +09:00
|
|
|
public ChannelHandlerContext context(Class<? extends ChannelHandler> handlerType) {
|
2009-12-17 10:57:57 +00:00
|
|
|
if (handlerType == null) {
|
|
|
|
throw new NullPointerException("handlerType");
|
|
|
|
}
|
|
|
|
|
2012-06-03 18:51:42 -07:00
|
|
|
DefaultChannelHandlerContext ctx = head.next;
|
2008-08-08 00:37:18 +00:00
|
|
|
for (;;) {
|
2012-08-28 13:03:41 +09:00
|
|
|
if (ctx == null) {
|
|
|
|
return null;
|
|
|
|
}
|
2012-05-01 17:19:41 +09:00
|
|
|
if (handlerType.isAssignableFrom(ctx.handler().getClass())) {
|
2008-08-08 00:37:18 +00:00
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
ctx = ctx.next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-08-02 08:43:10 +09:00
|
|
|
@Override
|
2012-05-01 17:19:41 +09:00
|
|
|
public List<String> names() {
|
2011-08-02 08:43:10 +09:00
|
|
|
List<String> list = new ArrayList<String>();
|
2012-06-03 18:51:42 -07:00
|
|
|
DefaultChannelHandlerContext ctx = head.next;
|
2011-08-02 08:43:10 +09:00
|
|
|
for (;;) {
|
|
|
|
if (ctx == null) {
|
2012-08-28 13:03:41 +09:00
|
|
|
return list;
|
2011-08-02 08:43:10 +09:00
|
|
|
}
|
2012-08-28 13:03:41 +09:00
|
|
|
list.add(ctx.name());
|
|
|
|
ctx = ctx.next;
|
2011-08-02 08:43:10 +09:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2008-08-08 00:37:18 +00:00
|
|
|
public Map<String, ChannelHandler> toMap() {
|
|
|
|
Map<String, ChannelHandler> map = new LinkedHashMap<String, ChannelHandler>();
|
2012-06-03 18:51:42 -07:00
|
|
|
DefaultChannelHandlerContext ctx = head.next;
|
2008-08-08 00:37:18 +00:00
|
|
|
for (;;) {
|
2013-01-09 19:13:43 +09:00
|
|
|
if (ctx == tail) {
|
2012-08-28 13:03:41 +09:00
|
|
|
return map;
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
2012-08-28 13:03:41 +09:00
|
|
|
map.put(ctx.name(), ctx.handler());
|
|
|
|
ctx = ctx.next;
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-07 12:43:16 +09:00
|
|
|
@Override
|
|
|
|
public Iterator<Map.Entry<String, ChannelHandler>> iterator() {
|
|
|
|
return toMap().entrySet().iterator();
|
|
|
|
}
|
|
|
|
|
2008-09-03 04:09:46 +00:00
|
|
|
/**
|
|
|
|
* Returns the {@link String} representation of this pipeline.
|
|
|
|
*/
|
|
|
|
@Override
|
|
|
|
public String toString() {
|
|
|
|
StringBuilder buf = new StringBuilder();
|
2013-11-04 19:42:33 +09:00
|
|
|
buf.append(StringUtil.simpleClassName(this));
|
2008-09-03 04:09:46 +00:00
|
|
|
buf.append('{');
|
2012-06-03 18:51:42 -07:00
|
|
|
DefaultChannelHandlerContext ctx = head.next;
|
2008-09-03 04:09:46 +00:00
|
|
|
for (;;) {
|
2013-01-09 19:16:09 +09:00
|
|
|
if (ctx == tail) {
|
2012-08-28 13:03:41 +09:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2008-09-03 04:09:46 +00:00
|
|
|
buf.append('(');
|
2012-05-01 17:19:41 +09:00
|
|
|
buf.append(ctx.name());
|
2008-09-03 04:09:46 +00:00
|
|
|
buf.append(" = ");
|
2012-05-01 17:19:41 +09:00
|
|
|
buf.append(ctx.handler().getClass().getName());
|
2008-09-03 04:09:46 +00:00
|
|
|
buf.append(')');
|
2012-08-28 13:03:41 +09:00
|
|
|
|
2008-09-03 04:09:46 +00:00
|
|
|
ctx = ctx.next;
|
2013-01-09 19:13:43 +09:00
|
|
|
if (ctx == tail) {
|
2008-09-03 04:09:46 +00:00
|
|
|
break;
|
|
|
|
}
|
2012-08-28 13:03:41 +09:00
|
|
|
|
2008-09-03 04:09:46 +00:00
|
|
|
buf.append(", ");
|
|
|
|
}
|
|
|
|
buf.append('}');
|
|
|
|
return buf.toString();
|
|
|
|
}
|
|
|
|
|
2012-05-01 17:19:41 +09:00
|
|
|
@Override
|
2013-02-11 09:44:04 +01:00
|
|
|
public ChannelPipeline fireChannelRegistered() {
|
2012-06-08 23:11:15 +09:00
|
|
|
head.fireChannelRegistered();
|
2013-02-11 09:44:04 +01:00
|
|
|
return this;
|
2012-05-01 17:19:41 +09:00
|
|
|
}
|
|
|
|
|
2013-07-31 20:45:37 +09:00
|
|
|
/**
|
|
|
|
* Removes all handlers from the pipeline one by one from tail (exclusive) to head (inclusive) to trigger
|
|
|
|
* handlerRemoved(). Note that the tail handler is excluded because it's neither an outbound handler nor it
|
|
|
|
* does anything in handlerRemoved().
|
|
|
|
*/
|
|
|
|
private void teardownAll() {
|
|
|
|
tail.prev.teardown();
|
|
|
|
}
|
|
|
|
|
2012-05-01 17:19:41 +09:00
|
|
|
@Override
|
2013-02-11 09:44:04 +01:00
|
|
|
public ChannelPipeline fireChannelActive() {
|
2012-06-08 23:11:15 +09:00
|
|
|
head.fireChannelActive();
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need a close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
2012-12-30 21:53:59 +09:00
|
|
|
|
|
|
|
if (channel.config().isAutoRead()) {
|
|
|
|
channel.read();
|
|
|
|
}
|
|
|
|
|
2013-02-11 09:44:04 +01:00
|
|
|
return this;
|
2012-05-01 17:19:41 +09:00
|
|
|
}
|
2012-06-01 17:51:19 -07:00
|
|
|
|
2012-05-01 17:19:41 +09:00
|
|
|
@Override
|
2013-02-11 09:44:04 +01:00
|
|
|
public ChannelPipeline fireChannelInactive() {
|
2012-06-08 23:11:15 +09:00
|
|
|
head.fireChannelInactive();
|
2013-08-23 14:52:52 -04:00
|
|
|
teardownAll();
|
2013-02-11 09:44:04 +01:00
|
|
|
return this;
|
2012-05-01 17:19:41 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-02-11 09:44:04 +01:00
|
|
|
public ChannelPipeline fireExceptionCaught(Throwable cause) {
|
2012-06-08 23:11:15 +09:00
|
|
|
head.fireExceptionCaught(cause);
|
2013-02-11 09:44:04 +01:00
|
|
|
return this;
|
2012-05-01 17:19:41 +09:00
|
|
|
}
|
2008-08-08 00:37:18 +00:00
|
|
|
|
2012-05-01 17:19:41 +09:00
|
|
|
@Override
|
2013-02-11 09:44:04 +01:00
|
|
|
public ChannelPipeline fireUserEventTriggered(Object event) {
|
2012-06-08 23:11:15 +09:00
|
|
|
head.fireUserEventTriggered(event);
|
2013-02-11 09:44:04 +01:00
|
|
|
return this;
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
2010-11-12 09:45:39 +09:00
|
|
|
@Override
|
2013-07-09 23:09:28 +09:00
|
|
|
public ChannelPipeline fireChannelRead(Object msg) {
|
|
|
|
head.fireChannelRead(msg);
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
return this;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-09 23:09:28 +09:00
|
|
|
public ChannelPipeline fireChannelReadComplete() {
|
|
|
|
head.fireChannelReadComplete();
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need a close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
2012-12-30 21:53:59 +09:00
|
|
|
if (channel.config().isAutoRead()) {
|
2013-01-15 16:23:09 +09:00
|
|
|
read();
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need a close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
2012-12-30 21:53:59 +09:00
|
|
|
}
|
2013-02-11 09:44:04 +01:00
|
|
|
return this;
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need a close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
2012-12-30 21:53:59 +09:00
|
|
|
}
|
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
@Override
|
|
|
|
public ChannelPipeline fireChannelWritabilityChanged() {
|
|
|
|
head.fireChannelWritabilityChanged();
|
|
|
|
return this;
|
|
|
|
}
|
|
|
|
|
2012-05-01 17:19:41 +09:00
|
|
|
@Override
|
2012-05-09 22:09:06 +09:00
|
|
|
public ChannelFuture bind(SocketAddress localAddress) {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.bind(localAddress);
|
2012-05-09 22:09:06 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture connect(SocketAddress remoteAddress) {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.connect(remoteAddress);
|
2012-05-09 22:09:06 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.connect(remoteAddress, localAddress);
|
2012-05-09 22:09:06 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture disconnect() {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.disconnect();
|
2012-05-09 22:09:06 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture close() {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.close();
|
2012-05-09 22:09:06 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-10 13:00:42 +02:00
|
|
|
public ChannelPipeline flush() {
|
|
|
|
tail.flush();
|
|
|
|
return this;
|
2012-05-09 22:09:06 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2012-12-30 17:40:24 +01:00
|
|
|
public ChannelFuture bind(SocketAddress localAddress, ChannelPromise promise) {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.bind(localAddress, promise);
|
2012-05-01 17:19:41 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2012-12-30 17:40:24 +01:00
|
|
|
public ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.connect(remoteAddress, promise);
|
2012-05-01 17:19:41 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2012-12-30 17:40:24 +01:00
|
|
|
public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.connect(remoteAddress, localAddress, promise);
|
2012-05-01 17:19:41 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2012-12-30 17:40:24 +01:00
|
|
|
public ChannelFuture disconnect(ChannelPromise promise) {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.disconnect(promise);
|
2012-05-01 17:19:41 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2012-12-30 17:40:24 +01:00
|
|
|
public ChannelFuture close(ChannelPromise promise) {
|
2013-01-15 16:23:09 +09:00
|
|
|
return tail.close(promise);
|
2012-05-01 17:19:41 +09:00
|
|
|
}
|
2008-08-08 00:37:18 +00:00
|
|
|
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
2012-12-30 21:53:59 +09:00
|
|
|
@Override
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 19:03:40 +09:00
|
|
|
public ChannelPipeline read() {
|
2013-01-15 16:23:09 +09:00
|
|
|
tail.read();
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 19:03:40 +09:00
|
|
|
return this;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-10 13:00:42 +02:00
|
|
|
public ChannelFuture write(Object msg) {
|
|
|
|
return tail.write(msg);
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 19:03:40 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-10 13:00:42 +02:00
|
|
|
public ChannelFuture write(Object msg, ChannelPromise promise) {
|
|
|
|
return tail.write(msg, promise);
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need a close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
2012-12-30 21:53:59 +09:00
|
|
|
}
|
|
|
|
|
2012-05-01 17:19:41 +09:00
|
|
|
@Override
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 19:03:40 +09:00
|
|
|
public ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) {
|
|
|
|
return tail.writeAndFlush(msg, promise);
|
2012-06-01 18:34:19 -07:00
|
|
|
}
|
|
|
|
|
2012-05-01 17:19:41 +09:00
|
|
|
@Override
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 19:03:40 +09:00
|
|
|
public ChannelFuture writeAndFlush(Object msg) {
|
|
|
|
return tail.writeAndFlush(msg);
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
private void checkDuplicateName(String name) {
|
|
|
|
if (name2ctx.containsKey(name)) {
|
2011-11-23 14:07:26 +09:00
|
|
|
throw new IllegalArgumentException("Duplicate handler name: " + name);
|
2008-08-08 00:37:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private DefaultChannelHandlerContext getContextOrDie(String name) {
|
2012-05-01 17:19:41 +09:00
|
|
|
DefaultChannelHandlerContext ctx = (DefaultChannelHandlerContext) context(name);
|
2013-01-09 19:16:09 +09:00
|
|
|
if (ctx == null) {
|
2008-08-08 00:37:18 +00:00
|
|
|
throw new NoSuchElementException(name);
|
|
|
|
} else {
|
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private DefaultChannelHandlerContext getContextOrDie(ChannelHandler handler) {
|
2012-05-01 17:19:41 +09:00
|
|
|
DefaultChannelHandlerContext ctx = (DefaultChannelHandlerContext) context(handler);
|
2013-01-09 19:16:09 +09:00
|
|
|
if (ctx == null) {
|
2008-08-08 00:37:18 +00:00
|
|
|
throw new NoSuchElementException(handler.getClass().getName());
|
|
|
|
} else {
|
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private DefaultChannelHandlerContext getContextOrDie(Class<? extends ChannelHandler> handlerType) {
|
2012-05-01 17:19:41 +09:00
|
|
|
DefaultChannelHandlerContext ctx = (DefaultChannelHandlerContext) context(handlerType);
|
2013-01-09 19:16:09 +09:00
|
|
|
if (ctx == null) {
|
2008-08-08 00:37:18 +00:00
|
|
|
throw new NoSuchElementException(handlerType.getName());
|
|
|
|
} else {
|
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
}
|
2012-06-03 18:51:42 -07:00
|
|
|
|
2013-02-06 12:55:42 +09:00
|
|
|
// A special catch-all handler that handles both bytes and messages.
|
2013-11-22 19:34:27 +09:00
|
|
|
static final class TailHandler extends ChannelHandlerAdapter {
|
2013-02-06 12:55:42 +09:00
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelRegistered(ChannelHandlerContext ctx) throws Exception { }
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelActive(ChannelHandlerContext ctx) throws Exception { }
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelInactive(ChannelHandlerContext ctx) throws Exception { }
|
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
@Override
|
|
|
|
public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception { }
|
|
|
|
|
2013-02-06 12:55:42 +09:00
|
|
|
@Override
|
|
|
|
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { }
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
|
|
|
logger.warn(
|
2013-03-21 20:22:25 +09:00
|
|
|
"An exceptionCaught() event was fired, and it reached at the tail of the pipeline. " +
|
2013-02-06 12:55:42 +09:00
|
|
|
"It usually means the last handler in the pipeline did not handle the exception.", cause);
|
2013-01-07 08:44:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-09 23:09:28 +09:00
|
|
|
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 19:03:40 +09:00
|
|
|
try {
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
logger.debug(
|
|
|
|
"Discarded inbound message {} that reached at the tail of the pipeline. " +
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 19:03:40 +09:00
|
|
|
"Please check your pipeline configuration.", msg);
|
|
|
|
} finally {
|
|
|
|
ReferenceCountUtil.release(msg);
|
2013-06-13 13:23:52 +09:00
|
|
|
}
|
2013-01-07 08:44:16 +01:00
|
|
|
}
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 19:03:40 +09:00
|
|
|
|
|
|
|
@Override
|
2013-07-09 23:09:28 +09:00
|
|
|
public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { }
|
2013-01-07 08:44:16 +01:00
|
|
|
}
|
|
|
|
|
2013-11-22 19:34:27 +09:00
|
|
|
static final class HeadHandler extends ChannelHandlerAdapter {
|
2013-02-06 12:55:42 +09:00
|
|
|
|
2013-02-07 23:58:21 +09:00
|
|
|
// The channel's internal Unsafe; the head handler forwards outbound
// operations (bind/connect/...) to it — see bind()/connect() below.
protected final Unsafe unsafe;

// Creates a head handler bound to the given channel Unsafe.
protected HeadHandler(Unsafe unsafe) {
    this.unsafe = unsafe;
}
|
|
|
|
|
2012-06-03 18:51:42 -07:00
|
|
|
@Override
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
public void bind(
|
2012-12-30 17:40:24 +01:00
|
|
|
ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise promise)
|
2012-06-03 18:51:42 -07:00
|
|
|
throws Exception {
|
2012-12-30 17:40:24 +01:00
|
|
|
unsafe.bind(localAddress, promise);
|
2012-06-03 18:51:42 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
public void connect(
|
2012-06-07 14:52:33 +09:00
|
|
|
ChannelHandlerContext ctx,
|
2012-06-03 18:51:42 -07:00
|
|
|
SocketAddress remoteAddress, SocketAddress localAddress,
|
2012-12-30 17:40:24 +01:00
|
|
|
ChannelPromise promise) throws Exception {
|
|
|
|
unsafe.connect(remoteAddress, localAddress, promise);
|
2012-06-03 18:51:42 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which do not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle is bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
|
2012-12-30 17:40:24 +01:00
|
|
|
unsafe.disconnect(promise);
|
2012-06-03 18:51:42 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
|
2012-12-30 17:40:24 +01:00
|
|
|
unsafe.close(promise);
|
2012-06-03 18:51:42 -07:00
|
|
|
}
|
|
|
|
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need a close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
2012-12-30 21:53:59 +09:00
|
|
|
@Override
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 20:40:19 +09:00
|
|
|
public void read(ChannelHandlerContext ctx) {
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need a close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
2012-12-30 21:53:59 +09:00
|
|
|
unsafe.beginRead();
|
|
|
|
}
|
|
|
|
|
2012-10-24 18:27:26 +02:00
|
|
|
@Override
|
2013-07-10 13:00:42 +02:00
|
|
|
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
|
|
|
|
unsafe.write(msg, promise);
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 19:03:40 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-10 13:00:42 +02:00
|
|
|
public void flush(ChannelHandlerContext ctx) throws Exception {
|
2013-07-12 18:45:24 +02:00
|
|
|
unsafe.flush();
|
2013-01-05 15:04:25 +09:00
|
|
|
}
|
|
|
|
}
|
2012-06-08 10:57:38 +09:00
|
|
|
}
|