/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.channel;

import io.netty.buffer.ByteBufAllocator;
import io.netty.util.Attribute;
import io.netty.util.AttributeKey;
import io.netty.util.Recycler;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.ResourceLeakHint;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.EventExecutorGroup;
import io.netty.util.internal.StringUtil;

import java.net.SocketAddress;

import static io.netty.channel.DefaultChannelPipeline.*;

final class DefaultChannelHandlerContext implements ChannelHandlerContext, ResourceLeakHint {

    volatile DefaultChannelHandlerContext next;
    volatile DefaultChannelHandlerContext prev;

    private final boolean inbound;
    private final boolean outbound;
    private final AbstractChannel channel;
    private final DefaultChannelPipeline pipeline;
    private final String name;
    private final ChannelHandler handler;
    private boolean removed;

    // Set to null if no child executor should be used; otherwise set to the child executor.
    final EventExecutor executor;
    private ChannelFuture succeededFuture;

    // Lazily instantiated tasks used to trigger events to a handler with a different executor.
    // These need to be volatile; otherwise another thread may see a half-initialized instance.
    // See the Java Memory Model (JMM) for details.
    private volatile Runnable invokeChannelReadCompleteTask;
    private volatile Runnable invokeReadTask;
    private volatile Runnable invokeChannelWritableStateChangedTask;
    private volatile Runnable invokeFlushTask;

    DefaultChannelHandlerContext(DefaultChannelPipeline pipeline, EventExecutorGroup group, String name,
                                 ChannelHandler handler) {
        if (name == null) {
            throw new NullPointerException("name");
        }
        if (handler == null) {
            throw new NullPointerException("handler");
        }

        channel = pipeline.channel;
        this.pipeline = pipeline;
        this.name = name;
        this.handler = handler;

        if (group != null) {
            // Pin one of the child executors once and remember it so that the same child executor
            // is used to fire events for the same channel.
            EventExecutor childExecutor = pipeline.childExecutors.get(group);
            if (childExecutor == null) {
                childExecutor = group.next();
                pipeline.childExecutors.put(group, childExecutor);
            }
            executor = childExecutor;
        } else {
            executor = null;
        }

        inbound = handler instanceof ChannelInboundHandler;
        outbound = handler instanceof ChannelOutboundHandler;
    }
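
    // Design note: pinning a single child executor per EventExecutorGroup means every
    // handler of this channel that was added with the same group shares one executor, so
    // their callbacks are serialized just as they would be on the channel's event loop.
    // A handler would typically opt in via pipeline.addLast(group, name, handler).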

    /** Invocation initiated by {@link DefaultChannelPipeline#teardownAll()}. */
    void teardown() {
        EventExecutor executor = executor();
        if (executor.inEventLoop()) {
            teardown0();
        } else {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    teardown0();
                }
            });
        }
    }
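
    // Removes this context from the pipeline, then walks backwards towards the head,
    // tearing down each predecessor on that predecessor's own executor.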
    private void teardown0() {
        DefaultChannelHandlerContext prev = this.prev;
        if (prev != null) {
            synchronized (pipeline) {
                pipeline.remove0(this);
            }
            prev.teardown();
        }
    }

    @Override
    public Channel channel() {
        return channel;
    }

    @Override
    public ChannelPipeline pipeline() {
        return pipeline;
    }

    @Override
    public ByteBufAllocator alloc() {
        return channel().config().getAllocator();
    }
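
    // Events for this context run on the pinned child executor if one was chosen at
    // construction time, and on the channel's own event loop otherwise.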
    @Override
    public EventExecutor executor() {
        if (executor == null) {
            return channel().eventLoop();
        } else {
            return executor;
        }
    }

    @Override
    public ChannelHandler handler() {
        return handler;
    }

    @Override
    public String name() {
        return name;
    }

    @Override
    public <T> Attribute<T> attr(AttributeKey<T> key) {
        return channel.attr(key);
    }

    @Override
    public ChannelHandlerContext fireChannelRegistered() {
        final DefaultChannelHandlerContext next = findContextInbound();
        EventExecutor executor = next.executor();
        if (executor.inEventLoop()) {
            next.invokeChannelRegistered();
        } else {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    next.invokeChannelRegistered();
                }
            });
        }
        return this;
    }
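
    // All fire* methods follow the same pattern as above: find the next context that can
    // handle the event, invoke its handler directly if we are already on that context's
    // executor, and otherwise submit the invocation to that executor. This keeps each
    // handler's callbacks on a single thread without requiring locking in user code.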

    private void invokeChannelRegistered() {
        try {
            ((ChannelInboundHandler) handler).channelRegistered(this);
        } catch (Throwable t) {
            notifyHandlerException(t);
        }
    }

    @Override
    public ChannelHandlerContext fireChannelUnregistered() {
        final DefaultChannelHandlerContext next = findContextInbound();
        EventExecutor executor = next.executor();
        if (executor.inEventLoop()) {
            next.invokeChannelUnregistered();
        } else {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    next.invokeChannelUnregistered();
                }
            });
        }
        return this;
    }

    @SuppressWarnings("deprecation")
    private void invokeChannelUnregistered() {
        try {
            ((ChannelInboundHandler) handler).channelUnregistered(this);
        } catch (Throwable t) {
            notifyHandlerException(t);
        }
    }

    @Override
    public ChannelHandlerContext fireChannelActive() {
        final DefaultChannelHandlerContext next = findContextInbound();
        EventExecutor executor = next.executor();
        if (executor.inEventLoop()) {
            next.invokeChannelActive();
        } else {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    next.invokeChannelActive();
                }
            });
        }
        return this;
    }

    private void invokeChannelActive() {
        try {
            ((ChannelInboundHandler) handler).channelActive(this);
        } catch (Throwable t) {
            notifyHandlerException(t);
        }
    }

    @Override
    public ChannelHandlerContext fireChannelInactive() {
        final DefaultChannelHandlerContext next = findContextInbound();
        EventExecutor executor = next.executor();
        if (executor.inEventLoop()) {
            next.invokeChannelInactive();
        } else {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    next.invokeChannelInactive();
                }
            });
        }
        return this;
    }

    private void invokeChannelInactive() {
        try {
            ((ChannelInboundHandler) handler).channelInactive(this);
        } catch (Throwable t) {
            notifyHandlerException(t);
        }
    }

    @Override
    public ChannelHandlerContext fireExceptionCaught(final Throwable cause) {
        if (cause == null) {
            throw new NullPointerException("cause");
        }
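
        // Note that exceptionCaught is forwarded to the immediate next context rather
        // than the next inbound one, so outbound handlers observe exceptions as well.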
        final DefaultChannelHandlerContext next = this.next;

        EventExecutor executor = next.executor();
        if (executor.inEventLoop()) {
            next.invokeExceptionCaught(cause);
        } else {
            try {
                executor.execute(new Runnable() {
                    @Override
                    public void run() {
                        next.invokeExceptionCaught(cause);
                    }
                });
            } catch (Throwable t) {
                if (logger.isWarnEnabled()) {
                    logger.warn("Failed to submit an exceptionCaught() event.", t);
                    logger.warn("The exceptionCaught() event that failed to submit was:", cause);
                }
            }
        }

        return this;
    }
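
    // If the handler's own exceptionCaught() throws, the secondary exception is only
    // logged; propagating it would risk an infinite exceptionCaught loop.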
    @SuppressWarnings("deprecation")
    private void invokeExceptionCaught(final Throwable cause) {
        try {
            handler.exceptionCaught(this, cause);
        } catch (Throwable t) {
            if (logger.isWarnEnabled()) {
                logger.warn(
                        "An exception was thrown by a user handler's " +
                        "exceptionCaught() method while handling the following exception:", cause);
            }
        }
    }

    @Override
    public ChannelHandlerContext fireUserEventTriggered(final Object event) {
        if (event == null) {
            throw new NullPointerException("event");
        }

        final DefaultChannelHandlerContext next = findContextInbound();
        EventExecutor executor = next.executor();
        if (executor.inEventLoop()) {
            next.invokeUserEventTriggered(event);
        } else {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    next.invokeUserEventTriggered(event);
                }
            });
        }
        return this;
    }

    private void invokeUserEventTriggered(Object event) {
        try {
            ((ChannelInboundHandler) handler).userEventTriggered(this, event);
        } catch (Throwable t) {
            notifyHandlerException(t);
        }
    }

    @Override
    public ChannelHandlerContext fireChannelRead(final Object msg) {
        if (msg == null) {
            throw new NullPointerException("msg");
        }

        final DefaultChannelHandlerContext next = findContextInbound();
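        // Records 'next' as a hint in the message's leak-tracking record (if leak
        // detection is enabled and the message is reference-counted).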
        ReferenceCountUtil.touch(msg, next);
        EventExecutor executor = next.executor();
        if (executor.inEventLoop()) {
            next.invokeChannelRead(msg);
        } else {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    next.invokeChannelRead(msg);
                }
            });
        }
        return this;
    }

    private void invokeChannelRead(Object msg) {
        try {
            ((ChannelInboundHandler) handler).channelRead(this, msg);
        } catch (Throwable t) {
            notifyHandlerException(t);
        }
    }

    @Override
    public ChannelHandlerContext fireChannelReadComplete() {
        final DefaultChannelHandlerContext next = findContextInbound();
        EventExecutor executor = next.executor();
        if (executor.inEventLoop()) {
            next.invokeChannelReadComplete();
        } else {
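            // channelReadComplete carries no arguments and is fired after every read
            // batch, so the Runnable is cached on the context instead of being
            // allocated per event.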
            Runnable task = next.invokeChannelReadCompleteTask;
            if (task == null) {
                next.invokeChannelReadCompleteTask = task = new Runnable() {
                    @Override
                    public void run() {
                        next.invokeChannelReadComplete();
                    }
                };
            }
            executor.execute(task);
        }
        return this;
    }

    private void invokeChannelReadComplete() {
        try {
            ((ChannelInboundHandler) handler).channelReadComplete(this);
        } catch (Throwable t) {
            notifyHandlerException(t);
        }
    }
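
    // Fired when Channel.isWritable() changes state, e.g. after the outbound buffer
    // crosses its high or low water mark.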
|
|
|
|
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need a close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
2012-12-30 13:53:59 +01:00
|
|
|
@Override
|
2013-05-28 13:40:19 +02:00
|
|
|
public ChannelHandlerContext fireChannelWritabilityChanged() {
|
2013-01-15 08:23:09 +01:00
|
|
|
final DefaultChannelHandlerContext next = findContextInbound();
|
2013-02-06 04:55:42 +01:00
|
|
|
EventExecutor executor = next.executor();
|
2013-02-08 17:05:33 +01:00
|
|
|
if (executor.inEventLoop()) {
|
2013-05-28 13:40:19 +02:00
|
|
|
next.invokeChannelWritabilityChanged();
|
2013-02-06 04:55:42 +01:00
|
|
|
} else {
|
2013-05-28 13:40:19 +02:00
|
|
|
Runnable task = next.invokeChannelWritableStateChangedTask;
|
2013-02-06 04:55:42 +01:00
|
|
|
if (task == null) {
|
2013-05-28 13:40:19 +02:00
|
|
|
next.invokeChannelWritableStateChangedTask = task = new Runnable() {
|
2013-02-06 04:55:42 +01:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-07-09 16:09:28 +02:00
|
|
|
next.invokeChannelWritabilityChanged();
|
2013-02-06 04:55:42 +01:00
|
|
|
}
|
|
|
|
};
|
2012-12-30 13:53:59 +01:00
|
|
|
}
|
2013-02-06 04:55:42 +01:00
|
|
|
executor.execute(task);
|
2012-12-30 13:53:59 +01:00
|
|
|
}
|
2013-02-11 09:44:04 +01:00
|
|
|
return this;
|
2012-12-30 13:53:59 +01:00
|
|
|
}
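The lazily cached invokeChannelWritableStateChangedTask above is an allocation-avoidance idiom: the Runnable captures only the fixed next context, so a single instance can be reused for every writability event. A hedged, generic sketch of the same pattern (names are illustrative):

import java.util.concurrent.Executor;

class CachedTaskSource {
    private final Runnable target;         // fixed work to hand to the executor
    private volatile Runnable cachedTask;  // lazily created, then reused forever

    CachedTaskSource(Runnable target) {
        this.target = target;
    }

    void fire(Executor executor) {
        Runnable task = cachedTask;
        if (task == null) {
            // Benign race: two threads may both create a task, but the
            // instances are interchangeable, so losing the race is harmless.
            cachedTask = task = new Runnable() {
                @Override
                public void run() {
                    target.run();
                }
            };
        }
        executor.execute(task);
    }
}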
|
|
|
|
|
2013-05-28 13:40:19 +02:00
|
|
|
private void invokeChannelWritabilityChanged() {
|
2013-01-15 08:23:09 +01:00
|
|
|
try {
|
2013-08-01 09:54:07 +02:00
|
|
|
((ChannelInboundHandler) handler).channelWritabilityChanged(this);
|
2013-01-15 08:23:09 +01:00
|
|
|
} catch (Throwable t) {
|
2013-03-05 04:13:14 +01:00
|
|
|
notifyHandlerException(t);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-06-02 02:51:19 +02:00
|
|
|
@Override
|
|
|
|
public ChannelFuture bind(SocketAddress localAddress) {
|
2012-12-30 17:40:24 +01:00
|
|
|
return bind(localAddress, newPromise());
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture connect(SocketAddress remoteAddress) {
|
2012-12-30 17:40:24 +01:00
|
|
|
return connect(remoteAddress, newPromise());
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) {
|
2012-12-30 17:40:24 +01:00
|
|
|
return connect(remoteAddress, localAddress, newPromise());
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture disconnect() {
|
2012-12-30 17:40:24 +01:00
|
|
|
return disconnect(newPromise());
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture close() {
|
2012-12-30 17:40:24 +01:00
|
|
|
return close(newPromise());
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture deregister() {
|
2012-12-30 17:40:24 +01:00
|
|
|
return deregister(newPromise());
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-31 09:32:16 +02:00
|
|
|
public ChannelFuture bind(final SocketAddress localAddress, final ChannelPromise promise) {
|
2013-01-15 08:23:09 +01:00
|
|
|
if (localAddress == null) {
|
|
|
|
throw new NullPointerException("localAddress");
|
|
|
|
}
|
2013-07-02 02:36:01 +02:00
|
|
|
validatePromise(promise, false);
|
2013-01-15 08:23:09 +01:00
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
final DefaultChannelHandlerContext next = findContextOutbound();
|
|
|
|
EventExecutor executor = next.executor();
|
2013-01-15 08:23:09 +01:00
|
|
|
if (executor.inEventLoop()) {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeBind(localAddress, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
} else {
|
2013-10-24 15:10:32 +02:00
|
|
|
safeExecute(executor, new Runnable() {
|
2013-01-15 08:23:09 +01:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeBind(localAddress, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2013-11-07 10:24:15 +01:00
|
|
|
}, promise, null);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2013-07-31 09:32:16 +02:00
|
|
|
|
2013-01-15 08:23:09 +01:00
|
|
|
return promise;
|
|
|
|
}
|
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
private void invokeBind(SocketAddress localAddress, ChannelPromise promise) {
|
2013-01-15 08:23:09 +01:00
|
|
|
try {
|
2013-08-01 09:54:07 +02:00
|
|
|
((ChannelOutboundHandler) handler).bind(this, localAddress, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
} catch (Throwable t) {
|
2013-06-13 08:18:11 +02:00
|
|
|
notifyOutboundHandlerException(t, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2012-12-30 17:40:24 +01:00
|
|
|
public ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) {
|
|
|
|
return connect(remoteAddress, null, promise);
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-31 09:32:16 +02:00
|
|
|
public ChannelFuture connect(
|
|
|
|
final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise) {
|
|
|
|
|
2013-01-15 08:23:09 +01:00
|
|
|
if (remoteAddress == null) {
|
|
|
|
throw new NullPointerException("remoteAddress");
|
|
|
|
}
|
2013-07-02 02:36:01 +02:00
|
|
|
validatePromise(promise, false);
|
2013-01-15 08:23:09 +01:00
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
final DefaultChannelHandlerContext next = findContextOutbound();
|
|
|
|
EventExecutor executor = next.executor();
|
2013-01-15 08:23:09 +01:00
|
|
|
if (executor.inEventLoop()) {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeConnect(remoteAddress, localAddress, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
} else {
|
2013-10-24 15:10:32 +02:00
|
|
|
safeExecute(executor, new Runnable() {
|
2013-01-15 08:23:09 +01:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeConnect(remoteAddress, localAddress, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2013-11-07 10:24:15 +01:00
|
|
|
}, promise, null);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return promise;
|
|
|
|
}
|
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
private void invokeConnect(SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) {
|
2013-01-15 08:23:09 +01:00
|
|
|
try {
|
2013-08-01 09:54:07 +02:00
|
|
|
((ChannelOutboundHandler) handler).connect(this, remoteAddress, localAddress, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
} catch (Throwable t) {
|
2013-06-13 08:18:11 +02:00
|
|
|
notifyOutboundHandlerException(t, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-31 09:32:16 +02:00
|
|
|
public ChannelFuture disconnect(final ChannelPromise promise) {
|
2013-07-02 02:36:01 +02:00
|
|
|
validatePromise(promise, false);
|
2013-01-15 08:23:09 +01:00
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
final DefaultChannelHandlerContext next = findContextOutbound();
|
|
|
|
EventExecutor executor = next.executor();
|
2013-01-15 08:23:09 +01:00
|
|
|
if (executor.inEventLoop()) {
|
2013-08-09 22:24:14 +02:00
|
|
|
// Translate disconnect to close if the channel has no notion of disconnect-reconnect.
|
|
|
|
// So far, UDP/IP is the only transport that has such behavior.
|
|
|
|
if (!channel().metadata().hasDisconnect()) {
|
|
|
|
next.invokeClose(promise);
|
|
|
|
} else {
|
|
|
|
next.invokeDisconnect(promise);
|
|
|
|
}
|
2013-01-15 08:23:09 +01:00
|
|
|
} else {
|
2013-10-24 15:10:32 +02:00
|
|
|
safeExecute(executor, new Runnable() {
|
2013-01-15 08:23:09 +01:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-08-09 22:24:14 +02:00
|
|
|
if (!channel().metadata().hasDisconnect()) {
|
|
|
|
next.invokeClose(promise);
|
|
|
|
} else {
|
|
|
|
next.invokeDisconnect(promise);
|
|
|
|
}
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2013-11-07 10:24:15 +01:00
|
|
|
}, promise, null);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return promise;
|
|
|
|
}
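The hasDisconnect() check above comes from ChannelMetadata; a hedged sketch of what it reports for two concrete transports (the channel types are chosen for illustration):

import io.netty.channel.Channel;
import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public final class DisconnectMetadataDemo {
    public static void main(String[] args) {
        Channel udp = new NioDatagramChannel();
        Channel tcp = new NioSocketChannel();
        System.out.println(udp.metadata().hasDisconnect()); // true: disconnect() keeps the channel reusable
        System.out.println(tcp.metadata().hasDisconnect()); // false: disconnect() is translated to close()
    }
}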
|
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
private void invokeDisconnect(ChannelPromise promise) {
|
2013-01-15 08:23:09 +01:00
|
|
|
try {
|
2013-08-01 09:54:07 +02:00
|
|
|
((ChannelOutboundHandler) handler).disconnect(this, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
} catch (Throwable t) {
|
2013-06-13 08:18:11 +02:00
|
|
|
notifyOutboundHandlerException(t, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-31 09:32:16 +02:00
|
|
|
public ChannelFuture close(final ChannelPromise promise) {
|
2013-07-02 02:36:01 +02:00
|
|
|
validatePromise(promise, false);
|
2013-01-15 08:23:09 +01:00
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
final DefaultChannelHandlerContext next = findContextOutbound();
|
|
|
|
EventExecutor executor = next.executor();
|
2013-01-15 08:23:09 +01:00
|
|
|
if (executor.inEventLoop()) {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeClose(promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
} else {
|
2013-10-24 15:10:32 +02:00
|
|
|
safeExecute(executor, new Runnable() {
|
2013-01-15 08:23:09 +01:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeClose(promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2013-11-07 10:24:15 +01:00
|
|
|
}, promise, null);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return promise;
|
|
|
|
}
|
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
private void invokeClose(ChannelPromise promise) {
|
2013-01-15 08:23:09 +01:00
|
|
|
try {
|
2013-08-01 09:54:07 +02:00
|
|
|
((ChannelOutboundHandler) handler).close(this, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
} catch (Throwable t) {
|
2013-06-13 08:18:11 +02:00
|
|
|
notifyOutboundHandlerException(t, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-31 09:32:16 +02:00
|
|
|
public ChannelFuture deregister(final ChannelPromise promise) {
|
2013-07-02 02:36:01 +02:00
|
|
|
validatePromise(promise, false);
|
2013-01-15 08:23:09 +01:00
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
final DefaultChannelHandlerContext next = findContextOutbound();
|
|
|
|
EventExecutor executor = next.executor();
|
2013-01-15 08:23:09 +01:00
|
|
|
if (executor.inEventLoop()) {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeDeregister(promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
} else {
|
2013-10-24 15:10:32 +02:00
|
|
|
safeExecute(executor, new Runnable() {
|
2013-01-15 08:23:09 +01:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeDeregister(promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2013-11-07 10:24:15 +01:00
|
|
|
}, promise, null);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return promise;
|
|
|
|
}
|
|
|
|
|
2014-01-20 07:16:12 +01:00
|
|
|
@SuppressWarnings("deprecation")
|
2013-07-31 09:32:16 +02:00
|
|
|
private void invokeDeregister(ChannelPromise promise) {
|
2013-01-15 08:23:09 +01:00
|
|
|
try {
|
2013-08-01 09:54:07 +02:00
|
|
|
((ChannelOutboundHandler) handler).deregister(this, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
} catch (Throwable t) {
|
2013-06-13 08:18:11 +02:00
|
|
|
notifyOutboundHandlerException(t, promise);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2012-12-30 13:53:59 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MessageList was a pain in the ass. Instead of forcing a
handler to always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- messageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of messages with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
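A hedged sketch of the resulting write/flush contract; the helper class and its arguments are illustrative:

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

final class WriteThenFlush {
    static void send(Channel ch, Object header, Object body) {
        ch.write(header); // queued in the outbound buffer, not yet on the wire
        ch.write(body);   // still queued
        ch.flush();       // both messages are written to the socket now
    }

    static void sendOne(Channel ch, Object msg) {
        // writeAndFlush() combines both steps and yields a completion future.
        ch.writeAndFlush(msg).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) {
                if (!future.isSuccess()) {
                    future.cause().printStackTrace();
                }
            }
        });
    }
}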
2013-07-08 12:03:40 +02:00
|
|
|
public ChannelHandlerContext read() {
|
2013-07-31 09:32:16 +02:00
|
|
|
final DefaultChannelHandlerContext next = findContextOutbound();
|
|
|
|
EventExecutor executor = next.executor();
|
2013-01-15 08:23:09 +01:00
|
|
|
if (executor.inEventLoop()) {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeRead();
|
2013-01-15 08:23:09 +01:00
|
|
|
} else {
|
2013-07-31 09:32:16 +02:00
|
|
|
Runnable task = next.invokeReadTask;
|
2013-01-15 08:23:09 +01:00
|
|
|
if (task == null) {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeReadTask = task = new Runnable() {
|
2013-01-15 08:23:09 +01:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeRead();
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
executor.execute(task);
|
|
|
|
}
|
2013-07-31 09:32:16 +02:00
|
|
|
|
|
|
|
return this;
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
private void invokeRead() {
|
2013-01-15 08:23:09 +01:00
|
|
|
try {
|
2013-08-01 09:54:07 +02:00
|
|
|
((ChannelOutboundHandler) handler).read(this);
|
2013-01-15 08:23:09 +01:00
|
|
|
} catch (Throwable t) {
|
2013-03-05 04:13:14 +01:00
|
|
|
notifyHandlerException(t);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-10 13:00:42 +02:00
|
|
|
public ChannelFuture write(Object msg) {
|
|
|
|
return write(msg, newPromise());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-31 09:32:16 +02:00
|
|
|
public ChannelFuture write(final Object msg, final ChannelPromise promise) {
|
2013-05-28 13:40:19 +02:00
|
|
|
if (msg == null) {
|
|
|
|
throw new NullPointerException("msg");
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
2013-08-21 16:12:58 +02:00
|
|
|
|
2013-07-10 13:00:42 +02:00
|
|
|
validatePromise(promise, true);
|
2013-01-15 08:23:09 +01:00
|
|
|
|
2013-08-21 16:12:58 +02:00
|
|
|
write(msg, false, promise);
|
2013-07-31 09:32:16 +02:00
|
|
|
|
|
|
|
return promise;
|
2013-07-08 12:03:40 +02:00
|
|
|
}
|
2013-07-02 02:36:01 +02:00
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
private void invokeWrite(Object msg, ChannelPromise promise) {
|
2013-07-08 12:03:40 +02:00
|
|
|
try {
|
2013-08-01 09:54:07 +02:00
|
|
|
((ChannelOutboundHandler) handler).write(this, msg, promise);
|
2013-07-08 12:03:40 +02:00
|
|
|
} catch (Throwable t) {
|
2013-07-10 13:00:42 +02:00
|
|
|
notifyOutboundHandlerException(t, promise);
|
2013-07-08 12:03:40 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-10 13:00:42 +02:00
|
|
|
public ChannelHandlerContext flush() {
|
2013-07-31 09:32:16 +02:00
|
|
|
final DefaultChannelHandlerContext next = findContextOutbound();
|
|
|
|
EventExecutor executor = next.executor();
|
2012-06-04 09:24:34 +02:00
|
|
|
if (executor.inEventLoop()) {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeFlush();
|
2012-06-04 09:24:34 +02:00
|
|
|
} else {
|
2013-07-31 09:32:16 +02:00
|
|
|
Runnable task = next.invokeFlushTask;
|
2013-07-12 11:57:33 +02:00
|
|
|
if (task == null) {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeFlushTask = task = new Runnable() {
|
2013-07-12 11:57:33 +02:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2013-07-31 09:32:16 +02:00
|
|
|
next.invokeFlush();
|
2013-07-12 11:57:33 +02:00
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
2013-11-07 10:24:15 +01:00
|
|
|
safeExecute(executor, task, channel.voidPromise(), null);
|
2012-06-04 09:24:34 +02:00
|
|
|
}
|
2013-07-31 09:32:16 +02:00
|
|
|
|
|
|
|
return this;
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
2013-07-31 09:32:16 +02:00
|
|
|
private void invokeFlush() {
|
2013-01-15 08:23:09 +01:00
|
|
|
try {
|
2013-08-01 09:54:07 +02:00
|
|
|
((ChannelOutboundHandler) handler).flush(this);
|
2013-01-15 08:23:09 +01:00
|
|
|
} catch (Throwable t) {
|
2013-07-10 13:00:42 +02:00
|
|
|
notifyHandlerException(t);
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-08 12:03:40 +02:00
|
|
|
@Override
|
|
|
|
public ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) {
|
2013-08-07 20:15:10 +02:00
|
|
|
if (msg == null) {
|
|
|
|
throw new NullPointerException("msg");
|
|
|
|
}
|
2013-08-21 16:12:58 +02:00
|
|
|
|
2013-08-07 20:15:10 +02:00
|
|
|
validatePromise(promise, true);
|
|
|
|
|
2013-08-21 16:12:58 +02:00
|
|
|
write(msg, true, promise);
|
2013-08-07 20:15:10 +02:00
|
|
|
|
|
|
|
return promise;
|
2013-07-08 12:03:40 +02:00
|
|
|
}
|
|
|
|
|
2013-08-21 16:12:58 +02:00
|
|
|
private void write(Object msg, boolean flush, ChannelPromise promise) {
|
|
|
|
|
|
|
|
DefaultChannelHandlerContext next = findContextOutbound();
|
2014-01-29 03:44:59 +01:00
|
|
|
ReferenceCountUtil.touch(msg, next);
|
2013-08-21 16:12:58 +02:00
|
|
|
EventExecutor executor = next.executor();
|
|
|
|
if (executor.inEventLoop()) {
|
|
|
|
next.invokeWrite(msg, promise);
|
|
|
|
if (flush) {
|
|
|
|
next.invokeFlush();
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
int size = channel.estimatorHandle().size(msg);
|
|
|
|
if (size > 0) {
|
|
|
|
ChannelOutboundBuffer buffer = channel.unsafe().outboundBuffer();
|
|
|
|
// Check for null as it may be set to null if the channel is closed already
|
|
|
|
if (buffer != null) {
|
2013-09-30 17:33:12 +02:00
|
|
|
buffer.incrementPendingOutboundBytes(size);
|
2013-08-21 16:12:58 +02:00
|
|
|
}
|
2013-08-07 20:28:33 +02:00
|
|
|
}
|
2014-01-30 20:02:15 +01:00
|
|
|
Runnable task;
|
|
|
|
if (flush) {
|
|
|
|
task = WriteAndFlushTask.newInstance(next, msg, size, promise);
|
|
|
|
} else {
|
|
|
|
task = WriteTask.newInstance(next, msg, size, promise);
|
|
|
|
}
|
|
|
|
safeExecute(executor, task, promise, msg);
|
2013-08-07 20:28:33 +02:00
|
|
|
}
|
|
|
|
}
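The incrementPendingOutboundBytes() call above feeds the counter behind Channel.isWritable(); a hedged configuration sketch, assuming the final Netty 4 setter names and illustrative thresholds:

import io.netty.channel.Channel;

final class WaterMarks {
    static void tune(Channel ch) {
        // Writability flips to false once pending outbound bytes exceed the
        // high mark and back to true once they drain below the low mark.
        ch.config().setWriteBufferHighWaterMark(64 * 1024);
        ch.config().setWriteBufferLowWaterMark(32 * 1024);
    }
}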
|
|
|
|
|
2013-07-08 12:03:40 +02:00
|
|
|
@Override
|
|
|
|
public ChannelFuture writeAndFlush(Object msg) {
|
2013-07-10 13:00:42 +02:00
|
|
|
return writeAndFlush(msg, newPromise());
|
2013-07-08 12:03:40 +02:00
|
|
|
}
|
|
|
|
|
2013-06-13 08:18:11 +02:00
|
|
|
private static void notifyOutboundHandlerException(Throwable cause, ChannelPromise promise) {
|
2013-05-28 13:40:19 +02:00
|
|
|
// only try to fail the promise if it's not a VoidChannelPromise, as
|
|
|
|
// the VoidChannelPromise would also fire the cause through the pipeline
|
2013-06-13 08:18:11 +02:00
|
|
|
if (promise instanceof VoidChannelPromise) {
|
2013-01-15 08:23:09 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-06-13 08:18:11 +02:00
|
|
|
if (!promise.tryFailure(cause)) {
|
|
|
|
if (logger.isWarnEnabled()) {
|
|
|
|
logger.warn("Failed to fail the promise because it's done already: {}", promise, cause);
|
|
|
|
}
|
|
|
|
}
|
2013-01-15 08:23:09 +01:00
|
|
|
}
|
|
|
|
|
2013-03-05 04:13:14 +01:00
|
|
|
private void notifyHandlerException(Throwable cause) {
|
|
|
|
if (inExceptionCaught(cause)) {
|
|
|
|
if (logger.isWarnEnabled()) {
|
|
|
|
logger.warn(
|
|
|
|
"An exception was thrown by a user handler " +
|
|
|
|
"while handling an exceptionCaught event", cause);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-04-22 12:40:23 +02:00
|
|
|
invokeExceptionCaught(cause);
|
2013-03-05 04:13:14 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
private static boolean inExceptionCaught(Throwable cause) {
|
|
|
|
do {
|
|
|
|
StackTraceElement[] trace = cause.getStackTrace();
|
|
|
|
if (trace != null) {
|
|
|
|
for (StackTraceElement t : trace) {
|
|
|
|
if (t == null) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if ("exceptionCaught".equals(t.getMethodName())) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cause = cause.getCause();
|
|
|
|
} while (cause != null);
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-06-02 02:51:19 +02:00
|
|
|
@Override
|
2012-12-30 17:40:24 +01:00
|
|
|
public ChannelPromise newPromise() {
|
2013-03-05 21:41:19 +01:00
|
|
|
return new DefaultChannelPromise(channel(), executor());
|
2012-06-02 02:51:19 +02:00
|
|
|
}
|
|
|
|
|
2013-04-13 14:38:16 +02:00
|
|
|
@Override
|
2013-04-15 13:11:02 +02:00
|
|
|
public ChannelProgressivePromise newProgressivePromise() {
|
|
|
|
return new DefaultChannelProgressivePromise(channel(), executor());
|
2013-04-13 14:38:16 +02:00
|
|
|
}
|
|
|
|
|
2012-06-02 02:51:19 +02:00
|
|
|
    @Override
    public ChannelFuture newSucceededFuture() {
        ChannelFuture succeededFuture = this.succeededFuture;
        if (succeededFuture == null) {
            this.succeededFuture = succeededFuture = new SucceededChannelFuture(channel(), executor());
        }
        return succeededFuture;
    }

    @Override
    public ChannelFuture newFailedFuture(Throwable cause) {
        return new FailedChannelFuture(channel(), executor(), cause);
    }

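    // Rejects promises that cannot safely travel through this pipeline: null or
    // already-completed promises, promises bound to a different channel, void
    // promises where a real result is required, and the channel's close future.
    // DefaultChannelPromise is the common case, so it short-circuits early.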
    private void validatePromise(ChannelPromise promise, boolean allowVoidPromise) {
        if (promise == null) {
            throw new NullPointerException("promise");
        }

        if (promise.isDone()) {
            throw new IllegalArgumentException("promise already done: " + promise);
        }

        if (promise.channel() != channel()) {
            throw new IllegalArgumentException(String.format(
                    "promise.channel does not match: %s (expected: %s)", promise.channel(), channel()));
        }

        if (promise.getClass() == DefaultChannelPromise.class) {
            return;
        }

        if (!allowVoidPromise && promise instanceof VoidChannelPromise) {
            throw new IllegalArgumentException(
                    StringUtil.simpleClassName(VoidChannelPromise.class) + " not allowed for this operation");
        }

        if (promise instanceof AbstractChannel.CloseFuture) {
            throw new IllegalArgumentException(
                    StringUtil.simpleClassName(AbstractChannel.CloseFuture.class) + " not allowed in a pipeline");
        }
    }

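    // The pipeline is a doubly linked list of contexts: inbound events walk
    // forward (next) to the nearest inbound handler, while outbound operations
    // walk backward (prev) to the nearest outbound handler.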
    private DefaultChannelHandlerContext findContextInbound() {
        DefaultChannelHandlerContext ctx = this;
        do {
            ctx = ctx.next;
        } while (!ctx.inbound);
        return ctx;
    }

    private DefaultChannelHandlerContext findContextOutbound() {
        DefaultChannelHandlerContext ctx = this;
        do {
            ctx = ctx.prev;
        } while (!ctx.outbound);
        return ctx;
    }

    @Override
    public ChannelPromise voidPromise() {
        return channel.voidPromise();
    }

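    // Flipped by the pipeline once this handler has been removed; handlers can
    // check isRemoved() to stop processing events that arrive after removal.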
    void setRemoved() {
        removed = true;
    }

    @Override
    public boolean isRemoved() {
        return removed;
    }

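    // Submitting to an EventExecutor can fail (e.g. when it is shutting down).
    // In that case the promise is failed and the message released, so neither
    // the caller nor the buffer is left with a leaked reference.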
    private static void safeExecute(EventExecutor executor, Runnable runnable, ChannelPromise promise, Object msg) {
        try {
            executor.execute(runnable);
        } catch (Throwable cause) {
            try {
                promise.setFailure(cause);
            } finally {
                if (msg != null) {
                    ReferenceCountUtil.release(msg);
                }
            }
        }
    }

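    // Write tasks are pooled via the Recycler so that writes issued from outside
    // the event loop do not allocate a new Runnable each time. The self-referential
    // type parameter lets each concrete subclass recycle into its own pool.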
    abstract static class AbstractWriteTask<T extends AbstractWriteTask<T>> implements Runnable {

        private final Recycler.Handle<T> handle;

        private DefaultChannelHandlerContext ctx;
        private Object msg;
        private ChannelPromise promise;
        private int size;

        private AbstractWriteTask(Recycler.Handle<T> handle) {
            this.handle = handle;
        }

        protected static void init(AbstractWriteTask<?> task, DefaultChannelHandlerContext ctx,
                                   Object msg, int size, ChannelPromise promise) {
            task.ctx = ctx;
            task.msg = msg;
            task.promise = promise;
            task.size = size;
        }

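        // run() executes on the channel's event loop. The 'size' bytes were
        // presumably added to the pending-outbound counter when this task was
        // scheduled from another thread; they are subtracted here so they are
        // not counted twice once the message enters the ChannelOutboundBuffer.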
        @Override
        public final void run() {
            try {
                if (size > 0) {
                    ChannelOutboundBuffer buffer = ctx.channel.unsafe().outboundBuffer();
                    // Check for null as it may be set to null if the channel is closed already
                    if (buffer != null) {
                        buffer.decrementPendingOutboundBytes(size);
                    }
                }
                write(ctx, msg, promise);
            } finally {
                // Set to null so the GC can collect them directly
                ctx = null;
                msg = null;
                promise = null;
                recycle(handle);
            }
        }

        protected void write(DefaultChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
            ctx.invokeWrite(msg, promise);
        }

        protected abstract void recycle(Recycler.Handle<T> handle);
    }

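    // NonWakeupRunnable marks a plain write as not requiring the event loop to be
    // woken up: the write only queues data in the outbound buffer, and nothing
    // reaches the socket until a flush, so there is no point interrupting a
    // selector that is blocked in select().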
    static final class WriteTask
            extends AbstractWriteTask<WriteTask> implements SingleThreadEventLoop.NonWakeupRunnable {

        private static final Recycler<WriteTask> RECYCLER = new Recycler<WriteTask>() {
            @Override
            protected WriteTask newObject(Handle<WriteTask> handle) {
                return new WriteTask(handle);
            }
        };

        private static WriteTask newInstance(
                DefaultChannelHandlerContext ctx, Object msg, int size, ChannelPromise promise) {
            WriteTask task = RECYCLER.get();
            init(task, ctx, msg, size, promise);
            return task;
        }

        private WriteTask(Recycler.Handle<WriteTask> handle) {
            super(handle);
        }

        @Override
        protected void recycle(Recycler.Handle<WriteTask> handle) {
            RECYCLER.recycle(this, handle);
        }
    }

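    // Unlike WriteTask, this variant does not implement NonWakeupRunnable: the
    // flush must reach the socket promptly, so scheduling it wakes the event
    // loop if it is blocked on select().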
    static final class WriteAndFlushTask extends AbstractWriteTask<WriteAndFlushTask> {

        private static final Recycler<WriteAndFlushTask> RECYCLER = new Recycler<WriteAndFlushTask>() {
            @Override
            protected WriteAndFlushTask newObject(Handle<WriteAndFlushTask> handle) {
                return new WriteAndFlushTask(handle);
            }
        };

        private static WriteAndFlushTask newInstance(
                DefaultChannelHandlerContext ctx, Object msg, int size, ChannelPromise promise) {
            WriteAndFlushTask task = RECYCLER.get();
            init(task, ctx, msg, size, promise);
            return task;
        }

        private WriteAndFlushTask(Recycler.Handle<WriteAndFlushTask> handle) {
            super(handle);
        }

        @Override
        public void write(DefaultChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
            super.write(ctx, msg, promise);
            ctx.invokeFlush();
        }

        @Override
        protected void recycle(Recycler.Handle<WriteAndFlushTask> handle) {
            RECYCLER.recycle(this, handle);
        }
    }

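    // Implements ResourceLeakHint: when leak detection reports a leaked message,
    // this string identifies the handler context that last touched it.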
    @Override
    public String toHintString() {
        return '\'' + name + "' will handle the message from this point.";
    }

    @Override
    public String toString() {
        return StringUtil.simpleClassName(ChannelHandlerContext.class) + '(' + name + ", " + channel + ')';
    }
}