/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.channel;

import io.netty.buffer.ByteBufAllocator;
import io.netty.util.DefaultAttributeMap;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.internal.EmptyArrays;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

import java.io.EOFException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.NotYetConnectedException;
import java.util.concurrent.RejectedExecutionException;

/**
 * A skeletal {@link Channel} implementation.
 */
public abstract class AbstractChannel extends DefaultAttributeMap implements Channel {
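
    // Note (illustrative, not part of the original source): this skeleton delegates the
    // transport-specific work to protected hooks that a concrete subclass provides. The hooks
    // invoked below include, with approximate signatures:
    //
    //     protected abstract AbstractUnsafe newUnsafe();
    //     protected abstract boolean isCompatible(EventLoop loop);
    //     protected abstract SocketAddress localAddress0();
    //     protected abstract SocketAddress remoteAddress0();
    //     protected abstract void doRegister() throws Exception;
    //     protected abstract void doBind(SocketAddress localAddress) throws Exception;
    //     protected abstract void doDisconnect() throws Exception;
    //     protected abstract void doClose() throws Exception;
    //     protected abstract void doDeregister() throws Exception;
    //     protected abstract void doBeginRead() throws Exception;
    //     protected abstract void doWrite(ChannelOutboundBuffer in) throws Exception;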

    private static final InternalLogger logger = InternalLoggerFactory.getInstance(AbstractChannel.class);

    static final ClosedChannelException CLOSED_CHANNEL_EXCEPTION = new ClosedChannelException();
    static final NotYetConnectedException NOT_YET_CONNECTED_EXCEPTION = new NotYetConnectedException();

    static {
        CLOSED_CHANNEL_EXCEPTION.setStackTrace(EmptyArrays.EMPTY_STACK_TRACE);
        NOT_YET_CONNECTED_EXCEPTION.setStackTrace(EmptyArrays.EMPTY_STACK_TRACE);
    }

    private MessageSizeEstimator.Handle estimatorHandle;

    private final Channel parent;
    private final ChannelId id = DefaultChannelId.newInstance();
    private final Unsafe unsafe;
    private final DefaultChannelPipeline pipeline;
    private final ChannelFuture succeededFuture = new SucceededChannelFuture(this, null);
    private final VoidChannelPromise voidPromise = new VoidChannelPromise(this, true);
    private final VoidChannelPromise unsafeVoidPromise = new VoidChannelPromise(this, false);
    private final CloseFuture closeFuture = new CloseFuture(this);

    private volatile SocketAddress localAddress;
    private volatile SocketAddress remoteAddress;
    private volatile EventLoop eventLoop;
    private volatile boolean registered;

    /** Cache for the string representation of this channel */
    private boolean strValActive;
    private String strVal;

    /**
     * Creates a new instance.
     *
     * @param parent
     *        the parent of this channel. {@code null} if there's no parent.
     */
    protected AbstractChannel(Channel parent) {
        this.parent = parent;
        unsafe = newUnsafe();
        pipeline = new DefaultChannelPipeline(this);
    }

    @Override
    public final ChannelId id() {
        return id;
    }

    @Override
    public boolean isWritable() {
        ChannelOutboundBuffer buf = unsafe.outboundBuffer();
        return buf != null && buf.getWritable();
    }

    @Override
    public Channel parent() {
        return parent;
    }

    @Override
    public ChannelPipeline pipeline() {
        return pipeline;
    }

    @Override
    public ByteBufAllocator alloc() {
        return config().getAllocator();
    }

    @Override
    public EventLoop eventLoop() {
        EventLoop eventLoop = this.eventLoop;
        if (eventLoop == null) {
            throw new IllegalStateException("channel not registered to an event loop");
        }
        return eventLoop;
    }

    @Override
    public SocketAddress localAddress() {
        SocketAddress localAddress = this.localAddress;
        if (localAddress == null) {
            try {
                this.localAddress = localAddress = unsafe().localAddress();
            } catch (Throwable t) {
                // Sometimes fails on a closed socket in Windows.
                return null;
            }
        }
        return localAddress;
    }

    protected void invalidateLocalAddress() {
        localAddress = null;
    }

    @Override
    public SocketAddress remoteAddress() {
        SocketAddress remoteAddress = this.remoteAddress;
        if (remoteAddress == null) {
            try {
                this.remoteAddress = remoteAddress = unsafe().remoteAddress();
            } catch (Throwable t) {
                // Sometimes fails on a closed socket in Windows.
                return null;
            }
        }
        return remoteAddress;
    }

    /**
     * Reset the stored remoteAddress
     */
    protected void invalidateRemoteAddress() {
        remoteAddress = null;
    }

    @Override
    public boolean isRegistered() {
        return registered;
    }

    @Override
    public ChannelFuture bind(SocketAddress localAddress) {
        return pipeline.bind(localAddress);
    }

    @Override
    public ChannelFuture connect(SocketAddress remoteAddress) {
        return pipeline.connect(remoteAddress);
    }

    @Override
    public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) {
        return pipeline.connect(remoteAddress, localAddress);
    }

    @Override
    public ChannelFuture disconnect() {
        return pipeline.disconnect();
    }

    @Override
    public ChannelFuture close() {
        return pipeline.close();
    }

    @Override
    public ChannelFuture deregister() {
        return pipeline.deregister();
    }

    @Override
    public Channel flush() {
        pipeline.flush();
        return this;
    }

    @Override
    public ChannelFuture bind(SocketAddress localAddress, ChannelPromise promise) {
        return pipeline.bind(localAddress, promise);
    }

    @Override
    public ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) {
        return pipeline.connect(remoteAddress, promise);
    }

    @Override
    public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) {
        return pipeline.connect(remoteAddress, localAddress, promise);
    }

    @Override
    public ChannelFuture disconnect(ChannelPromise promise) {
        return pipeline.disconnect(promise);
    }

    @Override
    public ChannelFuture close(ChannelPromise promise) {
        return pipeline.close(promise);
    }

    @Override
    public ChannelFuture deregister(ChannelPromise promise) {
        return pipeline.deregister(promise);
    }

    @Override
    public Channel read() {
        pipeline.read();
        return this;
    }

    @Override
    public ChannelFuture write(Object msg) {
        return pipeline.write(msg);
    }

    @Override
    public ChannelFuture write(Object msg, ChannelPromise promise) {
        return pipeline.write(msg, promise);
    }

    @Override
    public ChannelFuture writeAndFlush(Object msg) {
        return pipeline.writeAndFlush(msg);
    }

    @Override
    public ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) {
        return pipeline.writeAndFlush(msg, promise);
    }
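
    // Usage sketch (illustrative, not part of the original source; "msg1"/"msg2"/"msg3" and
    // "listener" are placeholders): write() only enqueues a message through the pipeline, and
    // nothing reaches the transport until flush() is called, so a typical caller batches writes:
    //
    //     channel.write(msg1);
    //     channel.write(msg2);
    //     channel.flush();                      // push the enqueued messages to the transport
    //     channel.writeAndFlush(msg3)           // or: write a single message and flush in one call
    //            .addListener(listener);        // completion is reported via the returned future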

    @Override
    public ChannelPromise newPromise() {
        return new DefaultChannelPromise(this);
    }

    @Override
    public ChannelProgressivePromise newProgressivePromise() {
        return new DefaultChannelProgressivePromise(this);
    }

    @Override
    public ChannelFuture newSucceededFuture() {
        return succeededFuture;
    }

    @Override
    public ChannelFuture newFailedFuture(Throwable cause) {
        return new FailedChannelFuture(this, null, cause);
    }

    @Override
    public ChannelFuture closeFuture() {
        return closeFuture;
    }

    @Override
    public Unsafe unsafe() {
        return unsafe;
    }

    /**
     * Create a new {@link AbstractUnsafe} instance which will be used for the life-time of the {@link Channel}
     */
    protected abstract AbstractUnsafe newUnsafe();

    /**
     * Returns the ID of this channel.
     */
    @Override
    public final int hashCode() {
        return id.hashCode();
    }

    /**
     * Returns {@code true} if and only if the specified object is identical
     * with this channel (i.e: {@code this == o}).
     */
    @Override
    public final boolean equals(Object o) {
        return this == o;
    }

    @Override
    public final int compareTo(Channel o) {
        if (this == o) {
            return 0;
        }

        return id().compareTo(o.id());
    }

    /**
     * Returns the {@link String} representation of this channel. The returned
     * string contains the {@linkplain #hashCode() ID}, {@linkplain #localAddress() local address},
     * and {@linkplain #remoteAddress() remote address} of this channel for
     * easier identification.
     */
    @Override
    public String toString() {
        boolean active = isActive();
        if (strValActive == active && strVal != null) {
            return strVal;
        }

        SocketAddress remoteAddr = remoteAddress();
        SocketAddress localAddr = localAddress();
        if (remoteAddr != null) {
            SocketAddress srcAddr;
            SocketAddress dstAddr;
            if (parent == null) {
                srcAddr = localAddr;
                dstAddr = remoteAddr;
            } else {
                srcAddr = remoteAddr;
                dstAddr = localAddr;
            }

            StringBuilder buf = new StringBuilder(96);
            buf.append("[id: 0x");
            buf.append(id.asShortText());
            buf.append(", ");
            buf.append(srcAddr);
            buf.append(active? " => " : " :> ");
            buf.append(dstAddr);
            buf.append(']');
            strVal = buf.toString();
        } else if (localAddr != null) {
            StringBuilder buf = new StringBuilder(64);
            buf.append("[id: 0x");
            buf.append(id.asShortText());
            buf.append(", ");
            buf.append(localAddr);
            buf.append(']');
            strVal = buf.toString();
        } else {
            StringBuilder buf = new StringBuilder(16);
            buf.append("[id: 0x");
            buf.append(id.asShortText());
            buf.append(']');
            strVal = buf.toString();
        }

        strValActive = active;
        return strVal;
    }
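
    // Illustrative renderings of the format built above (the id and addresses are made up):
    //   active,   remote peer known:  [id: 0xa1b2c3d4, /127.0.0.1:8080 => /10.0.0.1:52000]
    //   inactive, remote peer known:  [id: 0xa1b2c3d4, /127.0.0.1:8080 :> /10.0.0.1:52000]
    //   bound locally only:           [id: 0xa1b2c3d4, /127.0.0.1:8080]
    //   no address yet:               [id: 0xa1b2c3d4]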

    @Override
    public final ChannelPromise voidPromise() {
        return voidPromise;
    }

    final MessageSizeEstimator.Handle estimatorHandle() {
        if (estimatorHandle == null) {
            estimatorHandle = config().getMessageSizeEstimator().newHandle();
        }
        return estimatorHandle;
    }

    /**
     * {@link Unsafe} implementation which sub-classes must extend and use.
     */
    protected abstract class AbstractUnsafe implements Unsafe {

        private ChannelOutboundBuffer outboundBuffer = newOutboundBuffer();

        private boolean inFlush0;

        @Override
        public final ChannelHandlerInvoker invoker() {
            return eventLoop().asInvoker();
        }

        @Override
        public final ChannelOutboundBuffer outboundBuffer() {
            return outboundBuffer;
        }

        @Override
        public final SocketAddress localAddress() {
            return localAddress0();
        }

        @Override
        public final SocketAddress remoteAddress() {
            return remoteAddress0();
        }

        @Override
        public final void register(EventLoop eventLoop, final ChannelPromise promise) {
            if (eventLoop == null) {
                throw new NullPointerException("eventLoop");
            }
            if (isRegistered()) {
                promise.setFailure(new IllegalStateException("registered to an event loop already"));
                return;
            }
            if (!isCompatible(eventLoop)) {
                promise.setFailure(
                        new IllegalStateException("incompatible event loop type: " + eventLoop.getClass().getName()));
                return;
            }

            AbstractChannel.this.eventLoop = eventLoop;

            if (eventLoop.inEventLoop()) {
                register0(promise);
            } else {
                try {
                    eventLoop.execute(new OneTimeTask() {
                        @Override
                        public void run() {
                            register0(promise);
                        }
                    });
                } catch (Throwable t) {
                    logger.warn(
                            "Force-closing a channel whose registration task was not accepted by an event loop: {}",
                            AbstractChannel.this, t);
                    closeForcibly();
                    closeFuture.setClosed();
                    safeSetFailure(promise, t);
                }
            }
        }
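
        // Pattern note (illustrative, not part of the original source; "task" is a placeholder):
        // as above, operations that may be invoked from arbitrary threads are funneled onto the
        // channel's event loop so channel state is only ever mutated from that single thread:
        //
        //     if (eventLoop.inEventLoop()) {
        //         task.run();                   // already on the event loop, run directly
        //     } else {
        //         eventLoop.execute(task);      // otherwise hand the work to the event loop
        //     }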

        private void register0(ChannelPromise promise) {
            try {
                // Check if the channel is still open, as it could have been closed in the meantime
                // when the register call was made outside of the eventLoop.
                if (!promise.setUncancellable() || !ensureOpen(promise)) {
                    return;
                }
                doRegister();
                registered = true;
                safeSetSuccess(promise);
                pipeline.fireChannelRegistered();
                if (isActive()) {
                    pipeline.fireChannelActive();
                }
            } catch (Throwable t) {
                // Close the channel directly to avoid FD leak.
                closeForcibly();
                closeFuture.setClosed();
                safeSetFailure(promise, t);
            }
        }

        @Override
        public final void bind(final SocketAddress localAddress, final ChannelPromise promise) {
            if (!promise.setUncancellable() || !ensureOpen(promise)) {
                return;
            }

            // See: https://github.com/netty/netty/issues/576
            if (!PlatformDependent.isWindows() && !PlatformDependent.isRoot() &&
                Boolean.TRUE.equals(config().getOption(ChannelOption.SO_BROADCAST)) &&
                localAddress instanceof InetSocketAddress &&
                !((InetSocketAddress) localAddress).getAddress().isAnyLocalAddress()) {
                // Warn a user about the fact that a non-root user can't receive a
                // broadcast packet on *nix if the socket is bound on a non-wildcard address.
                logger.warn(
                        "A non-root user can't receive a broadcast packet if the socket " +
                        "is not bound to a wildcard address; binding to a non-wildcard " +
                        "address (" + localAddress + ") anyway as requested.");
            }

            boolean wasActive = isActive();
            try {
                doBind(localAddress);
            } catch (Throwable t) {
                safeSetFailure(promise, t);
                closeIfClosed();
                return;
            }

            if (!wasActive && isActive()) {
                invokeLater(new OneTimeTask() {
                    @Override
                    public void run() {
                        pipeline.fireChannelActive();
                    }
                });
            }

            safeSetSuccess(promise);
        }

        @Override
        public final void disconnect(final ChannelPromise promise) {
            if (!promise.setUncancellable()) {
                return;
            }

            boolean wasActive = isActive();
            try {
                doDisconnect();
            } catch (Throwable t) {
                safeSetFailure(promise, t);
                closeIfClosed();
                return;
            }

            if (wasActive && !isActive()) {
                invokeLater(new OneTimeTask() {
                    @Override
                    public void run() {
                        pipeline.fireChannelInactive();
                    }
                });
            }

            safeSetSuccess(promise);
            closeIfClosed(); // doDisconnect() might have closed the channel
        }

        @Override
        public final void close(final ChannelPromise promise) {
            if (!promise.setUncancellable()) {
                return;
            }

            if (inFlush0) {
                invokeLater(new OneTimeTask() {
                    @Override
                    public void run() {
                        close(promise);
                    }
                });
                return;
            }

            if (closeFuture.isDone()) {
                // Closed already.
                safeSetSuccess(promise);
                return;
            }

            boolean wasActive = isActive();
            ChannelOutboundBuffer outboundBuffer = this.outboundBuffer;
            this.outboundBuffer = null; // Disallow adding any messages and flushes to outboundBuffer.

            try {
                doClose();
                closeFuture.setClosed();
                safeSetSuccess(promise);
            } catch (Throwable t) {
                closeFuture.setClosed();
                safeSetFailure(promise, t);
            }

            // Fail all the queued messages.
            try {
                outboundBuffer.failFlushed(CLOSED_CHANNEL_EXCEPTION);
                outboundBuffer.close(CLOSED_CHANNEL_EXCEPTION);
            } finally {
                if (wasActive && !isActive()) {
                    invokeLater(new OneTimeTask() {
                        @Override
                        public void run() {
                            pipeline.fireChannelInactive();
                        }
                    });
                }

                deregister(voidPromise());
            }
        }

        @Override
        public final void closeForcibly() {
            try {
                doClose();
            } catch (Exception e) {
                logger.warn("Failed to close a channel.", e);
            }
        }

        @Override
        public final void deregister(final ChannelPromise promise) {
            if (!promise.setUncancellable()) {
                return;
            }

            if (!registered) {
                safeSetSuccess(promise);
                return;
            }

            try {
                doDeregister();
            } catch (Throwable t) {
                logger.warn("Unexpected exception occurred while deregistering a channel.", t);
            } finally {
                if (registered) {
                    registered = false;
                    invokeLater(new OneTimeTask() {
                        @Override
                        public void run() {
                            pipeline.fireChannelUnregistered();
                        }
                    });
                    safeSetSuccess(promise);
                } else {
                    // Some transports, like local and AIO, do not allow the deregistration of
                    // an open channel. Their doDeregister() calls close(). Consequently,
                    // close() calls deregister() again - no need to fire channelUnregistered.
                    safeSetSuccess(promise);
                }
            }
        }

        @Override
        public void beginRead() {
            if (!isActive()) {
                return;
            }

            try {
                doBeginRead();
            } catch (final Exception e) {
                invokeLater(new OneTimeTask() {
                    @Override
                    public void run() {
                        pipeline.fireExceptionCaught(e);
                    }
                });
                close(voidPromise());
            }
        }

        @Override
        public void write(Object msg, ChannelPromise promise) {
            if (!isActive()) {
                // Mark the write request as a failure if the channel is inactive.
                if (isOpen()) {
                    safeSetFailure(promise, NOT_YET_CONNECTED_EXCEPTION);
                } else {
                    safeSetFailure(promise, CLOSED_CHANNEL_EXCEPTION);
                }
                // Release the message now to prevent a resource leak.
                ReferenceCountUtil.release(msg);
            } else {
                outboundBuffer.addMessage(msg, promise);
            }
        }

        @Override
        public void flush() {
            ChannelOutboundBuffer outboundBuffer = this.outboundBuffer;
            if (outboundBuffer == null) {
                return;
            }

            outboundBuffer.addFlush();
            flush0();
        }
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
|
2014-03-24 10:09:27 +01:00
|
|
|
@SuppressWarnings("deprecation")
|
2013-07-12 18:45:24 +02:00
|
|
|
protected void flush0() {
|
|
|
|
if (inFlush0) {
|
2013-07-12 13:10:39 +02:00
|
|
|
// Avoid re-entrance
|
|
|
|
return;
|
2012-10-24 18:27:26 +02:00
|
|
|
}
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
|
2013-07-18 16:33:39 +02:00
|
|
|
final ChannelOutboundBuffer outboundBuffer = this.outboundBuffer;
|
2013-07-18 16:29:13 +02:00
|
|
|
if (outboundBuffer == null || outboundBuffer.isEmpty()) {
|
2013-07-18 13:59:14 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
inFlush0 = true;
|
2013-06-14 04:15:46 +02:00
|
|
|
|
|
|
|
// Mark all pending write requests as failure if the channel is inactive.
|
|
|
|
if (!isActive()) {
|
2013-07-19 01:56:17 +02:00
|
|
|
try {
|
|
|
|
if (isOpen()) {
|
|
|
|
outboundBuffer.failFlushed(NOT_YET_CONNECTED_EXCEPTION);
|
|
|
|
} else {
|
|
|
|
outboundBuffer.failFlushed(CLOSED_CHANNEL_EXCEPTION);
|
|
|
|
}
|
|
|
|
} finally {
|
|
|
|
inFlush0 = false;
|
2013-06-14 04:15:46 +02:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-05-12 22:09:05 +02:00
|
|
|
try {
|
2013-07-18 13:59:14 +02:00
|
|
|
doWrite(outboundBuffer);
|
2013-05-28 13:40:19 +02:00
|
|
|
} catch (Throwable t) {
|
2013-07-18 13:59:14 +02:00
|
|
|
outboundBuffer.failFlushed(t);
|
2013-11-05 09:23:22 +01:00
|
|
|
if (t instanceof IOException && config().isAutoClose()) {
|
2013-05-28 13:40:19 +02:00
|
|
|
close(voidPromise());
|
|
|
|
}
|
2012-05-28 14:05:49 +02:00
|
|
|
} finally {
|
2013-07-12 18:45:24 +02:00
|
|
|
inFlush0 = false;
|
2012-05-12 22:09:05 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-25 14:35:22 +02:00
|
|
|
@Override
|
|
|
|
public ChannelPromise voidPromise() {
|
|
|
|
return unsafeVoidPromise;
|
|
|
|
}
|
|
|
|
|
2012-12-30 17:40:24 +01:00
|
|
|
protected final boolean ensureOpen(ChannelPromise promise) {
|
2012-05-27 07:48:48 +02:00
|
|
|
if (isOpen()) {
|
|
|
|
return true;
|
2012-05-26 00:32:28 +02:00
|
|
|
}
|
|
|
|
|
2014-02-10 23:52:24 +01:00
|
|
|
safeSetFailure(promise, CLOSED_CHANNEL_EXCEPTION);
|
2012-05-27 07:48:48 +02:00
|
|
|
return false;
|
2012-05-24 17:57:10 +02:00
|
|
|
}
|
|
|
|
|
2014-02-10 23:52:24 +01:00
|
|
|
/**
|
|
|
|
* Marks the specified {@code promise} as success. If the {@code promise} is done already, log a message.
|
|
|
|
*/
|
|
|
|
protected final void safeSetSuccess(ChannelPromise promise) {
|
|
|
|
if (!(promise instanceof VoidChannelPromise) && !promise.trySuccess()) {
|
|
|
|
logger.warn("Failed to mark a promise as success because it is done already: {}", promise);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Marks the specified {@code promise} as failure. If the {@code promise} is done already, log a message.
|
|
|
|
*/
|
|
|
|
protected final void safeSetFailure(ChannelPromise promise, Throwable cause) {
|
|
|
|
if (!(promise instanceof VoidChannelPromise) && !promise.tryFailure(cause)) {
|
|
|
|
logger.warn("Failed to mark a promise as failure because it's done already: {}", promise, cause);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-05-30 12:58:14 +02:00
|
|
|
protected final void closeIfClosed() {
|
2012-05-27 07:48:48 +02:00
|
|
|
if (isOpen()) {
|
|
|
|
return;
|
2012-05-24 17:57:10 +02:00
|
|
|
}
|
2013-05-15 15:10:41 +02:00
|
|
|
close(voidPromise());
|
2012-05-24 17:57:10 +02:00
|
|
|
}
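Taken together, ensureOpen(), safeSetSuccess()/safeSetFailure() and closeIfClosed() form the usual skeleton of an outbound operation. A hedged sketch of how a subclass might compose them around doBind() (illustrative only, not the actual AbstractUnsafe code; bind0 is a hypothetical method name):

    void bind0(SocketAddress localAddress, ChannelPromise promise) {
        if (!ensureOpen(promise)) {
            // The promise was already failed with CLOSED_CHANNEL_EXCEPTION.
            return;
        }
        try {
            doBind(localAddress);        // transport-specific bind
            safeSetSuccess(promise);     // tolerates a promise that is already done
        } catch (Throwable t) {
            safeSetFailure(promise, t);
            closeIfClosed();             // close(voidPromise()) if the failure left the channel closed
        }
    }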
|
2013-04-22 12:40:23 +02:00
|
|
|
|
|
|
|
private void invokeLater(Runnable task) {
|
2013-10-15 15:21:42 +02:00
|
|
|
try {
|
|
|
|
// This method is used by outbound operation implementations to trigger an inbound event later.
|
|
|
|
// They do not trigger an inbound event immediately because an outbound operation might have been
|
|
|
|
// triggered by another inbound event handler method. If fired immediately, the call stack
|
|
|
|
// will look like this for example:
|
|
|
|
//
|
|
|
|
// handlerA.inboundBufferUpdated() - (1) an inbound handler method closes a connection.
|
|
|
|
// -> handlerA.ctx.close()
|
|
|
|
// -> channel.unsafe.close()
|
|
|
|
// -> handlerA.channelInactive() - (2) another inbound handler method called while in (1) yet
|
|
|
|
//
|
|
|
|
// which means the execution of two inbound handler methods of the same handler overlaps undesirably.
|
|
|
|
eventLoop().execute(task);
|
|
|
|
} catch (RejectedExecutionException e) {
|
|
|
|
logger.warn("Can't invoke task later as EventLoop rejected it", e);
|
|
|
|
}
|
2013-04-22 12:40:23 +02:00
|
|
|
}
|
2012-05-27 07:48:48 +02:00
|
|
|
}
|
2012-05-24 17:57:10 +02:00
|
|
|
|
2014-02-18 10:08:20 +01:00
|
|
|
/**
|
|
|
|
* Create a new {@link ChannelOutboundBuffer} which holds the pending messages for this {@link AbstractChannel}.
|
|
|
|
*/
|
|
|
|
protected ChannelOutboundBuffer newOutboundBuffer() {
|
|
|
|
return ChannelOutboundBuffer.newInstance(this);
|
|
|
|
}
|
|
|
|
|
2012-12-21 17:06:24 +01:00
|
|
|
/**
|
|
|
|
* Return {@code true} if the given {@link EventLoop} is compatible with this instance.
|
|
|
|
*/
|
2012-05-27 07:48:48 +02:00
|
|
|
protected abstract boolean isCompatible(EventLoop loop);
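For instance, a transport-specific subclass typically accepts only event loops of its own kind. A minimal sketch, assuming an NIO transport:

    @Override
    protected boolean isCompatible(EventLoop loop) {
        return loop instanceof io.netty.channel.nio.NioEventLoop;
    }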
|
2012-05-12 17:40:28 +02:00
|
|
|
|
2012-12-21 17:06:24 +01:00
|
|
|
/**
|
|
|
|
* Returns the {@link SocketAddress} which is bound locally.
|
|
|
|
*/
|
2012-05-27 07:48:48 +02:00
|
|
|
protected abstract SocketAddress localAddress0();
|
2012-12-21 17:06:24 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Return the {@link SocketAddress} which the {@link Channel} is connected to.
|
|
|
|
*/
|
2012-05-27 07:48:48 +02:00
|
|
|
protected abstract SocketAddress remoteAddress0();
|
2012-05-12 17:40:28 +02:00
|
|
|
|
2012-12-21 17:06:24 +01:00
|
|
|
/**
|
|
|
|
* Is called after the {@link Channel} is registered with its {@link EventLoop} as part of the register process.
|
|
|
|
*
|
2013-07-27 20:08:45 +02:00
|
|
|
* Sub-classes may override this method
|
2012-12-21 17:06:24 +01:00
|
|
|
*/
|
2013-07-27 20:08:45 +02:00
|
|
|
protected void doRegister() throws Exception {
|
|
|
|
// NOOP
|
2012-12-21 17:06:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Bind the {@link Channel} to the {@link SocketAddress}
|
|
|
|
*/
|
2012-05-27 07:48:48 +02:00
|
|
|
protected abstract void doBind(SocketAddress localAddress) throws Exception;
|
2012-12-21 17:06:24 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Disconnect this {@link Channel} from its remote peer
|
|
|
|
*/
|
2012-05-27 07:48:48 +02:00
|
|
|
protected abstract void doDisconnect() throws Exception;
|
2012-12-21 17:06:24 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Close the {@link Channel}
|
|
|
|
*/
|
2012-05-27 07:48:48 +02:00
|
|
|
protected abstract void doClose() throws Exception;
|
2012-12-21 17:06:24 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Deregister the {@link Channel} from its {@link EventLoop}.
|
|
|
|
*
|
|
|
|
* Sub-classes may override this method
|
|
|
|
*/
|
2013-07-26 18:13:56 +02:00
|
|
|
protected void doDeregister() throws Exception {
|
|
|
|
// NOOP
|
2012-12-21 17:06:24 +01:00
|
|
|
}
|
|
|
|
|
Read only when requested (read-on-demand)
This pull request introduces a new operation called read() that replaces the existing inbound traffic control method. EventLoop now performs socket reads only when the read() operation has been issued. Once the requested read() operation is actually performed, EventLoop triggers an inboundBufferSuspended event that tells the handlers that the requested read() operation has been performed and the inbound traffic has been suspended again. A handler can decide to continue reading or not.
Unlike other outbound operations, read() does not use ChannelFuture at all to avoid GC cost. If there's a good reason to create a new future per read at the GC cost, I'll change this.
This pull request consequently removes the readable property in ChannelHandlerContext, which means how the traffic control works changed significantly.
This pull request also adds a new configuration property ChannelOption.AUTO_READ whose default value is true. If true, Netty will call ctx.read() for you. If you need close control over when read() is called, you can set it to false.
Another interesting fact is that non-terminal handlers do not really need to call read() at all. Only the last inbound handler will have to call it, and that's just enough. Actually, you don't even need to call it at the last handler in most cases because of the ChannelOption.AUTO_READ mentioned above.
There's no serious backward compatibility issue. If the compiler complains your handler does not implement the read() method, add the following:
public void read(ChannelHandlerContext ctx) throws Exception {
    ctx.read();
}
Note that this pull request certainly makes bounded inbound buffer support very easy, but itself does not add the bounded inbound buffer support.
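To make the auto-read behaviour concrete, here is a minimal sketch of driving reads by hand. It assumes ChannelOption.AUTO_READ has been set to false on the bootstrap and uses the handler method names Netty 4.0 eventually shipped with (channelRead/channelReadComplete), which postdate this commit:

    import io.netty.channel.ChannelHandlerContext;
    import io.netty.channel.ChannelInboundHandlerAdapter;
    import io.netty.util.ReferenceCountUtil;

    public class ManualReadHandler extends ChannelInboundHandlerAdapter {
        @Override
        public void channelActive(ChannelHandlerContext ctx) {
            ctx.read();                          // request the first read once the channel is active
        }

        @Override
        public void channelRead(ChannelHandlerContext ctx, Object msg) {
            try {
                // ... process the message ...
            } finally {
                ReferenceCountUtil.release(msg); // terminal handler, so release the message here
            }
        }

        @Override
        public void channelReadComplete(ChannelHandlerContext ctx) {
            ctx.read();                          // ask for the next read only when ready for it
        }
    }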
2012-12-30 13:53:59 +01:00
|
|
|
/**
|
|
|
|
* Schedule a read operation.
|
|
|
|
*/
|
|
|
|
protected abstract void doBeginRead() throws Exception;
|
|
|
|
|
2012-12-21 17:06:24 +01:00
|
|
|
/**
|
2013-07-18 13:59:14 +02:00
|
|
|
* Flush the content of the given buffer to the remote peer.
|
2012-12-21 17:06:24 +01:00
|
|
|
*/
|
2013-07-18 13:59:14 +02:00
|
|
|
protected abstract void doWrite(ChannelOutboundBuffer in) throws Exception;
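A hedged sketch of the doWrite() contract, not any real transport's implementation (writeToTransport() is a hypothetical stand-in for the actual I/O call): drain ChannelOutboundBuffer.current() until it returns null, calling remove() once each message has been written so that its promise is fulfilled.

    @Override
    protected void doWrite(ChannelOutboundBuffer in) throws Exception {
        for (;;) {
            Object msg = in.current();     // null once every pending message has been written
            if (msg == null) {
                break;
            }
            writeToTransport(msg);         // hypothetical helper: push the message to the socket
            in.remove();                   // fulfil the message's promise and advance to the next one
        }
    }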
|
2013-05-28 13:40:19 +02:00
|
|
|
|
|
|
|
protected static void checkEOF(FileRegion region) throws IOException {
|
|
|
|
if (region.transfered() < region.count()) {
|
|
|
|
throw new EOFException("Expected to be able to write "
|
|
|
|
+ region.count() + " bytes, but only wrote "
|
|
|
|
+ region.transfered());
|
|
|
|
}
|
2012-06-07 07:52:33 +02:00
|
|
|
}
|
2012-05-27 07:48:48 +02:00
|
|
|
|
2013-08-26 07:52:47 +02:00
|
|
|
static final class CloseFuture extends DefaultChannelPromise {
|
2012-05-12 18:37:16 +02:00
|
|
|
|
|
|
|
CloseFuture(AbstractChannel ch) {
|
2012-12-30 17:40:24 +01:00
|
|
|
super(ch);
|
2012-05-12 18:37:16 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-02-08 07:24:55 +01:00
|
|
|
public ChannelPromise setSuccess() {
|
2012-05-12 18:37:16 +02:00
|
|
|
throw new IllegalStateException();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-02-08 07:24:55 +01:00
|
|
|
public ChannelPromise setFailure(Throwable cause) {
|
2012-12-30 17:40:24 +01:00
|
|
|
throw new IllegalStateException();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public boolean trySuccess() {
|
|
|
|
throw new IllegalStateException();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public boolean tryFailure(Throwable cause) {
|
2012-05-12 18:37:16 +02:00
|
|
|
throw new IllegalStateException();
|
|
|
|
}
|
|
|
|
|
2012-05-14 07:17:40 +02:00
|
|
|
boolean setClosed() {
|
2012-12-30 17:40:24 +01:00
|
|
|
return super.trySuccess();
|
2012-05-12 18:37:16 +02:00
|
|
|
}
|
|
|
|
}
|
2008-08-08 02:37:18 +02:00
|
|
|
}
|