/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.channel;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.ByteBufHolder;
import io.netty.util.DefaultAttributeMap;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

import java.io.EOFException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.ClosedChannelException;
import java.util.Random;
import java.util.concurrent.ConcurrentMap;

/**
 * A skeletal {@link Channel} implementation.
 */
public abstract class AbstractChannel extends DefaultAttributeMap implements Channel {

    private static final InternalLogger logger = InternalLoggerFactory.getInstance(AbstractChannel.class);

    static final ConcurrentMap<Integer, Channel> allChannels = PlatformDependent.newConcurrentHashMap();

    private static final Random random = new Random();

    /**
     * Generates a unique negative integer ID. This method generates only
     * negative integers to avoid conflicts with user-specified IDs, for which
     * only non-negative integers are allowed.
     */
    private static Integer allocateId(Channel channel) {
        int idVal = random.nextInt();
        if (idVal > 0) {
            idVal = -idVal;
        } else if (idVal == 0) {
            idVal = -1;
        }

        Integer id;
        for (;;) {
            id = Integer.valueOf(idVal);
            // Loop until a unique ID is acquired.
            // In practice, the first attempt almost always succeeds.
            if (allChannels.putIfAbsent(id, channel) == null) {
                // Successfully acquired.
                return id;
            } else {
                // Taken by another channel at almost the same moment.
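                // Probe the next smaller value; if the decrement underflows past
                // Integer.MIN_VALUE and wraps to a non-negative value, restart from -1
                // so that generated IDs always stay negative.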
                idVal --;
                if (idVal >= 0) {
                    idVal = -1;
                }
            }
        }
    }

    private final Channel parent;
    private final Integer id;
    private final Unsafe unsafe;
    private final DefaultChannelPipeline pipeline;
    private final ChannelOutboundBuffer outboundBuffer = new ChannelOutboundBuffer(this);
    private final ChannelFuture succeededFuture = new SucceededChannelFuture(this, null);
    private final VoidChannelPromise voidPromise = new VoidChannelPromise(this, true);
    private final VoidChannelPromise unsafeVoidPromise = new VoidChannelPromise(this, false);
    private final CloseFuture closeFuture = new CloseFuture(this);

    private volatile SocketAddress localAddress;
    private volatile SocketAddress remoteAddress;
    private volatile EventLoop eventLoop;
    private volatile boolean registered;

    private ClosedChannelException closedChannelException;
    private boolean inFlushNow;
    private boolean flushNowPending;

    /** Cache for the string representation of this channel */
    private boolean strValActive;
    private String strVal;

    /**
     * Creates a new instance.
     *
     * @param id
     *        the unique non-negative integer ID of this channel.
     *        Specify {@code null} to auto-generate a unique negative integer
     *        ID.
     * @param parent
     *        the parent of this channel. {@code null} if there's no parent.
     */
    protected AbstractChannel(Channel parent, Integer id) {
        if (id == null) {
            id = allocateId(this);
        } else {
            if (id.intValue() < 0) {
                throw new IllegalArgumentException("id: " + id + " (expected: >= 0)");
            }
            if (allChannels.putIfAbsent(id, this) != null) {
                throw new IllegalArgumentException("duplicate ID: " + id);
            }
        }

        this.parent = parent;
        this.id = id;
        unsafe = newUnsafe();
        pipeline = new DefaultChannelPipeline(this);
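
        // Remove this channel's ID from the global registry once the channel is closed,
        // so the allChannels map does not keep entries for dead channels.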
        closeFuture().addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) {
                allChannels.remove(id());
            }
        });
    }

    @Override
    public boolean isWritable() {
        return outboundBuffer.getWritable();
    }

    @Override
    public final Integer id() {
        return id;
    }

    @Override
    public Channel parent() {
        return parent;
    }

    @Override
    public ChannelPipeline pipeline() {
        return pipeline;
    }

    @Override
    public ByteBufAllocator alloc() {
        return config().getAllocator();
    }

    @Override
    public EventLoop eventLoop() {
        EventLoop eventLoop = this.eventLoop;
        if (eventLoop == null) {
            throw new IllegalStateException("channel not registered to an event loop");
        }
        return eventLoop;
    }
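
    // The local and remote addresses are fetched lazily from the transport via Unsafe
    // and cached; invalidateLocalAddress() / invalidateRemoteAddress() clear the cache.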
    @Override
    public SocketAddress localAddress() {
        SocketAddress localAddress = this.localAddress;
        if (localAddress == null) {
            try {
                this.localAddress = localAddress = unsafe().localAddress();
            } catch (Throwable t) {
                // Sometimes fails on a closed socket in Windows.
                return null;
            }
        }
        return localAddress;
    }

    protected void invalidateLocalAddress() {
        localAddress = null;
    }

    @Override
    public SocketAddress remoteAddress() {
        SocketAddress remoteAddress = this.remoteAddress;
        if (remoteAddress == null) {
            try {
                this.remoteAddress = remoteAddress = unsafe().remoteAddress();
            } catch (Throwable t) {
                // Sometimes fails on a closed socket in Windows.
                return null;
            }
        }
        return remoteAddress;
    }

    /**
     * Resets the stored {@code remoteAddress} so that the next {@link #remoteAddress()}
     * call queries the transport again.
     */
    protected void invalidateRemoteAddress() {
        remoteAddress = null;
    }

    @Override
    public boolean isRegistered() {
        return registered;
    }
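
    // The following outbound operations simply delegate to the pipeline. Once a request
    // has traversed the outbound handlers it is carried out by this channel's Unsafe.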

    @Override
    public ChannelFuture bind(SocketAddress localAddress) {
        return pipeline.bind(localAddress);
    }

    @Override
    public ChannelFuture connect(SocketAddress remoteAddress) {
        return pipeline.connect(remoteAddress);
    }

    @Override
    public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) {
        return pipeline.connect(remoteAddress, localAddress);
    }

    @Override
    public ChannelFuture disconnect() {
        return pipeline.disconnect();
    }

    @Override
    public ChannelFuture close() {
        return pipeline.close();
    }

    @Override
    public ChannelFuture deregister() {
        return pipeline.deregister();
    }

    @Override
    public ChannelFuture write(Object msg) {
        return pipeline.write(msg);
    }

    @Override
    public ChannelFuture write(MessageList<?> msgs) {
        return pipeline.write(msgs);
    }

    @Override
    public ChannelFuture bind(SocketAddress localAddress, ChannelPromise promise) {
        return pipeline.bind(localAddress, promise);
    }

    @Override
    public ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) {
        return pipeline.connect(remoteAddress, promise);
    }

    @Override
    public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) {
        return pipeline.connect(remoteAddress, localAddress, promise);
    }

    @Override
    public ChannelFuture disconnect(ChannelPromise promise) {
        return pipeline.disconnect(promise);
    }

    @Override
    public ChannelFuture close(ChannelPromise promise) {
        return pipeline.close(promise);
    }

    @Override
    public ChannelFuture deregister(ChannelPromise promise) {
        return pipeline.deregister(promise);
    }

    @Override
    public void read() {
        pipeline.read();
    }

    @Override
    public ChannelFuture write(Object msg, ChannelPromise promise) {
        return pipeline.write(msg, promise);
    }

    @Override
    public ChannelFuture write(MessageList<?> msgs, ChannelPromise promise) {
        return pipeline.write(msgs, promise);
    }

    @Override
    public ChannelPromise newPromise() {
        return new DefaultChannelPromise(this);
    }

    @Override
    public ChannelProgressivePromise newProgressivePromise() {
        return new DefaultChannelProgressivePromise(this);
    }

    @Override
    public ChannelFuture newSucceededFuture() {
        return succeededFuture;
    }

    @Override
    public ChannelFuture newFailedFuture(Throwable cause) {
        return new FailedChannelFuture(this, null, cause);
    }

    @Override
    public ChannelFuture closeFuture() {
        return closeFuture;
    }

    @Override
    public Unsafe unsafe() {
        return unsafe;
    }

    /**
     * Creates a new {@link AbstractUnsafe} instance which will be used for the lifetime of the {@link Channel}.
     */
    protected abstract AbstractUnsafe newUnsafe();

    /**
     * Returns the ID of this channel.
     */
    @Override
    public final int hashCode() {
        return id;
    }

    /**
     * Returns {@code true} if and only if the specified object is identical
     * with this channel (i.e. {@code this == o}).
     */
    @Override
    public final boolean equals(Object o) {
        return this == o;
    }

    /**
     * Compares the {@linkplain #id() ID} of the two channels.
     */
    @Override
    public final int compareTo(Channel o) {
        return id().compareTo(o.id());
    }

    /**
     * Returns the {@link String} representation of this channel. The returned
     * string contains the {@linkplain #id() ID}, {@linkplain #localAddress() local address},
     * and {@linkplain #remoteAddress() remote address} of this channel for
     * easier identification.
     */
    @Override
    public String toString() {
        boolean active = isActive();
        if (strValActive == active && strVal != null) {
            return strVal;
        }

        SocketAddress remoteAddr = remoteAddress();
        SocketAddress localAddr = localAddress();
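        // Format as "src => dst" (or "src :> dst" once the channel is inactive). For a channel
        // with a parent (i.e. an accepted child channel) the remote peer is listed first.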
        if (remoteAddr != null) {
            SocketAddress srcAddr;
            SocketAddress dstAddr;
            if (parent == null) {
                srcAddr = localAddr;
                dstAddr = remoteAddr;
            } else {
                srcAddr = remoteAddr;
                dstAddr = localAddr;
            }
            strVal = String.format("[id: 0x%08x, %s %s %s]", id, srcAddr, active? "=>" : ":>", dstAddr);
        } else if (localAddr != null) {
            strVal = String.format("[id: 0x%08x, %s]", id, localAddr);
        } else {
            strVal = String.format("[id: 0x%08x]", id);
        }

        strValActive = active;
        return strVal;
    }

    @Override
    public final ChannelPromise voidPromise() {
        return voidPromise;
    }

    /**
     * {@link Unsafe} implementation which sub-classes must extend and use.
     */
    protected abstract class AbstractUnsafe implements Unsafe {
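
        // Task used to run flush() later on the event loop; it clears the flushNowPending
        // flag before flushing.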
        private final Runnable flushLaterTask = new Runnable() {
            @Override
            public void run() {
                flushNowPending = false;
                flush();
            }
        };

        @Override
        public final SocketAddress localAddress() {
            return localAddress0();
        }

        @Override
        public final SocketAddress remoteAddress() {
            return remoteAddress0();
        }

        @Override
        public final void register(EventLoop eventLoop, final ChannelPromise promise) {
            if (eventLoop == null) {
                throw new NullPointerException("eventLoop");
            }
            if (isRegistered()) {
                promise.setFailure(new IllegalStateException("registered to an event loop already"));
                return;
            }
            if (!isCompatible(eventLoop)) {
                promise.setFailure(
                        new IllegalStateException("incompatible event loop type: " + eventLoop.getClass().getName()));
                return;
            }

            AbstractChannel.this.eventLoop = eventLoop;
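
            // Registration must run on the channel's event loop. If we are already on that
            // loop, register immediately; otherwise submit register0() as a task to it.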
            if (eventLoop.inEventLoop()) {
                register0(promise);
            } else {
                try {
                    eventLoop.execute(new Runnable() {
                        @Override
                        public void run() {
                            register0(promise);
                        }
                    });
                } catch (Throwable t) {
                    logger.warn(
                            "Force-closing a channel whose registration task was unaccepted by an event loop: {}",
                            AbstractChannel.this, t);
                    closeForcibly();
                    promise.setFailure(t);
                }
            }
        }

        private void register0(ChannelPromise promise) {
            try {
                // Check if the channel is still open, as it could have been closed in the
                // meantime when the register call was made outside of the event loop.
                if (!ensureOpen(promise)) {
                    return;
                }
                Runnable postRegisterTask = doRegister();
                registered = true;
                promise.setSuccess();
                pipeline.fireChannelRegistered();
                if (postRegisterTask != null) {
                    postRegisterTask.run();
                }
                if (isActive()) {
                    pipeline.fireChannelActive();
                }
            } catch (Throwable t) {
                // Close the channel directly to avoid FD leak.
                closeForcibly();
                if (!promise.tryFailure(t)) {
                    logger.warn(
                            "Tried to fail the registration promise, but it is complete already. " +
                                    "Swallowing the cause of the registration failure:", t);
                }
                closeFuture.setClosed();
            }
        }

        @Override
        public final void bind(final SocketAddress localAddress, final ChannelPromise promise) {
            if (!ensureOpen(promise)) {
                return;
            }

            try {
                boolean wasActive = isActive();

                // See: https://github.com/netty/netty/issues/576
                if (!PlatformDependent.isWindows() && !PlatformDependent.isRoot() &&
                    Boolean.TRUE.equals(config().getOption(ChannelOption.SO_BROADCAST)) &&
                    localAddress instanceof InetSocketAddress &&
                    !((InetSocketAddress) localAddress).getAddress().isAnyLocalAddress()) {
                    // Warn the user that a non-root user can't receive a broadcast packet
                    // on *nix if the socket is bound to a non-wildcard address.
                    logger.warn(
                            "A non-root user can't receive a broadcast packet if the socket " +
                            "is not bound to a wildcard address; binding to a non-wildcard " +
                            "address (" + localAddress + ") anyway as requested.");
                }

                doBind(localAddress);
                promise.setSuccess();
                if (!wasActive && isActive()) {
                    pipeline.fireChannelActive();
                }
            } catch (Throwable t) {
                promise.setFailure(t);
                closeIfClosed();
            }
        }

        @Override
        public final void disconnect(final ChannelPromise promise) {
            try {
                boolean wasActive = isActive();
                doDisconnect();
                promise.setSuccess();
                if (wasActive && !isActive()) {
                    invokeLater(new Runnable() {
                        @Override
                        public void run() {
                            pipeline.fireChannelInactive();
                        }
                    });
                }
            } catch (Throwable t) {
                promise.setFailure(t);
                closeIfClosed();
            }
        }

        @Override
        public final void close(final ChannelPromise promise) {
            boolean wasActive = isActive();
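            // setClosed() returns true only for the first caller, so the shutdown sequence
            // below runs at most once; later calls just complete their promise immediately.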
            if (closeFuture.setClosed()) {
                try {
                    doClose();
                    promise.setSuccess();
                } catch (Throwable t) {
                    promise.setFailure(t);
                }

                if (closedChannelException == null) {
                    closedChannelException = new ClosedChannelException();
                }

                // fail all queued messages
                if (outboundBuffer.next()) {
                    outboundBuffer.fail(closedChannelException);
                }

                if (wasActive && !isActive()) {
                    invokeLater(new Runnable() {
                        @Override
                        public void run() {
                            pipeline.fireChannelInactive();
                        }
                    });
                }

                deregister(voidPromise());
            } else {
                // Closed already.
                promise.setSuccess();
            }
        }

        @Override
        public final void closeForcibly() {
            try {
                doClose();
            } catch (Exception e) {
                logger.warn("Failed to close a channel.", e);
            }
        }

        @Override
        public final void deregister(final ChannelPromise promise) {
            if (!registered) {
                promise.setSuccess();
                return;
            }
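
            // Even if doDeregister() throws, the failure is only logged and the promise is
            // still completed; channelUnregistered is fired only if we were still registered.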
            Runnable postTask = null;
            try {
                postTask = doDeregister();
            } catch (Throwable t) {
                logger.warn("Unexpected exception occurred while deregistering a channel.", t);
            } finally {
                if (registered) {
                    registered = false;
                    promise.setSuccess();
                    invokeLater(new Runnable() {
                        @Override
                        public void run() {
                            pipeline.fireChannelUnregistered();
                        }
                    });
                } else {
                    // Some transports like local and AIO do not allow the deregistration of
                    // an open channel. Their doDeregister() calls close(). Consequently,
                    // close() calls deregister() again - no need to fire channelUnregistered.
                    promise.setSuccess();
                }

                if (postTask != null) {
                    postTask.run();
                }
            }
        }

        @Override
        public void beginRead() {
            if (!isActive()) {
                return;
            }
|
|
|
|
|
2013-05-17 12:20:46 +02:00
|
|
|
try {
|
|
|
|
doBeginRead();
|
|
|
|
} catch (final Exception e) {
|
|
|
|
invokeLater(new Runnable() {
|
|
|
|
@Override
|
|
|
|
public void run() {
|
|
|
|
pipeline.fireExceptionCaught(e);
|
|
|
|
}
|
|
|
|
});
|
2013-05-15 15:10:41 +02:00
|
|
|
close(voidPromise());
|
            }
        }

        @Override
        public void write(MessageList<?> msgs, ChannelPromise promise) {
            outboundBuffer.add(msgs, promise);
            flush();
        }

        private void flush() {
            if (!inFlushNow) { // Avoid re-entrance
                try {
                    // Flush immediately only when there's no pending flush.
                    // If there's a pending flush operation, event loop will call flushNow() later,
                    // and thus there's no need to call it now.
                    if (!isFlushPending()) {
                        flushNow();
                    }
                } catch (Throwable t) {
                    outboundBuffer.fail(t);
                    close(voidPromise());
                }
            } else {
                if (!flushNowPending) {
                    flushNowPending = true;
                    eventLoop().execute(flushLaterTask);
                }
            }
        }

        @Override
        public final void flushNow() {
            if (inFlushNow) {
                return;
            }

            inFlushNow = true;
            final ChannelOutboundBuffer outboundBuffer = AbstractChannel.this.outboundBuffer;
            try {
                for (;;) {
                    ChannelPromise promise = outboundBuffer.currentPromise;
                    if (promise == null) {
                        if (!outboundBuffer.next()) {
                            break;
                        }
                        promise = outboundBuffer.currentPromise;
                    }

                    MessageList<Object> messages = outboundBuffer.currentMessages;
                    int messageIndex = outboundBuffer.currentMessageIndex;
                    int messageCount = messages.size();
                    if (messageCount == 0) {
                        messages.recycle();
                        promise.trySuccess();
                        if (!outboundBuffer.next()) {
                            break;
                        } else {
                            continue;
                        }
                    }

                    int writtenMessages = doWrite(messages, messageIndex);
                    outboundBuffer.currentMessageIndex = messageIndex += writtenMessages;
                    if (messageIndex >= messageCount) {
                        messages.recycle();
                        promise.trySuccess();
                        if (!outboundBuffer.next()) {
                            break;
                        }
                    } else {
                        // Could not flush the current write request completely. Try again later.
                        break;
                    }
                }
            } catch (Throwable t) {
                outboundBuffer.fail(t);
                if (t instanceof IOException) {
                    close(voidPromise());
                }
            } finally {
                inFlushNow = false;
            }
        }

        @Override
        public ChannelPromise voidPromise() {
            return unsafeVoidPromise;
        }

        protected final boolean ensureOpen(ChannelPromise promise) {
            if (isOpen()) {
                return true;
            }

            Exception e = new ClosedChannelException();
            promise.setFailure(e);
            return false;
        }

        protected final void closeIfClosed() {
            if (isOpen()) {
                return;
            }
            close(voidPromise());
        }

        private void invokeLater(Runnable task) {
            // This method is used by outbound operation implementations to trigger an inbound event later.
            // They do not trigger an inbound event immediately because an outbound operation might have been
            // triggered by another inbound event handler method. If fired immediately, the call stack
            // will look like this for example:
            //
            //   handlerA.inboundBufferUpdated() - (1) an inbound handler method closes a connection.
            //   -> handlerA.ctx.close()
            //      -> channel.unsafe.close()
            //         -> handlerA.channelInactive() - (2) another inbound handler method called while in (1) yet
            //
            // which means the execution of two inbound handler methods of the same handler overlap undesirably.
            eventLoop().execute(task);
        }
    }

    /**
     * Return {@code true} if the given {@link EventLoop} is compatible with this instance.
     */
    protected abstract boolean isCompatible(EventLoop loop);

    /**
     * Returns the {@link SocketAddress} which is bound locally.
     */
    protected abstract SocketAddress localAddress0();

    /**
     * Return the {@link SocketAddress} which the {@link Channel} is connected to.
     */
    protected abstract SocketAddress remoteAddress0();

    /**
     * Is called after the {@link Channel} is registered with its {@link EventLoop} as part of the register process.
     * You can return a {@link Runnable} which will be run as a post-task of the registration process.
     *
     * Sub-classes may override this method; the default implementation just returns {@code null}.
     */
    protected Runnable doRegister() throws Exception {
        return null;
    }

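For illustration only, a hypothetical subclass override showing the post-task contract described above; the transport-specific work and the post-task body are placeholders:

    @Override
    protected Runnable doRegister() throws Exception {
        // ... transport-specific registration work would go here ...
        return new Runnable() {
            @Override
            public void run() {
                // Runs once as a post-task of the registration process;
                // return null from doRegister() instead if nothing needs to run.
            }
        };
    }
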
    /**
     * Bind the {@link Channel} to the {@link SocketAddress}
     */
    protected abstract void doBind(SocketAddress localAddress) throws Exception;

    /**
     * Disconnect this {@link Channel} from its remote peer
     */
    protected abstract void doDisconnect() throws Exception;

    /**
     * Will be called before the actual close operation is performed. Sub-classes may override this as the default
     * is to do nothing.
     */
    protected void doPreClose() throws Exception {
        // NOOP by default
    }

    /**
     * Close the {@link Channel}
     */
    protected abstract void doClose() throws Exception;

    /**
     * Deregister the {@link Channel} from its {@link EventLoop}.
     * You can return a {@link Runnable} which will be run as a post-task of the deregistration process.
     *
     * Sub-classes may override this method; the default implementation just returns {@code null}.
     */
    protected Runnable doDeregister() throws Exception {
        return null;
    }

    /**
     * Schedule a read operation.
     */
    protected abstract void doBeginRead() throws Exception;

    /**
     * Flush the content of the given {@link MessageList} to the remote peer.
     *
     * Sub-classes may override this as this implementation will just throw an {@link UnsupportedOperationException}.
     *
     * @return the number of written messages
     */
    protected abstract int doWrite(MessageList<Object> msgs, int index) throws Exception;

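As a rough illustration of the contract flushNow() relies on (start at the given index, return how many messages were fully written), a hypothetical subclass override might look like this. writeOneMessage() is a made-up transport-specific helper that returns false when the peer cannot accept more data, and MessageList is assumed to expose get(int) alongside the size() used elsewhere in this class:

    @Override
    protected int doWrite(MessageList<Object> msgs, int index) throws Exception {
        int writtenMessages = 0;
        for (int i = index; i < msgs.size(); i ++) {
            if (!writeOneMessage(msgs.get(i))) {
                // Partial write: flushNow() advances currentMessageIndex by the returned
                // count and will retry the remaining messages later.
                break;
            }
            writtenMessages ++;
        }
        return writtenMessages;
    }
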
    protected static void checkEOF(FileRegion region) throws IOException {
        if (region.transfered() < region.count()) {
            throw new EOFException("Expected to be able to write "
                    + region.count() + " bytes, but only wrote "
                    + region.transfered());
        }
    }

    /**
     * Calculate the number of bytes a message takes up in memory. Sub-classes may override this if they use
     * messages other than {@link ByteBuf} or {@link ByteBufHolder}. If the size cannot be calculated, 0 should be
     * returned.
     */
    protected int calculateMessageSize(Object message) {
        if (message instanceof ByteBuf) {
            return ((ByteBuf) message).readableBytes();
        }
        if (message instanceof ByteBufHolder) {
            return ((ByteBufHolder) message).content().readableBytes();
        }
        return 0;
    }

    /**
     * Return {@code true} if a flush to the {@link Channel} is currently pending.
     */
    protected abstract boolean isFlushPending();

    final class CloseFuture extends DefaultChannelPromise {

        CloseFuture(AbstractChannel ch) {
            super(ch);
        }

        @Override
        public ChannelPromise setSuccess() {
            throw new IllegalStateException();
        }

        @Override
        public ChannelPromise setFailure(Throwable cause) {
            throw new IllegalStateException();
        }

        @Override
        public boolean trySuccess() {
            throw new IllegalStateException();
        }

        @Override
        public boolean tryFailure(Throwable cause) {
            throw new IllegalStateException();
        }

        boolean setClosed() {
            try {
                doPreClose();
            } catch (Exception e) {
                logger.warn("doPreClose() raised an exception.", e);
            }
            return super.trySuccess();
        }
    }
}