2011-12-11 09:21:29 +01:00
|
|
|
/*
|
2012-06-04 22:31:44 +02:00
|
|
|
* Copyright 2012 The Netty Project
|
2011-12-11 09:21:29 +01:00
|
|
|
*
|
|
|
|
* The Netty Project licenses this file to you under the Apache License,
|
|
|
|
* version 2.0 (the "License"); you may not use this file except in compliance
|
|
|
|
* with the License. You may obtain a copy of the License at:
|
|
|
|
*
|
2012-06-04 22:31:44 +02:00
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
2011-12-11 09:21:29 +01:00
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
|
|
|
*/
|
|
|
|
package io.netty.channel;
|
|
|
|
|
2013-01-07 08:44:16 +01:00
|
|
|
|
2013-05-17 03:54:20 +02:00
|
|
|
import io.netty.bootstrap.Bootstrap;
|
|
|
|
import io.netty.bootstrap.ServerBootstrap;
|
2014-03-31 07:29:31 +02:00
|
|
|
import io.netty.buffer.ByteBuf;
|
|
|
|
import io.netty.buffer.Unpooled;
|
2012-06-04 21:14:42 +02:00
|
|
|
import io.netty.channel.ChannelHandler.Sharable;
|
2019-04-01 08:37:09 +02:00
|
|
|
import io.netty.channel.ChannelHandlerMask.Skip;
|
2015-06-19 21:33:54 +02:00
|
|
|
import io.netty.channel.embedded.EmbeddedChannel;
|
2013-05-17 03:54:20 +02:00
|
|
|
import io.netty.channel.local.LocalAddress;
|
2012-05-30 12:58:14 +02:00
|
|
|
import io.netty.channel.local.LocalChannel;
|
2019-01-23 08:32:05 +01:00
|
|
|
import io.netty.channel.local.LocalHandler;
|
2013-05-17 03:54:20 +02:00
|
|
|
import io.netty.channel.local.LocalServerChannel;
|
2019-01-23 08:32:05 +01:00
|
|
|
import io.netty.channel.nio.NioHandler;
|
2016-07-06 12:03:45 +02:00
|
|
|
import io.netty.channel.socket.nio.NioSocketChannel;
|
2013-07-18 13:59:14 +02:00
|
|
|
import io.netty.util.AbstractReferenceCounted;
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
import io.netty.util.ReferenceCountUtil;
|
2013-06-13 06:14:21 +02:00
|
|
|
import io.netty.util.ReferenceCounted;
|
2015-11-04 13:32:05 +01:00
|
|
|
import io.netty.util.concurrent.AbstractEventExecutor;
|
2016-02-08 19:45:05 +01:00
|
|
|
import io.netty.util.concurrent.EventExecutor;
|
2015-11-04 13:32:05 +01:00
|
|
|
import io.netty.util.concurrent.Future;
|
2016-04-09 21:38:45 +02:00
|
|
|
import io.netty.util.concurrent.ImmediateEventExecutor;
|
2016-02-08 19:45:05 +01:00
|
|
|
import io.netty.util.concurrent.Promise;
|
2019-01-23 08:32:05 +01:00
|
|
|
import io.netty.util.concurrent.ScheduledFuture;
|
2013-05-17 03:54:20 +02:00
|
|
|
import org.junit.After;
|
|
|
|
import org.junit.AfterClass;
|
2011-12-11 09:21:29 +01:00
|
|
|
import org.junit.Test;
|
|
|
|
|
2019-01-22 08:58:58 +01:00
|
|
|
import java.net.SocketAddress;
|
2013-07-09 16:09:28 +02:00
|
|
|
import java.util.ArrayDeque;
|
2013-03-14 10:43:12 +01:00
|
|
|
import java.util.ArrayList;
|
|
|
|
import java.util.Collections;
|
|
|
|
import java.util.List;
|
2018-01-31 13:03:49 +01:00
|
|
|
import java.util.NoSuchElementException;
|
2013-07-09 16:09:28 +02:00
|
|
|
import java.util.Queue;
|
2019-01-23 08:32:05 +01:00
|
|
|
import java.util.concurrent.Callable;
|
2013-01-07 08:44:16 +01:00
|
|
|
import java.util.concurrent.CountDownLatch;
|
2015-11-04 13:32:05 +01:00
|
|
|
import java.util.concurrent.ExecutorService;
|
|
|
|
import java.util.concurrent.Executors;
|
2013-01-07 08:44:16 +01:00
|
|
|
import java.util.concurrent.TimeUnit;
|
2016-02-08 19:45:05 +01:00
|
|
|
import java.util.concurrent.atomic.AtomicBoolean;
|
2013-05-17 03:54:20 +02:00
|
|
|
import java.util.concurrent.atomic.AtomicReference;
|
2013-01-07 08:44:16 +01:00
|
|
|
|
2018-01-22 20:09:17 +01:00
|
|
|
import static org.junit.Assert.assertEquals;
|
|
|
|
import static org.junit.Assert.assertFalse;
|
|
|
|
import static org.junit.Assert.assertNotNull;
|
|
|
|
import static org.junit.Assert.assertNull;
|
|
|
|
import static org.junit.Assert.assertSame;
|
|
|
|
import static org.junit.Assert.assertTrue;
|
|
|
|
import static org.junit.Assert.fail;
|
2012-11-16 06:30:34 +01:00
|
|
|
|
2011-12-11 09:21:29 +01:00
|
|
|
public class DefaultChannelPipelineTest {
|
2013-05-17 03:54:20 +02:00
|
|
|
|
2019-01-23 08:32:05 +01:00
|
|
|
private static final EventLoopGroup group = new MultithreadEventLoopGroup(1, LocalHandler.newFactory());
|
2013-05-17 03:54:20 +02:00
|
|
|
|
|
|
|
private Channel self;
|
|
|
|
private Channel peer;
|
|
|
|
|
|
|
|
@AfterClass
public static void afterClass() throws Exception {
    // Release the shared event loop group once the whole test class is done;
    // sync() blocks until the graceful shutdown has actually completed.
    group.shutdownGracefully().sync();
}
|
|
|
|
|
|
|
|
/**
 * Boots a local server/client pair and installs the given handlers on the
 * client channel's pipeline. On return, {@code self} is the connected client
 * channel and {@code peer} is the server-side child channel.
 */
private void setUp(final ChannelHandler... handlers) throws Exception {
    // Filled in by the server's child handler as soon as the peer channel
    // is registered, so this method can hand it back via the 'peer' field.
    final AtomicReference<Channel> peerRef = new AtomicReference<>();
    ServerBootstrap sb = new ServerBootstrap();
    sb.group(group).channel(LocalServerChannel.class);
    sb.childHandler(new ChannelHandler() {
        @Override
        public void channelRegistered(ChannelHandlerContext ctx) throws Exception {
            peerRef.set(ctx.channel());
        }

        @Override
        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
            // The server side discards (and releases) everything it reads.
            ReferenceCountUtil.release(msg);
        }
    });

    ChannelFuture bindFuture = sb.bind(LocalAddress.ANY).sync();

    Bootstrap b = new Bootstrap();
    b.group(group).channel(LocalChannel.class);
    b.handler(new ChannelInitializer<LocalChannel>() {
        @Override
        protected void initChannel(LocalChannel ch) throws Exception {
            ch.pipeline().addLast(handlers);
        }
    });

    self = b.connect(bindFuture.channel().localAddress()).sync().channel();
    peer = peerRef.get();

    // The acceptor (server) channel is no longer needed once the
    // client/peer pair is connected.
    bindFuture.channel().close().sync();
}
|
|
|
|
|
|
|
|
@After
|
|
|
|
public void tearDown() throws Exception {
|
|
|
|
if (peer != null) {
|
|
|
|
peer.close();
|
|
|
|
peer = null;
|
|
|
|
}
|
|
|
|
if (self != null) {
|
|
|
|
self = null;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-07 08:44:16 +01:00
|
|
|
@Test
public void testFreeCalled() throws Exception {
    // Counted down exactly once, when the holder's refcount drops to zero.
    final CountDownLatch free = new CountDownLatch(1);

    final ReferenceCounted holder = new AbstractReferenceCounted() {
        @Override
        protected void deallocate() {
            free.countDown();
        }

        @Override
        public ReferenceCounted touch(Object hint) {
            return this;
        }
    };

    StringInboundHandler handler = new StringInboundHandler();
    setUp(handler);

    // Send the ref-counted message from the peer; since no handler consumes
    // it as a String, it must eventually be released by the pipeline.
    peer.writeAndFlush(holder).sync();

    assertTrue(free.await(10, TimeUnit.SECONDS));
    assertTrue(handler.called);
}
|
|
|
|
|
2019-03-13 09:46:10 +01:00
|
|
|
/**
 * Inbound handler that records that it was invoked and forwards any
 * non-String message further down the pipeline (Strings are swallowed).
 */
private static final class StringInboundHandler implements ChannelInboundHandler {
    // Set to true once channelRead() has been invoked at least once.
    boolean called;

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        called = true;
        if (!(msg instanceof String)) {
            // Not a String: pass it on so the tail of the pipeline releases it.
            ctx.fireChannelRead(msg);
        }
    }
}
|
|
|
|
|
2019-01-14 20:11:13 +01:00
|
|
|
private static LocalChannel newLocalChannel() {
|
|
|
|
return new LocalChannel(group.next());
|
|
|
|
}
|
|
|
|
|
2013-01-07 08:44:16 +01:00
|
|
|
@Test
|
|
|
|
public void testRemoveChannelHandler() {
|
2019-01-14 20:11:13 +01:00
|
|
|
ChannelPipeline pipeline = newLocalChannel().pipeline();
|
2013-01-07 08:44:16 +01:00
|
|
|
|
|
|
|
ChannelHandler handler1 = newHandler();
|
|
|
|
ChannelHandler handler2 = newHandler();
|
|
|
|
ChannelHandler handler3 = newHandler();
|
|
|
|
|
|
|
|
pipeline.addLast("handler1", handler1);
|
|
|
|
pipeline.addLast("handler2", handler2);
|
|
|
|
pipeline.addLast("handler3", handler3);
|
|
|
|
assertSame(pipeline.get("handler1"), handler1);
|
|
|
|
assertSame(pipeline.get("handler2"), handler2);
|
|
|
|
assertSame(pipeline.get("handler3"), handler3);
|
|
|
|
|
|
|
|
pipeline.remove(handler1);
|
2014-03-03 01:21:50 +01:00
|
|
|
assertNull(pipeline.get("handler1"));
|
2013-01-07 08:44:16 +01:00
|
|
|
pipeline.remove(handler2);
|
2014-03-03 01:21:50 +01:00
|
|
|
assertNull(pipeline.get("handler2"));
|
2013-01-07 08:44:16 +01:00
|
|
|
pipeline.remove(handler3);
|
2014-03-03 01:21:50 +01:00
|
|
|
assertNull(pipeline.get("handler3"));
|
2013-01-07 08:44:16 +01:00
|
|
|
}
|
|
|
|
|
2018-01-31 13:03:49 +01:00
|
|
|
@Test
|
|
|
|
public void testRemoveIfExists() {
|
2019-01-14 20:11:13 +01:00
|
|
|
DefaultChannelPipeline pipeline = new DefaultChannelPipeline(newLocalChannel());
|
2018-01-31 13:03:49 +01:00
|
|
|
|
|
|
|
ChannelHandler handler1 = newHandler();
|
|
|
|
ChannelHandler handler2 = newHandler();
|
|
|
|
ChannelHandler handler3 = newHandler();
|
|
|
|
|
|
|
|
pipeline.addLast("handler1", handler1);
|
|
|
|
pipeline.addLast("handler2", handler2);
|
|
|
|
pipeline.addLast("handler3", handler3);
|
|
|
|
|
|
|
|
assertNotNull(pipeline.removeIfExists(handler1));
|
|
|
|
assertNull(pipeline.get("handler1"));
|
|
|
|
|
|
|
|
assertNotNull(pipeline.removeIfExists("handler2"));
|
|
|
|
assertNull(pipeline.get("handler2"));
|
|
|
|
|
|
|
|
assertNotNull(pipeline.removeIfExists(TestHandler.class));
|
|
|
|
assertNull(pipeline.get("handler3"));
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testRemoveIfExistsDoesNotThrowException() {
|
2019-01-14 20:11:13 +01:00
|
|
|
DefaultChannelPipeline pipeline = new DefaultChannelPipeline(newLocalChannel());
|
2018-01-31 13:03:49 +01:00
|
|
|
|
|
|
|
ChannelHandler handler1 = newHandler();
|
|
|
|
ChannelHandler handler2 = newHandler();
|
|
|
|
pipeline.addLast("handler1", handler1);
|
|
|
|
|
|
|
|
assertNull(pipeline.removeIfExists("handlerXXX"));
|
|
|
|
assertNull(pipeline.removeIfExists(handler2));
|
2019-03-13 09:46:10 +01:00
|
|
|
|
|
|
|
class NonExistingHandler implements ChannelHandler { }
|
|
|
|
|
|
|
|
assertNull(pipeline.removeIfExists(NonExistingHandler.class));
|
2018-01-31 13:03:49 +01:00
|
|
|
assertNotNull(pipeline.get("handler1"));
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test(expected = NoSuchElementException.class)
|
|
|
|
public void testRemoveThrowNoSuchElementException() {
|
2019-01-14 20:11:13 +01:00
|
|
|
DefaultChannelPipeline pipeline = new DefaultChannelPipeline(newLocalChannel());
|
2018-01-31 13:03:49 +01:00
|
|
|
|
|
|
|
ChannelHandler handler1 = newHandler();
|
|
|
|
pipeline.addLast("handler1", handler1);
|
|
|
|
|
|
|
|
pipeline.remove("handlerXXX");
|
|
|
|
}
|
|
|
|
|
2011-12-11 09:21:29 +01:00
|
|
|
@Test
|
|
|
|
public void testReplaceChannelHandler() {
|
2019-01-14 20:11:13 +01:00
|
|
|
ChannelPipeline pipeline = newLocalChannel().pipeline();
|
2012-05-30 12:58:14 +02:00
|
|
|
|
|
|
|
ChannelHandler handler1 = newHandler();
|
2011-12-11 09:21:29 +01:00
|
|
|
pipeline.addLast("handler1", handler1);
|
|
|
|
pipeline.addLast("handler2", handler1);
|
|
|
|
pipeline.addLast("handler3", handler1);
|
2012-11-12 04:45:06 +01:00
|
|
|
assertSame(pipeline.get("handler1"), handler1);
|
|
|
|
assertSame(pipeline.get("handler2"), handler1);
|
|
|
|
assertSame(pipeline.get("handler3"), handler1);
|
2012-05-30 12:58:14 +02:00
|
|
|
|
|
|
|
ChannelHandler newHandler1 = newHandler();
|
2011-12-11 09:21:29 +01:00
|
|
|
pipeline.replace("handler1", "handler1", newHandler1);
|
2012-11-12 04:45:06 +01:00
|
|
|
assertSame(pipeline.get("handler1"), newHandler1);
|
2012-05-30 12:58:14 +02:00
|
|
|
|
|
|
|
ChannelHandler newHandler3 = newHandler();
|
2011-12-11 09:21:29 +01:00
|
|
|
pipeline.replace("handler3", "handler3", newHandler3);
|
2012-11-12 04:45:06 +01:00
|
|
|
assertSame(pipeline.get("handler3"), newHandler3);
|
2012-05-30 12:58:14 +02:00
|
|
|
|
|
|
|
ChannelHandler newHandler2 = newHandler();
|
2011-12-11 09:21:29 +01:00
|
|
|
pipeline.replace("handler2", "handler2", newHandler2);
|
2012-11-12 04:45:06 +01:00
|
|
|
assertSame(pipeline.get("handler2"), newHandler2);
|
2011-12-11 09:21:29 +01:00
|
|
|
}
|
2012-05-30 12:58:14 +02:00
|
|
|
|
2012-11-16 06:30:34 +01:00
|
|
|
@Test
|
|
|
|
public void testChannelHandlerContextNavigation() {
|
2019-01-14 20:11:13 +01:00
|
|
|
ChannelPipeline pipeline = newLocalChannel().pipeline();
|
2012-11-16 06:30:34 +01:00
|
|
|
|
|
|
|
final int HANDLER_ARRAY_LEN = 5;
|
|
|
|
ChannelHandler[] firstHandlers = newHandlers(HANDLER_ARRAY_LEN);
|
|
|
|
ChannelHandler[] lastHandlers = newHandlers(HANDLER_ARRAY_LEN);
|
|
|
|
|
|
|
|
pipeline.addFirst(firstHandlers);
|
|
|
|
pipeline.addLast(lastHandlers);
|
|
|
|
|
|
|
|
verifyContextNumber(pipeline, HANDLER_ARRAY_LEN * 2);
|
|
|
|
}
|
|
|
|
|
2013-08-23 17:17:28 +02:00
|
|
|
@Test
public void testFireChannelRegistered() throws Exception {
    // Released when the handler installed by the initializer observes
    // channelRegistered on the freshly registered channel.
    final CountDownLatch latch = new CountDownLatch(1);
    ChannelPipeline pipeline = newLocalChannel().pipeline();
    pipeline.addLast(new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel ch) throws Exception {
            ch.pipeline().addLast(new ChannelHandler() {
                @Override
                public void channelRegistered(ChannelHandlerContext ctx) throws Exception {
                    latch.countDown();
                }
            });
        }
    });
    // Registering the channel must run the initializer and then fire
    // channelRegistered through the handler it added.
    pipeline.channel().register();
    assertTrue(latch.await(2, TimeUnit.SECONDS));
}
|
|
|
|
|
2012-11-16 06:30:34 +01:00
|
|
|
@Test
|
|
|
|
public void testPipelineOperation() {
|
2019-01-14 20:11:13 +01:00
|
|
|
ChannelPipeline pipeline = newLocalChannel().pipeline();
|
2013-05-17 03:54:20 +02:00
|
|
|
|
2012-11-16 06:30:34 +01:00
|
|
|
final int handlerNum = 5;
|
|
|
|
ChannelHandler[] handlers1 = newHandlers(handlerNum);
|
|
|
|
ChannelHandler[] handlers2 = newHandlers(handlerNum);
|
|
|
|
|
|
|
|
final String prefixX = "x";
|
|
|
|
for (int i = 0; i < handlerNum; i++) {
|
2012-11-30 15:01:57 +01:00
|
|
|
if (i % 2 == 0) {
|
2012-11-30 16:10:42 +01:00
|
|
|
pipeline.addFirst(prefixX + i, handlers1[i]);
|
2012-11-30 15:01:57 +01:00
|
|
|
} else {
|
2012-11-30 16:10:42 +01:00
|
|
|
pipeline.addLast(prefixX + i, handlers1[i]);
|
2012-11-30 15:01:57 +01:00
|
|
|
}
|
2012-11-16 06:30:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
for (int i = 0; i < handlerNum; i++) {
|
2012-11-30 15:01:57 +01:00
|
|
|
if (i % 2 != 0) {
|
2012-11-30 16:10:42 +01:00
|
|
|
pipeline.addBefore(prefixX + i, String.valueOf(i), handlers2[i]);
|
2012-11-30 15:01:57 +01:00
|
|
|
} else {
|
2012-11-30 16:10:42 +01:00
|
|
|
pipeline.addAfter(prefixX + i, String.valueOf(i), handlers2[i]);
|
2012-11-30 15:01:57 +01:00
|
|
|
}
|
2012-11-16 06:30:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
verifyContextNumber(pipeline, handlerNum * 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
public void testChannelHandlerContextOrder() {
    ChannelPipeline pipeline = newLocalChannel().pipeline();

    pipeline.addFirst("1", newHandler());
    pipeline.addLast("10", newHandler());

    pipeline.addBefore("10", "5", newHandler());
    pipeline.addAfter("1", "3", newHandler());
    pipeline.addBefore("5", "4", newHandler());
    pipeline.addAfter("5", "6", newHandler());

    pipeline.addBefore("1", "0", newHandler());
    pipeline.addAfter("10", "11", newHandler());

    // Walk the (package-private) context linked list and check that the
    // numeric handler names appear in strictly ascending order.
    DefaultChannelHandlerContext ctx = (DefaultChannelHandlerContext) pipeline.firstContext();
    assertNotNull(ctx);
    while (ctx != null) {
        int i = toInt(ctx.name());
        int j = next(ctx);
        if (j != -1) {
            assertTrue(i < j);
        } else {
            // Last user handler: only the tail context may follow it.
            assertNull(ctx.next.next);
        }
        ctx = ctx.next;
    }

    verifyContextNumber(pipeline, 8);
}
|
|
|
|
|
2013-05-17 03:54:20 +02:00
|
|
|
@Test(timeout = 10000)
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
public void testLifeCycleAwareness() throws Exception {
|
|
|
|
setUp();
|
2013-03-12 07:19:31 +01:00
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
ChannelPipeline p = self.pipeline();
|
2013-03-12 07:19:31 +01:00
|
|
|
|
2019-01-22 16:07:26 +01:00
|
|
|
final List<LifeCycleAwareTestHandler> handlers = new ArrayList<>();
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
final int COUNT = 20;
|
|
|
|
final CountDownLatch addLatch = new CountDownLatch(COUNT);
|
|
|
|
for (int i = 0; i < COUNT; i++) {
|
|
|
|
final LifeCycleAwareTestHandler handler = new LifeCycleAwareTestHandler("handler-" + i);
|
2013-03-12 07:19:31 +01:00
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
// Add handler.
|
|
|
|
p.addFirst(handler.name, handler);
|
2019-01-25 10:51:05 +01:00
|
|
|
self.eventLoop().execute(() -> {
|
|
|
|
// Validate handler life-cycle methods called.
|
|
|
|
handler.validate(true, false);
|
2013-03-12 07:19:31 +01:00
|
|
|
|
2019-01-25 10:51:05 +01:00
|
|
|
// Store handler into the list.
|
|
|
|
handlers.add(handler);
|
2013-05-17 03:54:20 +02:00
|
|
|
|
2019-01-25 10:51:05 +01:00
|
|
|
addLatch.countDown();
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
});
|
|
|
|
}
|
|
|
|
addLatch.await();
|
2013-03-12 07:19:31 +01:00
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
// Change the order of remove operations over all handlers in the pipeline.
|
|
|
|
Collections.shuffle(handlers);
|
2013-03-12 07:19:31 +01:00
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
final CountDownLatch removeLatch = new CountDownLatch(COUNT);
|
2013-03-12 07:19:31 +01:00
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
for (final LifeCycleAwareTestHandler handler : handlers) {
|
|
|
|
assertSame(handler, p.remove(handler.name));
|
2013-03-12 07:19:31 +01:00
|
|
|
|
2019-01-25 10:51:05 +01:00
|
|
|
self.eventLoop().execute(() -> {
|
|
|
|
// Validate handler life-cycle methods called.
|
|
|
|
handler.validate(true, true);
|
|
|
|
removeLatch.countDown();
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
});
|
|
|
|
}
|
|
|
|
removeLatch.await();
|
2013-03-12 07:19:31 +01:00
|
|
|
}
|
|
|
|
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
@Test(timeout = 100000)
|
|
|
|
public void testRemoveAndForwardInbound() throws Exception {
|
|
|
|
final BufferedTestHandler handler1 = new BufferedTestHandler();
|
|
|
|
final BufferedTestHandler handler2 = new BufferedTestHandler();
|
2013-05-17 03:54:20 +02:00
|
|
|
|
|
|
|
setUp(handler1, handler2);
|
|
|
|
|
2019-01-25 10:51:05 +01:00
|
|
|
self.eventLoop().submit(() -> {
|
|
|
|
ChannelPipeline p = self.pipeline();
|
|
|
|
handler1.inboundBuffer.add(8);
|
|
|
|
assertEquals(8, handler1.inboundBuffer.peek());
|
|
|
|
assertTrue(handler2.inboundBuffer.isEmpty());
|
|
|
|
p.remove(handler1);
|
|
|
|
assertEquals(1, handler2.inboundBuffer.size());
|
|
|
|
assertEquals(8, handler2.inboundBuffer.peek());
|
2013-05-17 03:54:20 +02:00
|
|
|
}).sync();
|
2013-03-12 07:19:31 +01:00
|
|
|
}
|
|
|
|
|
2013-05-17 03:54:20 +02:00
|
|
|
@Test(timeout = 10000)
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
public void testRemoveAndForwardOutbound() throws Exception {
|
|
|
|
final BufferedTestHandler handler1 = new BufferedTestHandler();
|
|
|
|
final BufferedTestHandler handler2 = new BufferedTestHandler();
|
2013-05-17 03:54:20 +02:00
|
|
|
|
|
|
|
setUp(handler1, handler2);
|
|
|
|
|
2019-01-25 10:51:05 +01:00
|
|
|
self.eventLoop().submit(() -> {
|
|
|
|
ChannelPipeline p = self.pipeline();
|
|
|
|
handler2.outboundBuffer.add(8);
|
|
|
|
assertEquals(8, handler2.outboundBuffer.peek());
|
|
|
|
assertTrue(handler1.outboundBuffer.isEmpty());
|
|
|
|
p.remove(handler2);
|
|
|
|
assertEquals(1, handler1.outboundBuffer.size());
|
|
|
|
assertEquals(8, handler1.outboundBuffer.peek());
|
2013-05-17 03:54:20 +02:00
|
|
|
}).sync();
|
2013-03-12 07:19:31 +01:00
|
|
|
}
|
|
|
|
|
2013-05-17 03:54:20 +02:00
|
|
|
@Test(timeout = 10000)
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
public void testReplaceAndForwardOutbound() throws Exception {
|
|
|
|
final BufferedTestHandler handler1 = new BufferedTestHandler();
|
|
|
|
final BufferedTestHandler handler2 = new BufferedTestHandler();
|
2013-05-17 03:54:20 +02:00
|
|
|
|
|
|
|
setUp(handler1);
|
|
|
|
|
2019-01-25 10:51:05 +01:00
|
|
|
self.eventLoop().submit(() -> {
|
|
|
|
ChannelPipeline p = self.pipeline();
|
|
|
|
handler1.outboundBuffer.add(8);
|
|
|
|
assertEquals(8, handler1.outboundBuffer.peek());
|
|
|
|
assertTrue(handler2.outboundBuffer.isEmpty());
|
|
|
|
p.replace(handler1, "handler2", handler2);
|
|
|
|
assertEquals(8, handler2.outboundBuffer.peek());
|
2013-05-17 03:54:20 +02:00
|
|
|
}).sync();
|
2013-03-12 07:19:31 +01:00
|
|
|
}
|
|
|
|
|
2013-05-17 03:54:20 +02:00
|
|
|
@Test(timeout = 10000)
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
public void testReplaceAndForwardInboundAndOutbound() throws Exception {
|
|
|
|
final BufferedTestHandler handler1 = new BufferedTestHandler();
|
|
|
|
final BufferedTestHandler handler2 = new BufferedTestHandler();
|
2013-05-17 03:54:20 +02:00
|
|
|
|
|
|
|
setUp(handler1);
|
|
|
|
|
2019-01-25 10:51:05 +01:00
|
|
|
self.eventLoop().submit(() -> {
|
|
|
|
ChannelPipeline p = self.pipeline();
|
|
|
|
handler1.inboundBuffer.add(8);
|
|
|
|
handler1.outboundBuffer.add(8);
|
2013-03-12 07:19:31 +01:00
|
|
|
|
2019-01-25 10:51:05 +01:00
|
|
|
assertEquals(8, handler1.inboundBuffer.peek());
|
|
|
|
assertEquals(8, handler1.outboundBuffer.peek());
|
|
|
|
assertTrue(handler2.inboundBuffer.isEmpty());
|
|
|
|
assertTrue(handler2.outboundBuffer.isEmpty());
|
2013-03-12 07:19:31 +01:00
|
|
|
|
2019-01-25 10:51:05 +01:00
|
|
|
p.replace(handler1, "handler2", handler2);
|
|
|
|
assertEquals(8, handler2.outboundBuffer.peek());
|
|
|
|
assertEquals(8, handler2.inboundBuffer.peek());
|
2013-05-17 03:54:20 +02:00
|
|
|
}).sync();
|
2013-03-12 07:19:31 +01:00
|
|
|
}
|
|
|
|
|
2013-05-17 03:54:20 +02:00
|
|
|
    @Test(timeout = 10000)
    // Verifies that ChannelPipeline.remove(...) forwards the removed handler's buffered
    // inbound messages to the next handler and outbound messages to the previous one.
    public void testRemoveAndForwardInboundOutbound() throws Exception {
        final BufferedTestHandler handler1 = new BufferedTestHandler();
        final BufferedTestHandler handler2 = new BufferedTestHandler();
        final BufferedTestHandler handler3 = new BufferedTestHandler();

        setUp(handler1, handler2, handler3);

        // Pipeline mutations and assertions must run on the channel's event loop.
        self.eventLoop().submit(() -> {
            ChannelPipeline p = self.pipeline();
            handler2.inboundBuffer.add(8);
            handler2.outboundBuffer.add(8);

            // The middle handler holds the pending messages; its neighbours are empty.
            assertEquals(8, handler2.inboundBuffer.peek());
            assertEquals(8, handler2.outboundBuffer.peek());
            assertEquals(0, handler1.outboundBuffer.size());
            assertEquals(0, handler3.inboundBuffer.size());

            // Removing handler2 must push inbound data to handler3 and outbound data to handler1.
            p.remove(handler2);
            assertEquals(8, handler3.inboundBuffer.peek());
            assertEquals(8, handler1.outboundBuffer.peek());
        }).sync();
    }
|
|
|
|
|
2014-03-31 07:29:31 +02:00
|
|
|
    // Tests for https://github.com/netty/netty/issues/2349
    @Test
    public void testCancelBind() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register();

        ChannelPromise promise = pipeline.channel().newPromise();
        // Cancel the promise before the bind is issued; the returned future must
        // reflect the cancellation instead of attempting the operation.
        assertTrue(promise.cancel(false));
        ChannelFuture future = pipeline.bind(new LocalAddress("test"), promise);
        assertTrue(future.isCancelled());
    }
|
|
|
|
|
|
|
|
    @Test
    public void testCancelConnect() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register();

        ChannelPromise promise = pipeline.channel().newPromise();
        // A promise cancelled before connect(...) must short-circuit the operation.
        assertTrue(promise.cancel(false));
        ChannelFuture future = pipeline.connect(new LocalAddress("test"), promise);
        assertTrue(future.isCancelled());
    }
|
|
|
|
|
|
|
|
    @Test
    public void testCancelDisconnect() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register();

        ChannelPromise promise = pipeline.channel().newPromise();
        // A promise cancelled before disconnect(...) must short-circuit the operation.
        assertTrue(promise.cancel(false));
        ChannelFuture future = pipeline.disconnect(promise);
        assertTrue(future.isCancelled());
    }
|
|
|
|
|
|
|
|
    @Test
    public void testCancelClose() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register();

        ChannelPromise promise = pipeline.channel().newPromise();
        // A promise cancelled before close(...) must short-circuit the operation.
        assertTrue(promise.cancel(false));
        ChannelFuture future = pipeline.close(promise);
        assertTrue(future.isCancelled());
    }
|
|
|
|
|
2017-02-09 19:10:49 +01:00
|
|
|
    // Passing a promise that belongs to a different channel must be rejected.
    @Test(expected = IllegalArgumentException.class)
    public void testWrongPromiseChannel() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register().sync();

        ChannelPipeline pipeline2 = newLocalChannel().pipeline();
        pipeline2.channel().register().sync();

        try {
            // promise2 is bound to pipeline2's channel, so pipeline.close(promise2)
            // is expected to throw IllegalArgumentException.
            ChannelPromise promise2 = pipeline2.channel().newPromise();
            pipeline.close(promise2);
        } finally {
            pipeline.close();
            pipeline2.close();
        }
    }
|
|
|
|
|
|
|
|
    // A VoidChannelPromise created as non-void-capable must be rejected by close(...).
    @Test(expected = IllegalArgumentException.class)
    public void testUnexpectedVoidChannelPromise() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register().sync();

        try {
            ChannelPromise promise = new VoidChannelPromise(pipeline.channel(), false);
            pipeline.close(promise);
        } finally {
            pipeline.close();
        }
    }
|
|
|
|
|
|
|
|
    // The channel's closeFuture() is not a user-completable promise and must be
    // rejected when passed to close(...).
    @Test(expected = IllegalArgumentException.class)
    public void testUnexpectedVoidChannelPromiseCloseFuture() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register().sync();

        try {
            ChannelPromise promise = (ChannelPromise) pipeline.channel().closeFuture();
            pipeline.close(promise);
        } finally {
            pipeline.close();
        }
    }
|
|
|
|
|
2014-03-31 07:29:31 +02:00
|
|
|
    @Test
    public void testCancelDeregister() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register().sync();

        ChannelPromise promise = pipeline.channel().newPromise();
        // A promise cancelled before deregister(...) must short-circuit the operation.
        assertTrue(promise.cancel(false));
        ChannelFuture future = pipeline.deregister(promise);
        assertTrue(future.isCancelled());
    }
|
|
|
|
|
|
|
|
    @Test
    public void testCancelWrite() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register().sync();

        ChannelPromise promise = pipeline.channel().newPromise();
        assertTrue(promise.cancel(false));
        ByteBuf buffer = Unpooled.buffer();
        assertEquals(1, buffer.refCnt());
        ChannelFuture future = pipeline.write(buffer, promise);
        assertTrue(future.isCancelled());
        // The cancelled write must still release the message to avoid a buffer leak.
        assertEquals(0, buffer.refCnt());
    }
|
|
|
|
|
|
|
|
    @Test
    public void testCancelWriteAndFlush() throws Exception {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.channel().register().sync();

        ChannelPromise promise = pipeline.channel().newPromise();
        assertTrue(promise.cancel(false));
        ByteBuf buffer = Unpooled.buffer();
        assertEquals(1, buffer.refCnt());
        ChannelFuture future = pipeline.writeAndFlush(buffer, promise);
        assertTrue(future.isCancelled());
        // The cancelled writeAndFlush must still release the message to avoid a buffer leak.
        assertEquals(0, buffer.refCnt());
    }
|
|
|
|
|
2014-05-04 21:09:58 +02:00
|
|
|
@Test
|
|
|
|
public void testFirstContextEmptyPipeline() throws Exception {
|
2019-01-14 20:11:13 +01:00
|
|
|
ChannelPipeline pipeline = newLocalChannel().pipeline();
|
2014-05-04 21:09:58 +02:00
|
|
|
assertNull(pipeline.firstContext());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testLastContextEmptyPipeline() throws Exception {
|
2019-01-14 20:11:13 +01:00
|
|
|
ChannelPipeline pipeline = newLocalChannel().pipeline();
|
2014-05-04 21:09:58 +02:00
|
|
|
assertNull(pipeline.lastContext());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testFirstHandlerEmptyPipeline() throws Exception {
|
2019-01-14 20:11:13 +01:00
|
|
|
ChannelPipeline pipeline = newLocalChannel().pipeline();
|
2014-05-04 21:09:58 +02:00
|
|
|
assertNull(pipeline.first());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testLastHandlerEmptyPipeline() throws Exception {
|
2019-01-14 20:11:13 +01:00
|
|
|
ChannelPipeline pipeline = newLocalChannel().pipeline();
|
2014-05-04 21:09:58 +02:00
|
|
|
assertNull(pipeline.last());
|
|
|
|
}
|
|
|
|
|
2015-06-19 21:33:54 +02:00
|
|
|
    // An exception thrown from ChannelInitializer.initChannel(...) must be routed to
    // exceptionCaught(...) and must leave the channel inactive.
    @Test(timeout = 5000)
    public void testChannelInitializerException() throws Exception {
        final IllegalStateException exception = new IllegalStateException();
        final AtomicReference<Throwable> error = new AtomicReference<>();
        final CountDownLatch latch = new CountDownLatch(1);
        EmbeddedChannel channel = new EmbeddedChannel(false, false, new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(Channel ch) throws Exception {
                throw exception;
            }

            @Override
            public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                super.exceptionCaught(ctx, cause);
                error.set(cause);
                latch.countDown();
            }
        });
        // Wait until exceptionCaught(...) observed the failure.
        latch.await();
        assertFalse(channel.isActive());
        assertSame(exception, error.get());
    }
|
|
|
|
|
2016-02-08 19:45:05 +01:00
|
|
|
    // A handler added before the channel is registered must still see handlerAdded(...)
    // and handlerRemoved(...) on the expected executor.
    @Test(timeout = 3000)
    public void testAddHandlerBeforeRegisteredThenRemove() {
        final EventLoop loop = group.next();

        CheckEventExecutorHandler handler = new CheckEventExecutorHandler(loop);
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.addFirst(handler);
        handler.addedPromise.syncUninterruptibly();
        pipeline.channel().register();
        pipeline.remove(handler);
        handler.removedPromise.syncUninterruptibly();

        pipeline.channel().close().syncUninterruptibly();
    }
|
|
|
|
|
|
|
|
    // A handler added before registration must be removable via replace(...), and the
    // replacement must receive handlerAdded(...).
    @Test(timeout = 3000)
    public void testAddHandlerBeforeRegisteredThenReplace() throws Exception {
        final EventLoop loop = group.next();
        final CountDownLatch latch = new CountDownLatch(1);

        CheckEventExecutorHandler handler = new CheckEventExecutorHandler(loop);
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.addFirst(handler);
        handler.addedPromise.syncUninterruptibly();
        pipeline.channel().register();
        pipeline.replace(handler, null, new ChannelHandlerAdapter() {
            @Override
            public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
                latch.countDown();
            }
        });
        handler.removedPromise.syncUninterruptibly();
        latch.await();

        pipeline.channel().close().syncUninterruptibly();
    }
|
|
|
|
|
2016-04-09 21:38:45 +02:00
|
|
|
    // handlerAdded(...) and handlerRemoved(...) must fire even when the handler is
    // added and removed before the channel is registered.
    @Test(timeout = 2000)
    public void testAddRemoveHandlerCalled() throws Throwable {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        CallbackCheckHandler handler = new CallbackCheckHandler();

        pipeline.addFirst(handler);
        pipeline.remove(handler);

        assertTrue(handler.addedHandler.get());
        assertTrue(handler.removedHandler.get());

        pipeline.channel().register().syncUninterruptibly();
        Throwable cause = handler.error.get();
        pipeline.channel().close().syncUninterruptibly();

        // Re-throw any error the handler captured on its own thread.
        if (cause != null) {
            throw cause;
        }
    }
|
|
|
|
|
2016-04-09 21:38:45 +02:00
|
|
|
    // replace(...) before registration must call handlerRemoved(...) on the old handler
    // and handlerAdded(...) on the new one — but not handlerRemoved(...) on the new one
    // until it is actually removed.
    @Test(timeout = 3000)
    public void testAddReplaceHandlerCalled() throws Throwable {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        CallbackCheckHandler handler = new CallbackCheckHandler();
        CallbackCheckHandler handler2 = new CallbackCheckHandler();

        pipeline.addFirst(handler);
        pipeline.replace(handler, null, handler2);

        assertTrue(handler.addedHandler.get());
        assertTrue(handler.removedHandler.get());
        assertTrue(handler2.addedHandler.get());
        assertNull(handler2.removedHandler.getNow());

        pipeline.channel().register().syncUninterruptibly();
        // Re-throw any error either handler captured on its own thread.
        Throwable cause = handler.error.get();
        if (cause != null) {
            throw cause;
        }

        Throwable cause2 = handler2.error.get();
        if (cause2 != null) {
            throw cause2;
        }

        // handler2 is still in the pipeline, so it must not have been removed yet.
        assertNull(handler2.removedHandler.getNow());
        pipeline.remove(handler2);
        assertTrue(handler2.removedHandler.get());
        pipeline.channel().close().syncUninterruptibly();
    }
|
|
|
|
|
2016-04-09 21:38:45 +02:00
|
|
|
    // Stress-tests concurrent pipeline mutation: each event loop mutates the OTHER
    // channel's pipeline, exercising cross-thread handler addition.
    @Test(timeout = 3000)
    public void testAddBefore() throws Throwable {
        EventLoopGroup defaultGroup = new MultithreadEventLoopGroup(2, LocalHandler.newFactory());
        try {
            EventLoop eventLoop1 = defaultGroup.next();
            EventLoop eventLoop2 = defaultGroup.next();

            ChannelPipeline pipeline1 = new LocalChannel(eventLoop1).pipeline();
            ChannelPipeline pipeline2 = new LocalChannel(eventLoop2).pipeline();

            pipeline1.channel().register().syncUninterruptibly();
            pipeline2.channel().register().syncUninterruptibly();

            // 10 tasks per direction; each TestTask counts the latch down once.
            CountDownLatch latch = new CountDownLatch(2 * 10);
            for (int i = 0; i < 10; i++) {
                eventLoop1.execute(new TestTask(pipeline2, latch));
                eventLoop2.execute(new TestTask(pipeline1, latch));
            }
            latch.await();
            pipeline1.channel().close().syncUninterruptibly();
            pipeline2.channel().close().syncUninterruptibly();
        } finally {
            defaultGroup.shutdownGracefully();
        }
    }
|
|
|
|
|
2016-07-06 12:03:45 +02:00
|
|
|
    // Runs the add-in-listener scenario on an NIO channel.
    @Test(timeout = 3000)
    public void testAddInListenerNio() throws Throwable {
        EventLoopGroup nioEventLoopGroup = new MultithreadEventLoopGroup(1, NioHandler.newFactory());
        try {
            testAddInListener(new NioSocketChannel(nioEventLoopGroup.next()));
        } finally {
            nioEventLoopGroup.shutdownGracefully();
        }
    }
|
|
|
|
|
|
|
|
    // Runs the add-in-listener scenario on a local (in-VM) channel.
    @Test(timeout = 3000)
    public void testAddInListenerLocal() throws Throwable {
        testAddInListener(newLocalChannel());
    }
|
|
|
|
|
2019-01-14 20:11:13 +01:00
|
|
|
    // Shared scenario: a handler added from inside a register-future listener must have
    // handlerAdded(...) invoked synchronously and must observe a user event fired right after.
    private static void testAddInListener(Channel channel) throws Throwable {
        ChannelPipeline pipeline1 = channel.pipeline();
        try {
            final Object event = new Object();
            final Promise<Object> promise = ImmediateEventExecutor.INSTANCE.newPromise();
            pipeline1.channel().register().addListener((ChannelFutureListener) future -> {
                ChannelPipeline pipeline = future.channel().pipeline();
                final AtomicBoolean handlerAddedCalled = new AtomicBoolean();
                pipeline.addLast(new ChannelHandler() {
                    @Override
                    public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
                        handlerAddedCalled.set(true);
                    }

                    @Override
                    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                        promise.setSuccess(event);
                    }

                    @Override
                    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                        promise.setFailure(cause);
                    }
                });
                // The listener runs on the event loop, so handlerAdded(...) must have
                // executed by the time addLast(...) returns.
                if (!handlerAddedCalled.get()) {
                    promise.setFailure(new AssertionError("handlerAdded(...) should have been called"));
                    return;
                }
                // This event must be captured by the added handler.
                pipeline.fireUserEventTriggered(event);
            });
            assertSame(event, promise.syncUninterruptibly().getNow());
        } finally {
            pipeline1.channel().close().syncUninterruptibly();
        }
    }
|
|
|
|
|
2016-05-23 19:01:15 +02:00
|
|
|
    // A null name is allowed in every add*(...) variant; the pipeline generates one.
    @Test
    public void testNullName() {
        ChannelPipeline pipeline = newLocalChannel().pipeline();
        pipeline.addLast(newHandler());
        pipeline.addLast(null, newHandler());
        pipeline.addFirst(newHandler());
        pipeline.addFirst(null, newHandler());

        pipeline.addLast("test", newHandler());
        pipeline.addAfter("test", null, newHandler());

        pipeline.addBefore("test", null, newHandler());
    }
|
|
|
|
|
2017-04-18 07:52:23 +02:00
|
|
|
    // An exception thrown from write(...) when using the void promise must still be
    // delivered to exceptionCaught(...).
    @Test(timeout = 3000)
    public void testVoidPromiseNotify() throws Throwable {
        EventLoopGroup defaultGroup = new MultithreadEventLoopGroup(1, LocalHandler.newFactory());
        EventLoop eventLoop1 = defaultGroup.next();
        ChannelPipeline pipeline1 = new LocalChannel(eventLoop1).pipeline();

        final Promise<Throwable> promise = eventLoop1.newPromise();
        final Exception exception = new IllegalArgumentException();
        try {
            pipeline1.channel().register().syncUninterruptibly();
            pipeline1.addLast(new ChannelHandler() {
                @Override
                public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
                    throw exception;
                }

                @Override
                public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
                    promise.setSuccess(cause);
                }
            });
            // The void promise cannot carry the failure, so it must surface via exceptionCaught(...).
            pipeline1.write("test", pipeline1.voidPromise());
            assertSame(exception, promise.syncUninterruptibly().getNow());
        } finally {
            pipeline1.channel().close().syncUninterruptibly();
            defaultGroup.shutdownGracefully();
        }
    }
|
|
|
|
|
2019-01-14 08:19:48 +01:00
|
|
|
    // Test for https://github.com/netty/netty/issues/8676.
    // handlerRemoved(...) must only be called for handlers whose handlerAdded(...) ran.
    @Test
    public void testHandlerRemovedOnlyCalledWhenHandlerAddedCalled() throws Exception {
        EventLoopGroup group = new MultithreadEventLoopGroup(1, LocalHandler.newFactory());
        try {
            final AtomicReference<Error> errorRef = new AtomicReference<>();

            // As this only happens via a race we will verify 500 times. This was good enough to have it failed most of
            // the time.
            for (int i = 0; i < 500; i++) {

                ChannelPipeline pipeline = new LocalChannel(group.next()).pipeline();
                pipeline.channel().register().sync();

                final CountDownLatch latch = new CountDownLatch(1);

                pipeline.addLast(new ChannelHandler() {
                    @Override
                    public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
                        // Block just for a bit so we have a chance to trigger the race mentioned in the issue.
                        latch.await(50, TimeUnit.MILLISECONDS);
                    }
                });

                // Close the pipeline which will call destroy0(). This will remove each handler in the pipeline and
                // should call handlerRemoved(...) if and only if handlerAdded(...) was called for the handler before.
                pipeline.close();

                pipeline.addLast(new ChannelHandler() {
                    // NOTE: "handerAddedCalled" spelling kept as in the original source.
                    private boolean handerAddedCalled;

                    @Override
                    public void handlerAdded(ChannelHandlerContext ctx) {
                        handerAddedCalled = true;
                    }

                    @Override
                    public void handlerRemoved(ChannelHandlerContext ctx) {
                        if (!handerAddedCalled) {
                            errorRef.set(new AssertionError(
                                    "handlerRemoved(...) called without handlerAdded(...) before"));
                        }
                    }
                });

                latch.countDown();

                pipeline.channel().closeFuture().syncUninterruptibly();

                // Schedule something on the EventLoop to ensure all other scheduled tasks had a chance to complete.
                pipeline.channel().eventLoop().submit(() -> {
                    // NOOP
                }).syncUninterruptibly();
                Error error = errorRef.get();
                if (error != null) {
                    throw error;
                }
            }
        } finally {
            group.shutdownGracefully();
        }
    }
|
|
|
|
|
2019-01-22 08:58:58 +01:00
|
|
|
@Test
|
|
|
|
public void testSkipHandlerMethodsIfAnnotated() {
|
|
|
|
EmbeddedChannel channel = new EmbeddedChannel(true);
|
|
|
|
ChannelPipeline pipeline = channel.pipeline();
|
|
|
|
|
|
|
|
        // Handler whose event methods are all annotated with @Skip: the pipeline must
        // never invoke them. Each method records an error via fail() if it is called,
        // then forwards the event so the pipeline keeps working.
        final class SkipHandler implements ChannelInboundHandler, ChannelOutboundHandler {
            // Decremented once by handlerAdded(...) and once by handlerRemoved(...);
            // assertSkipped() expects it to reach 0.
            private int state = 2;
            // Set by fail() when a @Skip-annotated method was unexpectedly invoked.
            private Error errorRef;

            private void fail() {
                errorRef = new AssertionError("Method should never been called");
            }

            @Skip
            @Override
            public void bind(ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise promise) {
                fail();
                ctx.bind(localAddress, promise);
            }

            @Skip
            @Override
            public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress,
                                SocketAddress localAddress, ChannelPromise promise) {
                fail();
                ctx.connect(remoteAddress, localAddress, promise);
            }

            @Skip
            @Override
            public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) {
                fail();
                ctx.disconnect(promise);
            }

            @Skip
            @Override
            public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
                fail();
                ctx.close(promise);
            }

            @Skip
            @Override
            public void register(ChannelHandlerContext ctx, ChannelPromise promise) {
                fail();
                ctx.register(promise);
            }

            @Skip
            @Override
            public void deregister(ChannelHandlerContext ctx, ChannelPromise promise) {
                fail();
                ctx.deregister(promise);
            }

            @Skip
            @Override
            public void read(ChannelHandlerContext ctx) {
                fail();
                ctx.read();
            }

            @Skip
            @Override
            public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
                fail();
                ctx.write(msg, promise);
            }

            @Skip
            @Override
            public void flush(ChannelHandlerContext ctx) {
                fail();
                ctx.flush();
            }

            @Skip
            @Override
            public void channelRegistered(ChannelHandlerContext ctx) {
                fail();
                ctx.fireChannelRegistered();
            }

            @Skip
            @Override
            public void channelUnregistered(ChannelHandlerContext ctx) {
                fail();
                ctx.fireChannelUnregistered();
            }

            @Skip
            @Override
            public void channelActive(ChannelHandlerContext ctx) {
                fail();
                ctx.fireChannelActive();
            }

            @Skip
            @Override
            public void channelInactive(ChannelHandlerContext ctx) {
                fail();
                ctx.fireChannelInactive();
            }

            @Skip
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                fail();
                ctx.fireChannelRead(msg);
            }

            @Skip
            @Override
            public void channelReadComplete(ChannelHandlerContext ctx) {
                fail();
                ctx.fireChannelReadComplete();
            }

            @Skip
            @Override
            public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
                fail();
                ctx.fireUserEventTriggered(evt);
            }

            @Skip
            @Override
            public void channelWritabilityChanged(ChannelHandlerContext ctx) {
                fail();
                ctx.fireChannelWritabilityChanged();
            }

            @Skip
            @Override
            public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
                fail();
                ctx.fireExceptionCaught(cause);
            }

            // handlerAdded/handlerRemoved are NOT annotated with @Skip and must run.
            @Override
            public void handlerAdded(ChannelHandlerContext ctx) {
                state--;
            }

            @Override
            public void handlerRemoved(ChannelHandlerContext ctx) {
                state--;
            }

            void assertSkipped() {
                assertEquals(0, state);
                Error error = errorRef;
                if (error != null) {
                    throw error;
                }
            }
        }
|
|
|
|
|
2019-03-13 09:46:10 +01:00
|
|
|
        // Handler that records every outbound callback (plus handlerAdded/Removed) in a
        // bit mask so assertCalled() can verify each one was invoked exactly once per run.
        final class OutboundCalledHandler implements ChannelOutboundHandler {
            private static final int MASK_BIND = 1;
            private static final int MASK_CONNECT = 1 << 1;
            private static final int MASK_DISCONNECT = 1 << 2;
            private static final int MASK_CLOSE = 1 << 3;
            private static final int MASK_REGISTER = 1 << 4;
            private static final int MASK_DEREGISTER = 1 << 5;
            private static final int MASK_READ = 1 << 6;
            private static final int MASK_WRITE = 1 << 7;
            private static final int MASK_FLUSH = 1 << 8;
            private static final int MASK_ADDED = 1 << 9;
            private static final int MASK_REMOVED = 1 << 10;

            // Accumulates one bit per invoked callback.
            private int executionMask;

            @Override
            public void handlerAdded(ChannelHandlerContext ctx) {
                executionMask |= MASK_ADDED;
            }

            @Override
            public void handlerRemoved(ChannelHandlerContext ctx) {
                executionMask |= MASK_REMOVED;
            }

            @Override
            public void bind(ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise promise) {
                executionMask |= MASK_BIND;
                promise.setSuccess();
            }

            @Override
            public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress,
                                SocketAddress localAddress, ChannelPromise promise) {
                executionMask |= MASK_CONNECT;
                promise.setSuccess();
            }

            @Override
            public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) {
                executionMask |= MASK_DISCONNECT;
                promise.setSuccess();
            }

            @Override
            public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
                executionMask |= MASK_CLOSE;
                promise.setSuccess();
            }

            @Override
            public void register(ChannelHandlerContext ctx, ChannelPromise promise) {
                executionMask |= MASK_REGISTER;
                promise.setSuccess();
            }

            @Override
            public void deregister(ChannelHandlerContext ctx, ChannelPromise promise) {
                executionMask |= MASK_DEREGISTER;
                promise.setSuccess();
            }

            @Override
            public void read(ChannelHandlerContext ctx) {
                executionMask |= MASK_READ;
            }

            @Override
            public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
                executionMask |= MASK_WRITE;
                promise.setSuccess();
            }

            @Override
            public void flush(ChannelHandlerContext ctx) {
                executionMask |= MASK_FLUSH;
            }

            // Asserts that every tracked callback was observed.
            void assertCalled() {
                assertCalled("handlerAdded", MASK_ADDED);
                assertCalled("handlerRemoved", MASK_REMOVED);
                assertCalled("bind", MASK_BIND);
                assertCalled("connect", MASK_CONNECT);
                assertCalled("disconnect", MASK_DISCONNECT);
                assertCalled("close", MASK_CLOSE);
                assertCalled("register", MASK_REGISTER);
                assertCalled("deregister", MASK_DEREGISTER);
                assertCalled("read", MASK_READ);
                assertCalled("write", MASK_WRITE);
                assertCalled("flush", MASK_FLUSH);
            }

            private void assertCalled(String methodName, int mask) {
                assertTrue(methodName + " was not called", (executionMask & mask) != 0);
            }
        }
|
|
|
|
|
2019-03-13 09:46:10 +01:00
|
|
|
final class InboundCalledHandler implements ChannelInboundHandler {
|
2019-01-22 08:58:58 +01:00
|
|
|
|
|
|
|
private static final int MASK_CHANNEL_REGISTER = 1;
|
|
|
|
private static final int MASK_CHANNEL_UNREGISTER = 1 << 1;
|
|
|
|
private static final int MASK_CHANNEL_ACTIVE = 1 << 2;
|
|
|
|
private static final int MASK_CHANNEL_INACTIVE = 1 << 3;
|
|
|
|
private static final int MASK_CHANNEL_READ = 1 << 4;
|
|
|
|
private static final int MASK_CHANNEL_READ_COMPLETE = 1 << 5;
|
|
|
|
private static final int MASK_USER_EVENT_TRIGGERED = 1 << 6;
|
|
|
|
private static final int MASK_CHANNEL_WRITABILITY_CHANGED = 1 << 7;
|
|
|
|
private static final int MASK_EXCEPTION_CAUGHT = 1 << 8;
|
|
|
|
private static final int MASK_ADDED = 1 << 9;
|
|
|
|
private static final int MASK_REMOVED = 1 << 10;
|
|
|
|
|
|
|
|
private int executionMask;
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerAdded(ChannelHandlerContext ctx) {
|
|
|
|
executionMask |= MASK_ADDED;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerRemoved(ChannelHandlerContext ctx) {
|
|
|
|
executionMask |= MASK_REMOVED;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelRegistered(ChannelHandlerContext ctx) {
|
|
|
|
executionMask |= MASK_CHANNEL_REGISTER;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelUnregistered(ChannelHandlerContext ctx) {
|
|
|
|
executionMask |= MASK_CHANNEL_UNREGISTER;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelActive(ChannelHandlerContext ctx) {
|
|
|
|
executionMask |= MASK_CHANNEL_ACTIVE;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelInactive(ChannelHandlerContext ctx) {
|
|
|
|
executionMask |= MASK_CHANNEL_INACTIVE;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelRead(ChannelHandlerContext ctx, Object msg) {
|
|
|
|
executionMask |= MASK_CHANNEL_READ;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelReadComplete(ChannelHandlerContext ctx) {
|
|
|
|
executionMask |= MASK_CHANNEL_READ_COMPLETE;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
|
|
|
|
executionMask |= MASK_USER_EVENT_TRIGGERED;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void channelWritabilityChanged(ChannelHandlerContext ctx) {
|
|
|
|
executionMask |= MASK_CHANNEL_WRITABILITY_CHANGED;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
|
|
|
|
executionMask |= MASK_EXCEPTION_CAUGHT;
|
|
|
|
}
|
|
|
|
|
|
|
|
void assertCalled() {
|
|
|
|
assertCalled("handlerAdded", MASK_ADDED);
|
|
|
|
assertCalled("handlerRemoved", MASK_REMOVED);
|
|
|
|
assertCalled("channelRegistered", MASK_CHANNEL_REGISTER);
|
|
|
|
assertCalled("channelUnregistered", MASK_CHANNEL_UNREGISTER);
|
|
|
|
assertCalled("channelActive", MASK_CHANNEL_ACTIVE);
|
|
|
|
assertCalled("channelInactive", MASK_CHANNEL_INACTIVE);
|
|
|
|
assertCalled("channelRead", MASK_CHANNEL_READ);
|
|
|
|
assertCalled("channelReadComplete", MASK_CHANNEL_READ_COMPLETE);
|
|
|
|
assertCalled("userEventTriggered", MASK_USER_EVENT_TRIGGERED);
|
|
|
|
assertCalled("channelWritabilityChanged", MASK_CHANNEL_WRITABILITY_CHANGED);
|
|
|
|
assertCalled("exceptionCaught", MASK_EXCEPTION_CAUGHT);
|
|
|
|
}
|
|
|
|
|
|
|
|
private void assertCalled(String methodName, int mask) {
|
|
|
|
assertTrue(methodName + " was not called", (executionMask & mask) != 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
OutboundCalledHandler outboundCalledHandler = new OutboundCalledHandler();
|
|
|
|
SkipHandler skipHandler = new SkipHandler();
|
|
|
|
InboundCalledHandler inboundCalledHandler = new InboundCalledHandler();
|
|
|
|
pipeline.addLast(outboundCalledHandler, skipHandler, inboundCalledHandler);
|
|
|
|
|
|
|
|
pipeline.fireChannelRegistered();
|
|
|
|
pipeline.fireChannelUnregistered();
|
|
|
|
pipeline.fireChannelActive();
|
|
|
|
pipeline.fireChannelInactive();
|
|
|
|
pipeline.fireChannelRead("");
|
|
|
|
pipeline.fireChannelReadComplete();
|
|
|
|
pipeline.fireChannelWritabilityChanged();
|
|
|
|
pipeline.fireUserEventTriggered("");
|
|
|
|
pipeline.fireExceptionCaught(new Exception());
|
|
|
|
|
|
|
|
pipeline.register().syncUninterruptibly();
|
|
|
|
pipeline.deregister().syncUninterruptibly();
|
2019-01-24 07:43:04 +01:00
|
|
|
pipeline.bind(new SocketAddress() {
|
|
|
|
}).syncUninterruptibly();
|
|
|
|
pipeline.connect(new SocketAddress() {
|
|
|
|
}).syncUninterruptibly();
|
2019-01-22 08:58:58 +01:00
|
|
|
pipeline.disconnect().syncUninterruptibly();
|
|
|
|
pipeline.close().syncUninterruptibly();
|
|
|
|
pipeline.write("");
|
|
|
|
pipeline.flush();
|
|
|
|
pipeline.read();
|
|
|
|
|
|
|
|
pipeline.remove(outboundCalledHandler);
|
|
|
|
pipeline.remove(inboundCalledHandler);
|
|
|
|
pipeline.remove(skipHandler);
|
|
|
|
|
|
|
|
assertFalse(channel.finish());
|
|
|
|
|
|
|
|
outboundCalledHandler.assertCalled();
|
|
|
|
inboundCalledHandler.assertCalled();
|
|
|
|
skipHandler.assertSkipped();
|
|
|
|
}
|
|
|
|
|
2019-01-24 07:43:04 +01:00
|
|
|
@Test
public void testWriteThrowsReleaseMessage() {
    // write(...) variant: a failing write must still release the message.
    testWriteThrowsReleaseMessage0(false);
}
|
|
|
|
|
|
|
|
@Test
public void testWriteAndFlushThrowsReleaseMessage() {
    // writeAndFlush(...) variant: a failing write must still release the message.
    testWriteThrowsReleaseMessage0(true);
}
|
|
|
|
|
|
|
|
private void testWriteThrowsReleaseMessage0(boolean flush) {
|
|
|
|
ReferenceCounted referenceCounted = new AbstractReferenceCounted() {
|
|
|
|
@Override
|
|
|
|
protected void deallocate() {
|
|
|
|
// NOOP
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ReferenceCounted touch(Object hint) {
|
|
|
|
return this;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
assertEquals(1, referenceCounted.refCnt());
|
|
|
|
|
|
|
|
Channel channel = new LocalChannel(group.next());
|
|
|
|
Channel channel2 = new LocalChannel(group.next());
|
|
|
|
channel.register().syncUninterruptibly();
|
|
|
|
channel2.register().syncUninterruptibly();
|
|
|
|
|
|
|
|
try {
|
|
|
|
if (flush) {
|
|
|
|
channel.writeAndFlush(referenceCounted, channel2.newPromise());
|
|
|
|
} else {
|
|
|
|
channel.write(referenceCounted, channel2.newPromise());
|
|
|
|
}
|
|
|
|
fail();
|
|
|
|
} catch (IllegalArgumentException expected) {
|
|
|
|
// expected
|
|
|
|
}
|
|
|
|
assertEquals(0, referenceCounted.refCnt());
|
|
|
|
|
|
|
|
channel.close().syncUninterruptibly();
|
|
|
|
channel2.close().syncUninterruptibly();
|
|
|
|
}
|
|
|
|
|
2018-01-22 20:09:17 +01:00
|
|
|
@Test(timeout = 5000)
public void handlerAddedStateUpdatedBeforeHandlerAddedDoneForceEventLoop() throws InterruptedException {
    // Variant that runs the pipeline mutation on the channel's event loop.
    handlerAddedStateUpdatedBeforeHandlerAddedDone(true);
}
|
|
|
|
|
|
|
|
@Test(timeout = 5000)
public void handlerAddedStateUpdatedBeforeHandlerAddedDoneOnCallingThread() throws InterruptedException {
    // Variant that runs the pipeline mutation on the calling (test) thread.
    handlerAddedStateUpdatedBeforeHandlerAddedDone(false);
}
|
|
|
|
|
|
|
|
/**
 * Verifies that a handler's added-state is visible before handlerAdded(...)
 * completes: the second handler fires an event from handlerAdded(), and the
 * resulting write must traverse the first handler that was added earlier.
 *
 * @param executeInEventLoop whether to mutate the pipeline from the event loop
 *                           or from the calling thread
 */
private static void handlerAddedStateUpdatedBeforeHandlerAddedDone(boolean executeInEventLoop)
        throws InterruptedException {
    final ChannelPipeline pipeline = newLocalChannel().pipeline();

    // Sentinel objects matched by identity below.
    final Object userEvent = new Object();
    final Object writeObject = new Object();
    final CountDownLatch doneLatch = new CountDownLatch(1);

    Runnable r = () -> {
        // Last handler: reacts to the user event by writing back through
        // the pipeline, then propagates the event.
        pipeline.addLast(new ChannelHandler() {
            @Override
            public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
                if (evt == userEvent) {
                    ctx.write(writeObject);
                }
                ctx.fireUserEventTriggered(evt);
            }
        });
        // First handler: emits the user event from handlerAdded(...) and
        // counts down once the resulting write reaches it on the way out.
        pipeline.addFirst(new ChannelHandler() {
            @Override
            public void handlerAdded(ChannelHandlerContext ctx) {
                ctx.fireUserEventTriggered(userEvent);
            }

            @Override
            public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
                if (msg == writeObject) {
                    doneLatch.countDown();
                }
                ctx.write(msg, promise);
            }
        });
    };

    if (executeInEventLoop) {
        pipeline.channel().eventLoop().execute(r);
    } else {
        r.run();
    }

    // Blocks until the write was observed; the @Test timeout bounds this.
    doneLatch.await();
}
|
|
|
|
|
2016-04-09 21:38:45 +02:00
|
|
|
/**
 * Task that adds a no-op handler to the given pipeline and then counts down
 * the latch, so a test can wait until the mutation happened.
 */
private static final class TestTask implements Runnable {

    private final ChannelPipeline pipeline;
    private final CountDownLatch latch;

    TestTask(ChannelPipeline pipeline, CountDownLatch latch) {
        this.pipeline = pipeline;
        this.latch = latch;
    }

    @Override
    public void run() {
        pipeline.addLast(new ChannelHandler() { });
        latch.countDown();
    }
}
|
|
|
|
|
2016-02-08 19:45:05 +01:00
|
|
|
private static final class CallbackCheckHandler extends ChannelHandlerAdapter {
|
2016-04-09 21:38:45 +02:00
|
|
|
final Promise<Boolean> addedHandler = ImmediateEventExecutor.INSTANCE.newPromise();
|
|
|
|
final Promise<Boolean> removedHandler = ImmediateEventExecutor.INSTANCE.newPromise();
|
2019-01-22 16:07:26 +01:00
|
|
|
final AtomicReference<Throwable> error = new AtomicReference<>();
|
2016-02-08 19:45:05 +01:00
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
|
2016-04-09 21:38:45 +02:00
|
|
|
if (!addedHandler.trySuccess(true)) {
|
2016-02-08 19:45:05 +01:00
|
|
|
error.set(new AssertionError("handlerAdded(...) called multiple times: " + ctx.name()));
|
2016-04-09 21:38:45 +02:00
|
|
|
} else if (removedHandler.getNow() == Boolean.TRUE) {
|
2016-02-08 19:45:05 +01:00
|
|
|
error.set(new AssertionError("handlerRemoved(...) called before handlerAdded(...): " + ctx.name()));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
|
2016-04-09 21:38:45 +02:00
|
|
|
if (!removedHandler.trySuccess(true)) {
|
2016-02-08 19:45:05 +01:00
|
|
|
error.set(new AssertionError("handlerRemoved(...) called multiple times: " + ctx.name()));
|
2016-04-09 21:38:45 +02:00
|
|
|
} else if (addedHandler.getNow() == Boolean.FALSE) {
|
2016-02-08 19:45:05 +01:00
|
|
|
error.set(new AssertionError("handlerRemoved(...) called before handlerAdded(...): " + ctx.name()));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-13 09:46:10 +01:00
|
|
|
private static final class CheckExceptionHandler implements ChannelInboundHandler {
|
2016-02-08 19:45:05 +01:00
|
|
|
private final Throwable expected;
|
|
|
|
private final Promise<Void> promise;
|
|
|
|
|
|
|
|
CheckExceptionHandler(Throwable expected, Promise<Void> promise) {
|
|
|
|
this.expected = expected;
|
|
|
|
this.promise = promise;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
|
|
|
if (cause instanceof ChannelPipelineException && cause.getCause() == expected) {
|
|
|
|
promise.setSuccess(null);
|
|
|
|
} else {
|
|
|
|
promise.setFailure(new AssertionError("cause not the expected instance"));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-09 21:38:45 +02:00
|
|
|
private static void assertHandler(CheckOrderHandler actual, CheckOrderHandler... handlers) throws Throwable {
|
|
|
|
for (CheckOrderHandler h : handlers) {
|
|
|
|
if (h == actual) {
|
|
|
|
actual.checkError();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fail("handler was not one of the expected handlers");
|
2016-02-08 19:45:05 +01:00
|
|
|
}
|
|
|
|
|
2019-03-13 09:46:10 +01:00
|
|
|
private static final class CheckOrderHandler implements ChannelInboundHandler {
|
2016-02-08 19:45:05 +01:00
|
|
|
private final Queue<CheckOrderHandler> addedQueue;
|
|
|
|
private final Queue<CheckOrderHandler> removedQueue;
|
2019-01-22 16:07:26 +01:00
|
|
|
private final AtomicReference<Throwable> error = new AtomicReference<>();
|
2016-02-08 19:45:05 +01:00
|
|
|
|
|
|
|
CheckOrderHandler(Queue<CheckOrderHandler> addedQueue, Queue<CheckOrderHandler> removedQueue) {
|
|
|
|
this.addedQueue = addedQueue;
|
|
|
|
this.removedQueue = removedQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
|
|
|
|
addedQueue.add(this);
|
|
|
|
checkExecutor(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
|
|
|
|
removedQueue.add(this);
|
|
|
|
checkExecutor(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
|
|
|
error.set(cause);
|
|
|
|
}
|
|
|
|
|
|
|
|
void checkError() throws Throwable {
|
|
|
|
Throwable cause = error.get();
|
|
|
|
if (cause != null) {
|
|
|
|
throw cause;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private void checkExecutor(ChannelHandlerContext ctx) {
|
|
|
|
if (!ctx.executor().inEventLoop()) {
|
|
|
|
error.set(new AssertionError());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private static final class CheckEventExecutorHandler extends ChannelHandlerAdapter {
|
|
|
|
final EventExecutor executor;
|
|
|
|
final Promise<Void> addedPromise;
|
|
|
|
final Promise<Void> removedPromise;
|
|
|
|
|
|
|
|
CheckEventExecutorHandler(EventExecutor executor) {
|
|
|
|
this.executor = executor;
|
|
|
|
addedPromise = executor.newPromise();
|
|
|
|
removedPromise = executor.newPromise();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
|
|
|
|
assertExecutor(ctx, addedPromise);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
|
|
|
|
assertExecutor(ctx, removedPromise);
|
|
|
|
}
|
|
|
|
|
|
|
|
private void assertExecutor(ChannelHandlerContext ctx, Promise<Void> promise) {
|
|
|
|
final boolean same;
|
|
|
|
try {
|
|
|
|
same = executor == ctx.executor();
|
|
|
|
} catch (Throwable cause) {
|
|
|
|
promise.setFailure(cause);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (same) {
|
|
|
|
promise.setSuccess(null);
|
|
|
|
} else {
|
|
|
|
promise.setFailure(new AssertionError("EventExecutor not the same"));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
private static final class ErrorChannelHandler extends ChannelHandlerAdapter {
|
|
|
|
private final AtomicReference<Throwable> error;
|
|
|
|
|
|
|
|
ErrorChannelHandler(AtomicReference<Throwable> error) {
|
|
|
|
this.error = error;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
|
|
|
|
error.set(new AssertionError());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
|
|
|
|
error.set(new AssertionError());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-31 07:19:00 +01:00
|
|
|
private static int next(DefaultChannelHandlerContext ctx) {
|
|
|
|
DefaultChannelHandlerContext next = ctx.next;
|
2012-11-30 15:01:57 +01:00
|
|
|
if (next == null) {
|
2012-11-16 06:30:34 +01:00
|
|
|
return Integer.MAX_VALUE;
|
2012-11-30 15:01:57 +01:00
|
|
|
}
|
2012-11-16 06:30:34 +01:00
|
|
|
|
|
|
|
return toInt(next.name());
|
|
|
|
}
|
|
|
|
|
2012-11-30 15:01:57 +01:00
|
|
|
private static int toInt(String name) {
|
2013-01-07 08:44:16 +01:00
|
|
|
try {
|
|
|
|
return Integer.parseInt(name);
|
|
|
|
} catch (NumberFormatException e) {
|
|
|
|
return -1;
|
|
|
|
}
|
2012-11-16 06:30:34 +01:00
|
|
|
}
|
|
|
|
|
2013-05-17 03:54:20 +02:00
|
|
|
private static void verifyContextNumber(ChannelPipeline pipeline, int expectedNumber) {
|
2019-01-30 13:34:20 +01:00
|
|
|
assertEquals(expectedNumber, pipeline.names().size());
|
|
|
|
assertEquals(expectedNumber, pipeline.toMap().size());
|
|
|
|
|
|
|
|
pipeline.executor().submit(new Runnable() {
|
|
|
|
@Override
|
|
|
|
public void run() {
|
2019-01-31 07:19:00 +01:00
|
|
|
DefaultChannelHandlerContext ctx = (DefaultChannelHandlerContext) pipeline.firstContext();
|
2019-01-30 13:34:20 +01:00
|
|
|
int handlerNumber = 0;
|
2019-01-31 07:19:00 +01:00
|
|
|
if (ctx != null) {
|
|
|
|
for (;;) {
|
|
|
|
handlerNumber++;
|
|
|
|
if (ctx == pipeline.lastContext()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
ctx = ctx.next;
|
|
|
|
}
|
2019-01-30 13:34:20 +01:00
|
|
|
}
|
|
|
|
assertEquals(expectedNumber, handlerNumber);
|
|
|
|
}
|
|
|
|
}).syncUninterruptibly();
|
2012-11-16 06:30:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
private static ChannelHandler[] newHandlers(int num) {
|
|
|
|
assert num > 0;
|
|
|
|
|
|
|
|
ChannelHandler[] handlers = new ChannelHandler[num];
|
2012-11-30 15:01:57 +01:00
|
|
|
for (int i = 0; i < num; i++) {
|
2012-11-16 06:30:34 +01:00
|
|
|
handlers[i] = newHandler();
|
2012-11-30 15:01:57 +01:00
|
|
|
}
|
2012-11-16 06:30:34 +01:00
|
|
|
|
|
|
|
return handlers;
|
|
|
|
}
|
|
|
|
|
2012-05-30 12:58:14 +02:00
|
|
|
// Factory for a fresh no-op TestHandler instance.
private static ChannelHandler newHandler() {
    return new TestHandler();
}
|
2012-06-04 21:14:42 +02:00
|
|
|
|
|
|
|
// Minimal no-op handler; @Sharable so one instance may be added to multiple
// pipelines without tripping the sharability check.
@Sharable
private static class TestHandler implements ChannelHandler { }
|
2013-03-12 07:19:31 +01:00
|
|
|
|
2019-03-28 10:28:27 +01:00
|
|
|
private static class BufferedTestHandler implements ChannelHandler {
|
2019-01-22 16:07:26 +01:00
|
|
|
final Queue<Object> inboundBuffer = new ArrayDeque<>();
|
|
|
|
final Queue<Object> outboundBuffer = new ArrayDeque<>();
|
2013-03-12 07:19:31 +01:00
|
|
|
|
|
|
|
@Override
|
2013-07-10 13:00:42 +02:00
|
|
|
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
outboundBuffer.add(msg);
|
2013-03-12 07:19:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-07-09 16:09:28 +02:00
|
|
|
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
inboundBuffer.add(msg);
|
2013-03-12 07:19:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
|
|
|
|
if (!inboundBuffer.isEmpty()) {
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
for (Object o: inboundBuffer) {
|
2013-07-09 16:09:28 +02:00
|
|
|
ctx.fireChannelRead(o);
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
}
|
2013-07-09 16:09:28 +02:00
|
|
|
ctx.fireChannelReadComplete();
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
}
|
|
|
|
if (!outboundBuffer.isEmpty()) {
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
for (Object o: outboundBuffer) {
|
|
|
|
ctx.write(o);
|
|
|
|
}
|
|
|
|
ctx.flush();
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
}
|
2013-03-12 07:19:31 +01:00
|
|
|
}
|
|
|
|
}
|
2013-03-14 10:43:12 +01:00
|
|
|
|
|
|
|
/** Test handler to validate life-cycle aware behavior. */
|
|
|
|
private static final class LifeCycleAwareTestHandler extends ChannelHandlerAdapter {
|
|
|
|
private final String name;
|
|
|
|
|
|
|
|
private boolean afterAdd;
|
|
|
|
private boolean afterRemove;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Constructs life-cycle aware test handler.
|
|
|
|
*
|
|
|
|
* @param name Handler name to display in assertion messages.
|
|
|
|
*/
|
|
|
|
private LifeCycleAwareTestHandler(String name) {
|
|
|
|
this.name = name;
|
|
|
|
}
|
|
|
|
|
2013-04-05 15:46:18 +02:00
|
|
|
public void validate(boolean afterAdd, boolean afterRemove) {
|
2013-03-14 10:43:12 +01:00
|
|
|
assertEquals(name, afterAdd, this.afterAdd);
|
|
|
|
assertEquals(name, afterRemove, this.afterRemove);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-04-05 15:46:18 +02:00
|
|
|
public void handlerAdded(ChannelHandlerContext ctx) {
|
|
|
|
validate(false, false);
|
2013-03-14 10:43:12 +01:00
|
|
|
|
|
|
|
afterAdd = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2013-04-05 15:46:18 +02:00
|
|
|
public void handlerRemoved(ChannelHandlerContext ctx) {
|
|
|
|
validate(true, false);
|
2013-03-14 10:43:12 +01:00
|
|
|
|
|
|
|
afterRemove = true;
|
|
|
|
}
|
|
|
|
}
|
2015-11-04 13:32:05 +01:00
|
|
|
|
|
|
|
private static final class WrapperExecutor extends AbstractEventExecutor {
|
|
|
|
|
|
|
|
private final ExecutorService wrapped = Executors.newSingleThreadExecutor();
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public boolean isShuttingDown() {
|
|
|
|
return wrapped.isShutdown();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public Future<?> shutdownGracefully(long l, long l2, TimeUnit timeUnit) {
|
|
|
|
throw new IllegalStateException();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public Future<?> terminationFuture() {
|
|
|
|
throw new IllegalStateException();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void shutdown() {
|
|
|
|
wrapped.shutdown();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public List<Runnable> shutdownNow() {
|
|
|
|
return wrapped.shutdownNow();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public boolean isShutdown() {
|
|
|
|
return wrapped.isShutdown();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public boolean isTerminated() {
|
|
|
|
return wrapped.isTerminated();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
|
|
|
|
return wrapped.awaitTermination(timeout, unit);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public boolean inEventLoop(Thread thread) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void execute(Runnable command) {
|
|
|
|
wrapped.execute(command);
|
|
|
|
}
|
2019-01-23 08:32:05 +01:00
|
|
|
|
|
|
|
@Override
|
|
|
|
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
|
|
|
|
throw new UnsupportedOperationException();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
|
|
|
|
throw new UnsupportedOperationException();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
|
|
|
|
throw new UnsupportedOperationException();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ScheduledFuture<?> scheduleWithFixedDelay(
|
|
|
|
Runnable command, long initialDelay, long delay, TimeUnit unit) {
|
|
|
|
throw new UnsupportedOperationException();
|
|
|
|
}
|
2015-11-04 13:32:05 +01:00
|
|
|
}
|
2011-12-11 09:21:29 +01:00
|
|
|
}
|