netty5/transport/src/main/java/io/netty/channel/DefaultChannelPipeline.java


/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel;
import static java.util.Objects.requireNonNull;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.ResourceLeakDetector;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.FastThreadLocal;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.UnstableApi;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import java.net.SocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.WeakHashMap;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.IntSupplier;
import java.util.function.Predicate;
/**
* The default {@link ChannelPipeline} implementation. It is usually created
* by a {@link Channel} implementation when the {@link Channel} is created.
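* <p>A minimal usage sketch; {@code MyDecoder} and {@code MyBusinessHandler} below are
* placeholder handler implementations, not part of this class:
* <pre>{@code
* ChannelPipeline p = channel.pipeline();
* p.addLast("decoder", new MyDecoder());
* p.addLast("handler", new MyBusinessHandler());
* // handlers can later be looked up or removed by name
* p.remove("decoder");
* }</pre>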
*/
public class DefaultChannelPipeline implements ChannelPipeline {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(DefaultChannelPipeline.class);
private static final String HEAD_NAME = generateName0(HeadHandler.class);
private static final String TAIL_NAME = generateName0(TailHandler.class);
private static final String EMPTY_NAME = generateName0(EmptyHandler.class);
private static final ChannelHandler HEAD_HANDLER = new HeadHandler();
private static final ChannelHandler TAIL_HANDLER = new TailHandler();
private static final ChannelHandler UNLINK_HANDLER = new EmptyHandler();
private static final FastThreadLocal<Map<Class<?>, String>> nameCaches =
new FastThreadLocal<Map<Class<?>, String>>() {
@Override
protected Map<Class<?>, String> initialValue() {
return new WeakHashMap<>();
}
};
private static final AtomicReferenceFieldUpdater<DefaultChannelPipeline, MessageSizeEstimator.Handle> ESTIMATOR =
AtomicReferenceFieldUpdater.newUpdater(
DefaultChannelPipeline.class, MessageSizeEstimator.Handle.class, "estimatorHandle");
private final DefaultChannelHandlerContext head;
private final DefaultChannelHandlerContext tail;
private final DefaultChannelHandlerContext empty;
private final Channel channel;
private final ChannelFuture succeededFuture;
private final VoidChannelPromise voidPromise;
private final boolean touch = ResourceLeakDetector.isEnabled();
private final List<DefaultChannelHandlerContext> handlers = new ArrayList<>(4);
private volatile MessageSizeEstimator.Handle estimatorHandle;
public DefaultChannelPipeline(Channel channel) {
this.channel = requireNonNull(channel, "channel");
succeededFuture = new SucceededChannelFuture(channel, channel.eventLoop());
voidPromise = new VoidChannelPromise(channel, true);
empty = new DefaultChannelHandlerContext(this, EMPTY_NAME, UNLINK_HANDLER);
tail = new DefaultChannelHandlerContext(this, TAIL_NAME, TAIL_HANDLER);
head = new DefaultChannelHandlerContext(this, HEAD_NAME, HEAD_HANDLER);
head.next = tail;
tail.prev = head;
head.setAddComplete();
tail.setAddComplete();
}
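// Lazily creates the MessageSizeEstimator.Handle for this pipeline. The CAS below makes sure
// that only one of several racing threads publishes its handle; the losers re-read the winner.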
final MessageSizeEstimator.Handle estimatorHandle() {
MessageSizeEstimator.Handle handle = estimatorHandle;
if (handle == null) {
handle = channel.config().getMessageSizeEstimator().newHandle();
if (!ESTIMATOR.compareAndSet(this, null, handle)) {
handle = estimatorHandle;
}
}
return handle;
}
final Object touch(Object msg, DefaultChannelHandlerContext next) {
return touch ? ReferenceCountUtil.touch(msg, next) : msg;
}
private DefaultChannelHandlerContext newContext(String name, ChannelHandler handler) {
return new DefaultChannelHandlerContext(this, name, handler);
}
@Override
public final Channel channel() {
return channel;
}
@Override
public final EventExecutor executor() {
return channel().eventLoop();
}
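// All add*/remove*/replace* methods below follow the same pattern: the handlers list used for
// lookups (guarded by its own lock) is updated first, then the doubly linked context list used
// for event propagation is mutated: directly when we are already on the event loop, otherwise
// via executor.execute(). If scheduling that task fails, the list change is rolled back.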
@Override
public final ChannelPipeline addFirst(String name, ChannelHandler handler) {
checkMultiplicity(handler);
if (name == null) {
name = generateName(handler);
}
DefaultChannelHandlerContext newCtx = newContext(name, handler);
EventExecutor executor = executor();
boolean inEventLoop = executor.inEventLoop();
synchronized (handlers) {
if (context(name) != null) {
throw new IllegalArgumentException("Duplicate handler name: " + name);
}
handlers.add(0, newCtx);
if (!inEventLoop) {
try {
executor.execute(() -> addFirst0(newCtx));
return this;
} catch (Throwable cause) {
handlers.remove(0);
throw cause;
}
}
}
addFirst0(newCtx);
return this;
}
private void addFirst0(DefaultChannelHandlerContext newCtx) {
DefaultChannelHandlerContext nextCtx = head.next;
newCtx.prev = head;
newCtx.next = nextCtx;
head.next = newCtx;
nextCtx.prev = newCtx;
callHandlerAdded0(newCtx);
}
@Override
public final ChannelPipeline addLast(String name, ChannelHandler handler) {
checkMultiplicity(handler);
if (name == null) {
name = generateName(handler);
}
DefaultChannelHandlerContext newCtx = newContext(name, handler);
EventExecutor executor = executor();
boolean inEventLoop = executor.inEventLoop();
synchronized (handlers) {
if (context(name) != null) {
throw new IllegalArgumentException("Duplicate handler name: " + name);
}
handlers.add(newCtx);
if (!inEventLoop) {
try {
executor.execute(() -> addLast0(newCtx));
return this;
} catch (Throwable cause) {
handlers.remove(handlers.size() - 1);
throw cause;
}
}
}
addLast0(newCtx);
return this;
}
private void addLast0(DefaultChannelHandlerContext newCtx) {
DefaultChannelHandlerContext prev = tail.prev;
newCtx.prev = prev;
newCtx.next = tail;
prev.next = newCtx;
tail.prev = newCtx;
callHandlerAdded0(newCtx);
}
@Override
public final ChannelPipeline addBefore(String baseName, String name, ChannelHandler handler) {
final DefaultChannelHandlerContext ctx;
checkMultiplicity(handler);
if (name == null) {
name = generateName(handler);
}
DefaultChannelHandlerContext newCtx = newContext(name, handler);
EventExecutor executor = executor();
boolean inEventLoop = executor.inEventLoop();
synchronized (handlers) {
int i = findCtxIdx(context -> context.name().equals(baseName));
if (i == -1) {
throw new NoSuchElementException(baseName);
}
if (context(name) != null) {
throw new IllegalArgumentException("Duplicate handler name: " + name);
}
ctx = handlers.get(i);
handlers.add(i, newCtx);
if (!inEventLoop) {
try {
executor.execute(() -> addBefore0(ctx, newCtx));
return this;
} catch (Throwable cause) {
handlers.remove(i);
throw cause;
}
}
}
addBefore0(ctx, newCtx);
return this;
}
private void addBefore0(DefaultChannelHandlerContext ctx, DefaultChannelHandlerContext newCtx) {
newCtx.prev = ctx.prev;
newCtx.next = ctx;
ctx.prev.next = newCtx;
ctx.prev = newCtx;
callHandlerAdded0(newCtx);
}
@Override
public final ChannelPipeline addAfter(String baseName, String name, ChannelHandler handler) {
final DefaultChannelHandlerContext ctx;
checkMultiplicity(handler);
if (name == null) {
name = generateName(handler);
}
DefaultChannelHandlerContext newCtx = newContext(name, handler);
EventExecutor executor = executor();
boolean inEventLoop = executor.inEventLoop();
synchronized (handlers) {
int i = findCtxIdx(context -> context.name().equals(baseName));
if (i == -1) {
throw new NoSuchElementException(baseName);
}
if (context(name) != null) {
throw new IllegalArgumentException("Duplicate handler name: " + name);
}
ctx = handlers.get(i);
handlers.add(i + 1, newCtx);
if (!inEventLoop) {
try {
executor.execute(() -> addAfter0(ctx, newCtx));
return this;
} catch (Throwable cause) {
handlers.remove(i + 1);
throw cause;
}
}
}
addAfter0(ctx, newCtx);
return this;
}
private void addAfter0(DefaultChannelHandlerContext ctx, DefaultChannelHandlerContext newCtx) {
newCtx.prev = ctx;
newCtx.next = ctx.next;
ctx.next.prev = newCtx;
ctx.next = newCtx;
callHandlerAdded0(newCtx);
}
public final ChannelPipeline addFirst(ChannelHandler handler) {
return addFirst(null, handler);
}
@Override
public final ChannelPipeline addFirst(ChannelHandler... handlers) {
requireNonNull(handlers, "handlers");
if (handlers.length == 0 || handlers[0] == null) {
return this;
}
int size;
for (size = 1; size < handlers.length; size ++) {
if (handlers[size] == null) {
break;
}
}
for (int i = size - 1; i >= 0; i --) {
ChannelHandler h = handlers[i];
addFirst(null, h);
}
return this;
}
public final ChannelPipeline addLast(ChannelHandler handler) {
return addLast(null, handler);
}
@Override
public final ChannelPipeline addLast(ChannelHandler... handlers) {
requireNonNull(handlers, "handlers");
for (ChannelHandler h: handlers) {
if (h == null) {
break;
}
addLast(null, h);
}
return this;
}
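// Default names have the form "SimpleClassName#0" and are cached per thread by handler type.
// If that name is already taken in this pipeline, the trailing index is incremented until a
// free name is found (those fallback names are not cached).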
private String generateName(ChannelHandler handler) {
Map<Class<?>, String> cache = nameCaches.get();
Class<?> handlerType = handler.getClass();
String name = cache.get(handlerType);
if (name == null) {
name = generateName0(handlerType);
cache.put(handlerType, name);
}
synchronized (handlers) {
// It's not very likely for a user to put more than one handler of the same type, but make sure to avoid
// any name conflicts. Note that we don't cache the names generated here.
if (context(name) != null) {
String baseName = name.substring(0, name.length() - 1); // Strip the trailing '0'.
for (int i = 1;; i ++) {
String newName = baseName + i;
if (context(newName) == null) {
name = newName;
break;
}
}
}
}
return name;
}
private static String generateName0(Class<?> handlerType) {
return StringUtil.simpleClassName(handlerType) + "#0";
}
private int findCtxIdx(Predicate<DefaultChannelHandlerContext> predicate) {
for (int i = 0; i < handlers.size(); i++) {
if (predicate.test(handlers.get(i))) {
return i;
}
}
return -1;
}
@Override
public final ChannelPipeline remove(ChannelHandler handler) {
final DefaultChannelHandlerContext ctx;
EventExecutor executor = executor();
boolean inEventLoop = executor.inEventLoop();
synchronized (handlers) {
int idx = findCtxIdx(context -> context.handler() == handler);
if (idx == -1) {
throw new NoSuchElementException();
}
ctx = handlers.remove(idx);
assert ctx != null;
if (!inEventLoop) {
try {
executor.execute(() -> remove0(ctx));
return this;
} catch (Throwable cause) {
handlers.add(idx, ctx);
throw cause;
}
}
}
remove0(ctx);
return this;
}
@Override
public final ChannelHandler remove(String name) {
final DefaultChannelHandlerContext ctx;
EventExecutor executor = executor();
boolean inEventLoop = executor.inEventLoop();
synchronized (handlers) {
int idx = findCtxIdx(context -> context.name().equals(name));
if (idx == -1) {
throw new NoSuchElementException();
}
ctx = handlers.remove(idx);
assert ctx != null;
if (!inEventLoop) {
try {
executor.execute(() -> remove0(ctx));
return ctx.handler();
} catch (Throwable cause) {
handlers.add(idx, ctx);
throw cause;
}
}
}
remove0(ctx);
return ctx.handler();
}
@SuppressWarnings("unchecked")
@Override
public final <T extends ChannelHandler> T remove(Class<T> handlerType) {
final DefaultChannelHandlerContext ctx;
EventExecutor executor = executor();
boolean inEventLoop = executor.inEventLoop();
synchronized (handlers) {
int idx = findCtxIdx(context -> handlerType.isAssignableFrom(context.handler().getClass()));
if (idx == -1) {
throw new NoSuchElementException();
}
ctx = handlers.remove(idx);
assert ctx != null;
if (!inEventLoop) {
try {
executor.execute(() -> remove0(ctx));
return (T) ctx.handler();
} catch (Throwable cause) {
handlers.add(idx, ctx);
throw cause;
}
}
}
remove0(ctx);
return (T) ctx.handler();
}
public final <T extends ChannelHandler> T removeIfExists(String name) {
return removeIfExists(() -> findCtxIdx(context -> name.equals(context.name())));
}
public final <T extends ChannelHandler> T removeIfExists(Class<T> handlerType) {
return removeIfExists(() -> findCtxIdx(
context -> handlerType.isAssignableFrom(context.handler().getClass())));
}
public final <T extends ChannelHandler> T removeIfExists(ChannelHandler handler) {
return removeIfExists(() -> findCtxIdx(context -> handler == context.handler()));
}
@SuppressWarnings("unchecked")
private <T extends ChannelHandler> T removeIfExists(IntSupplier idxSupplier) {
final DefaultChannelHandlerContext ctx;
EventExecutor executor = executor();
boolean inEventLoop = executor.inEventLoop();
synchronized (handlers) {
int idx = idxSupplier.getAsInt();
if (idx == -1) {
return null;
}
ctx = handlers.remove(idx);
assert ctx != null;
if (!inEventLoop) {
try {
executor.execute(() -> remove0(ctx));
return (T) ctx.handler();
} catch (Throwable cause) {
handlers.add(idx, ctx);
throw cause;
}
}
}
remove0(ctx);
return (T) ctx.handler();
}
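// relink() splices a context out of the live doubly linked list so traversal skips it;
// unlink() afterwards points the removed context at the shared EmptyHandler context so that
// events still flowing through the stale context no longer reach the real head or tail.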
private void relink(DefaultChannelHandlerContext ctx) {
assert ctx != head && ctx != tail && ctx != empty;
DefaultChannelHandlerContext prev = ctx.prev;
DefaultChannelHandlerContext next = ctx.next;
prev.next = next;
next.prev = prev;
}
private void unlink(DefaultChannelHandlerContext ctx) {
assert ctx != head && ctx != tail && ctx != empty;
ctx.next = empty;
ctx.prev = empty;
}
private void remove0(DefaultChannelHandlerContext ctx) {
relink(ctx);
callHandlerRemoved0(ctx);
unlink(ctx);
}
@Override
public final ChannelPipeline replace(ChannelHandler oldHandler, String newName, ChannelHandler newHandler) {
replace(ctx -> ctx.handler() == oldHandler, newName, newHandler);
return this;
}
@Override
public final ChannelHandler replace(String oldName, String newName, ChannelHandler newHandler) {
return replace(ctx -> ctx.name().equals(oldName), newName, newHandler);
}
@Override
@SuppressWarnings("unchecked")
public final <T extends ChannelHandler> T replace(
Class<T> oldHandlerType, String newName, ChannelHandler newHandler) {
return (T) replace(ctx -> oldHandlerType.isAssignableFrom(ctx.handler().getClass()), newName, newHandler);
}
private ChannelHandler replace(
Predicate<DefaultChannelHandlerContext> predicate, String newName, ChannelHandler newHandler) {
checkMultiplicity(newHandler);
if (newName == null) {
newName = generateName(newHandler);
}
DefaultChannelHandlerContext oldCtx;
DefaultChannelHandlerContext newCtx = newContext(newName, newHandler);
EventExecutor executor = executor();
boolean inEventLoop = executor.inEventLoop();
synchronized (handlers) {
int idx = findCtxIdx(predicate);
if (idx == -1) {
throw new NoSuchElementException();
}
oldCtx = handlers.get(idx);
assert oldCtx != head && oldCtx != tail && oldCtx != null;
if (!oldCtx.name().equals(newName)) {
if (context(newName) != null) {
throw new IllegalArgumentException("Duplicate handler name: " + newName);
}
}
DefaultChannelHandlerContext removed = handlers.set(idx, newCtx);
assert removed != null;
if (!inEventLoop) {
try {
executor.execute(() -> replace0(oldCtx, newCtx));
return oldCtx.handler();
} catch (Throwable cause) {
handlers.set(idx, oldCtx);
throw cause;
}
}
}
replace0(oldCtx, newCtx);
return oldCtx.handler();
}
private void replace0(DefaultChannelHandlerContext oldCtx, DefaultChannelHandlerContext newCtx) {
try {
DefaultChannelHandlerContext prev = oldCtx.prev;
DefaultChannelHandlerContext next = oldCtx.next;
newCtx.prev = prev;
newCtx.next = next;
// Finish the replacement of oldCtx with newCtx in the linked list.
// Note that this doesn't mean events will be sent to the new handler immediately
// because we are currently on the event loop thread and no more than one handler method can be invoked
// at the same time (we ensured that in replace().)
prev.next = newCtx;
next.prev = newCtx;
// Update the reference to the replacement so forwarding of buffered content works correctly.
oldCtx.prev = newCtx;
oldCtx.next = newCtx;
// Invoke newHandler.handlerAdded() first (i.e. before oldHandler.handlerRemoved() is invoked)
// because callHandlerRemoved() will trigger channelRead() or flush() on newHandler and those
// event handlers must be called after handlerAdded().
callHandlerAdded0(newCtx);
callHandlerRemoved0(oldCtx);
} finally {
unlink(oldCtx);
}
}
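// For ChannelHandlerAdapter subclasses that are not @Sharable, the 'added' flag rejects adding
// the same handler instance to a pipeline more than once.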
private static void checkMultiplicity(ChannelHandler handler) {
if (handler instanceof ChannelHandlerAdapter) {
ChannelHandlerAdapter h = (ChannelHandlerAdapter) handler;
if (!h.isSharable() && h.added) {
throw new ChannelPipelineException(
h.getClass().getName() +
" is not a @Sharable handler, so can't be added or removed multiple times.");
}
h.added = true;
}
}
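// If handlerAdded() throws, the context is taken out of the pipeline again and the failure is
// reported to the pipeline as an exceptionCaught event.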
private void callHandlerAdded0(final DefaultChannelHandlerContext ctx) {
try {
ctx.callHandlerAdded();
} catch (Throwable t) {
boolean removed = false;
try {
synchronized (handlers) {
handlers.remove(ctx);
}
relink(ctx);
ctx.callHandlerRemoved();
removed = true;
} catch (Throwable t2) {
if (logger.isWarnEnabled()) {
logger.warn("Failed to remove a handler: " + ctx.name(), t2);
}
} finally {
unlink(ctx);
}
if (removed) {
fireExceptionCaught(new ChannelPipelineException(
ctx.handler().getClass().getName() +
".handlerAdded() has thrown an exception; removed.", t));
} else {
fireExceptionCaught(new ChannelPipelineException(
ctx.handler().getClass().getName() +
".handlerAdded() has thrown an exception; also failed to remove.", t));
}
}
}
private void callHandlerRemoved0(final DefaultChannelHandlerContext ctx) {
// Notify the complete removal.
try {
ctx.callHandlerRemoved();
} catch (Throwable t) {
fireExceptionCaught(new ChannelPipelineException(
ctx.handler().getClass().getName() + ".handlerRemoved() has thrown an exception.", t));
}
}
@Override
public final ChannelHandler get(String name) {
ChannelHandlerContext ctx = context(name);
return ctx == null ? null : ctx.handler();
}
@SuppressWarnings("unchecked")
@Override
public final <T extends ChannelHandler> T get(Class<T> handlerType) {
ChannelHandlerContext ctx = context(handlerType);
return ctx == null ? null : (T) ctx.handler();
}
private DefaultChannelHandlerContext findCtx(Predicate<DefaultChannelHandlerContext> predicate) {
for (int i = 0; i < handlers.size(); i++) {
DefaultChannelHandlerContext ctx = handlers.get(i);
if (predicate.test(ctx)) {
return ctx;
}
}
return null;
}
@Override
public final ChannelHandlerContext context(String name) {
requireNonNull(name, "name");
synchronized (handlers) {
return findCtx(ctx -> ctx.name().equals(name));
}
}
@Override
public final ChannelHandlerContext context(ChannelHandler handler) {
requireNonNull(handler, "handler");
synchronized (handlers) {
return findCtx(ctx -> ctx.handler() == handler);
}
}
@Override
public final ChannelHandlerContext context(Class<? extends ChannelHandler> handlerType) {
requireNonNull(handlerType, "handlerType");
synchronized (handlers) {
return findCtx(ctx -> handlerType.isAssignableFrom(ctx.handler().getClass()));
}
}
@Override
public final List<String> names() {
synchronized (handlers) {
List<String> names = new ArrayList<>(handlers.size());
for (int i = 0; i < handlers.size(); i++) {
names.add(handlers.get(i).name());
}
return names;
}
}
/**
* Returns the {@link String} representation of this pipeline.
*/
@Override
public final String toString() {
StringBuilder buf = new StringBuilder()
.append(StringUtil.simpleClassName(this))
.append('{');
synchronized (handlers) {
if (!handlers.isEmpty()) {
for (int i = 0; i < handlers.size(); i++) {
DefaultChannelHandlerContext ctx = handlers.get(i);
buf.append('(')
.append(ctx.name())
.append(" = ")
.append(ctx.handler().getClass().getName())
.append("), ");
}
buf.setLength(buf.length() - 2);
}
}
buf.append('}');
return buf.toString();
}
@Override
public ChannelHandler removeFirst() {
synchronized (handlers) {
if (handlers.isEmpty()) {
throw new NoSuchElementException();
}
return handlers.remove(0).handler();
}
}
@Override
public ChannelHandler removeLast() {
synchronized (handlers) {
if (handlers.isEmpty()) {
throw new NoSuchElementException();
}
return handlers.remove(handlers.size() - 1).handler();
}
}
@Override
public ChannelHandler first() {
ChannelHandlerContext ctx = firstContext();
return ctx == null ? null : ctx.handler();
}
@Override
public ChannelHandlerContext firstContext() {
synchronized (handlers) {
return handlers.isEmpty() ? null : handlers.get(0);
}
}
@Override
public ChannelHandler last() {
ChannelHandlerContext ctx = lastContext();
return ctx == null ? null : ctx.handler();
}
@Override
public ChannelHandlerContext lastContext() {
synchronized (handlers) {
return handlers.isEmpty() ? null : handlers.get(handlers.size() - 1);
}
}
@Override
public Map<String, ChannelHandler> toMap() {
Map<String, ChannelHandler> map;
synchronized (handlers) {
if (handlers.isEmpty()) {
return Collections.emptyMap();
}
map = new LinkedHashMap<>(handlers.size());
for (int i = 0; i < handlers.size(); i++) {
ChannelHandlerContext ctx = handlers.get(i);
map.put(ctx.name(), ctx.handler());
}
return map;
}
}
@Override
public Iterator<Map.Entry<String, ChannelHandler>> iterator() {
return toMap().entrySet().iterator();
}
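// Inbound events fired below enter the pipeline at the head context; the outbound operations
// further down (bind, connect, write, flush, ...) start at the tail context.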
@Override
public final ChannelPipeline fireChannelRegistered() {
head.invokeChannelRegistered();
return this;
}
@Override
public final ChannelPipeline fireChannelUnregistered() {
head.invokeChannelUnregistered();
return this;
}
/**
* Removes all handlers from the pipeline one by one from tail (exclusive) to head (exclusive) to trigger
* handlerRemoved().
*/
private void destroy() {
EventExecutor executor = executor();
if (executor.inEventLoop()) {
destroy0();
} else {
executor.execute(this::destroy0);
}
}
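// Walks from tail to head on the event loop, removing each remaining context from the handlers
// list and firing its handlerRemoved() callback.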
private void destroy0() {
assert executor().inEventLoop();
DefaultChannelHandlerContext ctx = this.tail.prev;
while (ctx != head) {
synchronized (handlers) {
handlers.remove(ctx);
}
DefaultChannelHandlerContext prev = ctx.prev;
remove0(ctx);
ctx = prev;
}
}
@Override
public final ChannelPipeline fireChannelActive() {
head.invokeChannelActive();
return this;
}
@Override
public final ChannelPipeline fireChannelInactive() {
head.invokeChannelInactive();
return this;
}
@Override
public final ChannelPipeline fireExceptionCaught(Throwable cause) {
head.invokeExceptionCaught(cause);
return this;
}
@Override
public final ChannelPipeline fireUserEventTriggered(Object event) {
head.invokeUserEventTriggered(event);
return this;
}
@Override
public final ChannelPipeline fireChannelRead(Object msg) {
head.invokeChannelRead(msg);
return this;
}
@Override
public final ChannelPipeline fireChannelReadComplete() {
head.invokeChannelReadComplete();
return this;
}
@Override
public final ChannelPipeline fireChannelWritabilityChanged() {
head.invokeChannelWritabilityChanged();
return this;
}
@Override
public final ChannelFuture bind(SocketAddress localAddress) {
return tail.bind(localAddress);
}
@Override
public final ChannelFuture connect(SocketAddress remoteAddress) {
return tail.connect(remoteAddress);
}
@Override
public final ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) {
return tail.connect(remoteAddress, localAddress);
}
@Override
public final ChannelFuture disconnect() {
return tail.disconnect();
}
@Override
public final ChannelFuture close() {
return tail.close();
}
@Override
public final ChannelFuture register() {
return tail.register();
}
@Override
public final ChannelFuture deregister() {
return tail.deregister();
}
@Override
public final ChannelPipeline flush() {
tail.flush();
return this;
}
@Override
public final ChannelFuture bind(SocketAddress localAddress, ChannelPromise promise) {
return tail.bind(localAddress, promise);
}
@Override
public final ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) {
return tail.connect(remoteAddress, promise);
}
@Override
public final ChannelFuture connect(
SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) {
return tail.connect(remoteAddress, localAddress, promise);
}
@Override
public final ChannelFuture disconnect(ChannelPromise promise) {
return tail.disconnect(promise);
}
@Override
public final ChannelFuture close(ChannelPromise promise) {
return tail.close(promise);
}
@Override
public final ChannelFuture register(final ChannelPromise promise) {
return tail.register(promise);
}
@Override
public final ChannelFuture deregister(final ChannelPromise promise) {
return tail.deregister(promise);
}
@Override
public final ChannelPipeline read() {
tail.read();
return this;
}
@Override
public final ChannelFuture write(Object msg) {
return tail.write(msg);
}
@Override
public final ChannelFuture write(Object msg, ChannelPromise promise) {
return tail.write(msg, promise);
}
@Override
public final ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) {
return tail.writeAndFlush(msg, promise);
}
@Override
public final ChannelFuture writeAndFlush(Object msg) {
return tail.writeAndFlush(msg);
}
@Override
public final ChannelPromise newPromise() {
return new DefaultChannelPromise(channel(), executor());
}
@Override
public final ChannelProgressivePromise newProgressivePromise() {
return new DefaultChannelProgressivePromise(channel(), executor());
}
@Override
public final ChannelFuture newSucceededFuture() {
return succeededFuture;
}
@Override
public final ChannelFuture newFailedFuture(Throwable cause) {
return new FailedChannelFuture(channel(), executor(), cause);
}
@Override
public final ChannelPromise voidPromise() {
return voidPromise;
}
/**
* Called once a {@link Throwable} hits the end of the {@link ChannelPipeline} without being handled by the user
* in {@link ChannelInboundHandler#exceptionCaught(ChannelHandlerContext, Throwable)}.
*/
protected void onUnhandledInboundException(Throwable cause) {
try {
logger.warn(
"An exceptionCaught() event was fired, and it reached at the tail of the pipeline. " +
"It usually means the last handler in the pipeline did not handle the exception.",
cause);
} finally {
ReferenceCountUtil.release(cause);
}
}
/**
* Called once the {@link ChannelInboundHandler#channelActive(ChannelHandlerContext)} event hits
* the end of the {@link ChannelPipeline}.
*/
protected void onUnhandledInboundChannelActive() {
}
/**
* Called once the {@link ChannelInboundHandler#channelInactive(ChannelHandlerContext)} event hits
* the end of the {@link ChannelPipeline}.
*/
protected void onUnhandledInboundChannelInactive() {
}
/**
* Called once a message hits the end of the {@link ChannelPipeline} without being handled by the user
* in {@link ChannelInboundHandler#channelRead(ChannelHandlerContext, Object)}. This method is responsible
* for calling {@link ReferenceCountUtil#release(Object)} on the given msg at some point.
*/
protected void onUnhandledInboundMessage(ChannelHandlerContext ctx, Object msg) {
try {
logger.debug(
"Discarded inbound message {} that reached at the tail of the pipeline. " +
"Please check your pipeline configuration. Discarded message pipeline : {}. Channel : {}.",
msg, ctx.pipeline().names(), ctx.channel());
} finally {
ReferenceCountUtil.release(msg);
}
}
/**
* Called once the {@link ChannelInboundHandler#channelReadComplete(ChannelHandlerContext)} event hits
* the end of the {@link ChannelPipeline}.
*/
protected void onUnhandledInboundChannelReadComplete() {
}
/**
* Called once a user event hits the end of the {@link ChannelPipeline} without being handled by the user
* in {@link ChannelInboundHandler#userEventTriggered(ChannelHandlerContext, Object)}. This method is responsible
* for calling {@link ReferenceCountUtil#release(Object)} on the given event at some point.
*/
protected void onUnhandledInboundUserEventTriggered(Object evt) {
// This may not be a configuration error and so don't log anything.
// The event may be superfluous for the current pipeline configuration.
ReferenceCountUtil.release(evt);
}
/**
* Called once the {@link ChannelInboundHandler#channelWritabilityChanged(ChannelHandlerContext)} event hits
* the end of the {@link ChannelPipeline}.
*/
protected void onUnhandledChannelWritabilityChanged() {
}
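// Adjust the channel's pending outbound byte count; both methods are no-ops when the channel
// no longer has an outbound buffer.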
@UnstableApi
protected void incrementPendingOutboundBytes(long size) {
ChannelOutboundBuffer buffer = channel.unsafe().outboundBuffer();
if (buffer != null) {
buffer.incrementPendingOutboundBytes(size);
}
}
@UnstableApi
protected void decrementPendingOutboundBytes(long size) {
ChannelOutboundBuffer buffer = channel.unsafe().outboundBuffer();
if (buffer != null) {
buffer.decrementPendingOutboundBytes(size);
}
}
// A special catch-all handler that handles both bytes and messages.
private static final class TailHandler implements ChannelInboundHandler {
@Override
public void channelRegistered(ChannelHandlerContext ctx) { }
@Override
public void channelUnregistered(ChannelHandlerContext ctx) { }
@Override
public void channelActive(ChannelHandlerContext ctx) {
((DefaultChannelPipeline) ctx.pipeline()).onUnhandledInboundChannelActive();
}
@Override
public void channelInactive(ChannelHandlerContext ctx) {
((DefaultChannelPipeline) ctx.pipeline()).onUnhandledInboundChannelInactive();
}
@Override
public void channelWritabilityChanged(ChannelHandlerContext ctx) {
((DefaultChannelPipeline) ctx.pipeline()).onUnhandledChannelWritabilityChanged();
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) { }
@Override
public void handlerRemoved(ChannelHandlerContext ctx) { }
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
((DefaultChannelPipeline) ctx.pipeline()).onUnhandledInboundUserEventTriggered(evt);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
((DefaultChannelPipeline) ctx.pipeline()).onUnhandledInboundException(cause);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
((DefaultChannelPipeline) ctx.pipeline()).onUnhandledInboundMessage(ctx, msg);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
((DefaultChannelPipeline) ctx.pipeline()).onUnhandledInboundChannelReadComplete();
}
}
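// Outbound counterpart of TailHandler: forwards the pipeline's outbound operations
// (bind, connect, disconnect, close, ...) straight to the channel's Unsafe.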
private static final class HeadHandler implements ChannelHandler {
@Override
public void bind(
ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise promise) {
ctx.channel().unsafe().bind(localAddress, promise);
}
@Override
public void connect(
ChannelHandlerContext ctx,
SocketAddress remoteAddress, SocketAddress localAddress,
ChannelPromise promise) {
ctx.channel().unsafe().connect(remoteAddress, localAddress, promise);
}
@Override
public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) {
ctx.channel().unsafe().disconnect(promise);
}
@Override
public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
ctx.channel().unsafe().close(promise);
}
@Override
public void register(ChannelHandlerContext ctx, ChannelPromise promise) {
ctx.channel().unsafe().register(promise);
}
@Override
public void deregister(ChannelHandlerContext ctx, ChannelPromise promise) {
ctx.channel().unsafe().deregister(promise);
}
@Override
public void read(ChannelHandlerContext ctx) {
ctx.channel().unsafe().beginRead();
}
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
ctx.channel().unsafe().write(msg, promise);
}
@Override
public void flush(ChannelHandlerContext ctx) {
ctx.channel().unsafe().flush();
}
@Override
public void channelUnregistered(ChannelHandlerContext ctx) {
ctx.fireChannelUnregistered();
// Remove all handlers sequentially if channel is closed and unregistered.
if (!ctx.channel().isOpen()) {
((DefaultChannelPipeline) ctx.pipeline()).destroy();
}
}
}
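/**
 * Handler backing {@code UNLINK_HANDLER}: it takes the place of a handler that has already been removed from
 * the pipeline. Inbound messages and user events that still arrive are released to avoid resource leaks, while
 * outbound operations fail their {@link ChannelPromise} because the original handler was removed already.
 */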
private static final class EmptyHandler implements ChannelHandler {
@Override
public void channelRegistered(ChannelHandlerContext ctx) { }
@Override
public void channelUnregistered(ChannelHandlerContext ctx) { }
@Override
public void channelActive(ChannelHandlerContext ctx) { }
@Override
public void channelInactive(ChannelHandlerContext ctx) { }
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
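// No handler is left to consume the message, so release it to avoid a buffer leak.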
ReferenceCountUtil.release(msg);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) { }
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
ReferenceCountUtil.release(evt);
}
@Override
public void channelWritabilityChanged(ChannelHandlerContext ctx) { }
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { }
@Override
public void bind(ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise promise) {
promise.setFailure(new ChannelPipelineException("Handler " + ctx.handler() + " removed already"));
}
@Override
public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, SocketAddress localAddress,
ChannelPromise promise) {
promise.setFailure(new ChannelPipelineException("Handler " + ctx.handler() + " removed already"));
}
@Override
public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) {
promise.setFailure(new ChannelPipelineException("Handler " + ctx.handler() + " removed already"));
}
@Override
public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
promise.setFailure(new ChannelPipelineException("Handler " + ctx.handler() + " removed already"));
}
@Override
public void register(ChannelHandlerContext ctx, ChannelPromise promise) {
promise.setFailure(new ChannelPipelineException("Handler " + ctx.handler() + " removed already"));
}
@Override
public void deregister(ChannelHandlerContext ctx, ChannelPromise promise) {
promise.setFailure(new ChannelPipelineException("Handler " + ctx.handler() + " removed already"));
}
@Override
public void read(ChannelHandlerContext ctx) { }
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
ReferenceCountUtil.release(msg);
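// Surface the call site of a write that reached a removed handler by printing its stack trace.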
new Throwable().printStackTrace();
promise.setFailure(new ChannelPipelineException("Handler " + ctx.handler() + " removed already"));
}
@Override
public void flush(ChannelHandlerContext ctx) {
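// As with write(...), print a stack trace to surface flushes that reach a removed handler.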
new Throwable().printStackTrace();
}
}
}