Fix buffer leak in local transport when a close triggers the close of a remote peer and there are still messages in the inbound buffer.

Motivation:

We need to release all the buffers that may still be sitting in our inbound queue once we close the Channel, to ensure we do not leak any memory. This is fine as it gives essentially the same guarantees as TCP: even if the write promise was notified beforehand, it is not guaranteed that the "remote peer" will ever see the buffer.
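To make that guarantee concrete, here is a minimal client-side sketch (illustration only, not part of this change; the handler name is made up): a successful write promise only means the message reached the peer's inbound queue, not that the peer's pipeline processed it.

    import io.netty.buffer.ByteBuf;
    import io.netty.channel.ChannelFutureListener;
    import io.netty.channel.ChannelHandlerContext;
    import io.netty.channel.ChannelInboundHandlerAdapter;

    // Hypothetical handler, for illustration only.
    public class WriteThenCloseHandler extends ChannelInboundHandlerAdapter {
        @Override
        public void channelActive(ChannelHandlerContext ctx) {
            ByteBuf msg = ctx.alloc().buffer().writeZero(8);
            ctx.writeAndFlush(msg).addListener((ChannelFutureListener) future -> {
                // A successful promise only means the buffer was handed to the peer's
                // inbound queue; if the peer closes before reading it, the buffer is
                // released unseen, much like acknowledged but unread data on TCP.
                ctx.close();
            });
        }
    }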

Modifications:

Ensure we release all buffers left in the inbound buffer when doClose() is called.
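The body of releaseInboundBuffers() is not part of this diff; the following is only a rough sketch of the drain-and-release idea it stands for (the helper class and method name are assumptions, not the actual Netty code).

    import io.netty.util.ReferenceCountUtil;
    import java.util.Queue;

    // Hypothetical helper, only to illustrate what "release all buffers in the inbound buffer" means.
    final class InboundBufferReleaser {
        private InboundBufferReleaser() { }

        static void releaseAll(Queue<Object> inboundBuffer) {
            for (Object msg; (msg = inboundBuffer.poll()) != null;) {
                // Drops the reference count if the message is reference counted; no-op otherwise.
                ReferenceCountUtil.release(msg);
            }
        }
    }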

Result:

No more buffer leaks when a LocalChannel is closed while messages are still queued in its inbound buffer.
Norman Maurer 2017-04-20 08:08:40 +02:00
parent 4dd6c14ba2
commit c663a94359
2 changed files with 126 additions and 53 deletions

LocalChannel.java

@@ -62,7 +62,7 @@ public class LocalChannel extends AbstractChannel {
     private final ChannelConfig config = new DefaultChannelConfig(this);
     // To further optimize this we could write our own SPSC queue.
-    private final Queue<Object> inboundBuffer = PlatformDependent.newSpscQueue();
+    final Queue<Object> inboundBuffer = PlatformDependent.newSpscQueue();
     private final Runnable readTask = new Runnable() {
         @Override
         public void run() {
@@ -220,60 +220,72 @@ public class LocalChannel extends AbstractChannel {
     @Override
     protected void doClose() throws Exception {
         final LocalChannel peer = this.peer;
-        if (state != State.CLOSED) {
-            // Update all internal state before the closeFuture is notified.
-            if (localAddress != null) {
-                if (parent() == null) {
-                    LocalChannelRegistry.unregister(localAddress);
-                }
-                localAddress = null;
-            }
-
-            // State change must happen before finishPeerRead to ensure writes are released either in doWrite or
-            // channelRead.
-            state = State.CLOSED;
-
-            // Preserve order of event and force a read operation now before the close operation is processed.
-            finishPeerRead(this);
-
-            ChannelPromise promise = connectPromise;
-            if (promise != null) {
-                // Use tryFailure() instead of setFailure() to avoid the race against cancel().
-                promise.tryFailure(DO_CLOSE_CLOSED_CHANNEL_EXCEPTION);
-                connectPromise = null;
-            }
-        }
-
-        if (peer != null) {
-            this.peer = null;
-            // Need to execute the close in the correct EventLoop (see https://github.com/netty/netty/issues/1777).
-            // Also check if the registration was not done yet. In this case we submit the close to the EventLoop
-            // to make sure its run after the registration completes (see https://github.com/netty/netty/issues/2144).
-            EventLoop peerEventLoop = peer.eventLoop();
-            final boolean peerIsActive = peer.isActive();
-            if (peerEventLoop.inEventLoop() && !registerInProgress) {
-                peer.tryClose(peerIsActive);
-            } else {
-                try {
-                    peerEventLoop.execute(new Runnable() {
-                        @Override
-                        public void run() {
-                            peer.tryClose(peerIsActive);
-                        }
-                    });
-                } catch (Throwable cause) {
-                    logger.warn("Releasing Inbound Queues for channels {}-{} because exception occurred!",
-                            this, peer, cause);
-                    releaseInboundBuffers();
-                    if (peerEventLoop.inEventLoop()) {
-                        peer.releaseInboundBuffers();
-                    } else {
-                        // inboundBuffers is a SPSC so we may leak if the event loop is shutdown prematurely or rejects
-                        // the close Runnable but give a best effort.
-                        peer.close();
-                    }
-                    PlatformDependent.throwException(cause);
-                }
-            }
-        }
+        State oldState = state;
+        try {
+            if (oldState != State.CLOSED) {
+                // Update all internal state before the closeFuture is notified.
+                if (localAddress != null) {
+                    if (parent() == null) {
+                        LocalChannelRegistry.unregister(localAddress);
+                    }
+                    localAddress = null;
+                }
+
+                // State change must happen before finishPeerRead to ensure writes are released either in doWrite or
+                // channelRead.
+                state = State.CLOSED;
+
+                // Preserve order of event and force a read operation now before the close operation is processed.
+                finishPeerRead(this);
+
+                ChannelPromise promise = connectPromise;
+                if (promise != null) {
+                    // Use tryFailure() instead of setFailure() to avoid the race against cancel().
+                    promise.tryFailure(DO_CLOSE_CLOSED_CHANNEL_EXCEPTION);
+                    connectPromise = null;
+                }
+            }
+
+            if (peer != null) {
+                this.peer = null;
+                // Need to execute the close in the correct EventLoop (see https://github.com/netty/netty/issues/1777).
+                // Also check if the registration was not done yet. In this case we submit the close to the EventLoop
+                // to make sure its run after the registration completes
+                // (see https://github.com/netty/netty/issues/2144).
+                EventLoop peerEventLoop = peer.eventLoop();
+                final boolean peerIsActive = peer.isActive();
+                if (peerEventLoop.inEventLoop() && !registerInProgress) {
+                    peer.tryClose(peerIsActive);
+                } else {
+                    try {
+                        peerEventLoop.execute(new Runnable() {
+                            @Override
+                            public void run() {
+                                peer.tryClose(peerIsActive);
+                            }
+                        });
+                    } catch (Throwable cause) {
+                        logger.warn("Releasing Inbound Queues for channels {}-{} because exception occurred!",
+                                this, peer, cause);
+                        if (peerEventLoop.inEventLoop()) {
+                            peer.releaseInboundBuffers();
+                        } else {
+                            // inboundBuffers is a SPSC so we may leak if the event loop is shutdown prematurely or
+                            // rejects the close Runnable but give a best effort.
+                            peer.close();
+                        }
+                        PlatformDependent.throwException(cause);
+                    }
+                }
+            }
+        } finally {
+            // Release all buffers if the Channel was already registered in the past and if it was not closed before.
+            if (oldState != null && oldState != State.CLOSED) {
+                // We need to release all the buffers that may be put into our inbound queue since we closed the Channel
+                // to ensure we not leak any memory. This is fine as it basically gives the same guarantees as TCP which
+                // means even if the promise was notified before its not really guaranteed that the "remote peer" will
+                // see the buffer at all.
+                releaseInboundBuffers();
+            }
+        }
     }

LocalChannelTest.java

@@ -896,4 +896,65 @@ public class LocalChannelTest {
             ReferenceCountUtil.safeRelease(msg);
         }
     }
+
+    @Test
+    public void testNotLeakBuffersWhenCloseByRemotePeer() throws Exception {
+        Bootstrap cb = new Bootstrap();
+        ServerBootstrap sb = new ServerBootstrap();
+
+        cb.group(sharedGroup)
+                .channel(LocalChannel.class)
+                .handler(new SimpleChannelInboundHandler<ByteBuf>() {
+                    @Override
+                    public void channelActive(final ChannelHandlerContext ctx) throws Exception {
+                        ctx.writeAndFlush(ctx.alloc().buffer().writeZero(100));
+                    }
+
+                    @Override
+                    public void channelRead0(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
+                        // Just drop the buffer
+                    }
+                });
+
+        sb.group(sharedGroup)
+                .channel(LocalServerChannel.class)
+                .childHandler(new ChannelInitializer<LocalChannel>() {
+                    @Override
+                    public void initChannel(LocalChannel ch) throws Exception {
+                        ch.pipeline().addLast(new SimpleChannelInboundHandler<ByteBuf>() {
+                            @Override
+                            public void channelRead0(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
+                                while (buffer.isReadable()) {
+                                    // Fill the ChannelOutboundBuffer with multiple buffers
+                                    ctx.write(buffer.readRetainedSlice(1));
+                                }
+
+                                // Flush and so transfer the written buffers to the inboundBuffer of the remote peer.
+                                // After this point the remote peer is responsible to release all the buffers.
+                                ctx.flush();
+                                // This close call will trigger the remote peer close as well.
+                                ctx.close();
+                            }
+                        });
+                    }
+                });
+
+        Channel sc = null;
+        LocalChannel cc = null;
+        try {
+            // Start server
+            sc = sb.bind(TEST_ADDRESS).sync().channel();
+
+            // Connect to the server
+            cc = (LocalChannel) cb.connect(sc.localAddress()).sync().channel();
+
+            // Close the channel
+            closeChannel(cc);
+            assertTrue(cc.inboundBuffer.isEmpty());
+            closeChannel(sc);
+        } finally {
+            closeChannel(cc);
+            closeChannel(sc);
+        }
+    }
 }