Fix DefaultHttpDataFactory cleanup bug

Motivation:

DefaultHttpDataFactory uses HttpRequest as map keys.

Because of the implementation of "hashCode" and "equals" in DefaultHttpRequest,
if we use normal maps, HttpDatas of different requests may end up in the same map entry,
causing a cleanup bug.

Consider this example:
- Suppose that request1 is equal to request2, causing their HttpDatas to be stored in one single map entry.
- request1 is cleaned up first, while request2 is still being decoded.
- Consequently request2's HttpDatas are suddenly gone, causing an NPE or, worse, loss of data.

This bug can be reproduced by starting the HttpUploadServer example,
then run this command:
ab -T 'application/x-www-form-urlencoded' -n 100 -c 5 -p post.txt http://localhost:8080/form

post.txt file content:
a=1&b=2

There will be errors like this:
java.lang.NullPointerException
        at io.netty.handler.codec.http.multipart.MemoryAttribute.getValue(MemoryAttribute.java:64)
        at io.netty.handler.codec.http.multipart.MixedAttribute.getValue(MixedAttribute.java:243)
        at io.netty.example.http.upload.HttpUploadServerHandler.writeHttpData(HttpUploadServerHandler.java:271)
        at io.netty.example.http.upload.HttpUploadServerHandler.readHttpDataChunkByChunk(HttpUploadServerHandler.java:230)
        at io.netty.example.http.upload.HttpUploadServerHandler.channelRead0(HttpUploadServerHandler.java:193)
        at io.netty.example.http.upload.HttpUploadServerHandler.channelRead0(HttpUploadServerHandler.java:66)
        at io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)
        at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
        at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
        at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
        at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)
        at io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)
        at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
        at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
        at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
        at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)
        at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)
        at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
        at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
        at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
        at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1412)
        at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
        at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
        at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:943)
        at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:141)
        at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:645)
        at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:580)
        at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:497)
        at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:459)
        at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:886)
        at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.lang.Thread.run(Thread.java:748)

Modifications:

Keep identity of requests by using IdentityHashMap

Result:

DefaultHttpDataFactory is fixed.

The ConcurrentHashMap is replaced with a synchronized map, but I think the performance won't be affected much in real web apps.
This commit is contained in:
Ngoc Dao 2017-12-23 09:22:54 +11:00 committed by Norman Maurer
parent b6c42f6547
commit 2b4f667791
3 changed files with 223 additions and 47 deletions

View File

@ -15,25 +15,28 @@
*/
package io.netty.handler.codec.http.multipart;
import io.netty.handler.codec.http.DefaultHttpRequest;
import io.netty.handler.codec.http.HttpConstants;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.util.internal.PlatformDependent;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
/**
* Default factory giving Attribute and FileUpload according to constructor
* Default factory giving {@link Attribute} and {@link FileUpload} according to constructor.
*
* Attribute and FileUpload could be :<br>
* - MemoryAttribute, DiskAttribute or MixedAttribute<br>
* - MemoryFileUpload, DiskFileUpload or MixedFileUpload<br>
* according to the constructor.
* <p>According to the constructor, {@link Attribute} and {@link FileUpload} can be:</p>
* <ul>
* <li>MemoryAttribute, DiskAttribute or MixedAttribute</li>
* <li>MemoryFileUpload, DiskFileUpload or MixedFileUpload</li>
* </ul>
*/
public class DefaultHttpDataFactory implements HttpDataFactory {
@ -57,9 +60,14 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
private Charset charset = HttpConstants.DEFAULT_CHARSET;
/**
* Keep all HttpDatas until cleanAllHttpData() is called.
* Keep all {@link HttpData}s until cleaning methods are called.
* We need to use {@link IdentityHashMap} because different requests may be equal.
* See {@link DefaultHttpRequest#hashCode} and {@link DefaultHttpRequest#equals}.
* Similarly, when removing data items, we need to check their identities because
* different data items may be equal.
*/
private final Map<HttpRequest, List<HttpData>> requestFileDeleteMap = PlatformDependent.newConcurrentHashMap();
private final Map<HttpRequest, List<HttpData>> requestFileDeleteMap =
Collections.synchronizedMap(new IdentityHashMap<HttpRequest, List<HttpData>>());
/**
* HttpData will be in memory if less than default size (16KB).
@ -109,7 +117,7 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
}
/**
* @return the associated list of Files for the request
* @return the associated list of {@link HttpData} for the request
*/
private List<HttpData> getList(HttpRequest request) {
List<HttpData> list = requestFileDeleteMap.get(request);
@ -125,15 +133,15 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
if (useDisk) {
Attribute attribute = new DiskAttribute(name, charset);
attribute.setMaxSize(maxSize);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
List<HttpData> list = getList(request);
list.add(attribute);
return attribute;
}
if (checkSize) {
Attribute attribute = new MixedAttribute(name, minSize, charset);
attribute.setMaxSize(maxSize);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
List<HttpData> list = getList(request);
list.add(attribute);
return attribute;
}
MemoryAttribute attribute = new MemoryAttribute(name);
@ -146,15 +154,15 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
if (useDisk) {
Attribute attribute = new DiskAttribute(name, definedSize, charset);
attribute.setMaxSize(maxSize);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
List<HttpData> list = getList(request);
list.add(attribute);
return attribute;
}
if (checkSize) {
Attribute attribute = new MixedAttribute(name, definedSize, minSize, charset);
attribute.setMaxSize(maxSize);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
List<HttpData> list = getList(request);
list.add(attribute);
return attribute;
}
MemoryAttribute attribute = new MemoryAttribute(name, definedSize);
@ -186,16 +194,16 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
attribute.setMaxSize(maxSize);
}
checkHttpDataSize(attribute);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
List<HttpData> list = getList(request);
list.add(attribute);
return attribute;
}
if (checkSize) {
Attribute attribute = new MixedAttribute(name, value, minSize, charset);
attribute.setMaxSize(maxSize);
checkHttpDataSize(attribute);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
List<HttpData> list = getList(request);
list.add(attribute);
return attribute;
}
try {
@ -217,8 +225,8 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
contentTransferEncoding, charset, size);
fileUpload.setMaxSize(maxSize);
checkHttpDataSize(fileUpload);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(fileUpload);
List<HttpData> list = getList(request);
list.add(fileUpload);
return fileUpload;
}
if (checkSize) {
@ -226,8 +234,8 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
contentTransferEncoding, charset, size, minSize);
fileUpload.setMaxSize(maxSize);
checkHttpDataSize(fileUpload);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(fileUpload);
List<HttpData> list = getList(request);
list.add(fileUpload);
return fileUpload;
}
MemoryFileUpload fileUpload = new MemoryFileUpload(name, filename, contentType,
@ -239,20 +247,42 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
@Override
public void removeHttpDataFromClean(HttpRequest request, InterfaceHttpData data) {
if (data instanceof HttpData) {
List<HttpData> fileToDelete = getList(request);
fileToDelete.remove(data);
if (!(data instanceof HttpData)) {
return;
}
// Do not use getList because it adds empty list to requestFileDeleteMap
// if request is not found
List<HttpData> list = requestFileDeleteMap.get(request);
if (list == null) {
return;
}
// Can't simply call list.remove(data), because different data items may be equal.
// Need to check identity.
Iterator<HttpData> i = list.iterator();
while (i.hasNext()) {
HttpData n = i.next();
if (n == data) {
i.remove();
// Remove empty list to avoid memory leak
if (list.isEmpty()) {
requestFileDeleteMap.remove(request);
}
return;
}
}
}
@Override
public void cleanRequestHttpData(HttpRequest request) {
List<HttpData> fileToDelete = requestFileDeleteMap.remove(request);
if (fileToDelete != null) {
for (HttpData data: fileToDelete) {
data.delete();
List<HttpData> list = requestFileDeleteMap.remove(request);
if (list != null) {
for (HttpData data : list) {
data.release();
}
fileToDelete.clear();
}
}
@ -261,15 +291,16 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
Iterator<Entry<HttpRequest, List<HttpData>>> i = requestFileDeleteMap.entrySet().iterator();
while (i.hasNext()) {
Entry<HttpRequest, List<HttpData>> e = i.next();
i.remove();
List<HttpData> fileToDelete = e.getValue();
if (fileToDelete != null) {
for (HttpData data : fileToDelete) {
data.delete();
}
fileToDelete.clear();
// Calling i.remove() here will cause "java.lang.IllegalStateException: Entry was removed"
// at e.getValue() below
List<HttpData> list = e.getValue();
for (HttpData data : list) {
data.release();
}
i.remove();
}
}

View File

@ -653,23 +653,19 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
*/
@Override
public void destroy() {
checkDestroyed();
// Release all data items, including those not yet pulled
cleanFiles();
destroyed = true;
if (undecodedChunk != null && undecodedChunk.refCnt() > 0) {
undecodedChunk.release();
undecodedChunk = null;
}
// release all data which was not yet pulled
for (int i = bodyListHttpDataRank; i < bodyListHttpData.size(); i++) {
bodyListHttpData.get(i).release();
}
}
/**
* Clean all HttpDatas (on Disk) for the current request.
* Clean all {@link HttpData}s for the current request.
*/
@Override
public void cleanFiles() {

View File

@ -0,0 +1,149 @@
/*
* Copyright 2017 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http.multipart;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.DefaultHttpRequest;
import io.netty.handler.codec.http.HttpRequest;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static io.netty.handler.codec.http.HttpHeaderValues.IDENTITY;
import static io.netty.handler.codec.http.HttpMethod.POST;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.DEFAULT_TEXT_CONTENT_TYPE;
import static io.netty.util.CharsetUtil.UTF_8;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
 * Tests that {@link DefaultHttpDataFactory} tracks requests and data items by
 * <em>identity</em> rather than by {@code equals}. Two distinct requests with the
 * same method/URI are equal under {@link DefaultHttpRequest#equals}, which used to
 * make their HttpDatas collide in a single map entry and break cleanup.
 */
public class DefaultHttpDataFactoryTest {
    // req1 and req2 are equal to each other but are distinct objects;
    // the factory must keep their data separate.
    private static final HttpRequest req1 = new DefaultHttpRequest(HTTP_1_1, POST, "/form");
    private static final HttpRequest req2 = new DefaultHttpRequest(HTTP_1_1, POST, "/form");

    private DefaultHttpDataFactory factory;

    @BeforeClass
    public static void assertReq1EqualsReq2() {
        // Sanity check: the premise of every test here is that the two requests
        // are equal yet not identical.
        assertEquals(req1.hashCode(), req2.hashCode());
        assertTrue(req1.equals(req2));
    }

    @Before
    public void setupFactory() {
        factory = new DefaultHttpDataFactory();
    }

    @After
    public void cleanupFactory() {
        factory.cleanAllHttpData();
    }

    @Test
    public void cleanRequestHttpDataShouldIdentifyRequestsByTheirIdentities() throws Exception {
        // Create some data belonging to req1 and req2
        Attribute attribute1 = factory.createAttribute(req1, "attribute1", "value1");
        Attribute attribute2 = factory.createAttribute(req2, "attribute2", "value2");
        FileUpload file1 = factory.createFileUpload(
                req1, "file1", "file1.txt",
                DEFAULT_TEXT_CONTENT_TYPE, IDENTITY.toString(), UTF_8, 123
        );
        FileUpload file2 = factory.createFileUpload(
                req2, "file2", "file2.txt",
                DEFAULT_TEXT_CONTENT_TYPE, IDENTITY.toString(), UTF_8, 123
        );
        file1.setContent(Unpooled.copiedBuffer("file1 content", UTF_8));
        file2.setContent(Unpooled.copiedBuffer("file2 content", UTF_8));

        // Assert that they are not deleted
        assertNotNull(attribute1.getByteBuf());
        assertNotNull(attribute2.getByteBuf());
        assertNotNull(file1.getByteBuf());
        assertNotNull(file2.getByteBuf());
        assertEquals(1, attribute1.refCnt());
        assertEquals(1, attribute2.refCnt());
        assertEquals(1, file1.refCnt());
        assertEquals(1, file2.refCnt());

        // Clean up by req1
        factory.cleanRequestHttpData(req1);

        // Assert that data belonging to req1 has been cleaned up
        assertNull(attribute1.getByteBuf());
        assertNull(file1.getByteBuf());
        assertEquals(0, attribute1.refCnt());
        assertEquals(0, file1.refCnt());

        // But not req2 — even though req2.equals(req1)
        assertNotNull(attribute2.getByteBuf());
        assertNotNull(file2.getByteBuf());
        assertEquals(1, attribute2.refCnt());
        assertEquals(1, file2.refCnt());
    }

    @Test
    public void removeHttpDataFromCleanShouldIdentifyDataByTheirIdentities() throws Exception {
        // Create some equal data items belonging to the same request
        Attribute attribute1 = factory.createAttribute(req1, "attribute", "value");
        Attribute attribute2 = factory.createAttribute(req1, "attribute", "value");
        FileUpload file1 = factory.createFileUpload(
                req1, "file", "file.txt",
                DEFAULT_TEXT_CONTENT_TYPE, IDENTITY.toString(), UTF_8, 123
        );
        FileUpload file2 = factory.createFileUpload(
                req1, "file", "file.txt",
                DEFAULT_TEXT_CONTENT_TYPE, IDENTITY.toString(), UTF_8, 123
        );
        file1.setContent(Unpooled.copiedBuffer("file content", UTF_8));
        file2.setContent(Unpooled.copiedBuffer("file content", UTF_8));

        // Before doing anything, assert that the data items are equal
        // (so a plain list.remove(data) could remove the wrong one).
        assertEquals(attribute1.hashCode(), attribute2.hashCode());
        assertTrue(attribute1.equals(attribute2));
        assertEquals(file1.hashCode(), file2.hashCode());
        assertTrue(file1.equals(file2));

        // Remove attribute2 and file2 from being cleaned up by factory
        factory.removeHttpDataFromClean(req1, attribute2);
        factory.removeHttpDataFromClean(req1, file2);

        // Clean up by req1
        factory.cleanRequestHttpData(req1);

        // Assert that attribute1 and file1 have been cleaned up
        assertNull(attribute1.getByteBuf());
        assertNull(file1.getByteBuf());
        assertEquals(0, attribute1.refCnt());
        assertEquals(0, file1.refCnt());

        // But not attribute2 and file2, which were removed from the factory's tracking
        assertNotNull(attribute2.getByteBuf());
        assertNotNull(file2.getByteBuf());
        assertEquals(1, attribute2.refCnt());
        assertEquals(1, file2.refCnt());

        // Release attribute2 and file2 manually to avoid a leak, since the
        // factory no longer tracks them.
        attribute2.release();
        file2.release();
        assertEquals(0, attribute2.refCnt());
        assertEquals(0, file2.refCnt());
    }
}