Fix incorrect memory release in Multipart Decoder (#11188)

Motivation:
Two years ago, a change removed the default clearing of all HttpData,
whether disk based or memory based.

Many users were probably releasing HttpData directly, so there was no issue.
But, as the Javadoc states, `decoder.destroy()` should also clean up
Memory based HttpData, not only Disk based HttpData as it currently does.

Change:
- Add to the `destroy()` method the code needed to release the underlying
Memory based HttpData, if still referenced.

- Change one JUnit test (using Mixed, Memory and Disk based factories)
to check the correctness of this behavior and to act like a real
handler (releasing buffers and requests).

- Modify one JUnit test to check validity when a delimiter is present in a chunk
but without a CRLF/LF (false delimiter), to ensure correctness.

Result:
No more memory leaks.

Note that the List and the Map are still not cleaned, since they were not
cleaned before. No change is made there, as it could break backward compatibility.

Fixes #11175 and #11184
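
For illustration only, a minimal sketch (not part of this commit) of the lifecycle
this change supports; the `UploadExample` class is hypothetical, and it assumes an
already-aggregated FullHttpRequest:

    import io.netty.handler.codec.http.FullHttpRequest;
    import io.netty.handler.codec.http.multipart.Attribute;
    import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
    import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
    import io.netty.handler.codec.http.multipart.InterfaceHttpData;

    // Hypothetical example class, not part of Netty or of this commit.
    public final class UploadExample {
        public static void handle(FullHttpRequest request) throws Exception {
            // useDisk = false: memory-based HttpData, the case this commit fixes
            HttpPostRequestDecoder decoder =
                    new HttpPostRequestDecoder(new DefaultHttpDataFactory(false), request);
            try {
                for (InterfaceHttpData data : decoder.getBodyHttpDatas()) {
                    if (data instanceof Attribute) {
                        String value = ((Attribute) data).getValue();
                        System.out.println(data.getName() + " = " + value);
                    }
                    // No explicit data.release() needed here: after this change,
                    // decoder.destroy() also releases memory-based items
                    // (and skips any the user already released).
                }
            } finally {
                decoder.destroy();  // releases disk- AND memory-based HttpData
                request.release();  // the request itself is still owned by the caller
            }
        }
    }
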
Frédéric Brégier 2021-04-29 12:13:45 +02:00 committed by Norman Maurer
parent a0516ee414
commit 5309422669
3 changed files with 64 additions and 41 deletions

HttpPostMultipartRequestDecoder.java

@@ -43,7 +43,6 @@ import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import static io.netty.buffer.Unpooled.EMPTY_BUFFER;
import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.BINARY_STRING;
import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.BIT_7_STRING;
import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.BIT_8_STRING;
@@ -341,13 +340,8 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
ByteBuf buf = content.content();
if (undecodedChunk == null) {
undecodedChunk = isLastChunk ?
// Take a slice instead of copying when the first chunk is also the last
// as undecodedChunk.writeBytes will never be called.
buf.retainedSlice() :
// Maybe we should better not copy here for performance reasons but this will need
// more care by the caller to release the content in a correct manner later
// So maybe something to optimize on a later stage
undecodedChunk =
// Since the Handler will release the incoming later on, we need to copy it
//
// We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity
// which is not really usable for us as we may exceed it once we add more bytes.
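
As an aside (not part of the diff), a hedged sketch of the accumulation pattern the
comment above describes; `AccumulateSketch` and its fields are illustrative names,
not the decoder's internals, and Unpooled is used here only for brevity:

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;

    // Illustrative only: accumulate successive chunk contents into one growable
    // buffer, so the caller can release each incoming HttpContent right away.
    public final class AccumulateSketch {
        private ByteBuf undecoded;

        public void offer(ByteBuf chunk) {
            if (undecoded == null) {
                // Allocate a fresh buffer and copy; a slice would keep the incoming
                // buffer alive after the caller releases it.
                undecoded = Unpooled.buffer(chunk.readableBytes()).writeBytes(chunk);
            } else {
                // Subsequent chunks are appended; the buffer grows as needed.
                undecoded.writeBytes(chunk);
            }
        }

        public void destroy() {
            if (undecoded != null) {
                undecoded.release();
                undecoded = null;
            }
        }
    }
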
@@ -943,8 +937,15 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
*/
@Override
public void destroy() {
// Release all data items, including those not yet pulled
// Release all data items, including those not yet pulled, only file based items
cleanFiles();
// Clean Memory based data
for (InterfaceHttpData httpData : bodyListHttpData) {
// Might have been already released by the user
if (httpData.refCnt() > 0) {
httpData.release();
}
}
destroyed = true;
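
A short illustration (not part of the diff) of why the `refCnt() > 0` check matters:
`destroy()` stays safe whether or not the user already released individual items.
The `DestroySketch` class and the `"field"` name are hypothetical, and it assumes
that form field is a simple attribute:

    import io.netty.handler.codec.http.multipart.Attribute;
    import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;

    // Hypothetical example class, not part of Netty or of this commit.
    public final class DestroySketch {
        public static void consume(HttpPostRequestDecoder decoder) throws Exception {
            Attribute attr = (Attribute) decoder.getBodyHttpData("field"); // hypothetical name
            if (attr != null) {
                System.out.println(attr.getValue()); // read it...
                attr.release();                      // ...and release it explicitly (refCnt drops to 0)
            }
            // destroy() skips items with refCnt() == 0 and releases the rest,
            // including memory-based data the user never pulled.
            decoder.destroy();
        }
    }
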

HttpPostStandardRequestDecoder.java

@@ -154,7 +154,7 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
this.factory = requireNonNull(factory, "factory");
try {
if (request instanceof HttpContent) {
// Offer automatically if the given request is als type of HttpContent
// Offer automatically if the given request is as type of HttpContent
// See #1089
offer((HttpContent) request);
} else {
@@ -288,13 +288,8 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
ByteBuf buf = content.content();
if (undecodedChunk == null) {
undecodedChunk = isLastChunk ?
// Take a slice instead of copying when the first chunk is also the last
// as undecodedChunk.writeBytes will never be called.
buf.retainedSlice() :
// Maybe we should better not copy here for performance reasons but this will need
// more care by the caller to release the content in a correct manner later
// So maybe something to optimize on a later stage.
undecodedChunk =
// Since the Handler will release the incoming later on, we need to copy it
//
// We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity
// which is not really usable for us as we may exceed it once we add more bytes.
@@ -683,8 +678,15 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
*/
@Override
public void destroy() {
// Release all data items, including those not yet pulled
// Release all data items, including those not yet pulled, only file based items
cleanFiles();
// Clean Memory based data
for (InterfaceHttpData httpData : bodyListHttpData) {
// Might have been already released by the user
if (httpData.refCnt() > 0) {
httpData.release();
}
}
destroyed = true;

HttpPostRequestDecoderTest.java

@@ -88,7 +88,8 @@ public class HttpPostRequestDecoderTest {
// Create decoder instance to test.
final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req);
decoder.offer(new DefaultHttpContent(Unpooled.copiedBuffer(body, CharsetUtil.UTF_8)));
ByteBuf buf = Unpooled.copiedBuffer(body, CharsetUtil.UTF_8);
decoder.offer(new DefaultHttpContent(buf));
decoder.offer(new DefaultHttpContent(Unpooled.EMPTY_BUFFER));
// Validate it's enough chunks to decode upload.
@@ -102,6 +103,7 @@ public class HttpPostRequestDecoderTest {
data, upload.getString(CharsetUtil.UTF_8));
upload.release();
decoder.destroy();
buf.release();
}
}
@@ -134,8 +136,8 @@ public class HttpPostRequestDecoderTest {
// Create decoder instance to test.
final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req);
assertFalse(decoder.getBodyHttpDatas().isEmpty());
req.release();
decoder.destroy();
assertTrue(req.release());
}
// See https://github.com/netty/netty/issues/2544
@@ -181,8 +183,8 @@ public class HttpPostRequestDecoderTest {
assertNotNull(datar);
assertEquals(datas[i].getBytes(CharsetUtil.UTF_8).length, datar.length);
req.release();
decoder.destroy();
assertTrue(req.release());
}
}
@@ -215,8 +217,8 @@ public class HttpPostRequestDecoderTest {
// Create decoder instance to test.
final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req);
assertFalse(decoder.getBodyHttpDatas().isEmpty());
req.release();
decoder.destroy();
assertTrue(req.release());
}
// See https://github.com/netty/netty/issues/1848
@@ -276,6 +278,8 @@ public class HttpPostRequestDecoderTest {
aDecodedData.release();
aDecoder.destroy();
aSmallBuf.release();
aLargeBuf.release();
}
// See https://github.com/netty/netty/issues/2305
@@ -373,8 +377,8 @@ public class HttpPostRequestDecoderTest {
// Create decoder instance to test.
final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req);
assertFalse(decoder.getBodyHttpDatas().isEmpty());
req.release();
decoder.destroy();
assertTrue(req.release());
}
@Test
@@ -403,8 +407,8 @@ public class HttpPostRequestDecoderTest {
assertTrue(part1 instanceof FileUpload);
FileUpload fileUpload = (FileUpload) part1;
assertEquals("tmp 0.txt", fileUpload.getFilename());
req.release();
decoder.destroy();
assertTrue(req.release());
}
@Test
@@ -434,8 +438,8 @@ public class HttpPostRequestDecoderTest {
// Create decoder instance to test without any exception.
final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req);
assertFalse(decoder.getBodyHttpDatas().isEmpty());
req.release();
decoder.destroy();
assertTrue(req.release());
}
@Test
@@ -467,8 +471,8 @@ public class HttpPostRequestDecoderTest {
FileUpload fileUpload = (FileUpload) part1;
byte[] fileBytes = fileUpload.get();
assertTrue("the filecontent should not be decoded", filecontent.equals(new String(fileBytes)));
req.release();
decoder.destroy();
assertTrue(req.release());
}
@Test
@@ -497,7 +501,7 @@ public class HttpPostRequestDecoderTest {
} catch (HttpPostRequestDecoder.ErrorDataDecoderException e) {
assertTrue(e.getCause() instanceof UnsupportedCharsetException);
} finally {
req.release();
assertTrue(req.release());
}
}
@@ -530,7 +534,7 @@ public class HttpPostRequestDecoderTest {
} catch (HttpPostRequestDecoder.ErrorDataDecoderException e) {
assertTrue(e.getCause() instanceof UnsupportedCharsetException);
} finally {
req.release();
assertTrue(req.release());
}
}
@@ -581,8 +585,8 @@ public class HttpPostRequestDecoderTest {
assertTrue("the item should be a FileUpload", part1 instanceof FileUpload);
FileUpload fileUpload = (FileUpload) part1;
assertEquals("the filename should be decoded", filename, fileUpload.getFilename());
req.release();
decoder.destroy();
assertTrue(req.release());
}
// https://github.com/netty/netty/pull/7265
@@ -617,8 +621,8 @@ public class HttpPostRequestDecoderTest {
assertTrue("the item should be a FileUpload", part1 instanceof FileUpload);
FileUpload fileUpload = (FileUpload) part1;
assertEquals("the filename should be decoded", filename, fileUpload.getFilename());
req.release();
decoder.destroy();
assertTrue(req.release());
}
// https://github.com/netty/netty/pull/7265
@@ -649,7 +653,7 @@ public class HttpPostRequestDecoderTest {
} catch (HttpPostRequestDecoder.ErrorDataDecoderException e) {
assertTrue(e.getCause() instanceof ArrayIndexOutOfBoundsException);
} finally {
req.release();
assertTrue(req.release());
}
}
@@ -681,7 +685,7 @@ public class HttpPostRequestDecoderTest {
} catch (HttpPostRequestDecoder.ErrorDataDecoderException e) {
assertTrue(e.getCause() instanceof UnsupportedCharsetException);
} finally {
req.release();
assertTrue(req.release());
}
}
@@ -712,8 +716,8 @@ public class HttpPostRequestDecoderTest {
assertTrue(part1 instanceof FileUpload);
FileUpload fileUpload = (FileUpload) part1;
assertEquals("tmp-0.txt", fileUpload.getFilename());
req.release();
decoder.destroy();
assertTrue(req.release());
}
// https://github.com/netty/netty/issues/8575
@@ -832,8 +836,8 @@ public class HttpPostRequestDecoderTest {
FileUpload fileUpload = (FileUpload) part1;
assertEquals("the filename should be decoded", filename, fileUpload.getFilename());
req.release();
decoder.destroy();
assertTrue(req.release());
}
@Test
@@ -869,8 +873,8 @@ public class HttpPostRequestDecoderTest {
assertTrue(attr.getByteBuf().isDirect());
assertEquals("los angeles", attr.getValue());
req.release();
decoder.destroy();
assertTrue(req.release());
}
@Test
@@ -987,7 +991,8 @@ public class HttpPostRequestDecoderTest {
int bytesLastChunk = 10000;
int fileSize = bytesPerChunk * nbChunks + bytesLastChunk; // set Xmx to a number lower than this and it crashes
String prefix = "--861fbeab-cd20-470c-9609-d40a0f704466\n" +
String delimiter = "--861fbeab-cd20-470c-9609-d40a0f704466";
String prefix = delimiter + "\n" +
"Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n" +
"Content-Type: image/jpeg\n" +
"Content-Length: " + fileSize + "\n" +
@@ -1003,8 +1008,10 @@ public class HttpPostRequestDecoderTest {
request.headers().set("content-length", prefix.length() + fileSize + suffix.length());
HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request);
decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8))));
ByteBuf buf = Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8));
decoder.offer(new DefaultHttpContent(buf));
assertNotNull(((HttpData) decoder.currentPartialHttpData()).content());
buf.release();
byte[] body = new byte[bytesPerChunk];
Arrays.fill(body, (byte) 1);
@@ -1020,11 +1027,16 @@ public class HttpPostRequestDecoderTest {
byte[] bsuffix1 = suffix1.getBytes(CharsetUtil.UTF_8);
byte[] previousLastbody = new byte[bytesLastChunk - bsuffix1.length];
byte[] bdelimiter = delimiter.getBytes(CharsetUtil.UTF_8);
byte[] lastbody = new byte[2 * bsuffix1.length];
Arrays.fill(previousLastbody, (byte) 1);
previousLastbody[0] = HttpConstants.CR;
previousLastbody[1] = HttpConstants.LF;
Arrays.fill(lastbody, (byte) 1);
// put somewhere a not valid delimiter
for (int i = 0; i < bdelimiter.length; i++) {
previousLastbody[i + 10] = bdelimiter[i];
}
lastbody[0] = HttpConstants.CR;
lastbody[1] = HttpConstants.LF;
for (int i = 0; i < bsuffix1.length; i++) {
@@ -1055,12 +1067,18 @@ public class HttpPostRequestDecoderTest {
}
assertTrue("Capacity should be less than 1M", decoder.getCurrentAllocatedCapacity()
< 1024 * 1024);
for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) {
httpData.release();
factory.removeHttpDataFromClean(request, httpData);
InterfaceHttpData[] httpDatas = decoder.getBodyHttpDatas().toArray(new InterfaceHttpData[0]);
for (InterfaceHttpData httpData : httpDatas) {
assertEquals("Before cleanAllHttpData should be 1", 1, httpData.refCnt());
}
factory.cleanAllHttpData();
for (InterfaceHttpData httpData : httpDatas) {
assertEquals("Before cleanAllHttpData should be 1 if in Memory", inMemory? 1 : 0, httpData.refCnt());
}
decoder.destroy();
for (InterfaceHttpData httpData : httpDatas) {
assertEquals("RefCnt should be 0", 0, httpData.refCnt());
}
}
@Test
@@ -1121,7 +1139,9 @@ public class HttpPostRequestDecoderTest {
for (int i = 0; i < bp2.length; i++) {
prefix[bp1.length + 2 + i] = bp2[i];
}
decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix)));
ByteBuf buf = Unpooled.wrappedBuffer(prefix);
decoder.offer(new DefaultHttpContent(buf));
buf.release();
byte[] body = new byte[bytesPerItem];
Arrays.fill(body, (byte) rank);
ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerItem);