Minimize get byte multipart and fix buffer reuse (#11001)
Motivation: - Underlying buffer usage might be erroneous when releasing buffers internally in HttpPostMultipartRequestDecoder. Two bugs occur: 1) The final file upload seems not to be of the right size. 2) Memory, even in Disk mode, increases continuously, while it shouldn't. - The method `getByte(position)` is called too often within the current implementation of HttpPostMultipartRequestDecoder. This implies too much activity, which is visible when PARANOID mode is active. This is also true in standard mode. Apply to HttpPostStandardRequestDecoder the same buffer fix previously made in HttpPostMultipartRequestDecoder. Finally, in order to ensure we do not rewrite already decoded HttpData when decoding the next ones within a multipart, we must ensure the buffers are copied and not retained slices. Modification: - Add some tests to check consistency for HttpPostMultipartRequestDecoder. Add a package-protected method for testing purposes only. - Use the `bytesBefore(...)` method instead of `getByte(pos)` in order to limit external access to the underlying buffer by iteratively retrieving the correct start position. It is used to find both LF/CRLF and the delimiter. Two methods in HttpPostBodyUtil were created for that. The undecodedChunk is copied when a chunk is being added to a multipart HttpData. The same buffer is also rewritten in order to release the copied memory part. Result: Just for note, for Memory, Disk and Mixed mode factories alike, the release has to be done as: for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) { httpData.release(); factory.removeHttpDataFromClean(request, httpData); } factory.cleanAllHttpData(); decoder.destroy(); The memory used is minimal in Disk or Mixed mode. In Memory mode, a big file is still in memory, but no longer in the undecodedChunk — in its own (copied) buffer.
In terms of benchmarking, the results are: Original code Benchmark Mode Cnt Score Error Units HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 0,152 ± 0,100 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 0,543 ± 0,218 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,001 ± 0,001 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 0,615 ± 0,070 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 0,114 ± 0,063 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 0,664 ± 0,034 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,001 ± 0,001 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 0,620 ± 0,140 ops/ms New code Benchmark Mode Cnt Score Error Units HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigAdvancedLevel thrpt 6 4,037 ± 0,358 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigDisabledLevel thrpt 6 4,226 ± 0,471 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigParanoidLevel thrpt 6 0,875 ± 0,029 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderBigSimpleLevel thrpt 6 4,346 ± 0,275 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighAdvancedLevel thrpt 6 2,044 ± 0,020 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighDisabledLevel thrpt 6 2,278 ± 0,159 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighParanoidLevel thrpt 6 0,174 ± 0,004 ops/ms HttpPostMultipartRequestDecoderBenchmark.multipartRequestDecoderHighSimpleLevel thrpt 6 2,370 ± 0,065 ops/ms In short, using big file transfers, this is about 7 times faster with new code, while using high 
number of HttpData, this is about 4 times faster with the new code when using the Simple level. When using the Paranoid level, big file transfers are about 800 times faster with the new code, while a high number of HttpData is about 170 times faster with the new code.
This commit is contained in:
parent
edf44732de
commit
1529ef1794
@ -106,6 +106,9 @@ public abstract class AbstractMemoryHttpData extends AbstractHttpData {
|
||||
size += localsize;
|
||||
if (byteBuf == null) {
|
||||
byteBuf = buffer;
|
||||
} else if (localsize == 0) {
|
||||
// Nothing to add and byteBuf already exists
|
||||
buffer.release();
|
||||
} else if (byteBuf instanceof CompositeByteBuf) {
|
||||
CompositeByteBuf cbb = (CompositeByteBuf) byteBuf;
|
||||
cbb.addComponent(true, buffer);
|
||||
|
@ -16,6 +16,7 @@
|
||||
package io.netty.handler.codec.http.multipart;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
import io.netty.handler.codec.http.HttpConstants;
|
||||
|
||||
/**
|
||||
* Shared Static object between HttpMessageDecoder, HttpPostRequestDecoder and HttpPostRequestEncoder
|
||||
@ -150,4 +151,85 @@ final class HttpPostBodyUtil {
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to find LF or CRLF as Line Breaking
|
||||
*
|
||||
* @param buffer the buffer to search in
|
||||
* @param index the index to start from in the buffer
|
||||
* @return a relative position from index > 0 if LF or CRLF is found
|
||||
* or < 0 if not found
|
||||
*/
|
||||
static int findLineBreak(ByteBuf buffer, int index) {
|
||||
int toRead = buffer.readableBytes() - (index - buffer.readerIndex());
|
||||
int posFirstChar = buffer.bytesBefore(index, toRead, HttpConstants.LF);
|
||||
if (posFirstChar == -1) {
|
||||
// No LF, so neither CRLF
|
||||
return -1;
|
||||
}
|
||||
if (posFirstChar > 0 && buffer.getByte(index + posFirstChar - 1) == HttpConstants.CR) {
|
||||
posFirstChar--;
|
||||
}
|
||||
return posFirstChar;
|
||||
}
|
||||
|
||||
/**
 * Try to find the delimiter, with LF or CRLF in front of it (included in the result)
 * when requested.
 *
 * @param buffer the buffer to search in
 * @param index the index to start from in the buffer
 * @param delimiter the delimiter as byte array
 * @param precededByLineBreak true if the delimiter must be preceded by LF or CRLF, else false
 * @return a relative position from index >= 0 if the delimiter is found, designating its start
 *         (including the LF or CRLF if asked for),
 *         or a number < 0 if the delimiter is not found
 * @throws IndexOutOfBoundsException
 *         if {@code offset + delimiter.length} is greater than {@code buffer.capacity}
 */
static int findDelimiter(ByteBuf buffer, int index, byte[] delimiter, boolean precededByLineBreak) {
    final int delimiterLength = delimiter.length;
    final int readerIndex = buffer.readerIndex();
    final int writerIndex = buffer.writerIndex();
    // Remaining bytes between the search start and the end of readable data.
    int toRead = writerIndex - index;
    int newOffset = index;
    boolean delimiterNotFound = true;
    // Keep scanning while a full delimiter could still fit in the remaining bytes.
    while (delimiterNotFound && delimiterLength <= toRead) {
        // Find first position: delimiter
        int posDelimiter = buffer.bytesBefore(newOffset, toRead, delimiter[0]);
        if (posDelimiter < 0) {
            return -1;
        }
        newOffset += posDelimiter;
        toRead -= posDelimiter;
        // Now check for delimiter
        // NOTE(review): this byte-by-byte compare may read past writerIndex when only a
        // partial delimiter remains at the end of the buffer; callers appear to rely on the
        // documented IndexOutOfBoundsException in that case — confirm against callers.
        delimiterNotFound = false;
        for (int i = 0; i < delimiterLength; i++) {
            if (buffer.getByte(newOffset + i) != delimiter[i]) {
                // Mismatch: restart the outer scan one byte further on.
                newOffset++;
                toRead--;
                delimiterNotFound = true;
                break;
            }
        }
        if (!delimiterNotFound) {
            // Delimiter found, find if necessary: LF or CRLF
            if (precededByLineBreak && newOffset > readerIndex) {
                if (buffer.getByte(newOffset - 1) == HttpConstants.LF) {
                    // Include the preceding LF in the reported position.
                    newOffset--;
                    // Check if CR before: not mandatory to be there
                    if (newOffset > readerIndex && buffer.getByte(newOffset - 1) == HttpConstants.CR) {
                        newOffset--;
                    }
                } else {
                    // Delimiter with Line Break could be further: iterate after first char of delimiter
                    newOffset++;
                    toRead--;
                    delimiterNotFound = true;
                    continue;
                }
            }
            // Result is relative to the buffer's readerIndex.
            return newOffset - readerIndex;
        }
    }
    return -1;
}
|
||||
}
|
||||
|
@ -551,7 +551,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
|
||||
}
|
||||
}
|
||||
// load data
|
||||
if (!loadDataMultipart(undecodedChunk, multipartDataBoundary, currentAttribute)) {
|
||||
if (!loadDataMultipartOptimized(undecodedChunk, multipartDataBoundary, currentAttribute)) {
|
||||
// Delimiter is not found. Need more chunks.
|
||||
return null;
|
||||
}
|
||||
@ -648,7 +648,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
|
||||
skipOneLine();
|
||||
String newline;
|
||||
try {
|
||||
newline = readDelimiter(undecodedChunk, delimiter);
|
||||
newline = readDelimiterOptimized(undecodedChunk, delimiter, charset);
|
||||
} catch (NotEnoughDataDecoderException ignored) {
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
return null;
|
||||
@ -688,7 +688,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
|
||||
String newline;
|
||||
try {
|
||||
skipControlCharacters(undecodedChunk);
|
||||
newline = readLine(undecodedChunk, charset);
|
||||
newline = readLineOptimized(undecodedChunk, charset);
|
||||
} catch (NotEnoughDataDecoderException ignored) {
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
return null;
|
||||
@ -918,7 +918,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
|
||||
}
|
||||
}
|
||||
// load data as much as possible
|
||||
if (!loadDataMultipart(undecodedChunk, delimiter, currentFileUpload)) {
|
||||
if (!loadDataMultipartOptimized(undecodedChunk, delimiter, currentFileUpload)) {
|
||||
// Delimiter is not found. Need more chunks.
|
||||
return null;
|
||||
}
|
||||
@ -998,83 +998,32 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
|
||||
* Need more chunks and reset the {@code readerIndex} to the previous
|
||||
* value
|
||||
*/
|
||||
private static String readLineStandard(ByteBuf undecodedChunk, Charset charset) {
|
||||
private static String readLineOptimized(ByteBuf undecodedChunk, Charset charset) {
|
||||
int readerIndex = undecodedChunk.readerIndex();
|
||||
ByteBuf line = undecodedChunk.alloc().heapBuffer(64);
|
||||
ByteBuf line = null;
|
||||
try {
|
||||
while (undecodedChunk.isReadable()) {
|
||||
byte nextByte = undecodedChunk.readByte();
|
||||
if (nextByte == HttpConstants.CR) {
|
||||
// check but do not changed readerIndex
|
||||
nextByte = undecodedChunk.getByte(undecodedChunk.readerIndex());
|
||||
if (nextByte == HttpConstants.LF) {
|
||||
// force read
|
||||
undecodedChunk.readByte();
|
||||
return line.toString(charset);
|
||||
} else {
|
||||
// Write CR (not followed by LF)
|
||||
line.writeByte(HttpConstants.CR);
|
||||
}
|
||||
} else if (nextByte == HttpConstants.LF) {
|
||||
return line.toString(charset);
|
||||
} else {
|
||||
line.writeByte(nextByte);
|
||||
if (undecodedChunk.isReadable()) {
|
||||
int posLfOrCrLf = HttpPostBodyUtil.findLineBreak(undecodedChunk, undecodedChunk.readerIndex());
|
||||
if (posLfOrCrLf <= 0) {
|
||||
throw new NotEnoughDataDecoderException();
|
||||
}
|
||||
}
|
||||
} catch (IndexOutOfBoundsException e) {
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException(e);
|
||||
} finally {
|
||||
line.release();
|
||||
}
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
}
|
||||
try {
|
||||
line = undecodedChunk.alloc().heapBuffer(posLfOrCrLf);
|
||||
line.writeBytes(undecodedChunk, posLfOrCrLf);
|
||||
|
||||
/**
|
||||
* Read one line up to the CRLF or LF
|
||||
*
|
||||
* @return the String from one line
|
||||
* @throws NotEnoughDataDecoderException
|
||||
* Need more chunks and reset the {@code readerIndex} to the previous
|
||||
* value
|
||||
*/
|
||||
private static String readLine(ByteBuf undecodedChunk, Charset charset) {
|
||||
if (!undecodedChunk.hasArray()) {
|
||||
return readLineStandard(undecodedChunk, charset);
|
||||
}
|
||||
SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
|
||||
int readerIndex = undecodedChunk.readerIndex();
|
||||
ByteBuf line = undecodedChunk.alloc().heapBuffer(64);
|
||||
try {
|
||||
while (sao.pos < sao.limit) {
|
||||
byte nextByte = sao.bytes[sao.pos++];
|
||||
if (nextByte == HttpConstants.CR) {
|
||||
if (sao.pos < sao.limit) {
|
||||
nextByte = sao.bytes[sao.pos++];
|
||||
if (nextByte == HttpConstants.LF) {
|
||||
sao.setReadPosition(0);
|
||||
return line.toString(charset);
|
||||
} else {
|
||||
// Write CR (not followed by LF)
|
||||
sao.pos--;
|
||||
line.writeByte(HttpConstants.CR);
|
||||
}
|
||||
} else {
|
||||
line.writeByte(nextByte);
|
||||
byte nextByte = undecodedChunk.readByte();
|
||||
if (nextByte == HttpConstants.CR) {
|
||||
// force read next byte since LF is the following one
|
||||
undecodedChunk.readByte();
|
||||
}
|
||||
} else if (nextByte == HttpConstants.LF) {
|
||||
sao.setReadPosition(0);
|
||||
return line.toString(charset);
|
||||
} else {
|
||||
line.writeByte(nextByte);
|
||||
} finally {
|
||||
line.release();
|
||||
}
|
||||
}
|
||||
} catch (IndexOutOfBoundsException e) {
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException(e);
|
||||
} finally {
|
||||
line.release();
|
||||
}
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
@ -1095,23 +1044,19 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
|
||||
* Need more chunks and reset the {@code readerIndex} to the previous
|
||||
* value
|
||||
*/
|
||||
private static String readDelimiterStandard(ByteBuf undecodedChunk, String delimiter) {
|
||||
int readerIndex = undecodedChunk.readerIndex();
|
||||
private static String readDelimiterOptimized(ByteBuf undecodedChunk, String delimiter, Charset charset) {
|
||||
final int readerIndex = undecodedChunk.readerIndex();
|
||||
final byte[] bdelimiter = delimiter.getBytes(charset);
|
||||
final int delimiterLength = bdelimiter.length;
|
||||
try {
|
||||
StringBuilder sb = new StringBuilder(64);
|
||||
int delimiterPos = 0;
|
||||
int len = delimiter.length();
|
||||
while (undecodedChunk.isReadable() && delimiterPos < len) {
|
||||
byte nextByte = undecodedChunk.readByte();
|
||||
if (nextByte == delimiter.charAt(delimiterPos)) {
|
||||
delimiterPos++;
|
||||
sb.append((char) nextByte);
|
||||
} else {
|
||||
// delimiter not found so break here !
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
}
|
||||
int delimiterPos = HttpPostBodyUtil.findDelimiter(undecodedChunk, readerIndex, bdelimiter, false);
|
||||
if (delimiterPos < 0) {
|
||||
// delimiter not found so break here !
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
}
|
||||
StringBuilder sb = new StringBuilder(delimiter);
|
||||
undecodedChunk.readerIndex(readerIndex + delimiterPos + delimiterLength);
|
||||
// Now check if either opening delimiter or closing delimiter
|
||||
if (undecodedChunk.isReadable()) {
|
||||
byte nextByte = undecodedChunk.readByte();
|
||||
@ -1176,127 +1121,28 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
|
||||
}
|
||||
|
||||
/**
|
||||
* Read one line up to --delimiter or --delimiter-- and if existing the CRLF
|
||||
* or LF. Note that CRLF or LF are mandatory for opening delimiter
|
||||
* (--delimiter) but not for closing delimiter (--delimiter--) since some
|
||||
* clients does not include CRLF in this case.
|
||||
* Rewrite buffer in order to skip lengthToSkip bytes from current readerIndex,
|
||||
* such that any readable bytes available after readerIndex + lengthToSkip (so before writerIndex)
|
||||
* are moved at readerIndex position,
|
||||
* therefore decreasing writerIndex of lengthToSkip at the end of the process.
|
||||
*
|
||||
* @param delimiter
|
||||
* of the form --string, such that '--' is already included
|
||||
* @return the String from one line as the delimiter searched (opening or
|
||||
* closing)
|
||||
* @throws NotEnoughDataDecoderException
|
||||
* Need more chunks and reset the readerInder to the previous
|
||||
* value
|
||||
* @param buffer the buffer to rewrite from current readerIndex
|
||||
* @param lengthToSkip the size to skip from readerIndex
|
||||
*/
|
||||
private static String readDelimiter(ByteBuf undecodedChunk, String delimiter) {
|
||||
if (!undecodedChunk.hasArray()) {
|
||||
return readDelimiterStandard(undecodedChunk, delimiter);
|
||||
private static void rewriteCurrentBuffer(ByteBuf buffer, int lengthToSkip) {
|
||||
if (lengthToSkip == 0) {
|
||||
return;
|
||||
}
|
||||
SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
|
||||
int readerIndex = undecodedChunk.readerIndex();
|
||||
int delimiterPos = 0;
|
||||
int len = delimiter.length();
|
||||
try {
|
||||
StringBuilder sb = new StringBuilder(64);
|
||||
// check conformity with delimiter
|
||||
while (sao.pos < sao.limit && delimiterPos < len) {
|
||||
byte nextByte = sao.bytes[sao.pos++];
|
||||
if (nextByte == delimiter.charAt(delimiterPos)) {
|
||||
delimiterPos++;
|
||||
sb.append((char) nextByte);
|
||||
} else {
|
||||
// delimiter not found so break here !
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
}
|
||||
}
|
||||
// Now check if either opening delimiter or closing delimiter
|
||||
if (sao.pos < sao.limit) {
|
||||
byte nextByte = sao.bytes[sao.pos++];
|
||||
if (nextByte == HttpConstants.CR) {
|
||||
// first check for opening delimiter
|
||||
if (sao.pos < sao.limit) {
|
||||
nextByte = sao.bytes[sao.pos++];
|
||||
if (nextByte == HttpConstants.LF) {
|
||||
sao.setReadPosition(0);
|
||||
return sb.toString();
|
||||
} else {
|
||||
// error CR without LF
|
||||
// delimiter not found so break here !
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
}
|
||||
} else {
|
||||
// error since CR must be followed by LF
|
||||
// delimiter not found so break here !
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
}
|
||||
} else if (nextByte == HttpConstants.LF) {
|
||||
// same first check for opening delimiter where LF used with
|
||||
// no CR
|
||||
sao.setReadPosition(0);
|
||||
return sb.toString();
|
||||
} else if (nextByte == '-') {
|
||||
sb.append('-');
|
||||
// second check for closing delimiter
|
||||
if (sao.pos < sao.limit) {
|
||||
nextByte = sao.bytes[sao.pos++];
|
||||
if (nextByte == '-') {
|
||||
sb.append('-');
|
||||
// now try to find if CRLF or LF there
|
||||
if (sao.pos < sao.limit) {
|
||||
nextByte = sao.bytes[sao.pos++];
|
||||
if (nextByte == HttpConstants.CR) {
|
||||
if (sao.pos < sao.limit) {
|
||||
nextByte = sao.bytes[sao.pos++];
|
||||
if (nextByte == HttpConstants.LF) {
|
||||
sao.setReadPosition(0);
|
||||
return sb.toString();
|
||||
} else {
|
||||
// error CR without LF
|
||||
// delimiter not found so break here !
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
}
|
||||
} else {
|
||||
// error CR without LF
|
||||
// delimiter not found so break here !
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
}
|
||||
} else if (nextByte == HttpConstants.LF) {
|
||||
sao.setReadPosition(0);
|
||||
return sb.toString();
|
||||
} else {
|
||||
// No CRLF but ok however (Adobe Flash
|
||||
// uploader)
|
||||
// minus 1 since we read one char ahead but
|
||||
// should not
|
||||
sao.setReadPosition(1);
|
||||
return sb.toString();
|
||||
}
|
||||
}
|
||||
// FIXME what do we do here?
|
||||
// either considering it is fine, either waiting for
|
||||
// more data to come?
|
||||
// lets try considering it is fine...
|
||||
sao.setReadPosition(0);
|
||||
return sb.toString();
|
||||
}
|
||||
// whatever now => error since incomplete
|
||||
// only one '-' => not enough or whatever not enough
|
||||
// element
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (IndexOutOfBoundsException e) {
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException(e);
|
||||
final int readerIndex = buffer.readerIndex();
|
||||
final int readableBytes = buffer.readableBytes();
|
||||
if (readableBytes == lengthToSkip) {
|
||||
buffer.readerIndex(readerIndex);
|
||||
buffer.writerIndex(readerIndex);
|
||||
return;
|
||||
}
|
||||
undecodedChunk.readerIndex(readerIndex);
|
||||
throw new NotEnoughDataDecoderException();
|
||||
buffer.setBytes(readerIndex, buffer, readerIndex + lengthToSkip, readableBytes - lengthToSkip);
|
||||
buffer.readerIndex(readerIndex);
|
||||
buffer.writerIndex(readerIndex + readableBytes - lengthToSkip);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1305,91 +1151,50 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
|
||||
* @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks
|
||||
* @throws ErrorDataDecoderException
|
||||
*/
|
||||
private static boolean loadDataMultipartStandard(ByteBuf undecodedChunk, String delimiter, HttpData httpData) {
|
||||
private static boolean loadDataMultipartOptimized(ByteBuf undecodedChunk, String delimiter, HttpData httpData) {
|
||||
if (!undecodedChunk.isReadable()) {
|
||||
return false;
|
||||
}
|
||||
final int startReaderIndex = undecodedChunk.readerIndex();
|
||||
final int delimeterLength = delimiter.length();
|
||||
int index = 0;
|
||||
int lastPosition = startReaderIndex;
|
||||
byte prevByte = HttpConstants.LF;
|
||||
boolean delimiterFound = false;
|
||||
while (undecodedChunk.isReadable()) {
|
||||
final byte nextByte = undecodedChunk.readByte();
|
||||
// Check the delimiter
|
||||
if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) {
|
||||
index++;
|
||||
if (delimeterLength == index) {
|
||||
delimiterFound = true;
|
||||
break;
|
||||
final byte[] bdelimiter = delimiter.getBytes(httpData.getCharset());
|
||||
int posDelimiter = HttpPostBodyUtil.findDelimiter(undecodedChunk, startReaderIndex, bdelimiter, true);
|
||||
if (posDelimiter < 0) {
|
||||
// Not found but however perhaps because incomplete so search LF or CRLF
|
||||
posDelimiter = HttpPostBodyUtil.findLineBreak(undecodedChunk, startReaderIndex);
|
||||
if (posDelimiter < 0) {
|
||||
// not found so this chunk can be fully added
|
||||
ByteBuf content = undecodedChunk.copy();
|
||||
try {
|
||||
httpData.addContent(content, false);
|
||||
} catch (IOException e) {
|
||||
throw new ErrorDataDecoderException(e);
|
||||
}
|
||||
continue;
|
||||
undecodedChunk.readerIndex(startReaderIndex);
|
||||
undecodedChunk.writerIndex(startReaderIndex);
|
||||
return false;
|
||||
} else if (posDelimiter > 0) {
|
||||
// Not fully but still some bytes to provide: httpData is not yet finished since delimiter not found
|
||||
ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter);
|
||||
try {
|
||||
httpData.addContent(content, false);
|
||||
} catch (IOException e) {
|
||||
throw new ErrorDataDecoderException(e);
|
||||
}
|
||||
rewriteCurrentBuffer(undecodedChunk, posDelimiter);
|
||||
return false;
|
||||
}
|
||||
lastPosition = undecodedChunk.readerIndex();
|
||||
if (nextByte == HttpConstants.LF) {
|
||||
index = 0;
|
||||
lastPosition -= (prevByte == HttpConstants.CR)? 2 : 1;
|
||||
}
|
||||
prevByte = nextByte;
|
||||
// Empty chunk or so
|
||||
return false;
|
||||
}
|
||||
if (prevByte == HttpConstants.CR) {
|
||||
lastPosition--;
|
||||
}
|
||||
ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex);
|
||||
// Delimiter found at posDelimiter, including LF or CRLF, so httpData has its last chunk
|
||||
ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter);
|
||||
try {
|
||||
httpData.addContent(content, delimiterFound);
|
||||
httpData.addContent(content, true);
|
||||
} catch (IOException e) {
|
||||
throw new ErrorDataDecoderException(e);
|
||||
}
|
||||
undecodedChunk.readerIndex(lastPosition);
|
||||
return delimiterFound;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load the field value from a Multipart request
|
||||
*
|
||||
* @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks
|
||||
* @throws ErrorDataDecoderException
|
||||
*/
|
||||
private static boolean loadDataMultipart(ByteBuf undecodedChunk, String delimiter, HttpData httpData) {
|
||||
if (!undecodedChunk.hasArray()) {
|
||||
return loadDataMultipartStandard(undecodedChunk, delimiter, httpData);
|
||||
}
|
||||
final SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
|
||||
final int startReaderIndex = undecodedChunk.readerIndex();
|
||||
final int delimeterLength = delimiter.length();
|
||||
int index = 0;
|
||||
int lastRealPos = sao.pos;
|
||||
byte prevByte = HttpConstants.LF;
|
||||
boolean delimiterFound = false;
|
||||
while (sao.pos < sao.limit) {
|
||||
final byte nextByte = sao.bytes[sao.pos++];
|
||||
// Check the delimiter
|
||||
if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) {
|
||||
index++;
|
||||
if (delimeterLength == index) {
|
||||
delimiterFound = true;
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
lastRealPos = sao.pos;
|
||||
if (nextByte == HttpConstants.LF) {
|
||||
index = 0;
|
||||
lastRealPos -= (prevByte == HttpConstants.CR)? 2 : 1;
|
||||
}
|
||||
prevByte = nextByte;
|
||||
}
|
||||
if (prevByte == HttpConstants.CR) {
|
||||
lastRealPos--;
|
||||
}
|
||||
final int lastPosition = sao.getReadPosition(lastRealPos);
|
||||
final ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex);
|
||||
try {
|
||||
httpData.addContent(content, delimiterFound);
|
||||
} catch (IOException e) {
|
||||
throw new ErrorDataDecoderException(e);
|
||||
}
|
||||
undecodedChunk.readerIndex(lastPosition);
|
||||
return delimiterFound;
|
||||
rewriteCurrentBuffer(undecodedChunk, posDelimiter);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1529,4 +1334,15 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
|
||||
values.add(svalue.substring(start));
|
||||
return values.toArray(new String[0]);
|
||||
}
|
||||
|
||||
/**
 * This method is intentionally package-private so that tests can inspect
 * the amount of memory allocated (capacity) within the private
 * {@code ByteBuf undecodedChunk}.
 *
 * @return the number of bytes the internal buffer can contain
 */
int getCurrentAllocatedCapacity() {
    return undecodedChunk.capacity();
}
|
||||
}
|
||||
|
@ -943,12 +943,12 @@ public class HttpPostRequestEncoder implements ChunkedInput<HttpContent> {
|
||||
// Set name=
|
||||
if (isKey) {
|
||||
String key = currentData.getName();
|
||||
buffer = wrappedBuffer(key.getBytes());
|
||||
buffer = wrappedBuffer(key.getBytes(charset));
|
||||
isKey = false;
|
||||
if (currentBuffer == null) {
|
||||
currentBuffer = wrappedBuffer(buffer, wrappedBuffer("=".getBytes()));
|
||||
currentBuffer = wrappedBuffer(buffer, wrappedBuffer("=".getBytes(charset)));
|
||||
} else {
|
||||
currentBuffer = wrappedBuffer(currentBuffer, buffer, wrappedBuffer("=".getBytes()));
|
||||
currentBuffer = wrappedBuffer(currentBuffer, buffer, wrappedBuffer("=".getBytes(charset)));
|
||||
}
|
||||
// continue
|
||||
size -= buffer.readableBytes() + 1;
|
||||
@ -969,7 +969,7 @@ public class HttpPostRequestEncoder implements ChunkedInput<HttpContent> {
|
||||
ByteBuf delimiter = null;
|
||||
if (buffer.readableBytes() < size) {
|
||||
isKey = true;
|
||||
delimiter = iterator.hasNext() ? wrappedBuffer("&".getBytes()) : null;
|
||||
delimiter = iterator.hasNext() ? wrappedBuffer("&".getBytes(charset)) : null;
|
||||
}
|
||||
|
||||
// End for current InterfaceHttpData, need potentially more data
|
||||
|
@ -303,7 +303,17 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
|
||||
}
|
||||
parseBody();
|
||||
if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
|
||||
undecodedChunk.discardReadBytes();
|
||||
if (undecodedChunk.refCnt() == 1) {
|
||||
// It's safe to call discardBytes() as we are the only owner of the buffer.
|
||||
undecodedChunk.discardReadBytes();
|
||||
} else {
|
||||
// There seems to be multiple references of the buffer. Let's copy the data and release the buffer to
|
||||
// ensure we can give back memory to the system.
|
||||
ByteBuf buffer = undecodedChunk.alloc().buffer(undecodedChunk.readableBytes());
|
||||
buffer.writeBytes(undecodedChunk);
|
||||
undecodedChunk.release();
|
||||
undecodedChunk = buffer;
|
||||
}
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
@ -270,9 +270,6 @@ public class MixedAttribute implements Attribute {
|
||||
|
||||
@Override
|
||||
public void setValue(String value) throws IOException {
|
||||
if (value != null) {
|
||||
checkSize(value.getBytes().length);
|
||||
}
|
||||
attribute.setValue(value);
|
||||
}
|
||||
|
||||
|
@ -34,6 +34,7 @@ import io.netty.handler.codec.http.LastHttpContent;
|
||||
import io.netty.util.CharsetUtil;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URLEncoder;
|
||||
import java.nio.charset.UnsupportedCharsetException;
|
||||
import java.util.Arrays;
|
||||
@ -961,6 +962,7 @@ public class HttpPostRequestDecoderTest {
|
||||
|
||||
try {
|
||||
HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(new DefaultHttpDataFactory(false), req);
|
||||
assertEquals(2, decoder.getBodyHttpDatas().size());
|
||||
InterfaceHttpData data = decoder.getBodyHttpData("title");
|
||||
assertTrue(data instanceof MemoryAttribute);
|
||||
assertEquals("bar-stream", ((MemoryAttribute) data).getString());
|
||||
@ -976,4 +978,177 @@ public class HttpPostRequestDecoderTest {
|
||||
assertTrue(req.release());
|
||||
}
|
||||
}
|
||||
|
||||
private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, boolean inMemory)
|
||||
throws IOException {
|
||||
int nbChunks = 100;
|
||||
int bytesPerChunk = 100000;
|
||||
int bytesLastChunk = 10000;
|
||||
int fileSize = bytesPerChunk * nbChunks + bytesLastChunk; // set Xmx to a number lower than this and it crashes
|
||||
|
||||
String prefix = "--861fbeab-cd20-470c-9609-d40a0f704466\n" +
|
||||
"Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n" +
|
||||
"Content-Type: image/jpeg\n" +
|
||||
"Content-Length: " + fileSize + "\n" +
|
||||
"\n";
|
||||
|
||||
String suffix1 = "\n" +
|
||||
"--861fbeab-";
|
||||
String suffix2 = "cd20-470c-9609-d40a0f704466--\n";
|
||||
String suffix = suffix1 + suffix2;
|
||||
|
||||
HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload");
|
||||
request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466");
|
||||
request.headers().set("content-length", prefix.length() + fileSize + suffix.length());
|
||||
|
||||
HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request);
|
||||
decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8))));
|
||||
|
||||
byte[] body = new byte[bytesPerChunk];
|
||||
Arrays.fill(body, (byte) 1);
|
||||
for (int i = 0; i < nbChunks; i++) {
|
||||
ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerChunk);
|
||||
decoder.offer(new DefaultHttpContent(content)); // **OutOfMemory here**
|
||||
content.release();
|
||||
}
|
||||
|
||||
byte[] bsuffix1 = suffix1.getBytes(CharsetUtil.UTF_8);
|
||||
byte[] lastbody = new byte[bytesLastChunk + bsuffix1.length];
|
||||
Arrays.fill(body, (byte) 1);
|
||||
for (int i = 0; i < bsuffix1.length; i++) {
|
||||
lastbody[bytesLastChunk + i] = bsuffix1[i];
|
||||
}
|
||||
|
||||
ByteBuf content2 = Unpooled.wrappedBuffer(lastbody, 0, lastbody.length);
|
||||
decoder.offer(new DefaultHttpContent(content2));
|
||||
content2.release();
|
||||
content2 = Unpooled.wrappedBuffer(suffix2.getBytes(CharsetUtil.UTF_8));
|
||||
decoder.offer(new DefaultHttpContent(content2));
|
||||
content2.release();
|
||||
decoder.offer(new DefaultLastHttpContent());
|
||||
|
||||
FileUpload data = (FileUpload) decoder.getBodyHttpDatas().get(0);
|
||||
assertEquals(data.length(), fileSize);
|
||||
assertEquals(inMemory, data.isInMemory());
|
||||
if (data.isInMemory()) {
|
||||
// To be done only if not inMemory: assertEquals(data.get().length, fileSize);
|
||||
assertFalse("Capacity should be higher than 1M", data.getByteBuf().capacity()
|
||||
< 1024 * 1024);
|
||||
}
|
||||
assertTrue("Capacity should be less than 1M", decoder.getCurrentAllocatedCapacity()
|
||||
< 1024 * 1024);
|
||||
for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) {
|
||||
httpData.release();
|
||||
factory.removeHttpDataFromClean(request, httpData);
|
||||
}
|
||||
factory.cleanAllHttpData();
|
||||
decoder.destroy();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBIgFileUploadDelimiterInMiddleChunkDecoderDiskFactory() throws IOException {
|
||||
// Factory using Disk mode
|
||||
HttpDataFactory factory = new DefaultHttpDataFactory(true);
|
||||
|
||||
commonTestBigFileDelimiterInMiddleChunk(factory, false);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBIgFileUploadDelimiterInMiddleChunkDecoderMemoryFactory() throws IOException {
|
||||
// Factory using Memory mode
|
||||
HttpDataFactory factory = new DefaultHttpDataFactory(false);
|
||||
|
||||
commonTestBigFileDelimiterInMiddleChunk(factory, true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBIgFileUploadDelimiterInMiddleChunkDecoderMixedFactory() throws IOException {
|
||||
// Factory using Mixed mode, where file shall be on Disk
|
||||
HttpDataFactory factory = new DefaultHttpDataFactory(10000);
|
||||
|
||||
commonTestBigFileDelimiterInMiddleChunk(factory, false);
|
||||
}
|
||||
|
||||
private void commonNotBadReleaseBuffersDuringDecoding(HttpDataFactory factory, boolean inMemory)
|
||||
throws IOException {
|
||||
int nbItems = 20;
|
||||
int bytesPerItem = 1000;
|
||||
int maxMemory = 500;
|
||||
|
||||
String prefix1 = "\n--861fbeab-cd20-470c-9609-d40a0f704466\n" +
|
||||
"Content-Disposition: form-data; name=\"image";
|
||||
String prefix2 =
|
||||
"\"; filename=\"guangzhou.jpeg\"\n" +
|
||||
"Content-Type: image/jpeg\n" +
|
||||
"Content-Length: " + bytesPerItem + "\n" + "\n";
|
||||
|
||||
String suffix = "\n--861fbeab-cd20-470c-9609-d40a0f704466--\n";
|
||||
|
||||
HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload");
|
||||
request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466");
|
||||
request.headers().set("content-length", nbItems * (prefix1.length() + prefix2.length() + 2 + bytesPerItem)
|
||||
+ suffix.length());
|
||||
HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request);
|
||||
decoder.setDiscardThreshold(maxMemory);
|
||||
for (int rank = 0; rank < nbItems; rank++) {
|
||||
byte[] bp1 = prefix1.getBytes(CharsetUtil.UTF_8);
|
||||
byte[] bp2 = prefix2.getBytes(CharsetUtil.UTF_8);
|
||||
byte[] prefix = new byte[bp1.length + 2 + bp2.length];
|
||||
for (int i = 0; i < bp1.length; i++) {
|
||||
prefix[i] = bp1[i];
|
||||
}
|
||||
byte[] brank = Integer.toString(10 + rank).getBytes(CharsetUtil.UTF_8);
|
||||
prefix[bp1.length] = brank[0];
|
||||
prefix[bp1.length + 1] = brank[1];
|
||||
for (int i = 0; i < bp2.length; i++) {
|
||||
prefix[bp1.length + 2 + i] = bp2[i];
|
||||
}
|
||||
decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(prefix)));
|
||||
byte[] body = new byte[bytesPerItem];
|
||||
Arrays.fill(body, (byte) rank);
|
||||
ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerItem);
|
||||
decoder.offer(new DefaultHttpContent(content));
|
||||
content.release();
|
||||
}
|
||||
byte[] lastbody = suffix.getBytes(CharsetUtil.UTF_8);
|
||||
ByteBuf content2 = Unpooled.wrappedBuffer(lastbody, 0, lastbody.length);
|
||||
decoder.offer(new DefaultHttpContent(content2));
|
||||
content2.release();
|
||||
decoder.offer(new DefaultLastHttpContent());
|
||||
|
||||
for (int rank = 0; rank < nbItems; rank++) {
|
||||
FileUpload data = (FileUpload) decoder.getBodyHttpData("image" + (10 + rank));
|
||||
assertEquals(data.length(), bytesPerItem);
|
||||
assertEquals(inMemory, data.isInMemory());
|
||||
byte[] body = new byte[bytesPerItem];
|
||||
Arrays.fill(body, (byte) rank);
|
||||
assertTrue(Arrays.equals(body, data.get()));
|
||||
}
|
||||
// To not be done since will load full file on memory: assertEquals(data.get().length, fileSize);
|
||||
// Not mandatory since implicitely called during destroy of decoder
|
||||
for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) {
|
||||
httpData.release();
|
||||
factory.removeHttpDataFromClean(request, httpData);
|
||||
}
|
||||
factory.cleanAllHttpData();
|
||||
decoder.destroy();
|
||||
}
|
||||
@Test
|
||||
public void testNotBadReleaseBuffersDuringDecodingDiskFactory() throws IOException {
|
||||
// Using Disk Factory
|
||||
HttpDataFactory factory = new DefaultHttpDataFactory(true);
|
||||
commonNotBadReleaseBuffersDuringDecoding(factory, false);
|
||||
}
|
||||
@Test
|
||||
public void testNotBadReleaseBuffersDuringDecodingMemoryFactory() throws IOException {
|
||||
// Using Memory Factory
|
||||
HttpDataFactory factory = new DefaultHttpDataFactory(false);
|
||||
commonNotBadReleaseBuffersDuringDecoding(factory, true);
|
||||
}
|
||||
@Test
|
||||
public void testNotBadReleaseBuffersDuringDecodingMixedFactory() throws IOException {
|
||||
// Using Mixed Factory
|
||||
HttpDataFactory factory = new DefaultHttpDataFactory(100);
|
||||
commonNotBadReleaseBuffersDuringDecoding(factory, false);
|
||||
}
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ import io.netty.handler.codec.http.HttpHeaderNames;
|
||||
import io.netty.handler.codec.http.HttpMethod;
|
||||
import io.netty.handler.codec.http.HttpVersion;
|
||||
import io.netty.microbench.util.AbstractMicrobenchmark;
|
||||
import io.netty.util.CharsetUtil;
|
||||
import io.netty.util.ResourceLeakDetector;
|
||||
import io.netty.util.ResourceLeakDetector.Level;
|
||||
import org.openjdk.jmh.annotations.Benchmark;
|
||||
@ -54,12 +55,12 @@ public class HttpPostMultipartRequestDecoderBenchmark
|
||||
"Content-Disposition: form-data; name=\"msg_id\"\n\n15200\n--" +
|
||||
BOUNDARY +
|
||||
"\nContent-Disposition: form-data; name=\"msg1\"; filename=\"file1.txt\"\n\n" +
|
||||
data).getBytes();
|
||||
byte[] bodyPartBigBytes = data.getBytes();
|
||||
data).getBytes(CharsetUtil.UTF_8);
|
||||
byte[] bodyPartBigBytes = data.getBytes(CharsetUtil.UTF_8);
|
||||
byte[] intermediaryBytes = ("\n--" + BOUNDARY +
|
||||
"\nContent-Disposition: form-data; name=\"msg2\"; filename=\"file2.txt\"\n\n" +
|
||||
data).getBytes();
|
||||
byte[] finalBigBytes = ("\n" + "--" + BOUNDARY + "--\n").getBytes();
|
||||
data).getBytes(CharsetUtil.UTF_8);
|
||||
byte[] finalBigBytes = ("\n" + "--" + BOUNDARY + "--\n").getBytes(CharsetUtil.UTF_8);
|
||||
ByteBuf firstBuf = Unpooled.wrappedBuffer(bodyStartBytes);
|
||||
ByteBuf finalBuf = Unpooled.wrappedBuffer(finalBigBytes);
|
||||
ByteBuf nextBuf;
|
||||
|
Loading…
Reference in New Issue
Block a user