Revert HttpPostMultipartRequestDecoder and HttpPostStandardRequestDecoder to e5951d46fc (#10989)

Motivation:

The changes introduced in 1c230405fd caused various issues, while the fix itself is not considered critical. For now the best course is to just roll back and investigate more.

Modifications:

- Revert the changes done in 1c230405fd (and later) for the post decoders.
- Ensure we give memory back to the system as soon as possible, in a safe manner.

Result:

Fixes https://github.com/netty/netty/issues/10973
Norman Maurer 2021-02-03 20:40:29 +01:00
parent f25b12077a
commit d7fb0f5c0b
2 changed files with 347 additions and 155 deletions

File: HttpPostMultipartRequestDecoder.java

@@ -128,11 +128,6 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder
      */
     private Attribute currentAttribute;
 
-    /**
-     * The current Data position before finding delimiter
-     */
-    private int lastDataPosition;
-
     private boolean destroyed;
 
     private int discardThreshold = HttpPostRequestDecoder.DEFAULT_DISCARD_THRESHOLD;
@@ -347,15 +342,22 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder
                     // which is not really usable for us as we may exceed it once we add more bytes.
                     buf.alloc().buffer(buf.readableBytes()).writeBytes(buf);
         } else {
-            int readPos = undecodedChunk.readerIndex();
-            int writable = undecodedChunk.writableBytes();
-            int toWrite = buf.readableBytes();
-            if (undecodedChunk.refCnt() == 1 && writable < toWrite && readPos + writable >= toWrite) {
-                undecodedChunk.discardReadBytes();
-            }
             undecodedChunk.writeBytes(buf);
         }
         parseBody();
+        if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
+            if (undecodedChunk.refCnt() == 1) {
+                // It's safe to call discardReadBytes() as we are the only owner of the buffer.
+                undecodedChunk.discardReadBytes();
+            } else {
+                // There seem to be multiple references to the buffer. Copy the data and release the buffer to
+                // ensure we can give back memory to the system.
+                ByteBuf buffer = undecodedChunk.alloc().buffer(undecodedChunk.readableBytes());
+                buffer.writeBytes(undecodedChunk);
+                undecodedChunk.release();
+                undecodedChunk = buffer;
+            }
+        }
         return this;
     }
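The block restored after parseBody() follows a common ByteBuf reclamation pattern: compact the accumulation buffer in place only while it is exclusively owned (refCnt() == 1), otherwise copy the remaining readable bytes into a fresh buffer and release the shared one so its memory can go back to the allocator. A minimal standalone sketch of that pattern (class name and threshold are illustrative, not Netty API):

    import io.netty.buffer.ByteBuf;

    final class BufferReclaim {
        // Illustrative threshold; the decoders use HttpPostRequestDecoder.DEFAULT_DISCARD_THRESHOLD.
        private static final int DISCARD_THRESHOLD = 10 * 1024 * 1024;

        /**
         * Returns a buffer with the already-read bytes reclaimed. May return the same
         * instance (compacted in place) or a fresh copy of the readable bytes.
         */
        static ByteBuf reclaim(ByteBuf accumulated) {
            if (accumulated.writerIndex() <= DISCARD_THRESHOLD) {
                return accumulated; // not worth compacting yet
            }
            if (accumulated.refCnt() == 1) {
                // Sole owner: safe to move the readable bytes to the front in place.
                return accumulated.discardReadBytes();
            }
            // Shared buffer: copy the remaining readable bytes and release our reference,
            // so the (possibly large) original can be returned to the allocator.
            ByteBuf copy = accumulated.alloc().buffer(accumulated.readableBytes());
            copy.writeBytes(accumulated);
            accumulated.release();
            return copy;
        }

        private BufferReclaim() { }
    }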
@@ -973,6 +975,47 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder
         currentFieldAttributes.remove(HttpHeaderValues.FILENAME);
     }
 
+    /**
+     * Read one line up to the CRLF or LF
+     *
+     * @return the String from one line
+     * @throws NotEnoughDataDecoderException
+     *             Need more chunks and reset the {@code readerIndex} to the previous
+     *             value
+     */
+    private static String readLineStandard(ByteBuf undecodedChunk, Charset charset) {
+        int readerIndex = undecodedChunk.readerIndex();
+        ByteBuf line = undecodedChunk.alloc().heapBuffer(64);
+        try {
+            while (undecodedChunk.isReadable()) {
+                byte nextByte = undecodedChunk.readByte();
+                if (nextByte == HttpConstants.CR) {
+                    // check but do not change readerIndex
+                    nextByte = undecodedChunk.getByte(undecodedChunk.readerIndex());
+                    if (nextByte == HttpConstants.LF) {
+                        // force read
+                        undecodedChunk.readByte();
+                        return line.toString(charset);
+                    } else {
+                        // Write CR (not followed by LF)
+                        line.writeByte(HttpConstants.CR);
+                    }
+                } else if (nextByte == HttpConstants.LF) {
+                    return line.toString(charset);
+                } else {
+                    line.writeByte(nextByte);
+                }
+            }
+        } catch (IndexOutOfBoundsException e) {
+            undecodedChunk.readerIndex(readerIndex);
+            throw new NotEnoughDataDecoderException(e);
+        } finally {
+            line.release();
+        }
+        undecodedChunk.readerIndex(readerIndex);
+        throw new NotEnoughDataDecoderException();
+    }
+
     /**
      * Read one line up to the CRLF or LF
      *
@@ -982,23 +1025,44 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder
      *             value
      */
     private static String readLine(ByteBuf undecodedChunk, Charset charset) {
-        final int readerIndex = undecodedChunk.readerIndex();
-        int posLf = undecodedChunk.bytesBefore(HttpConstants.LF);
-        if (posLf == -1) {
-            throw new NotEnoughDataDecoderException();
-        }
-        boolean crFound =
-                undecodedChunk.getByte(readerIndex + posLf - 1) == HttpConstants.CR;
-        if (crFound) {
-            posLf--;
-        }
-        CharSequence line = undecodedChunk.readCharSequence(posLf, charset);
-        if (crFound) {
-            undecodedChunk.skipBytes(2);
-        } else {
-            undecodedChunk.skipBytes(1);
-        }
-        return line.toString();
+        if (!undecodedChunk.hasArray()) {
+            return readLineStandard(undecodedChunk, charset);
+        }
+        SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
+        int readerIndex = undecodedChunk.readerIndex();
+        ByteBuf line = undecodedChunk.alloc().heapBuffer(64);
+        try {
+            while (sao.pos < sao.limit) {
+                byte nextByte = sao.bytes[sao.pos++];
+                if (nextByte == HttpConstants.CR) {
+                    if (sao.pos < sao.limit) {
+                        nextByte = sao.bytes[sao.pos++];
+                        if (nextByte == HttpConstants.LF) {
+                            sao.setReadPosition(0);
+                            return line.toString(charset);
+                        } else {
+                            // Write CR (not followed by LF)
+                            sao.pos--;
+                            line.writeByte(HttpConstants.CR);
+                        }
+                    } else {
+                        line.writeByte(nextByte);
+                    }
+                } else if (nextByte == HttpConstants.LF) {
+                    sao.setReadPosition(0);
+                    return line.toString(charset);
+                } else {
+                    line.writeByte(nextByte);
+                }
+            }
+        } catch (IndexOutOfBoundsException e) {
+            undecodedChunk.readerIndex(readerIndex);
+            throw new NotEnoughDataDecoderException(e);
+        } finally {
+            line.release();
+        }
+        undecodedChunk.readerIndex(readerIndex);
+        throw new NotEnoughDataDecoderException();
     }
 
     /**
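The restored readLine keeps the long-standing two-path layout: when the accumulated buffer is backed by a byte[] (hasArray()), the SeekAheadOptimize helper walks the array directly; otherwise the *Standard variant above falls back to the plain ByteBuf API. A rough sketch of that dispatch pattern on its own, with a hypothetical findLineEnd helper used purely for illustration:

    import io.netty.buffer.ByteBuf;

    final class ArrayFastPath {
        /** Counts readable bytes up to (excluding) the first LF, or -1 if none; illustration only. */
        static int findLineEnd(ByteBuf buf) {
            if (!buf.hasArray()) {
                return findLineEndSlow(buf); // composite/direct buffers: use the ByteBuf API
            }
            // Heap buffers expose their backing array; index math must add arrayOffset().
            byte[] array = buf.array();
            int start = buf.arrayOffset() + buf.readerIndex();
            int end = buf.arrayOffset() + buf.writerIndex();
            for (int i = start; i < end; i++) {
                if (array[i] == '\n') {
                    return i - start;
                }
            }
            return -1;
        }

        private static int findLineEndSlow(ByteBuf buf) {
            for (int i = buf.readerIndex(); i < buf.writerIndex(); i++) {
                if (buf.getByte(i) == '\n') {
                    return i - buf.readerIndex();
                }
            }
            return -1;
        }

        private ArrayFastPath() { }
    }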
@@ -1016,73 +1080,77 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder
      *             Need more chunks and reset the {@code readerIndex} to the previous
      *             value
      */
-    private String readDelimiter(ByteBuf undecodedChunk, String delimiter) {
-        final int readerIndex = undecodedChunk.readerIndex();
+    private static String readDelimiterStandard(ByteBuf undecodedChunk, String delimiter) {
+        int readerIndex = undecodedChunk.readerIndex();
         try {
-            final int len = delimiter.length();
-            if (len + 2 > undecodedChunk.readableBytes()) {
-                // Not able to check if "--" is present
-                throw new NotEnoughDataDecoderException();
-            }
-            int newPositionDelimiter = findDelimiter(undecodedChunk, delimiter, 0);
-            if (newPositionDelimiter != 0) {
-                // Delimiter not fully found
-                throw new NotEnoughDataDecoderException();
-            }
-            byte nextByte = undecodedChunk.getByte(readerIndex + len);
-            // first check for opening delimiter
-            if (nextByte == HttpConstants.CR) {
-                nextByte = undecodedChunk.getByte(readerIndex + len + 1);
-                if (nextByte == HttpConstants.LF) {
-                    CharSequence line = undecodedChunk.readCharSequence(len, charset);
-                    undecodedChunk.skipBytes(2);
-                    return line.toString();
-                } else {
-                    // error since CR must be followed by LF
-                    // delimiter not found so break here !
-                    undecodedChunk.readerIndex(readerIndex);
-                    throw new NotEnoughDataDecoderException();
-                }
-            } else if (nextByte == HttpConstants.LF) {
-                CharSequence line = undecodedChunk.readCharSequence(len, charset);
-                undecodedChunk.skipBytes(1);
-                return line.toString();
-            } else if (nextByte == '-') {
-                // second check for closing delimiter
-                nextByte = undecodedChunk.getByte(readerIndex + len + 1);
-                if (nextByte == '-') {
-                    CharSequence line = undecodedChunk.readCharSequence(len + 2, charset);
-                    // now try to find if CRLF or LF there
-                    if (undecodedChunk.isReadable()) {
-                        nextByte = undecodedChunk.readByte();
-                        if (nextByte == HttpConstants.CR) {
-                            nextByte = undecodedChunk.readByte();
-                            if (nextByte == HttpConstants.LF) {
-                                return line.toString();
-                            } else {
-                                // error CR without LF
-                                // delimiter not found so break here !
-                                undecodedChunk.readerIndex(readerIndex);
-                                throw new NotEnoughDataDecoderException();
-                            }
-                        } else if (nextByte == HttpConstants.LF) {
-                            return line.toString();
-                        } else {
-                            // No CRLF but ok however (Adobe Flash uploader)
-                            // minus 1 since we read one char ahead but
-                            // should not
-                            undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
-                            return line.toString();
-                        }
-                    }
-                    // FIXME what do we do here?
-                    // either considering it is fine, either waiting for
-                    // more data to come?
-                    // lets try considering it is fine...
-                    return line.toString();
-                }
-                // only one '-' => not enough
-                // whatever now => error since incomplete
-            }
+            StringBuilder sb = new StringBuilder(64);
+            int delimiterPos = 0;
+            int len = delimiter.length();
+            while (undecodedChunk.isReadable() && delimiterPos < len) {
+                byte nextByte = undecodedChunk.readByte();
+                if (nextByte == delimiter.charAt(delimiterPos)) {
+                    delimiterPos++;
+                    sb.append((char) nextByte);
+                } else {
+                    // delimiter not found so break here !
+                    undecodedChunk.readerIndex(readerIndex);
+                    throw new NotEnoughDataDecoderException();
+                }
+            }
+            // Now check if either opening delimiter or closing delimiter
+            if (undecodedChunk.isReadable()) {
+                byte nextByte = undecodedChunk.readByte();
+                // first check for opening delimiter
+                if (nextByte == HttpConstants.CR) {
+                    nextByte = undecodedChunk.readByte();
+                    if (nextByte == HttpConstants.LF) {
+                        return sb.toString();
+                    } else {
+                        // error since CR must be followed by LF
+                        // delimiter not found so break here !
+                        undecodedChunk.readerIndex(readerIndex);
+                        throw new NotEnoughDataDecoderException();
+                    }
+                } else if (nextByte == HttpConstants.LF) {
+                    return sb.toString();
+                } else if (nextByte == '-') {
+                    sb.append('-');
+                    // second check for closing delimiter
+                    nextByte = undecodedChunk.readByte();
+                    if (nextByte == '-') {
+                        sb.append('-');
+                        // now try to find if CRLF or LF there
+                        if (undecodedChunk.isReadable()) {
+                            nextByte = undecodedChunk.readByte();
+                            if (nextByte == HttpConstants.CR) {
+                                nextByte = undecodedChunk.readByte();
+                                if (nextByte == HttpConstants.LF) {
+                                    return sb.toString();
+                                } else {
+                                    // error CR without LF
+                                    // delimiter not found so break here !
+                                    undecodedChunk.readerIndex(readerIndex);
+                                    throw new NotEnoughDataDecoderException();
+                                }
+                            } else if (nextByte == HttpConstants.LF) {
+                                return sb.toString();
+                            } else {
+                                // No CRLF but ok however (Adobe Flash uploader)
+                                // minus 1 since we read one char ahead but
+                                // should not
+                                undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
+                                return sb.toString();
+                            }
+                        }
+                        // FIXME what do we do here?
+                        // either considering it is fine, either waiting for
+                        // more data to come?
+                        // lets try considering it is fine...
+                        return sb.toString();
+                    }
+                    // only one '-' => not enough
+                    // whatever now => error since incomplete
+                }
+            }
         } catch (IndexOutOfBoundsException e) {
             undecodedChunk.readerIndex(readerIndex);
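For orientation, the delimiter matched here is the boundary line of a multipart/form-data body: "--boundary" plus CRLF (or LF) opens a part, and "--boundary--" closes the body, possibly without a trailing CRLF, as the Adobe Flash comment notes. A small illustrative example with a made-up boundary and field name:

    final class MultipartExample {
        // Hypothetical boundary; in practice it comes from the Content-Type header:
        //   Content-Type: multipart/form-data; boundary=dLV9Wyq26L
        static final String BODY =
                "--dLV9Wyq26L\r\n"                                        // opening delimiter, followed by CRLF (or LF)
                + "Content-Disposition: form-data; name=\"comment\"\r\n"
                + "\r\n"
                + "hello\r\n"
                + "--dLV9Wyq26L--\r\n";                                   // closing delimiter, trailing CRLF optional

        private MultipartExample() { }
    }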
@@ -1093,93 +1161,220 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder
     }
 
     /**
-     * @param undecodedChunk the source where the delimiter is to be found
-     * @param delimiter the string to find out
-     * @param offset the offset from readerIndex within the undecodedChunk to
-     *            start from to find out the delimiter
+     * Read one line up to --delimiter or --delimiter-- and if existing the CRLF
+     * or LF. Note that CRLF or LF are mandatory for opening delimiter
+     * (--delimiter) but not for closing delimiter (--delimiter--) since some
+     * clients do not include CRLF in this case.
      *
-     * @return a number >= 0 if found, else new offset with negative value
-     *            (to inverse), both from readerIndex
+     * @param delimiter
+     *            of the form --string, such that '--' is already included
+     * @return the String from one line as the delimiter searched (opening or
+     *         closing)
      * @throws NotEnoughDataDecoderException
-     *             Need more chunks while relative position with readerIndex is 0
+     *             Need more chunks and reset the readerIndex to the previous
+     *             value
      */
-    private static int findDelimiter(ByteBuf undecodedChunk, String delimiter, int offset) {
-        final int startReaderIndex = undecodedChunk.readerIndex();
-        final int delimeterLength = delimiter.length();
-        final int toRead = undecodedChunk.readableBytes();
-        int newOffset = offset;
-        boolean delimiterNotFound = true;
-        while (delimiterNotFound && newOffset + delimeterLength <= toRead) {
-            int posFirstChar = undecodedChunk
-                    .bytesBefore(startReaderIndex + newOffset, toRead - newOffset,
-                            (byte) delimiter.codePointAt(0));
-            if (posFirstChar == -1) {
-                newOffset = toRead;
-                return -newOffset;
-            }
-            newOffset = posFirstChar + newOffset;
-            if (newOffset + delimeterLength > toRead) {
-                return -newOffset;
-            }
-            // assume will found it
-            delimiterNotFound = false;
-            for (int index = 1; index < delimeterLength; index++) {
-                if (undecodedChunk.getByte(startReaderIndex + newOffset + index) != delimiter.codePointAt(index)) {
-                    // ignore first found offset and redo search from next char
-                    newOffset++;
-                    delimiterNotFound = true;
-                    break;
-                }
-            }
-        }
-        if (delimiterNotFound || newOffset + delimeterLength > toRead) {
-            if (newOffset == 0) {
-                throw new NotEnoughDataDecoderException();
-            }
-            return -newOffset;
-        }
-        return newOffset;
-    }
+    private static String readDelimiter(ByteBuf undecodedChunk, String delimiter) {
+        if (!undecodedChunk.hasArray()) {
+            return readDelimiterStandard(undecodedChunk, delimiter);
+        }
+        SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
+        int readerIndex = undecodedChunk.readerIndex();
+        int delimiterPos = 0;
+        int len = delimiter.length();
+        try {
+            StringBuilder sb = new StringBuilder(64);
+            // check conformity with delimiter
+            while (sao.pos < sao.limit && delimiterPos < len) {
+                byte nextByte = sao.bytes[sao.pos++];
+                if (nextByte == delimiter.charAt(delimiterPos)) {
+                    delimiterPos++;
+                    sb.append((char) nextByte);
+                } else {
+                    // delimiter not found so break here !
+                    undecodedChunk.readerIndex(readerIndex);
+                    throw new NotEnoughDataDecoderException();
+                }
+            }
+            // Now check if either opening delimiter or closing delimiter
+            if (sao.pos < sao.limit) {
+                byte nextByte = sao.bytes[sao.pos++];
+                if (nextByte == HttpConstants.CR) {
+                    // first check for opening delimiter
+                    if (sao.pos < sao.limit) {
+                        nextByte = sao.bytes[sao.pos++];
+                        if (nextByte == HttpConstants.LF) {
+                            sao.setReadPosition(0);
+                            return sb.toString();
+                        } else {
+                            // error CR without LF
+                            // delimiter not found so break here !
+                            undecodedChunk.readerIndex(readerIndex);
+                            throw new NotEnoughDataDecoderException();
+                        }
+                    } else {
+                        // error since CR must be followed by LF
+                        // delimiter not found so break here !
+                        undecodedChunk.readerIndex(readerIndex);
+                        throw new NotEnoughDataDecoderException();
+                    }
+                } else if (nextByte == HttpConstants.LF) {
+                    // same first check for opening delimiter where LF used with
+                    // no CR
+                    sao.setReadPosition(0);
+                    return sb.toString();
+                } else if (nextByte == '-') {
+                    sb.append('-');
+                    // second check for closing delimiter
+                    if (sao.pos < sao.limit) {
+                        nextByte = sao.bytes[sao.pos++];
+                        if (nextByte == '-') {
+                            sb.append('-');
+                            // now try to find if CRLF or LF there
+                            if (sao.pos < sao.limit) {
+                                nextByte = sao.bytes[sao.pos++];
+                                if (nextByte == HttpConstants.CR) {
+                                    if (sao.pos < sao.limit) {
+                                        nextByte = sao.bytes[sao.pos++];
+                                        if (nextByte == HttpConstants.LF) {
+                                            sao.setReadPosition(0);
+                                            return sb.toString();
+                                        } else {
+                                            // error CR without LF
+                                            // delimiter not found so break here !
+                                            undecodedChunk.readerIndex(readerIndex);
+                                            throw new NotEnoughDataDecoderException();
+                                        }
+                                    } else {
+                                        // error CR without LF
+                                        // delimiter not found so break here !
+                                        undecodedChunk.readerIndex(readerIndex);
+                                        throw new NotEnoughDataDecoderException();
+                                    }
+                                } else if (nextByte == HttpConstants.LF) {
+                                    sao.setReadPosition(0);
+                                    return sb.toString();
+                                } else {
+                                    // No CRLF but ok however (Adobe Flash
+                                    // uploader)
+                                    // minus 1 since we read one char ahead but
+                                    // should not
+                                    sao.setReadPosition(1);
+                                    return sb.toString();
+                                }
+                            }
+                            // FIXME what do we do here?
+                            // either considering it is fine, either waiting for
+                            // more data to come?
+                            // lets try considering it is fine...
+                            sao.setReadPosition(0);
+                            return sb.toString();
+                        }
+                        // whatever now => error since incomplete
+                        // only one '-' => not enough or whatever not enough
+                        // element
+                    }
+                }
+            }
+        } catch (IndexOutOfBoundsException e) {
+            undecodedChunk.readerIndex(readerIndex);
+            throw new NotEnoughDataDecoderException(e);
+        }
+        undecodedChunk.readerIndex(readerIndex);
+        throw new NotEnoughDataDecoderException();
+    }
+
+    /**
+     * Load the field value or file data from a Multipart request
+     *
+     * @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks
+     * @throws ErrorDataDecoderException
+     */
+    private static boolean loadDataMultipartStandard(ByteBuf undecodedChunk, String delimiter, HttpData httpData) {
+        final int startReaderIndex = undecodedChunk.readerIndex();
+        final int delimeterLength = delimiter.length();
+        int index = 0;
+        int lastPosition = startReaderIndex;
+        byte prevByte = HttpConstants.LF;
+        boolean delimiterFound = false;
+        while (undecodedChunk.isReadable()) {
+            final byte nextByte = undecodedChunk.readByte();
+            // Check the delimiter
+            if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) {
+                index++;
+                if (delimeterLength == index) {
+                    delimiterFound = true;
+                    break;
+                }
+                continue;
+            }
+            lastPosition = undecodedChunk.readerIndex();
+            if (nextByte == HttpConstants.LF) {
+                index = 0;
+                lastPosition -= (prevByte == HttpConstants.CR)? 2 : 1;
+            }
+            prevByte = nextByte;
+        }
+        if (prevByte == HttpConstants.CR) {
+            lastPosition--;
+        }
+        ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex);
+        try {
+            httpData.addContent(content, delimiterFound);
+        } catch (IOException e) {
+            throw new ErrorDataDecoderException(e);
+        }
+        undecodedChunk.readerIndex(lastPosition);
+        return delimiterFound;
+    }
 
     /**
      * Load the field value from a Multipart request
      *
      * @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks
-     *
      * @throws ErrorDataDecoderException
      */
-    private boolean loadDataMultipart(ByteBuf undecodedChunk, String delimiter,
-                                      HttpData httpData) {
+    private static boolean loadDataMultipart(ByteBuf undecodedChunk, String delimiter, HttpData httpData) {
+        if (!undecodedChunk.hasArray()) {
+            return loadDataMultipartStandard(undecodedChunk, delimiter, httpData);
+        }
+        final SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
         final int startReaderIndex = undecodedChunk.readerIndex();
-        int newOffset;
-        try {
-            newOffset = findDelimiter(undecodedChunk, delimiter, lastDataPosition);
-            if (newOffset < 0) {
-                // delimiter not found
-                lastDataPosition = -newOffset;
-                return false;
-            }
-        } catch (NotEnoughDataDecoderException e) {
-            // Not enough data and no change to lastDataPosition
-            return false;
-        }
-        // found delimiter but still need to check if CRLF before
-        int startDelimiter = newOffset;
-        if (undecodedChunk.getByte(startReaderIndex + startDelimiter - 1) == HttpConstants.LF) {
-            startDelimiter--;
-            if (undecodedChunk.getByte(startReaderIndex + startDelimiter - 1) == HttpConstants.CR) {
-                startDelimiter--;
-            }
-        }
-        ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, startDelimiter);
+        final int delimeterLength = delimiter.length();
+        int index = 0;
+        int lastRealPos = sao.pos;
+        byte prevByte = HttpConstants.LF;
+        boolean delimiterFound = false;
+        while (sao.pos < sao.limit) {
+            final byte nextByte = sao.bytes[sao.pos++];
+            // Check the delimiter
+            if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) {
+                index++;
+                if (delimeterLength == index) {
+                    delimiterFound = true;
+                    break;
+                }
+                continue;
+            }
+            lastRealPos = sao.pos;
+            if (nextByte == HttpConstants.LF) {
+                index = 0;
+                lastRealPos -= (prevByte == HttpConstants.CR)? 2 : 1;
+            }
+            prevByte = nextByte;
+        }
+        if (prevByte == HttpConstants.CR) {
+            lastRealPos--;
+        }
+        final int lastPosition = sao.getReadPosition(lastRealPos);
+        final ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex);
         try {
-            httpData.addContent(content, true);
+            httpData.addContent(content, delimiterFound);
         } catch (IOException e) {
             throw new ErrorDataDecoderException(e);
         }
-        lastDataPosition = 0;
-        undecodedChunk.readerIndex(startReaderIndex + startDelimiter);
-        return true;
+        undecodedChunk.readerIndex(lastPosition);
+        return delimiterFound;
     }
 
     /**

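Both files changed by this revert sit behind the HttpPostRequestDecoder front end, which picks the multipart or the standard decoder from the request's Content-Type. A rough usage sketch for a chunked upload (handler shape and names are illustrative, not taken from this commit):

    import io.netty.handler.codec.http.HttpContent;
    import io.netty.handler.codec.http.HttpRequest;
    import io.netty.handler.codec.http.LastHttpContent;
    import io.netty.handler.codec.http.multipart.Attribute;
    import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
    import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
    import io.netty.handler.codec.http.multipart.InterfaceHttpData;

    final class UploadSketch {
        private final HttpPostRequestDecoder decoder;

        UploadSketch(HttpRequest request) {
            // Chooses the multipart or the standard (url-encoded) decoder based on the Content-Type.
            decoder = new HttpPostRequestDecoder(new DefaultHttpDataFactory(false), request);
        }

        void onContent(HttpContent chunk) throws Exception {
            decoder.offer(chunk); // appends to the accumulation buffer and triggers parsing
            try {
                while (decoder.hasNext()) {
                    InterfaceHttpData data = decoder.next();
                    if (data instanceof Attribute) {
                        Attribute attribute = (Attribute) data;
                        System.out.println(attribute.getName() + " = " + attribute.getValue());
                    }
                }
            } catch (HttpPostRequestDecoder.EndOfDataDecoderException e) {
                // No more data is available at this point; nothing to do in this sketch.
            }
            if (chunk instanceof LastHttpContent) {
                decoder.destroy(); // releases the accumulation buffer and any stored data
            }
        }
    }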
File: HttpPostStandardRequestDecoder.java

@@ -300,15 +300,12 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestDecoder
                     // which is not really usable for us as we may exceed it once we add more bytes.
                     buf.alloc().buffer(buf.readableBytes()).writeBytes(buf);
         } else {
-            int readPos = undecodedChunk.readerIndex();
-            int writable = undecodedChunk.writableBytes();
-            int toWrite = buf.readableBytes();
-            if (undecodedChunk.refCnt() == 1 && writable < toWrite && readPos + writable >= toWrite) {
-                undecodedChunk.discardReadBytes();
-            }
             undecodedChunk.writeBytes(buf);
         }
         parseBody();
+        if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
+            undecodedChunk.discardReadBytes();
+        }
         return this;
     }
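The discardThreshold consulted above is configurable on the decoders via setDiscardThreshold(); the value below is only an example, not a recommendation from this commit:

    import io.netty.handler.codec.http.HttpRequest;
    import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;

    final class ThresholdExample {
        static HttpPostRequestDecoder newDecoder(HttpRequest request) {
            HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(request);
            // Example value only: compact the accumulation buffer more aggressively than the default.
            decoder.setDiscardThreshold(16 * 1024);
            return decoder;
        }

        private ThresholdExample() { }
    }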