Revert HttpPostMultipartRequestDecoder and HttpPostStandardRequestDecoder to e5951d46fc (#10989)

Motivation:

The changes introduced in 1c230405fd caused various issues, while the fix itself is not considered critical. For now the best course is to just roll back and investigate further.

Modifications:

- Revert the changes made in 1c230405fd (and later) for the post decoders.
- Ensure we give memory back to the system as soon as possible in a safe manner

Result:

Fixes https://github.com/netty/netty/issues/10973
Norman Maurer 2021-02-03 20:40:29 +01:00
parent f25b12077a
commit d7fb0f5c0b
2 changed files with 347 additions and 155 deletions

HttpPostMultipartRequestDecoder.java

@@ -128,11 +128,6 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder {
*/
private Attribute currentAttribute;
/**
* The current Data position before finding delimiter
*/
private int lastDataPosition;
private boolean destroyed;
private int discardThreshold = HttpPostRequestDecoder.DEFAULT_DISCARD_THRESHOLD;
@@ -347,15 +342,22 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder {
// which is not really usable for us as we may exceed it once we add more bytes.
buf.alloc().buffer(buf.readableBytes()).writeBytes(buf);
} else {
int readPos = undecodedChunk.readerIndex();
int writable = undecodedChunk.writableBytes();
int toWrite = buf.readableBytes();
if (undecodedChunk.refCnt() == 1 && writable < toWrite && readPos + writable >= toWrite) {
undecodedChunk.discardReadBytes();
}
undecodedChunk.writeBytes(buf);
}
parseBody();
if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
if (undecodedChunk.refCnt() == 1) {
// It's safe to call discardBytes() as we are the only owner of the buffer.
undecodedChunk.discardReadBytes();
} else {
// There seems to be multiple references of the buffer. Let's copy the data and release the buffer to
// ensure we can give back memory to the system.
ByteBuf buffer = undecodedChunk.alloc().buffer(undecodedChunk.readableBytes());
buffer.writeBytes(undecodedChunk);
undecodedChunk.release();
undecodedChunk = buffer;
}
}
return this;
}
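For context (not part of this diff), a minimal sketch of how a caller typically streams chunks into the decoder so that the compaction in offer() above actually gets a chance to reclaim memory between calls; the threshold it checks can be tuned with setDiscardThreshold(int). The class and method names below are invented for illustration, while the Netty calls are public API.

    import io.netty.handler.codec.http.HttpContent;
    import io.netty.handler.codec.http.LastHttpContent;
    import io.netty.handler.codec.http.multipart.Attribute;
    import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
    import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.EndOfDataDecoderException;
    import io.netty.handler.codec.http.multipart.InterfaceHttpData;

    import java.io.IOException;

    final class ChunkFeederSketch {
        // Feeds one HTTP chunk into the decoder and prints any attribute that is now complete.
        static void feed(HttpPostRequestDecoder decoder, HttpContent chunk) throws IOException {
            decoder.offer(chunk); // appends to undecodedChunk and may compact it, as shown above
            try {
                while (decoder.hasNext()) {
                    InterfaceHttpData data = decoder.next();
                    if (data != null && data.getHttpDataType() == InterfaceHttpData.HttpDataType.Attribute) {
                        Attribute attribute = (Attribute) data;
                        System.out.println(attribute.getName() + " = " + attribute.getValue());
                    }
                }
            } catch (EndOfDataDecoderException e) {
                // Nothing more to decode until the next chunk is offered.
            }
            if (chunk instanceof LastHttpContent) {
                decoder.destroy(); // releases undecodedChunk and the data the factory still tracks
            }
        }
    }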
@@ -973,6 +975,47 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder {
currentFieldAttributes.remove(HttpHeaderValues.FILENAME);
}
/**
* Read one line up to the CRLF or LF
*
* @return the String from one line
* @throws NotEnoughDataDecoderException
* Need more chunks and reset the {@code readerIndex} to the previous
* value
*/
private static String readLineStandard(ByteBuf undecodedChunk, Charset charset) {
int readerIndex = undecodedChunk.readerIndex();
ByteBuf line = undecodedChunk.alloc().heapBuffer(64);
try {
while (undecodedChunk.isReadable()) {
byte nextByte = undecodedChunk.readByte();
if (nextByte == HttpConstants.CR) {
// check but do not change readerIndex
nextByte = undecodedChunk.getByte(undecodedChunk.readerIndex());
if (nextByte == HttpConstants.LF) {
// force read
undecodedChunk.readByte();
return line.toString(charset);
} else {
// Write CR (not followed by LF)
line.writeByte(HttpConstants.CR);
}
} else if (nextByte == HttpConstants.LF) {
return line.toString(charset);
} else {
line.writeByte(nextByte);
}
}
} catch (IndexOutOfBoundsException e) {
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException(e);
} finally {
line.release();
}
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
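readLineStandard above is the fallback used when the buffer has no backing array. As a standalone illustration of the same CRLF/LF handling (the class and helper names are invented, and a null return stands in for NotEnoughDataDecoderException):

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;
    import io.netty.util.CharsetUtil;

    final class LineReaderSketch {
        // Reads bytes up to CRLF or LF and returns them as a String, or null if no
        // line terminator is present yet (the caller would then wait for more data).
        static String readLine(ByteBuf buf) {
            int start = buf.readerIndex();
            ByteBuf line = buf.alloc().heapBuffer(64);
            try {
                while (buf.isReadable()) {
                    byte b = buf.readByte();
                    if (b == '\n') {
                        return line.toString(CharsetUtil.UTF_8);
                    }
                    if (b == '\r') {
                        if (buf.isReadable() && buf.getByte(buf.readerIndex()) == '\n') {
                            buf.readByte(); // consume the LF as well
                            return line.toString(CharsetUtil.UTF_8);
                        }
                        line.writeByte('\r'); // a lone CR is part of the line
                    } else {
                        line.writeByte(b);
                    }
                }
                buf.readerIndex(start); // incomplete line: restore the reader index
                return null;
            } finally {
                line.release();
            }
        }

        public static void main(String[] args) {
            ByteBuf buf = Unpooled.copiedBuffer("first\r\nsecond\n", CharsetUtil.UTF_8);
            System.out.println(readLine(buf)); // first
            System.out.println(readLine(buf)); // second
            buf.release();
        }
    }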
/**
* Read one line up to the CRLF or LF
*
@@ -982,23 +1025,44 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder {
* value
*/
private static String readLine(ByteBuf undecodedChunk, Charset charset) {
final int readerIndex = undecodedChunk.readerIndex();
int posLf = undecodedChunk.bytesBefore(HttpConstants.LF);
if (posLf == -1) {
throw new NotEnoughDataDecoderException();
if (!undecodedChunk.hasArray()) {
return readLineStandard(undecodedChunk, charset);
}
boolean crFound =
undecodedChunk.getByte(readerIndex + posLf - 1) == HttpConstants.CR;
if (crFound) {
posLf--;
SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
int readerIndex = undecodedChunk.readerIndex();
ByteBuf line = undecodedChunk.alloc().heapBuffer(64);
try {
while (sao.pos < sao.limit) {
byte nextByte = sao.bytes[sao.pos++];
if (nextByte == HttpConstants.CR) {
if (sao.pos < sao.limit) {
nextByte = sao.bytes[sao.pos++];
if (nextByte == HttpConstants.LF) {
sao.setReadPosition(0);
return line.toString(charset);
} else {
// Write CR (not followed by LF)
sao.pos--;
line.writeByte(HttpConstants.CR);
}
} else {
line.writeByte(nextByte);
}
} else if (nextByte == HttpConstants.LF) {
sao.setReadPosition(0);
return line.toString(charset);
} else {
line.writeByte(nextByte);
}
}
} catch (IndexOutOfBoundsException e) {
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException(e);
} finally {
line.release();
}
CharSequence line = undecodedChunk.readCharSequence(posLf, charset);
if (crFound) {
undecodedChunk.skipBytes(2);
} else {
undecodedChunk.skipBytes(1);
}
return line.toString();
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
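The restored readLine takes a fast path over the backing array when hasArray() is true and delegates to readLineStandard otherwise. A minimal sketch of that dual-path pattern, assuming ASCII input; the class and helper names are illustrative:

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;
    import io.netty.util.CharsetUtil;

    final class ArrayFastPathSketch {
        // Counts readable bytes before the first LF, preferring the backing array
        // when one is available and falling back to the generic ByteBuf API otherwise.
        static int bytesBeforeLf(ByteBuf buf) {
            if (buf.hasArray()) {
                byte[] array = buf.array();
                // Translate the reader index into an absolute array position.
                int start = buf.arrayOffset() + buf.readerIndex();
                int end = start + buf.readableBytes();
                for (int i = start; i < end; i++) {
                    if (array[i] == '\n') {
                        return i - start;
                    }
                }
                return -1;
            }
            // Generic path: works for direct and composite buffers too.
            return buf.bytesBefore((byte) '\n');
        }

        public static void main(String[] args) {
            ByteBuf heap = Unpooled.copiedBuffer("abc\ndef", CharsetUtil.UTF_8);
            ByteBuf direct = Unpooled.directBuffer();
            direct.writeBytes("abc\ndef".getBytes(CharsetUtil.UTF_8));
            System.out.println(bytesBeforeLf(heap));   // 3
            System.out.println(bytesBeforeLf(direct)); // 3
            heap.release();
            direct.release();
        }
    }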
/**
@@ -1016,73 +1080,77 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder {
* Need more chunks and reset the {@code readerIndex} to the previous
* value
*/
private String readDelimiter(ByteBuf undecodedChunk, String delimiter) {
final int readerIndex = undecodedChunk.readerIndex();
private static String readDelimiterStandard(ByteBuf undecodedChunk, String delimiter) {
int readerIndex = undecodedChunk.readerIndex();
try {
final int len = delimiter.length();
if (len + 2 > undecodedChunk.readableBytes()) {
// Not able to check if "--" is present
throw new NotEnoughDataDecoderException();
}
int newPositionDelimiter = findDelimiter(undecodedChunk, delimiter, 0);
if (newPositionDelimiter != 0) {
// Delimiter not fully found
throw new NotEnoughDataDecoderException();
}
byte nextByte = undecodedChunk.getByte(readerIndex + len);
// first check for opening delimiter
if (nextByte == HttpConstants.CR) {
nextByte = undecodedChunk.getByte(readerIndex + len + 1);
if (nextByte == HttpConstants.LF) {
CharSequence line = undecodedChunk.readCharSequence(len, charset);
undecodedChunk.skipBytes(2);
return line.toString();
StringBuilder sb = new StringBuilder(64);
int delimiterPos = 0;
int len = delimiter.length();
while (undecodedChunk.isReadable() && delimiterPos < len) {
byte nextByte = undecodedChunk.readByte();
if (nextByte == delimiter.charAt(delimiterPos)) {
delimiterPos++;
sb.append((char) nextByte);
} else {
// error since CR must be followed by LF
// delimiter not found so break here !
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
} else if (nextByte == HttpConstants.LF) {
CharSequence line = undecodedChunk.readCharSequence(len, charset);
undecodedChunk.skipBytes(1);
return line.toString();
} else if (nextByte == '-') {
// second check for closing delimiter
nextByte = undecodedChunk.getByte(readerIndex + len + 1);
if (nextByte == '-') {
CharSequence line = undecodedChunk.readCharSequence(len + 2, charset);
// now try to find if CRLF or LF there
if (undecodedChunk.isReadable()) {
nextByte = undecodedChunk.readByte();
if (nextByte == HttpConstants.CR) {
nextByte = undecodedChunk.readByte();
if (nextByte == HttpConstants.LF) {
return line.toString();
} else {
// error CR without LF
// delimiter not found so break here !
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
} else if (nextByte == HttpConstants.LF) {
return line.toString();
} else {
// No CRLF but ok however (Adobe Flash uploader)
// minus 1 since we read one char ahead but
// should not
undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
return line.toString();
}
}
// Now check if either opening delimiter or closing delimiter
if (undecodedChunk.isReadable()) {
byte nextByte = undecodedChunk.readByte();
// first check for opening delimiter
if (nextByte == HttpConstants.CR) {
nextByte = undecodedChunk.readByte();
if (nextByte == HttpConstants.LF) {
return sb.toString();
} else {
// error since CR must be followed by LF
// delimiter not found so break here !
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
// FIXME what do we do here?
// either considering it is fine, either waiting for
// more data to come?
// lets try considering it is fine...
return line.toString();
} else if (nextByte == HttpConstants.LF) {
return sb.toString();
} else if (nextByte == '-') {
sb.append('-');
// second check for closing delimiter
nextByte = undecodedChunk.readByte();
if (nextByte == '-') {
sb.append('-');
// now try to find if CRLF or LF there
if (undecodedChunk.isReadable()) {
nextByte = undecodedChunk.readByte();
if (nextByte == HttpConstants.CR) {
nextByte = undecodedChunk.readByte();
if (nextByte == HttpConstants.LF) {
return sb.toString();
} else {
// error CR without LF
// delimiter not found so break here !
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
} else if (nextByte == HttpConstants.LF) {
return sb.toString();
} else {
// No CRLF but ok however (Adobe Flash uploader)
// minus 1 since we read one char ahead but
// should not
undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
return sb.toString();
}
}
// FIXME what do we do here?
// either considering it is fine, either waiting for
// more data to come?
// lets try considering it is fine...
return sb.toString();
}
// only one '-' => not enough
// whatever now => error since incomplete
}
// only one '-' => not enough
// whatever now => error since incomplete
}
} catch (IndexOutOfBoundsException e) {
undecodedChunk.readerIndex(readerIndex);
@@ -1093,93 +1161,220 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder {
}
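readDelimiterStandard consumes the delimiter byte by byte and, if the chunk ends before the match completes, restores the reader index and signals NotEnoughDataDecoderException so the caller can retry once more data has been offered. A simplified sketch of that reset-on-underflow pattern, using a boolean return instead of the exception (names are invented):

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;
    import io.netty.util.CharsetUtil;

    final class DelimiterMatchSketch {
        // Tries to consume `delimiter` at the current reader index. On a mismatch or
        // when the buffer ends mid-delimiter, the reader index is restored so the
        // caller can retry after offering more data.
        static boolean tryConsumeDelimiter(ByteBuf buf, String delimiter) {
            int start = buf.readerIndex();
            for (int i = 0; i < delimiter.length(); i++) {
                if (!buf.isReadable() || buf.readByte() != delimiter.charAt(i)) {
                    buf.readerIndex(start);
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            ByteBuf buf = Unpooled.copiedBuffer("--boundAB", CharsetUtil.UTF_8);
            System.out.println(tryConsumeDelimiter(buf, "--boundXY")); // false, index restored
            System.out.println(tryConsumeDelimiter(buf, "--bound"));   // true, delimiter consumed
            buf.release();
        }
    }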
/**
* @param undecodedChunk the source where the delimiter is to be found
* @param delimiter the string to find out
* @param offset the offset from readerIndex within the undecodedChunk to
* start from to find out the delimiter
* Read one line up to --delimiter or --delimiter-- and if existing the CRLF
* or LF. Note that CRLF or LF are mandatory for opening delimiter
* (--delimiter) but not for closing delimiter (--delimiter--) since some
clients do not include CRLF in this case.
*
* @return a number >= 0 if found, else new offset with negative value
* (to inverse), both from readerIndex
* @param delimiter
* of the form --string, such that '--' is already included
* @return the String from one line as the delimiter searched (opening or
* closing)
* @throws NotEnoughDataDecoderException
* Need more chunks while relative position with readerIndex is 0
Need more chunks and reset the readerIndex to the previous
* value
*/
private static int findDelimiter(ByteBuf undecodedChunk, String delimiter, int offset) {
final int startReaderIndex = undecodedChunk.readerIndex();
final int delimeterLength = delimiter.length();
final int toRead = undecodedChunk.readableBytes();
int newOffset = offset;
boolean delimiterNotFound = true;
while (delimiterNotFound && newOffset + delimeterLength <= toRead) {
int posFirstChar = undecodedChunk
.bytesBefore(startReaderIndex + newOffset, toRead - newOffset,
(byte) delimiter.codePointAt(0));
if (posFirstChar == -1) {
newOffset = toRead;
return -newOffset;
}
newOffset = posFirstChar + newOffset;
if (newOffset + delimeterLength > toRead) {
return -newOffset;
}
// assume will found it
delimiterNotFound = false;
for (int index = 1; index < delimeterLength; index++) {
if (undecodedChunk.getByte(startReaderIndex + newOffset + index) != delimiter.codePointAt(index)) {
// ignore first found offset and redo search from next char
newOffset++;
delimiterNotFound = true;
break;
private static String readDelimiter(ByteBuf undecodedChunk, String delimiter) {
if (!undecodedChunk.hasArray()) {
return readDelimiterStandard(undecodedChunk, delimiter);
}
SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
int readerIndex = undecodedChunk.readerIndex();
int delimiterPos = 0;
int len = delimiter.length();
try {
StringBuilder sb = new StringBuilder(64);
// check conformity with delimiter
while (sao.pos < sao.limit && delimiterPos < len) {
byte nextByte = sao.bytes[sao.pos++];
if (nextByte == delimiter.charAt(delimiterPos)) {
delimiterPos++;
sb.append((char) nextByte);
} else {
// delimiter not found so break here !
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
}
}
if (delimiterNotFound || newOffset + delimeterLength > toRead) {
if (newOffset == 0) {
throw new NotEnoughDataDecoderException();
// Now check if either opening delimiter or closing delimiter
if (sao.pos < sao.limit) {
byte nextByte = sao.bytes[sao.pos++];
if (nextByte == HttpConstants.CR) {
// first check for opening delimiter
if (sao.pos < sao.limit) {
nextByte = sao.bytes[sao.pos++];
if (nextByte == HttpConstants.LF) {
sao.setReadPosition(0);
return sb.toString();
} else {
// error CR without LF
// delimiter not found so break here !
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
} else {
// error since CR must be followed by LF
// delimiter not found so break here !
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
} else if (nextByte == HttpConstants.LF) {
// same first check for opening delimiter where LF used with
// no CR
sao.setReadPosition(0);
return sb.toString();
} else if (nextByte == '-') {
sb.append('-');
// second check for closing delimiter
if (sao.pos < sao.limit) {
nextByte = sao.bytes[sao.pos++];
if (nextByte == '-') {
sb.append('-');
// now try to find if CRLF or LF there
if (sao.pos < sao.limit) {
nextByte = sao.bytes[sao.pos++];
if (nextByte == HttpConstants.CR) {
if (sao.pos < sao.limit) {
nextByte = sao.bytes[sao.pos++];
if (nextByte == HttpConstants.LF) {
sao.setReadPosition(0);
return sb.toString();
} else {
// error CR without LF
// delimiter not found so break here !
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
} else {
// error CR without LF
// delimiter not found so break here !
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
} else if (nextByte == HttpConstants.LF) {
sao.setReadPosition(0);
return sb.toString();
} else {
// No CRLF but ok however (Adobe Flash
// uploader)
// minus 1 since we read one char ahead but
// should not
sao.setReadPosition(1);
return sb.toString();
}
}
// FIXME what do we do here?
// either considering it is fine, either waiting for
// more data to come?
// lets try considering it is fine...
sao.setReadPosition(0);
return sb.toString();
}
// whatever now => error since incomplete
// only one '-' => not enough or whatever not enough
// element
}
}
}
return -newOffset;
} catch (IndexOutOfBoundsException e) {
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException(e);
}
return newOffset;
undecodedChunk.readerIndex(readerIndex);
throw new NotEnoughDataDecoderException();
}
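For orientation, the framing these two readers recognize, per the javadoc above: the delimiter passed in already carries the leading "--", an opening delimiter is followed by CRLF (or bare LF), and a closing delimiter carries a trailing "--". A standalone illustration, not decoder code:

    final class MultipartDelimiters {
        // Given the boundary parameter from a Content-Type such as
        // "multipart/form-data; boundary=bound", these are the markers searched for.
        static String openingDelimiter(String boundary) {
            return "--" + boundary;          // starts every part, followed by CRLF (or bare LF)
        }

        static String closingDelimiter(String boundary) {
            return "--" + boundary + "--";   // ends the body; some clients omit the trailing CRLF
        }

        public static void main(String[] args) {
            System.out.println(openingDelimiter("bound"));  // --bound
            System.out.println(closingDelimiter("bound"));  // --bound--
        }
    }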
/**
* Load the field value or file data from a Multipart request
*
* @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks
* @throws ErrorDataDecoderException
*/
private static boolean loadDataMultipartStandard(ByteBuf undecodedChunk, String delimiter, HttpData httpData) {
final int startReaderIndex = undecodedChunk.readerIndex();
final int delimeterLength = delimiter.length();
int index = 0;
int lastPosition = startReaderIndex;
byte prevByte = HttpConstants.LF;
boolean delimiterFound = false;
while (undecodedChunk.isReadable()) {
final byte nextByte = undecodedChunk.readByte();
// Check the delimiter
if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) {
index++;
if (delimeterLength == index) {
delimiterFound = true;
break;
}
continue;
}
lastPosition = undecodedChunk.readerIndex();
if (nextByte == HttpConstants.LF) {
index = 0;
lastPosition -= (prevByte == HttpConstants.CR)? 2 : 1;
}
prevByte = nextByte;
}
if (prevByte == HttpConstants.CR) {
lastPosition--;
}
ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex);
try {
httpData.addContent(content, delimiterFound);
} catch (IOException e) {
throw new ErrorDataDecoderException(e);
}
undecodedChunk.readerIndex(lastPosition);
return delimiterFound;
}
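Note how lastPosition is walked back by one or two bytes so the LF or CRLF that introduces the delimiter is treated as framing rather than field data. A hedged, string-based sketch of that boundary arithmetic (helper names are invented, ASCII assumed):

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;
    import io.netty.util.CharsetUtil;

    final class ContentEndSketch {
        // Returns the exclusive end index of the data preceding `delimiter`, excluding
        // the CRLF/LF that introduces the delimiter, or -1 if the delimiter is absent.
        static int contentEnd(ByteBuf buf, String delimiter) {
            String s = buf.toString(CharsetUtil.US_ASCII);
            int pos = s.indexOf(delimiter);
            if (pos < 0) {
                return -1;
            }
            int end = buf.readerIndex() + pos;
            if (pos >= 1 && s.charAt(pos - 1) == '\n') {
                end--;
                if (pos >= 2 && s.charAt(pos - 2) == '\r') {
                    end--;
                }
            }
            return end;
        }

        public static void main(String[] args) {
            ByteBuf buf = Unpooled.copiedBuffer("value\r\n--bound\r\n", CharsetUtil.US_ASCII);
            int end = contentEnd(buf, "--bound");
            // Slice only the field data; retainedSlice keeps `buf` usable afterwards.
            ByteBuf data = buf.retainedSlice(buf.readerIndex(), end - buf.readerIndex());
            System.out.println(data.toString(CharsetUtil.US_ASCII)); // value
            data.release();
            buf.release();
        }
    }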
/**
* Load the field value from a Multipart request
*
* @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks
*
* @throws ErrorDataDecoderException
*/
private boolean loadDataMultipart(ByteBuf undecodedChunk, String delimiter,
HttpData httpData) {
private static boolean loadDataMultipart(ByteBuf undecodedChunk, String delimiter, HttpData httpData) {
if (!undecodedChunk.hasArray()) {
return loadDataMultipartStandard(undecodedChunk, delimiter, httpData);
}
final SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
final int startReaderIndex = undecodedChunk.readerIndex();
int newOffset;
try {
newOffset = findDelimiter(undecodedChunk, delimiter, lastDataPosition);
if (newOffset < 0) {
// delimiter not found
lastDataPosition = -newOffset;
return false;
final int delimeterLength = delimiter.length();
int index = 0;
int lastRealPos = sao.pos;
byte prevByte = HttpConstants.LF;
boolean delimiterFound = false;
while (sao.pos < sao.limit) {
final byte nextByte = sao.bytes[sao.pos++];
// Check the delimiter
if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) {
index++;
if (delimeterLength == index) {
delimiterFound = true;
break;
}
continue;
}
} catch (NotEnoughDataDecoderException e) {
// Not enough data and no change to lastDataPosition
return false;
}
// found delimiter but still need to check if CRLF before
int startDelimiter = newOffset;
if (undecodedChunk.getByte(startReaderIndex + startDelimiter - 1) == HttpConstants.LF) {
startDelimiter--;
if (undecodedChunk.getByte(startReaderIndex + startDelimiter - 1) == HttpConstants.CR) {
startDelimiter--;
lastRealPos = sao.pos;
if (nextByte == HttpConstants.LF) {
index = 0;
lastRealPos -= (prevByte == HttpConstants.CR)? 2 : 1;
}
prevByte = nextByte;
}
ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, startDelimiter);
if (prevByte == HttpConstants.CR) {
lastRealPos--;
}
final int lastPosition = sao.getReadPosition(lastRealPos);
final ByteBuf content = undecodedChunk.retainedSlice(startReaderIndex, lastPosition - startReaderIndex);
try {
httpData.addContent(content, true);
httpData.addContent(content, delimiterFound);
} catch (IOException e) {
throw new ErrorDataDecoderException(e);
}
lastDataPosition = 0;
undecodedChunk.readerIndex(startReaderIndex + startDelimiter);
return true;
undecodedChunk.readerIndex(lastPosition);
return delimiterFound;
}
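SeekAheadOptimize is an internal helper that exposes a heap buffer's backing array; the essential detail is that an array position maps back to a ByteBuf index by subtracting arrayOffset(), which is roughly what its read-position bookkeeping does. A minimal sketch of that translation under the hasArray() assumption (class and method names are invented):

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;
    import io.netty.util.CharsetUtil;

    final class SeekAheadSketch {
        // Scans the backing array for `target` and, if found, advances the buffer's
        // reader index past it by translating the array position back to a ByteBuf index.
        static boolean skipPast(ByteBuf buf, byte target) {
            if (!buf.hasArray()) {
                return false; // a real implementation would fall back to the ByteBuf API
            }
            byte[] array = buf.array();
            int pos = buf.arrayOffset() + buf.readerIndex();
            int limit = buf.arrayOffset() + buf.writerIndex();
            while (pos < limit) {
                if (array[pos++] == target) {
                    // Array position -> ByteBuf index.
                    buf.readerIndex(pos - buf.arrayOffset());
                    return true;
                }
            }
            return false;
        }

        public static void main(String[] args) {
            ByteBuf buf = Unpooled.copiedBuffer("header\n body", CharsetUtil.UTF_8);
            skipPast(buf, (byte) '\n');
            System.out.println(buf.toString(CharsetUtil.UTF_8)); // " body"
            buf.release();
        }
    }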
/**

HttpPostStandardRequestDecoder.java

@@ -300,15 +300,12 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestDecoder {
// which is not really usable for us as we may exceed it once we add more bytes.
buf.alloc().buffer(buf.readableBytes()).writeBytes(buf);
} else {
int readPos = undecodedChunk.readerIndex();
int writable = undecodedChunk.writableBytes();
int toWrite = buf.readableBytes();
if (undecodedChunk.refCnt() == 1 && writable < toWrite && readPos + writable >= toWrite) {
undecodedChunk.discardReadBytes();
}
undecodedChunk.writeBytes(buf);
}
parseBody();
if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
undecodedChunk.discardReadBytes();
}
return this;
}
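For completeness, a usage-level sketch of the standard (application/x-www-form-urlencoded) path that this second file implements; the form fields and class name are made up, while the Netty calls are public API:

    import io.netty.buffer.Unpooled;
    import io.netty.handler.codec.http.DefaultFullHttpRequest;
    import io.netty.handler.codec.http.HttpHeaderNames;
    import io.netty.handler.codec.http.HttpHeaderValues;
    import io.netty.handler.codec.http.HttpMethod;
    import io.netty.handler.codec.http.HttpVersion;
    import io.netty.handler.codec.http.multipart.Attribute;
    import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
    import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
    import io.netty.handler.codec.http.multipart.InterfaceHttpData;
    import io.netty.util.CharsetUtil;

    import java.io.IOException;

    final class UrlEncodedDecodeSketch {
        public static void main(String[] args) throws IOException {
            DefaultFullHttpRequest request = new DefaultFullHttpRequest(
                    HttpVersion.HTTP_1_1, HttpMethod.POST, "/form",
                    Unpooled.copiedBuffer("foo=bar&count=2", CharsetUtil.UTF_8));
            request.headers().set(HttpHeaderNames.CONTENT_TYPE,
                    HttpHeaderValues.APPLICATION_X_WWW_FORM_URLENCODED);

            // For a non-multipart content type this routes to HttpPostStandardRequestDecoder.
            HttpPostRequestDecoder decoder =
                    new HttpPostRequestDecoder(new DefaultHttpDataFactory(false), request);
            try {
                for (InterfaceHttpData data : decoder.getBodyHttpDatas()) {
                    if (data.getHttpDataType() == InterfaceHttpData.HttpDataType.Attribute) {
                        Attribute attribute = (Attribute) data;
                        System.out.println(attribute.getName() + " = " + attribute.getValue());
                    }
                }
            } finally {
                decoder.destroy(); // releases the undecodedChunk and any retained data
                request.release();
            }
        }
    }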