Reuse the same allocator as used by the ByteBuf that is used during… (#10226)
Motivation:

We should not use Unpooled to allocate buffers if possible, so that we can make use of pooling etc.

Modifications:

- Only allocate a buffer if really needed
- Use the ByteBufAllocator of the offered ByteBuf
- Do not use buffer.copy(); explicitly allocate a buffer and then copy into it, so we do not hit the limit of maxCapacity()

Result:

Improved allocations
parent 23e0b878af
commit a62fcd9d50
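The modifications boil down to one allocation pattern, visible in both decoders below: build the accumulation buffer from the allocator of the offered ByteBuf instead of Unpooled, and copy explicitly rather than via copy(), because a copy inherits the source's maxCapacity and may refuse to grow once more chunks are appended. A minimal, self-contained sketch of that difference (the class, method and sample data are illustrative only, not part of the commit):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public final class AllocatorVsCopyExample {

    // Illustrative helper, not from the commit: allocate from the offered buffer's own
    // allocator (so pooled buffers stay pooled) and copy explicitly, which leaves the
    // new buffer with the allocator's default, effectively unbounded maxCapacity.
    static ByteBuf startAccumulating(ByteBuf first) {
        return first.alloc().buffer(first.readableBytes()).writeBytes(first);
    }

    public static void main(String[] args) {
        // A wrapped buffer has maxCapacity == capacity, and copy() inherits that limit.
        ByteBuf chunk = Unpooled.wrappedBuffer(new byte[] { 1, 2, 3, 4 });

        ByteBuf copied = chunk.copy();                               // maxCapacity stays 4
        ByteBuf accumulated = startAccumulating(chunk.duplicate()); // maxCapacity Integer.MAX_VALUE

        System.out.println("copy() maxCapacity         : " + copied.maxCapacity());
        System.out.println("explicit alloc maxCapacity : " + accumulated.maxCapacity());
        // Appending further chunks to 'copied' would fail with IndexOutOfBoundsException once
        // 4 bytes are exceeded; 'accumulated' can keep growing via writeBytes(...).

        copied.release();
        accumulated.release();
        chunk.release();
    }
}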
HttpPostMultipartRequestDecoder.java

@@ -41,13 +41,13 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
-import static io.netty.buffer.Unpooled.buffer;
 import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.BINARY_STRING;
 import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.BIT_7_STRING;
 import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.BIT_8_STRING;
 import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
 import static java.util.Objects.requireNonNull;
 
 
 /**
  * This decoder will decode Body and can handle POST BODY.
  *
@@ -188,7 +188,6 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
             // See #1089
             offer((HttpContent) request);
         } else {
-            undecodedChunk = buffer();
             parseBody();
         }
     }
@@ -331,14 +330,17 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
 
         ByteBuf buf = content.content();
         if (undecodedChunk == null) {
-            undecodedChunk = isLastChunk
-                    // Take a slice instead of copying when the first chunk is also the last
-                    // as undecodedChunk.writeBytes will never be called.
-                    ? buf.retainedSlice()
-                    // Maybe we should better not copy here for performance reasons but this will need
-                    // more care by the caller to release the content in a correct manner later
-                    // So maybe something to optimize on a later stage
-                    : buf.copy();
+            undecodedChunk = isLastChunk ?
+                    // Take a slice instead of copying when the first chunk is also the last
+                    // as undecodedChunk.writeBytes will never be called.
+                    buf.retainedSlice() :
+                    // Maybe we should better not copy here for performance reasons but this will need
+                    // more care by the caller to release the content in a correct manner later
+                    // So maybe something to optimize on a later stage
+                    //
+                    // We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity
+                    // which is not really usable for us as we may exceed it once we add more bytes.
+                    buf.alloc().buffer(buf.readableBytes()).writeBytes(buf);
         } else {
             undecodedChunk.writeBytes(buf);
         }
@@ -973,7 +975,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
      */
     private static String readLineStandard(ByteBuf undecodedChunk, Charset charset) {
         int readerIndex = undecodedChunk.readerIndex();
-        ByteBuf line = buffer(64);
+        ByteBuf line = undecodedChunk.alloc().heapBuffer(64);
         try {
             while (undecodedChunk.isReadable()) {
                 byte nextByte = undecodedChunk.readByte();
@@ -1018,7 +1020,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
         }
         SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
         int readerIndex = undecodedChunk.readerIndex();
-        ByteBuf line = buffer(64);
+        ByteBuf line = undecodedChunk.alloc().heapBuffer(64);
         try {
             while (sao.pos < sao.limit) {
                 byte nextByte = sao.bytes[sao.pos++];
HttpPostStandardRequestDecoder.java

@@ -16,6 +16,7 @@
 package io.netty.handler.codec.http.multipart;
 
 import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
 import io.netty.handler.codec.http.HttpConstants;
 import io.netty.handler.codec.http.HttpContent;
 import io.netty.handler.codec.http.HttpRequest;
@@ -27,7 +28,6 @@ import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.ErrorDataDec
 import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.MultiPartStatus;
 import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.NotEnoughDataDecoderException;
 import io.netty.util.ByteProcessor;
 import io.netty.util.CharsetUtil;
 import io.netty.util.internal.PlatformDependent;
 import io.netty.util.internal.StringUtil;
 
@@ -38,10 +38,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
-import static io.netty.buffer.Unpooled.*;
 import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
 import static java.util.Objects.requireNonNull;
 
 
 /**
  * This decoder will decode Body and can handle POST BODY.
  *
@@ -159,7 +159,6 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
                 // See #1089
                 offer((HttpContent) request);
             } else {
-                undecodedChunk = buffer();
                 parseBody();
             }
         } catch (Throwable e) {
@@ -290,14 +289,17 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
 
         ByteBuf buf = content.content();
         if (undecodedChunk == null) {
-            undecodedChunk = isLastChunk
-                    // Take a slice instead of copying when the first chunk is also the last
-                    // as undecodedChunk.writeBytes will never be called.
-                    ? buf.retainedSlice()
-                    // Maybe we should better not copy here for performance reasons but this will need
-                    // more care by the caller to release the content in a correct manner later
-                    // So maybe something to optimize on a later stage
-                    : buf.copy();
+            undecodedChunk = isLastChunk ?
+                    // Take a slice instead of copying when the first chunk is also the last
+                    // as undecodedChunk.writeBytes will never be called.
+                    buf.retainedSlice() :
+                    // Maybe we should better not copy here for performance reasons but this will need
+                    // more care by the caller to release the content in a correct manner later
+                    // So maybe something to optimize on a later stage.
+                    //
+                    // We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity
+                    // which is not really usable for us as we may exceed it once we add more bytes.
+                    buf.alloc().buffer(buf.readableBytes()).writeBytes(buf);
         } else {
             undecodedChunk.writeBytes(buf);
         }
@@ -477,7 +479,7 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
                 if (ampersandpos > firstpos) {
                     setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
                 } else if (!currentAttribute.isCompleted()) {
-                    setFinalBuffer(EMPTY_BUFFER);
+                    setFinalBuffer(Unpooled.EMPTY_BUFFER);
                 }
                 firstpos = currentpos;
                 currentStatus = MultiPartStatus.EPILOGUE;
@@ -512,6 +514,9 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
      * errors
      */
     private void parseBodyAttributes() {
+        if (undecodedChunk == null) {
+            return;
+        }
         if (!undecodedChunk.hasArray()) {
             parseBodyAttributesStandard();
             return;
@@ -603,7 +608,7 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
                 if (ampersandpos > firstpos) {
                     setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
                 } else if (!currentAttribute.isCompleted()) {
-                    setFinalBuffer(EMPTY_BUFFER);
+                    setFinalBuffer(Unpooled.EMPTY_BUFFER);
                 }
                 firstpos = currentpos;
                 currentStatus = MultiPartStatus.EPILOGUE;