Backport 4.1 to 4.0 on HttpPostRequestDecoder

Motivation:
4.0 was not updated at the same time as 4.1, even though the differences
were limited.
This backport also includes the fix for the "=" character in the boundary.

Issue #3004 shows that the "=" character was not supported as it should be
by the HttpPostRequestDecoder in the form-data boundary.
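For example (the boundary value comes from the updated HttpPostRequestDecoderTest below), such a request carries a header like:

Content-Type: multipart/form-data; boundary==dLV9Wyq26L_-JQxk6ferf-RT153LhOO

Splitting the "boundary==dLV9Wyq26L_-JQxk6ferf-RT153LhOO" parameter on every '=' (as the existing two-argument StringUtil.split would) yields { "boundary", "", "dLV9Wyq26L_-JQxk6ferf-RT153LhOO" }, silently losing the leading '=' of the boundary value, so the advertised boundary no longer matches the delimiter lines in the body.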

Modifications:
Backport from 4.1 to 4.0 while respecting the existing interfaces.

Add 2 methods to StringUtil:
- split with a maxParts argument: splits a String into at most maxParts parts
(so that extra '=' characters do not cause unwanted additional splits); see
the usage sketch after this section
- substringAfter: returns the part of a String after the first occurrence of
a delimiter (the leading part is not needed)
Use those methods in HttpPostRequestDecoder.
Change HttpPostRequestDecoderTest to check decoding with a boundary
beginning with "=".
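A minimal sketch of the two new helpers on the problematic parameter (an illustration only, not the actual HttpPostRequestDecoder change, which is not shown in this excerpt; it assumes io.netty.util.internal.StringUtil is imported, and the boundary value is the one from the updated test):

String parameter = "boundary==dLV9Wyq26L_-JQxk6ferf-RT153LhOO";

// Limiting the split to two parts keeps the '=' that belongs to the boundary value.
String[] parts = StringUtil.split(parameter, '=', 2);
// parts = { "boundary", "=dLV9Wyq26L_-JQxk6ferf-RT153LhOO" }

// When only the value is needed, substringAfter skips the key entirely
// (it returns everything after the first '=', or null if there is none).
String value = StringUtil.substringAfter(parameter, '=');
// value = "=dLV9Wyq26L_-JQxk6ferf-RT153LhOO"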

Results:
Backport done (fixes issue #2886).
Issue #3004 is fixed as well.
The fix improves stability and resolves the related issues.
Frederic Bregier 2014-10-15 22:49:49 +02:00 committed by Trustin Lee
parent 6150de5eb2
commit 2fc421b2ba
7 changed files with 2860 additions and 1966 deletions

HttpPostStandardRequestDecoder.java (new file)

@@ -0,0 +1,731 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http.multipart;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.http.HttpConstants;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.QueryStringDecoder;
import io.netty.handler.codec.http.multipart.HttpPostBodyUtil.SeekAheadNoBackArrayException;
import io.netty.handler.codec.http.multipart.HttpPostBodyUtil.SeekAheadOptimize;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.EndOfDataDecoderException;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.ErrorDataDecoderException;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.MultiPartStatus;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.NotEnoughDataDecoderException;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import static io.netty.buffer.Unpooled.*;
/**
* This decoder will decode Body and can handle POST BODY.
*
* You <strong>MUST</strong> call {@link #destroy()} after completion to release all resources.
*
*/
public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestDecoder {
/**
* Factory used to create InterfaceHttpData
*/
private final HttpDataFactory factory;
/**
* Request to decode
*/
private final HttpRequest request;
/**
* Default charset to use
*/
private final Charset charset;
/**
* Whether the last chunk has already been received
*/
private boolean isLastChunk;
/**
* HttpDatas from Body
*/
private final List<InterfaceHttpData> bodyListHttpData = new ArrayList<InterfaceHttpData>();
/**
* HttpDatas as Map from Body
*/
private final Map<String, List<InterfaceHttpData>> bodyMapHttpData = new TreeMap<String, List<InterfaceHttpData>>(
CaseIgnoringComparator.INSTANCE);
/**
* The current channelBuffer
*/
private ByteBuf undecodedChunk;
/**
* Body HttpDatas current position
*/
private int bodyListHttpDataRank;
/**
* Current status
*/
private MultiPartStatus currentStatus = MultiPartStatus.NOTSTARTED;
/**
* The Attribute that is currently being decoded
*/
private Attribute currentAttribute;
private boolean destroyed;
private int discardThreshold = HttpPostRequestDecoder.DEFAULT_DISCARD_THRESHOLD;
/**
*
* @param request
* the request to decode
* @throws NullPointerException
* for request
* @throws ErrorDataDecoderException
* if the default charset was wrong when decoding or other
* errors
*/
public HttpPostStandardRequestDecoder(HttpRequest request) {
this(new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE), request, HttpConstants.DEFAULT_CHARSET);
}
/**
*
* @param factory
* the factory used to create InterfaceHttpData
* @param request
* the request to decode
* @throws NullPointerException
* for request or factory
* @throws ErrorDataDecoderException
* if the default charset was wrong when decoding or other
* errors
*/
public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest request) {
this(factory, request, HttpConstants.DEFAULT_CHARSET);
}
/**
*
* @param factory
* the factory used to create InterfaceHttpData
* @param request
* the request to decode
* @param charset
* the charset to use as default
* @throws NullPointerException
* for request or charset or factory
* @throws ErrorDataDecoderException
* if the default charset was wrong when decoding or other
* errors
*/
public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset) {
if (factory == null) {
throw new NullPointerException("factory");
}
if (request == null) {
throw new NullPointerException("request");
}
if (charset == null) {
throw new NullPointerException("charset");
}
this.request = request;
this.charset = charset;
this.factory = factory;
if (request instanceof HttpContent) {
// Offer automatically if the given request is also of type HttpContent
// See #1089
offer((HttpContent) request);
} else {
undecodedChunk = buffer();
parseBody();
}
}
private void checkDestroyed() {
if (destroyed) {
throw new IllegalStateException(HttpPostStandardRequestDecoder.class.getSimpleName()
+ " was destroyed already");
}
}
/**
* True if this request is a Multipart request
*
* @return True if this request is a Multipart request
*/
@Override
public boolean isMultipart() {
checkDestroyed();
return false;
}
/**
* Set the amount of bytes after which read bytes in the buffer should be discarded.
* Setting this lower gives lower memory usage but with the overhead of more memory copies.
* Use {@code 0} to disable it.
*/
@Override
public void setDiscardThreshold(int discardThreshold) {
if (discardThreshold < 0) {
throw new IllegalArgumentException("discardThreshold must be >= 0");
}
this.discardThreshold = discardThreshold;
}
/**
* Return the threshold in bytes after which read data in the buffer should be discarded.
*/
@Override
public int getDiscardThreshold() {
return discardThreshold;
}
/**
* This method returns a List of all HttpDatas from body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return the list of HttpDatas from the Body part for POST method
* @throws NotEnoughDataDecoderException
* Need more chunks
*/
@Override
public List<InterfaceHttpData> getBodyHttpDatas() {
checkDestroyed();
if (!isLastChunk) {
throw new NotEnoughDataDecoderException();
}
return bodyListHttpData;
}
/**
* This method returns a List of all HttpDatas with the given name from
* body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return All Body HttpDatas with the given name (ignore case)
* @throws NotEnoughDataDecoderException
* need more chunks
*/
@Override
public List<InterfaceHttpData> getBodyHttpDatas(String name) {
checkDestroyed();
if (!isLastChunk) {
throw new NotEnoughDataDecoderException();
}
return bodyMapHttpData.get(name);
}
/**
* This method returns the first InterfaceHttpData with the given name from
* body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return The first Body InterfaceHttpData with the given name (ignore
* case)
* @throws NotEnoughDataDecoderException
* need more chunks
*/
@Override
public InterfaceHttpData getBodyHttpData(String name) {
checkDestroyed();
if (!isLastChunk) {
throw new NotEnoughDataDecoderException();
}
List<InterfaceHttpData> list = bodyMapHttpData.get(name);
if (list != null) {
return list.get(0);
}
return null;
}
/**
* Initializes the internals from a new chunk
*
* @param content
* the new received chunk
* @throws ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
@Override
public HttpPostStandardRequestDecoder offer(HttpContent content) {
checkDestroyed();
// Maybe we should not copy here for performance reasons, but this would require
// more care by the caller to release the content in a correct manner later,
// so it is something to optimize at a later stage
ByteBuf buf = content.content();
if (undecodedChunk == null) {
undecodedChunk = buf.copy();
} else {
undecodedChunk.writeBytes(buf);
}
if (content instanceof LastHttpContent) {
isLastChunk = true;
}
parseBody();
if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
undecodedChunk.discardReadBytes();
}
return this;
}
/**
* True if, at the current status, there is an available decoded
* InterfaceHttpData from the Body.
*
* This method works for chunked and non-chunked requests.
*
* @return True if, at the current status, there is a decoded InterfaceHttpData
* @throws EndOfDataDecoderException
* No more data will be available
*/
@Override
public boolean hasNext() {
checkDestroyed();
if (currentStatus == MultiPartStatus.EPILOGUE) {
// OK except if end of list
if (bodyListHttpDataRank >= bodyListHttpData.size()) {
throw new EndOfDataDecoderException();
}
}
return !bodyListHttpData.isEmpty() && bodyListHttpDataRank < bodyListHttpData.size();
}
/**
* Returns the next available InterfaceHttpData or null if, at the time it
* is called, there is no more available InterfaceHttpData. A subsequent
* call to offer(httpChunk) could enable more data.
*
* Be sure to call {@link InterfaceHttpData#release()} after you are done
* with processing to make sure to not leak any resources
*
* @return the next available InterfaceHttpData or null if none
* @throws EndOfDataDecoderException
* No more data will be available
*/
@Override
public InterfaceHttpData next() {
checkDestroyed();
if (hasNext()) {
return bodyListHttpData.get(bodyListHttpDataRank++);
}
return null;
}
/**
* This method will parse as much data as possible and fill the list and map
*
* @throws ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
private void parseBody() {
if (currentStatus == MultiPartStatus.PREEPILOGUE || currentStatus == MultiPartStatus.EPILOGUE) {
if (isLastChunk) {
currentStatus = MultiPartStatus.EPILOGUE;
}
return;
}
parseBodyAttributes();
}
/**
* Utility function to add a new decoded data
*/
protected void addHttpData(InterfaceHttpData data) {
if (data == null) {
return;
}
List<InterfaceHttpData> datas = bodyMapHttpData.get(data.getName());
if (datas == null) {
datas = new ArrayList<InterfaceHttpData>(1);
bodyMapHttpData.put(data.getName(), datas);
}
datas.add(data);
bodyListHttpData.add(data);
}
/**
* This method fills the map and list with as many Attributes as possible from
* the Body in non-Multipart mode.
*
* @throws ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
private void parseBodyAttributesStandard() {
int firstpos = undecodedChunk.readerIndex();
int currentpos = firstpos;
int equalpos;
int ampersandpos;
if (currentStatus == MultiPartStatus.NOTSTARTED) {
currentStatus = MultiPartStatus.DISPOSITION;
}
boolean contRead = true;
try {
while (undecodedChunk.isReadable() && contRead) {
char read = (char) undecodedChunk.readUnsignedByte();
currentpos++;
switch (currentStatus) {
case DISPOSITION:// search '='
if (read == '=') {
currentStatus = MultiPartStatus.FIELD;
equalpos = currentpos - 1;
String key = decodeAttribute(undecodedChunk.toString(firstpos, equalpos - firstpos, charset),
charset);
currentAttribute = factory.createAttribute(request, key);
firstpos = currentpos;
} else if (read == '&') { // special empty FIELD
currentStatus = MultiPartStatus.DISPOSITION;
ampersandpos = currentpos - 1;
String key = decodeAttribute(
undecodedChunk.toString(firstpos, ampersandpos - firstpos, charset), charset);
currentAttribute = factory.createAttribute(request, key);
currentAttribute.setValue(""); // empty
addHttpData(currentAttribute);
currentAttribute = null;
firstpos = currentpos;
contRead = true;
}
break;
case FIELD:// search '&' or end of line
if (read == '&') {
currentStatus = MultiPartStatus.DISPOSITION;
ampersandpos = currentpos - 1;
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = true;
} else if (read == HttpConstants.CR) {
if (undecodedChunk.isReadable()) {
read = (char) undecodedChunk.readUnsignedByte();
currentpos++;
if (read == HttpConstants.LF) {
currentStatus = MultiPartStatus.PREEPILOGUE;
ampersandpos = currentpos - 2;
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = false;
} else {
// Error
throw new ErrorDataDecoderException("Bad end of line");
}
} else {
currentpos--;
}
} else if (read == HttpConstants.LF) {
currentStatus = MultiPartStatus.PREEPILOGUE;
ampersandpos = currentpos - 1;
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = false;
}
break;
default:
// just stop
contRead = false;
}
}
if (isLastChunk && currentAttribute != null) {
// special case
ampersandpos = currentpos;
if (ampersandpos > firstpos) {
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
} else if (!currentAttribute.isCompleted()) {
setFinalBuffer(EMPTY_BUFFER);
}
firstpos = currentpos;
currentStatus = MultiPartStatus.EPILOGUE;
undecodedChunk.readerIndex(firstpos);
return;
}
if (contRead && currentAttribute != null) {
// reset index except if continuing in case of FIELD status
if (currentStatus == MultiPartStatus.FIELD) {
currentAttribute.addContent(undecodedChunk.copy(firstpos, currentpos - firstpos),
false);
firstpos = currentpos;
}
undecodedChunk.readerIndex(firstpos);
} else {
// end of line or end of block so keep index to last valid position
undecodedChunk.readerIndex(firstpos);
}
} catch (ErrorDataDecoderException e) {
// error while decoding
undecodedChunk.readerIndex(firstpos);
throw e;
} catch (IOException e) {
// error while decoding
undecodedChunk.readerIndex(firstpos);
throw new ErrorDataDecoderException(e);
}
}
/**
* This method fills the map and list with as many Attributes as possible from
* the Body in non-Multipart mode.
*
* @throws ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
private void parseBodyAttributes() {
SeekAheadOptimize sao;
try {
sao = new SeekAheadOptimize(undecodedChunk);
} catch (SeekAheadNoBackArrayException ignored) {
parseBodyAttributesStandard();
return;
}
int firstpos = undecodedChunk.readerIndex();
int currentpos = firstpos;
int equalpos;
int ampersandpos;
if (currentStatus == MultiPartStatus.NOTSTARTED) {
currentStatus = MultiPartStatus.DISPOSITION;
}
boolean contRead = true;
try {
loop: while (sao.pos < sao.limit) {
char read = (char) (sao.bytes[sao.pos++] & 0xFF);
currentpos++;
switch (currentStatus) {
case DISPOSITION:// search '='
if (read == '=') {
currentStatus = MultiPartStatus.FIELD;
equalpos = currentpos - 1;
String key = decodeAttribute(undecodedChunk.toString(firstpos, equalpos - firstpos, charset),
charset);
currentAttribute = factory.createAttribute(request, key);
firstpos = currentpos;
} else if (read == '&') { // special empty FIELD
currentStatus = MultiPartStatus.DISPOSITION;
ampersandpos = currentpos - 1;
String key = decodeAttribute(
undecodedChunk.toString(firstpos, ampersandpos - firstpos, charset), charset);
currentAttribute = factory.createAttribute(request, key);
currentAttribute.setValue(""); // empty
addHttpData(currentAttribute);
currentAttribute = null;
firstpos = currentpos;
contRead = true;
}
break;
case FIELD:// search '&' or end of line
if (read == '&') {
currentStatus = MultiPartStatus.DISPOSITION;
ampersandpos = currentpos - 1;
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = true;
} else if (read == HttpConstants.CR) {
if (sao.pos < sao.limit) {
read = (char) (sao.bytes[sao.pos++] & 0xFF);
currentpos++;
if (read == HttpConstants.LF) {
currentStatus = MultiPartStatus.PREEPILOGUE;
ampersandpos = currentpos - 2;
sao.setReadPosition(0);
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = false;
break loop;
} else {
// Error
sao.setReadPosition(0);
throw new ErrorDataDecoderException("Bad end of line");
}
} else {
if (sao.limit > 0) {
currentpos--;
}
}
} else if (read == HttpConstants.LF) {
currentStatus = MultiPartStatus.PREEPILOGUE;
ampersandpos = currentpos - 1;
sao.setReadPosition(0);
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = false;
break loop;
}
break;
default:
// just stop
sao.setReadPosition(0);
contRead = false;
break loop;
}
}
if (isLastChunk && currentAttribute != null) {
// special case
ampersandpos = currentpos;
if (ampersandpos > firstpos) {
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
} else if (!currentAttribute.isCompleted()) {
setFinalBuffer(EMPTY_BUFFER);
}
firstpos = currentpos;
currentStatus = MultiPartStatus.EPILOGUE;
undecodedChunk.readerIndex(firstpos);
return;
}
if (contRead && currentAttribute != null) {
// reset index except if continuing in case of FIELD status
if (currentStatus == MultiPartStatus.FIELD) {
currentAttribute.addContent(undecodedChunk.copy(firstpos, currentpos - firstpos),
false);
firstpos = currentpos;
}
undecodedChunk.readerIndex(firstpos);
} else {
// end of line or end of block so keep index to last valid position
undecodedChunk.readerIndex(firstpos);
}
} catch (ErrorDataDecoderException e) {
// error while decoding
undecodedChunk.readerIndex(firstpos);
throw e;
} catch (IOException e) {
// error while decoding
undecodedChunk.readerIndex(firstpos);
throw new ErrorDataDecoderException(e);
}
}
private void setFinalBuffer(ByteBuf buffer) throws IOException {
currentAttribute.addContent(buffer, true);
String value = decodeAttribute(currentAttribute.getByteBuf().toString(charset), charset);
currentAttribute.setValue(value);
addHttpData(currentAttribute);
currentAttribute = null;
}
/**
* Decode component
*
* @return the decoded component
*/
private static String decodeAttribute(String s, Charset charset) {
try {
return QueryStringDecoder.decodeComponent(s, charset);
} catch (IllegalArgumentException e) {
throw new ErrorDataDecoderException("Bad string: '" + s + '\'', e);
}
}
/**
* Skip control Characters
*/
void skipControlCharacters() {
SeekAheadOptimize sao;
try {
sao = new SeekAheadOptimize(undecodedChunk);
} catch (SeekAheadNoBackArrayException ignored) {
try {
skipControlCharactersStandard();
} catch (IndexOutOfBoundsException e) {
throw new NotEnoughDataDecoderException(e);
}
return;
}
while (sao.pos < sao.limit) {
char c = (char) (sao.bytes[sao.pos++] & 0xFF);
if (!Character.isISOControl(c) && !Character.isWhitespace(c)) {
sao.setReadPosition(1);
return;
}
}
throw new NotEnoughDataDecoderException("Access out of bounds");
}
void skipControlCharactersStandard() {
for (;;) {
char c = (char) undecodedChunk.readUnsignedByte();
if (!Character.isISOControl(c) && !Character.isWhitespace(c)) {
undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
break;
}
}
}
/**
* Destroy the {@link HttpPostStandardRequestDecoder} and release all its resources. After this method
* was called it is not possible to operate on it anymore.
*/
@Override
public void destroy() {
checkDestroyed();
cleanFiles();
destroyed = true;
if (undecodedChunk != null && undecodedChunk.refCnt() > 0) {
undecodedChunk.release();
undecodedChunk = null;
}
// release all data which was not yet pulled
for (int i = bodyListHttpDataRank; i < bodyListHttpData.size(); i++) {
bodyListHttpData.get(i).release();
}
}
/**
* Clean all HttpDatas (on Disk) for the current request.
*/
@Override
public void cleanFiles() {
checkDestroyed();
factory.cleanRequestHttpDatas(request);
}
/**
* Remove the given FileUpload from the list of FileUploads to clean
*/
@Override
public void removeHttpDataFromClean(InterfaceHttpData data) {
checkDestroyed();
factory.removeHttpDataFromClean(request, data);
}
}
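For orientation, a minimal usage sketch of the decoder above (not part of the commit; the helper name dumpAttributes is made up, and it assumes a FullHttpRequest whose url-encoded body is "foo=bar&baz=qux", so the constructor offers the content automatically as noted above):

// needs io.netty.handler.codec.http.FullHttpRequest, java.io.IOException and the multipart classes above
static void dumpAttributes(FullHttpRequest request) {
    InterfaceHttpPostRequestDecoder decoder = new HttpPostStandardRequestDecoder(request);
    try {
        while (decoder.hasNext()) {
            InterfaceHttpData data = decoder.next();
            if (data.getHttpDataType() == InterfaceHttpData.HttpDataType.Attribute) {
                Attribute attribute = (Attribute) data;
                System.out.println(attribute.getName() + " = " + attribute.getValue());
            }
        }
    } catch (HttpPostRequestDecoder.EndOfDataDecoderException e) {
        // hasNext() signals exhaustion with this exception once the EPILOGUE status is reached
    } catch (IOException e) {
        // Attribute.getValue() may fail for disk-backed attributes
    } finally {
        decoder.destroy(); // mandatory: releases buffers and any data kept on disk
    }
}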

InterfaceHttpPostRequestDecoder.java (new file)

@@ -0,0 +1,139 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http.multipart;
import io.netty.handler.codec.http.HttpContent;
import java.util.List;
/**
* This decoder will decode Body and can handle POST BODY (as well as PUT, PATCH or OPTIONS bodies).
*
* You <strong>MUST</strong> call {@link #destroy()} after completion to release all resources.
*
*/
public interface InterfaceHttpPostRequestDecoder {
/**
* True if this request is a Multipart request
*
* @return True if this request is a Multipart request
*/
boolean isMultipart();
/**
* Set the amount of bytes after which read bytes in the buffer should be discarded.
* Setting this lower gives lower memory usage but with the overhead of more memory copies.
* Use {@code 0} to disable it.
*/
void setDiscardThreshold(int discardThreshold);
/**
* Return the threshold in bytes after which read data in the buffer should be discarded.
*/
int getDiscardThreshold();
/**
* This method returns a List of all HttpDatas from body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return the list of HttpDatas from the Body part for POST method
* @throws HttpPostRequestDecoder.NotEnoughDataDecoderException
* Need more chunks
*/
List<InterfaceHttpData> getBodyHttpDatas();
/**
* This method returns a List of all HttpDatas with the given name from
* body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return All Body HttpDatas with the given name (ignore case)
* @throws HttpPostRequestDecoder.NotEnoughDataDecoderException
* need more chunks
*/
List<InterfaceHttpData> getBodyHttpDatas(String name);
/**
* This method returns the first InterfaceHttpData with the given name from
* body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return The first Body InterfaceHttpData with the given name (ignore
* case)
* @throws HttpPostRequestDecoder.NotEnoughDataDecoderException
* need more chunks
*/
InterfaceHttpData getBodyHttpData(String name);
/**
* Initializes the internals from a new chunk
*
* @param content
* the new received chunk
* @throws HttpPostRequestDecoder.ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
InterfaceHttpPostRequestDecoder offer(HttpContent content);
/**
* True if, at the current status, there is an available decoded
* InterfaceHttpData from the Body.
*
* This method works for chunked and non-chunked requests.
*
* @return True if, at the current status, there is a decoded InterfaceHttpData
* @throws HttpPostRequestDecoder.EndOfDataDecoderException
* No more data will be available
*/
boolean hasNext();
/**
* Returns the next available InterfaceHttpData or null if, at the time it
* is called, there is no more available InterfaceHttpData. A subsequent
* call to offer(httpChunk) could enable more data.
*
* Be sure to call {@link InterfaceHttpData#release()} after you are done
* with processing to make sure to not leak any resources
*
* @return the next available InterfaceHttpData or null if none
* @throws HttpPostRequestDecoder.EndOfDataDecoderException
* No more data will be available
*/
InterfaceHttpData next();
/**
* Destroy the {@link InterfaceHttpPostRequestDecoder} and release all its resources. After this method
* was called it is not possible to operate on it anymore.
*/
void destroy();
/**
* Clean all HttpDatas (on Disk) for the current request.
*/
void cleanFiles();
/**
* Remove the given FileUpload from the list of FileUploads to clean
*/
void removeHttpDataFromClean(InterfaceHttpData data);
}

HttpPostRequestDecoderTest.java

@@ -51,7 +51,8 @@ public class HttpPostRequestDecoderTest {
}
private static void testBinaryStreamUpload(boolean withSpace) throws Exception {
- final String boundary = "dLV9Wyq26L_-JQxk6ferf-RT153LhOO";
+ // Boundary starts here with '=' to check against issue https://github.com/netty/netty/issues/3004
+ final String boundary = "=dLV9Wyq26L_-JQxk6ferf-RT153LhOO";
final String contentTypeValue;
if (withSpace) {
contentTypeValue = "multipart/form-data; boundary=" + boundary;

StringUtil.java

@@ -111,6 +111,63 @@ public final class StringUtil {
return res.toArray(new String[res.size()]);
}
/**
* Splits the specified {@link String} with the specified delimiter into at most maxParts parts.
* This operation is a simplified and optimized
* version of {@link String#split(String, int)}.
*/
public static String[] split(String value, char delim, int maxParts) {
final int end = value.length();
final List<String> res = new ArrayList<String>();
int start = 0;
int cpt = 1;
for (int i = 0; i < end && cpt < maxParts; i ++) {
if (value.charAt(i) == delim) {
if (start == i) {
res.add(EMPTY_STRING);
} else {
res.add(value.substring(start, i));
}
start = i + 1;
cpt++;
}
}
if (start == 0) { // If no delimiter was found in the value
res.add(value);
} else {
if (start != end) {
// Add the last element if it's not empty.
res.add(value.substring(start, end));
} else {
// Truncate trailing empty elements.
for (int i = res.size() - 1; i >= 0; i --) {
if (res.get(i).isEmpty()) {
res.remove(i);
} else {
break;
}
}
}
}
return res.toArray(new String[res.size()]);
}
/**
* Returns the part of the value after the first occurrence of the one-char
* delimiter, or null if the delimiter is not found.
*/
public static String substringAfter(String value, char delim) {
int pos = value.indexOf(delim);
if (pos >= 0) {
return value.substring(pos + 1);
}
return null;
}
/**
* Converts the specified byte value into a 2-digit hexadecimal integer.
*/

StringUtilTest.java

@@ -70,4 +70,15 @@ public class StringUtilTest {
public void splitWithDelimiterAtBeginning() {
assertArrayEquals(new String[] { "", "foo", "bar" }, split("#foo#bar", '#'));
}
@Test
public void splitMaxPart() {
assertArrayEquals(new String[] { "foo", "bar:bar2" }, split("foo:bar:bar2", ':', 2));
assertArrayEquals(new String[] { "foo", "bar", "bar2" }, split("foo:bar:bar2", ':', 3));
}
@Test
public void substringAfterTest() {
assertEquals("bar:bar2", substringAfter("foo:bar:bar2", ':'));
}
}
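Two further cases that follow from the implementation above could be exercised in the same style (a sketch only, not part of the commit; assertNull would need the usual static import from org.junit.Assert):

@Test
public void splitMaxPartAndSubstringAfterEdgeCases() {
    // Once maxParts - 1 delimiters are consumed, the remainder is kept verbatim.
    assertArrayEquals(new String[] { "a", "b=c=d" }, split("a=b=c=d", '=', 2));
    // Trailing empty parts are truncated when the value ends with the delimiter.
    assertArrayEquals(new String[] { "foo" }, split("foo##", '#', 3));
    // substringAfter returns null when the delimiter is absent.
    assertNull(substringAfter("foo", '='));
}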