Split HttpPostRequestDecoder into HttpPostStandardRequestDecoder and HttpPostMultipartRequestDecoder / Add HttpData.maxSize

- Related issues: #1937, #1938 and #1946
- Add InterfaceHttpPostRequestDecoder and make HttpPostRequestDecoder implement it
- HttpPostRequestDecoder now delegates to either HttpPostStandardRequestDecoder or HttpPostMultipartRequestDecoder
- Remove IncompatibleDataDecoderException because it is no longer thrown anywhere
Authored by fredericBregier on 2013-08-19 23:22:22 +02:00, committed by Trustin Lee
parent 2eb5d4f0dd
commit cf1970c31b
16 changed files with 2805 additions and 1959 deletions
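As a rough usage sketch of the new surface (the request and the handler wiring are assumed here, not part of this patch), the facade keeps its old entry point while the new per-field limit is configured on the factory:

    // Hypothetical helper; 'request' is an HttpRequest already received by a handler.
    static InterfaceHttpPostRequestDecoder newDecoder(HttpRequest request) throws Exception {
        HttpDataFactory factory = new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE);
        factory.setMaxLimit(1048576); // new in this patch: reject any single field above 1 MiB
        // HttpPostRequestDecoder is now a facade that constructs either an
        // HttpPostStandardRequestDecoder or an HttpPostMultipartRequestDecoder,
        // both of which implement the new InterfaceHttpPostRequestDecoder.
        return new HttpPostRequestDecoder(factory, request);
    }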


@@ -101,6 +101,7 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
}
try {
size = buffer.readableBytes();
checkSize(size);
if (definedSize > 0 && definedSize < size) {
throw new IOException("Out of size: " + size + " > " + definedSize);
}
@@ -137,6 +138,7 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
if (buffer != null) {
try {
int localsize = buffer.readableBytes();
checkSize(size + localsize);
if (definedSize > 0 && definedSize < size + localsize) {
throw new IOException("Out of size: " + (size + localsize) +
" > " + definedSize);
@@ -187,6 +189,7 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
}
this.file = file;
size = file.length();
checkSize(size);
isRenamed = true;
completed = true;
}
@@ -209,6 +212,7 @@ public abstract class AbstractDiskHttpData extends AbstractHttpData {
while (read > 0) {
byteBuffer.position(read).flip();
written += localfileChannel.write(byteBuffer);
checkSize(written);
read = inputStream.read(bytes);
}
localfileChannel.force(false);
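The pattern these hunks introduce in the write loops — validating the running byte count on every iteration so an over-limit stream fails mid-transfer rather than after the fact — distills to roughly the following standalone sketch (not the actual Netty method; names are illustrative):

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.ByteBuffer;
    import java.nio.channels.WritableByteChannel;

    final class BoundedCopy {
        /** Copies in to out, failing as soon as the total exceeds maxSize (-1 = unlimited). */
        static long copy(InputStream in, WritableByteChannel out, long maxSize) throws IOException {
            byte[] bytes = new byte[4096];
            long written = 0;
            int read = in.read(bytes);
            while (read > 0) {
                written += out.write(ByteBuffer.wrap(bytes, 0, read));
                if (maxSize >= 0 && written > maxSize) { // mirrors checkSize(written)
                    throw new IOException("Size exceeds allowed maximum capacity");
                }
                read = in.read(bytes);
            }
            return written;
        }
    }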


@@ -37,6 +37,7 @@ public abstract class AbstractHttpData extends AbstractReferenceCounted implemen
protected long size;
protected Charset charset = HttpConstants.DEFAULT_CHARSET;
protected boolean completed;
protected long maxSize = DefaultHttpDataFactory.MAXSIZE;
protected AbstractHttpData(String name, Charset charset, long size) {
if (name == null) {
@@ -57,6 +58,16 @@ public abstract class AbstractHttpData extends AbstractReferenceCounted implemen
definedSize = size;
}
public void setMaxSize(long maxSize) {
this.maxSize = maxSize;
}
public void checkSize(long newSize) throws IOException {
if (maxSize >= 0 && newSize > maxSize) {
throw new IOException("Size exceed allowed maximum capacity");
}
}
@Override
public String getName() {
return name;


@@ -49,6 +49,7 @@ public abstract class AbstractMemoryHttpData extends AbstractHttpData {
throw new NullPointerException("buffer");
}
long localsize = buffer.readableBytes();
checkSize(localsize);
if (definedSize > 0 && definedSize < localsize) {
throw new IOException("Out of size: " + localsize + " > " +
definedSize);
@@ -73,6 +74,7 @@ public abstract class AbstractMemoryHttpData extends AbstractHttpData {
while (read > 0) {
buffer.writeBytes(bytes, 0, read);
written += read;
checkSize(written);
read = inputStream.read(bytes);
}
size = written;
@@ -91,6 +93,7 @@ public abstract class AbstractMemoryHttpData extends AbstractHttpData {
throws IOException {
if (buffer != null) {
long localsize = buffer.readableBytes();
checkSize(size + localsize);
if (definedSize > 0 && definedSize < size + localsize) {
throw new IOException("Out of size: " + (size + localsize) +
" > " + definedSize);
@@ -128,6 +131,7 @@ public abstract class AbstractMemoryHttpData extends AbstractHttpData {
throw new IllegalArgumentException(
"File too big to be loaded in memory");
}
checkSize(newsize);
FileInputStream inputStream = new FileInputStream(file);
FileChannel fileChannel = inputStream.getChannel();
byte[] array = new byte[(int) newsize];


@@ -37,6 +37,10 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
* Proposed default MINSIZE as 16 KB.
*/
public static final long MINSIZE = 0x4000;
/**
* Proposed default MAXSIZE of -1, meaning no limitation (unlimited)
*/
public static final long MAXSIZE = -1;
private final boolean useDisk;
@@ -44,6 +48,8 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
private long minSize;
private long maxSize = MAXSIZE;
/**
* Keep all HttpDatas until cleanAllHttpDatas() is called.
*/
@@ -78,6 +84,10 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
this.minSize = minSize;
}
public void setMaxLimit(long max) {
this.maxSize = max;
}
/**
* @return the associated list of Files for the request
*/
@@ -94,17 +104,33 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
public Attribute createAttribute(HttpRequest request, String name) {
if (useDisk) {
Attribute attribute = new DiskAttribute(name);
attribute.setMaxSize(maxSize);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
return attribute;
}
if (checkSize) {
Attribute attribute = new MixedAttribute(name, minSize);
attribute.setMaxSize(maxSize);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
return attribute;
}
return new MemoryAttribute(name);
MemoryAttribute attribute = new MemoryAttribute(name);
attribute.setMaxSize(maxSize);
return attribute;
}
/**
* Utility method: check that the data does not exceed its configured maximum size
* @param data the HttpData to check
*/
private void checkHttpDataSize(HttpData data) {
try {
data.checkSize(data.length());
} catch (IOException e) {
throw new IllegalArgumentException("Attribute bigger than maxSize allowed");
}
}
@Override
@@ -113,22 +139,30 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
Attribute attribute;
try {
attribute = new DiskAttribute(name, value);
attribute.setMaxSize(maxSize);
} catch (IOException e) {
// revert to Mixed mode
attribute = new MixedAttribute(name, value, minSize);
attribute.setMaxSize(maxSize);
}
checkHttpDataSize(attribute);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
return attribute;
}
if (checkSize) {
Attribute attribute = new MixedAttribute(name, value, minSize);
attribute.setMaxSize(maxSize);
checkHttpDataSize(attribute);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(attribute);
return attribute;
}
try {
return new MemoryAttribute(name, value);
MemoryAttribute attribute = new MemoryAttribute(name, value);
attribute.setMaxSize(maxSize);
checkHttpDataSize(attribute);
return attribute;
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
@@ -141,6 +175,8 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
if (useDisk) {
FileUpload fileUpload = new DiskFileUpload(name, filename, contentType,
contentTransferEncoding, charset, size);
fileUpload.setMaxSize(maxSize);
checkHttpDataSize(fileUpload);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(fileUpload);
return fileUpload;
@@ -148,12 +184,17 @@ public class DefaultHttpDataFactory implements HttpDataFactory {
if (checkSize) {
FileUpload fileUpload = new MixedFileUpload(name, filename, contentType,
contentTransferEncoding, charset, size, minSize);
fileUpload.setMaxSize(maxSize);
checkHttpDataSize(fileUpload);
List<HttpData> fileToDelete = getList(request);
fileToDelete.add(fileUpload);
return fileUpload;
}
return new MemoryFileUpload(name, filename, contentType,
MemoryFileUpload fileUpload = new MemoryFileUpload(name, filename, contentType,
contentTransferEncoding, charset, size);
fileUpload.setMaxSize(maxSize);
checkHttpDataSize(fileUpload);
return fileUpload;
}
@Override
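A minimal sketch of the factory behaviour after this change (the request construction is assumed; only setMaxLimit and the quoted messages are from this patch):

    HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload");
    HttpDataFactory factory = new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE);
    factory.setMaxLimit(16); // every HttpData created below inherits this 16-byte cap

    factory.createAttribute(request, "ok", "small enough"); // 12 bytes: accepted
    try {
        factory.createAttribute(request, "big", "a value well beyond sixteen bytes");
    } catch (IllegalArgumentException e) {
        // checkHttpDataSize(...) wraps the IOException from checkSize(...):
        // "Attribute bigger than maxSize allowed"
    }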


@@ -64,6 +64,7 @@ public class DiskAttribute extends AbstractDiskHttpData implements Attribute {
throw new NullPointerException("value");
}
byte [] bytes = value.getBytes(charset.name());
checkSize(bytes.length);
ByteBuf buffer = wrappedBuffer(bytes);
if (definedSize > 0) {
definedSize = buffer.readableBytes();
@@ -74,6 +75,7 @@ public class DiskAttribute extends AbstractDiskHttpData implements Attribute {
@Override
public void addContent(ByteBuf buffer, boolean last) throws IOException {
int localsize = buffer.readableBytes();
checkSize(size + localsize);
if (definedSize > 0 && definedSize < size + localsize) {
definedSize = size + localsize;
}


@@ -27,6 +27,23 @@ import java.nio.charset.Charset;
* Extended interface for InterfaceHttpData
*/
public interface HttpData extends InterfaceHttpData, ByteBufHolder {
/**
* Set the maxSize for this HttpData. Once the limit is reached, an exception will be raised.
* Setting it to -1 means no limitation.
*
* By default, this is set from the HttpDataFactory.
* @param maxSize the new maximum size in bytes, or -1 for unlimited
*/
void setMaxSize(long maxSize);
/**
* Check that the new size does not exceed the maximum limit allowed.
* The limit is always computed in bytes.
* @param newSize
* @throws IOException
*/
void checkSize(long newSize) throws IOException;
/**
* Set the content from the ChannelBuffer (erase any previous data)
*
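Because the limit lives on each HttpData, it can also be tightened per item after creation. A small sketch, assuming a memory-backed attribute and the checkSize behaviour added above:

    MemoryAttribute attr = new MemoryAttribute("comment"); // setValue below throws IOException
    attr.setMaxSize(8);      // cap this single value at 8 bytes; -1 would disable the cap
    attr.setValue("short");  // 5 bytes: accepted
    try {
        attr.setValue("definitely too long"); // 19 bytes: checkSize(...) rejects before buffering
    } catch (IOException expected) {
        // "Size exceeds allowed maximum capacity"
    }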


@@ -23,6 +23,13 @@ import java.nio.charset.Charset;
* Interface to enable creation of InterfaceHttpData objects
*/
public interface HttpDataFactory {
/**
* Set a maximum size limitation on fields. Exceeding it will generate an ErrorDataDecoderException.
* A value of -1 (the default) means no limitation.
* @param max the maximum field size in bytes, or -1 for unlimited
*/
void setMaxLimit(long max);
/**
*
* @param request associated request


@@ -187,7 +187,9 @@ public class HttpPostRequestEncoder implements ChunkedInput<HttpContent> {
if (charset == null) {
throw new NullPointerException("charset");
}
if (request.getMethod() != HttpMethod.POST) {
HttpMethod method = request.getMethod();
if (!(method.equals(HttpMethod.POST) || method.equals(HttpMethod.PUT)
|| method.equals(HttpMethod.PATCH) || method.equals(HttpMethod.OPTIONS))) {
throw new ErrorDataEncoderException("Cannot create an Encoder if the request method is not POST, PUT, PATCH or OPTIONS");
}
this.request = request;
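With the relaxed check, an encoder can now be constructed for PUT, PATCH and OPTIONS requests as well; a brief sketch (request construction assumed, the checked exception declared rather than handled):

    static HttpRequest encodePut() throws HttpPostRequestEncoder.ErrorDataEncoderException {
        HttpRequest put = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "/resource");
        HttpPostRequestEncoder encoder = new HttpPostRequestEncoder(put, false /* not multipart */);
        encoder.addBodyAttribute("name", "value");
        return encoder.finalizeRequest(); // previously only POST got this far
    }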


@@ -0,0 +1,728 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http.multipart;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.http.HttpConstants;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.multipart.HttpPostBodyUtil.SeekAheadNoBackArrayException;
import io.netty.handler.codec.http.multipart.HttpPostBodyUtil.SeekAheadOptimize;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.EndOfDataDecoderException;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.ErrorDataDecoderException;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.MultiPartStatus;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.NotEnoughDataDecoderException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import static io.netty.buffer.Unpooled.*;
/**
* This decoder will decode the request body and can handle standard (url-encoded, non-multipart) POST bodies.
*
* You <strong>MUST</strong> call {@link #destroy()} after completion to release all resources.
*
*/
public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestDecoder {
/**
* Factory used to create InterfaceHttpData
*/
private final HttpDataFactory factory;
/**
* Request to decode
*/
private final HttpRequest request;
/**
* Default charset to use
*/
private final Charset charset;
/**
* Whether the last chunk has already been received
*/
private boolean isLastChunk;
/**
* HttpDatas from Body
*/
private final List<InterfaceHttpData> bodyListHttpData = new ArrayList<InterfaceHttpData>();
/**
* HttpDatas as Map from Body
*/
private final Map<String, List<InterfaceHttpData>> bodyMapHttpData = new TreeMap<String, List<InterfaceHttpData>>(
CaseIgnoringComparator.INSTANCE);
/**
* The current channelBuffer
*/
private ByteBuf undecodedChunk;
/**
* Body HttpDatas current position
*/
private int bodyListHttpDataRank;
/**
* Current status
*/
private MultiPartStatus currentStatus = MultiPartStatus.NOTSTARTED;
/**
* The Attribute that is currently being decoded
*/
private Attribute currentAttribute;
private boolean destroyed;
private int discardThreshold = HttpPostRequestDecoder.DEFAULT_DISCARD_THRESHOLD;
/**
*
* @param request
* the request to decode
* @throws NullPointerException
* for request
* @throws ErrorDataDecoderException
* if the default charset was wrong when decoding or other
* errors
*/
public HttpPostStandardRequestDecoder(HttpRequest request)
throws ErrorDataDecoderException {
this(new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE), request, HttpConstants.DEFAULT_CHARSET);
}
/**
*
* @param factory
* the factory used to create InterfaceHttpData
* @param request
* the request to decode
* @throws NullPointerException
* for request or factory
* @throws ErrorDataDecoderException
* if the default charset was wrong when decoding or other
* errors
*/
public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest request)
throws ErrorDataDecoderException {
this(factory, request, HttpConstants.DEFAULT_CHARSET);
}
/**
*
* @param factory
* the factory used to create InterfaceHttpData
* @param request
* the request to decode
* @param charset
* the charset to use as default
* @throws NullPointerException
* for request or charset or factory
* @throws ErrorDataDecoderException
* if the default charset was wrong when decoding or other
* errors
*/
public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset)
throws ErrorDataDecoderException {
if (factory == null) {
throw new NullPointerException("factory");
}
if (request == null) {
throw new NullPointerException("request");
}
if (charset == null) {
throw new NullPointerException("charset");
}
this.request = request;
this.charset = charset;
this.factory = factory;
if (request instanceof HttpContent) {
// Offer automatically if the given request is also of type HttpContent
// See #1089
offer((HttpContent) request);
} else {
undecodedChunk = buffer();
parseBody();
}
}
private void checkDestroyed() {
if (destroyed) {
throw new IllegalStateException(HttpPostStandardRequestDecoder.class.getSimpleName()
+ " was destroyed already");
}
}
/**
* True if this request is a Multipart request
*
* @return True if this request is a Multipart request
*/
public boolean isMultipart() {
checkDestroyed();
return false;
}
/**
* Set the number of bytes after which read bytes in the buffer should be discarded.
* Setting this lower gives lower memory usage but with the overhead of more memory copies.
* Use {@code 0} to disable it.
*/
public void setDiscardThreshold(int discardThreshold) {
if (discardThreshold < 0) {
throw new IllegalArgumentException("discardThreshold must be >= 0");
}
this.discardThreshold = discardThreshold;
}
/**
* Return the threshold in bytes after which read data in the buffer should be discarded.
*/
public int getDiscardThreshold() {
return discardThreshold;
}
/**
* This method returns a List of all HttpDatas from the body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return the list of HttpDatas from the body part for the POST method
* @throws NotEnoughDataDecoderException
* Need more chunks
*/
public List<InterfaceHttpData> getBodyHttpDatas() throws NotEnoughDataDecoderException {
checkDestroyed();
if (!isLastChunk) {
throw new NotEnoughDataDecoderException();
}
return bodyListHttpData;
}
/**
* This method returns a List of all HttpDatas with the given name from the
* body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return All Body HttpDatas with the given name (ignore case)
* @throws NotEnoughDataDecoderException
* need more chunks
*/
public List<InterfaceHttpData> getBodyHttpDatas(String name)
throws NotEnoughDataDecoderException {
checkDestroyed();
if (!isLastChunk) {
throw new NotEnoughDataDecoderException();
}
return bodyMapHttpData.get(name);
}
/**
* This method returns the first InterfaceHttpData with the given name from the
* body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return The first Body InterfaceHttpData with the given name (ignore
* case)
* @throws NotEnoughDataDecoderException
* need more chunks
*/
public InterfaceHttpData getBodyHttpData(String name) throws NotEnoughDataDecoderException {
checkDestroyed();
if (!isLastChunk) {
throw new NotEnoughDataDecoderException();
}
List<InterfaceHttpData> list = bodyMapHttpData.get(name);
if (list != null) {
return list.get(0);
}
return null;
}
/**
* Initializes the internals from a new chunk
*
* @param content
* the new received chunk
* @throws ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
public HttpPostStandardRequestDecoder offer(HttpContent content)
throws ErrorDataDecoderException {
checkDestroyed();
// Maybe we should not copy here, for performance reasons, but this would require
// more care by the caller to release the content correctly later.
// So this may be something to optimize at a later stage.
ByteBuf buf = content.content();
if (undecodedChunk == null) {
undecodedChunk = buf.copy();
} else {
undecodedChunk.writeBytes(buf);
}
if (content instanceof LastHttpContent) {
isLastChunk = true;
}
parseBody();
if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
undecodedChunk.discardReadBytes();
}
return this;
}
/**
* True if at the current status, there is an available decoded
* InterfaceHttpData from the body.
*
* This method works for chunked and non-chunked requests.
*
* @return True if at the current status, there is a decoded InterfaceHttpData
* @throws EndOfDataDecoderException
* No more data will be available
*/
public boolean hasNext() throws EndOfDataDecoderException {
checkDestroyed();
if (currentStatus == MultiPartStatus.EPILOGUE) {
// OK except if end of list
if (bodyListHttpDataRank >= bodyListHttpData.size()) {
throw new EndOfDataDecoderException();
}
}
return !bodyListHttpData.isEmpty() && bodyListHttpDataRank < bodyListHttpData.size();
}
/**
* Returns the next available InterfaceHttpData or null if, at the time it
* is called, there is no more available InterfaceHttpData. A subsequent
* call to offer(httpChunk) could enable more data.
*
* Be sure to call {@link InterfaceHttpData#release()} after you are done
* with processing to make sure to not leak any resources
*
* @return the next available InterfaceHttpData or null if none
* @throws EndOfDataDecoderException
* No more data will be available
*/
public InterfaceHttpData next() throws EndOfDataDecoderException {
checkDestroyed();
if (hasNext()) {
return bodyListHttpData.get(bodyListHttpDataRank++);
}
return null;
}
/**
* This method will parse as much data as possible and fill the list and map
*
* @throws ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
private void parseBody() throws ErrorDataDecoderException {
if (currentStatus == MultiPartStatus.PREEPILOGUE || currentStatus == MultiPartStatus.EPILOGUE) {
if (isLastChunk) {
currentStatus = MultiPartStatus.EPILOGUE;
}
return;
}
parseBodyAttributes();
}
/**
* Utility function to add a new decoded data
*/
protected void addHttpData(InterfaceHttpData data) {
if (data == null) {
return;
}
List<InterfaceHttpData> datas = bodyMapHttpData.get(data.getName());
if (datas == null) {
datas = new ArrayList<InterfaceHttpData>(1);
bodyMapHttpData.put(data.getName(), datas);
}
datas.add(data);
bodyListHttpData.add(data);
}
/**
* This method fills the map and list with as many Attributes as possible from
* the body in non-multipart mode.
*
* @throws ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
private void parseBodyAttributesStandard() throws ErrorDataDecoderException {
int firstpos = undecodedChunk.readerIndex();
int currentpos = firstpos;
int equalpos;
int ampersandpos;
if (currentStatus == MultiPartStatus.NOTSTARTED) {
currentStatus = MultiPartStatus.DISPOSITION;
}
boolean contRead = true;
try {
while (undecodedChunk.isReadable() && contRead) {
char read = (char) undecodedChunk.readUnsignedByte();
currentpos++;
switch (currentStatus) {
case DISPOSITION:// search '='
if (read == '=') {
currentStatus = MultiPartStatus.FIELD;
equalpos = currentpos - 1;
String key = decodeAttribute(undecodedChunk.toString(firstpos, equalpos - firstpos, charset),
charset);
currentAttribute = factory.createAttribute(request, key);
firstpos = currentpos;
} else if (read == '&') { // special empty FIELD
currentStatus = MultiPartStatus.DISPOSITION;
ampersandpos = currentpos - 1;
String key = decodeAttribute(
undecodedChunk.toString(firstpos, ampersandpos - firstpos, charset), charset);
currentAttribute = factory.createAttribute(request, key);
currentAttribute.setValue(""); // empty
addHttpData(currentAttribute);
currentAttribute = null;
firstpos = currentpos;
contRead = true;
}
break;
case FIELD:// search '&' or end of line
if (read == '&') {
currentStatus = MultiPartStatus.DISPOSITION;
ampersandpos = currentpos - 1;
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = true;
} else if (read == HttpConstants.CR) {
if (undecodedChunk.isReadable()) {
read = (char) undecodedChunk.readUnsignedByte();
currentpos++;
if (read == HttpConstants.LF) {
currentStatus = MultiPartStatus.PREEPILOGUE;
ampersandpos = currentpos - 2;
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = false;
} else {
// Error
throw new ErrorDataDecoderException("Bad end of line");
}
} else {
currentpos--;
}
} else if (read == HttpConstants.LF) {
currentStatus = MultiPartStatus.PREEPILOGUE;
ampersandpos = currentpos - 1;
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = false;
}
break;
default:
// just stop
contRead = false;
}
}
if (isLastChunk && currentAttribute != null) {
// special case
ampersandpos = currentpos;
if (ampersandpos > firstpos) {
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
} else if (!currentAttribute.isCompleted()) {
setFinalBuffer(EMPTY_BUFFER);
}
firstpos = currentpos;
currentStatus = MultiPartStatus.EPILOGUE;
return;
}
if (contRead && currentAttribute != null) {
// reset index except if continuing in FIELD status
if (currentStatus == MultiPartStatus.FIELD) {
currentAttribute.addContent(undecodedChunk.copy(firstpos, currentpos - firstpos),
false);
firstpos = currentpos;
}
undecodedChunk.readerIndex(firstpos);
} else {
// end of line so keep index
}
} catch (ErrorDataDecoderException e) {
// error while decoding
undecodedChunk.readerIndex(firstpos);
throw e;
} catch (IOException e) {
// error while decoding
undecodedChunk.readerIndex(firstpos);
throw new ErrorDataDecoderException(e);
}
}
/**
* This method fills the map and list with as many Attributes as possible from
* the body in non-multipart mode.
*
* @throws ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
private void parseBodyAttributes() throws ErrorDataDecoderException {
SeekAheadOptimize sao;
try {
sao = new SeekAheadOptimize(undecodedChunk);
} catch (SeekAheadNoBackArrayException e1) {
parseBodyAttributesStandard();
return;
}
int firstpos = undecodedChunk.readerIndex();
int currentpos = firstpos;
int equalpos;
int ampersandpos;
if (currentStatus == MultiPartStatus.NOTSTARTED) {
currentStatus = MultiPartStatus.DISPOSITION;
}
boolean contRead = true;
try {
loop: while (sao.pos < sao.limit) {
char read = (char) (sao.bytes[sao.pos++] & 0xFF);
currentpos++;
switch (currentStatus) {
case DISPOSITION:// search '='
if (read == '=') {
currentStatus = MultiPartStatus.FIELD;
equalpos = currentpos - 1;
String key = decodeAttribute(undecodedChunk.toString(firstpos, equalpos - firstpos, charset),
charset);
currentAttribute = factory.createAttribute(request, key);
firstpos = currentpos;
} else if (read == '&') { // special empty FIELD
currentStatus = MultiPartStatus.DISPOSITION;
ampersandpos = currentpos - 1;
String key = decodeAttribute(
undecodedChunk.toString(firstpos, ampersandpos - firstpos, charset), charset);
currentAttribute = factory.createAttribute(request, key);
currentAttribute.setValue(""); // empty
addHttpData(currentAttribute);
currentAttribute = null;
firstpos = currentpos;
contRead = true;
}
break;
case FIELD:// search '&' or end of line
if (read == '&') {
currentStatus = MultiPartStatus.DISPOSITION;
ampersandpos = currentpos - 1;
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = true;
} else if (read == HttpConstants.CR) {
if (sao.pos < sao.limit) {
read = (char) (sao.bytes[sao.pos++] & 0xFF);
currentpos++;
if (read == HttpConstants.LF) {
currentStatus = MultiPartStatus.PREEPILOGUE;
ampersandpos = currentpos - 2;
sao.setReadPosition(0);
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = false;
break loop;
} else {
// Error
sao.setReadPosition(0);
throw new ErrorDataDecoderException("Bad end of line");
}
} else {
if (sao.limit > 0) {
currentpos--;
}
}
} else if (read == HttpConstants.LF) {
currentStatus = MultiPartStatus.PREEPILOGUE;
ampersandpos = currentpos - 1;
sao.setReadPosition(0);
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
firstpos = currentpos;
contRead = false;
break loop;
}
break;
default:
// just stop
sao.setReadPosition(0);
contRead = false;
break loop;
}
}
if (isLastChunk && currentAttribute != null) {
// special case
ampersandpos = currentpos;
if (ampersandpos > firstpos) {
setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos));
} else if (!currentAttribute.isCompleted()) {
setFinalBuffer(EMPTY_BUFFER);
}
firstpos = currentpos;
currentStatus = MultiPartStatus.EPILOGUE;
return;
}
if (contRead && currentAttribute != null) {
// reset index except if continuing in FIELD status
if (currentStatus == MultiPartStatus.FIELD) {
currentAttribute.addContent(undecodedChunk.copy(firstpos, currentpos - firstpos),
false);
firstpos = currentpos;
}
undecodedChunk.readerIndex(firstpos);
} else {
// end of line so keep index
}
} catch (ErrorDataDecoderException e) {
// error while decoding
undecodedChunk.readerIndex(firstpos);
throw e;
} catch (IOException e) {
// error while decoding
undecodedChunk.readerIndex(firstpos);
throw new ErrorDataDecoderException(e);
}
}
private void setFinalBuffer(ByteBuf buffer) throws ErrorDataDecoderException, IOException {
currentAttribute.addContent(buffer, true);
String value = decodeAttribute(currentAttribute.getByteBuf().toString(charset), charset);
currentAttribute.setValue(value);
addHttpData(currentAttribute);
currentAttribute = null;
}
/**
* Decode component
*
* @return the decoded component
*/
private static String decodeAttribute(String s, Charset charset)
throws ErrorDataDecoderException {
if (s == null) {
return "";
}
try {
return URLDecoder.decode(s, charset.name());
} catch (UnsupportedEncodingException e) {
throw new ErrorDataDecoderException(charset.toString(), e);
} catch (IllegalArgumentException e) {
throw new ErrorDataDecoderException("Bad string: '" + s + '\'', e);
}
}
/**
* Skip control characters
*
* @throws NotEnoughDataDecoderException
*/
void skipControlCharacters() throws NotEnoughDataDecoderException {
SeekAheadOptimize sao;
try {
sao = new SeekAheadOptimize(undecodedChunk);
} catch (SeekAheadNoBackArrayException e) {
try {
skipControlCharactersStandard();
} catch (IndexOutOfBoundsException e1) {
throw new NotEnoughDataDecoderException(e1);
}
return;
}
while (sao.pos < sao.limit) {
char c = (char) (sao.bytes[sao.pos++] & 0xFF);
if (!Character.isISOControl(c) && !Character.isWhitespace(c)) {
sao.setReadPosition(1);
return;
}
}
throw new NotEnoughDataDecoderException("Access out of bounds");
}
void skipControlCharactersStandard() {
for (;;) {
char c = (char) undecodedChunk.readUnsignedByte();
if (!Character.isISOControl(c) && !Character.isWhitespace(c)) {
undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
break;
}
}
}
/**
* Destroy the {@link HttpPostStandardRequestDecoder} and release all its resources. After this method
* has been called it is not possible to operate on it anymore.
*/
public void destroy() {
checkDestroyed();
cleanFiles();
destroyed = true;
if (undecodedChunk != null && undecodedChunk.refCnt() > 0) {
undecodedChunk.release();
undecodedChunk = null;
}
// release all data which was not yet pulled
for (int i = bodyListHttpDataRank; i < bodyListHttpData.size(); i++) {
bodyListHttpData.get(i).release();
}
}
/**
* Clean all HttpDatas (on Disk) for the current request.
*/
public void cleanFiles() {
checkDestroyed();
factory.cleanRequestHttpDatas(request);
}
/**
* Remove the given FileUpload from the list of FileUploads to clean
*/
public void removeHttpDataFromClean(InterfaceHttpData data) {
checkDestroyed();
factory.removeHttpDataFromClean(request, data);
}
}
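A short end-to-end sketch of this class on a url-encoded body (the request construction is assumed; since a FullHttpRequest is also an HttpContent, the constructor offers it immediately):

    static void demo() throws Exception {
        DefaultFullHttpRequest req = new DefaultFullHttpRequest(
                HttpVersion.HTTP_1_1, HttpMethod.POST, "/form",
                Unpooled.copiedBuffer("a=1&b=2", CharsetUtil.UTF_8));
        req.headers().set(HttpHeaders.Names.CONTENT_TYPE, "application/x-www-form-urlencoded");

        HttpPostStandardRequestDecoder decoder =
                new HttpPostStandardRequestDecoder(new DefaultHttpDataFactory(false), req);
        for (InterfaceHttpData data : decoder.getBodyHttpDatas()) {
            Attribute attr = (Attribute) data;
            System.out.println(attr.getName() + " = " + attr.getValue()); // a = 1, b = 2
        }
        decoder.destroy(); // mandatory: releases the undecoded buffer and remaining data
    }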


@@ -0,0 +1,140 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http.multipart;
import io.netty.handler.codec.http.HttpContent;
import java.util.List;
/**
* This decoder will decode the request body and can handle POST bodies (as well as PUT, PATCH or OPTIONS bodies).
*
* You <strong>MUST</strong> call {@link #destroy()} after completion to release all resources.
*
*/
public interface InterfaceHttpPostRequestDecoder {
/**
* True if this request is a Multipart request
*
* @return True if this request is a Multipart request
*/
boolean isMultipart();
/**
* Set the number of bytes after which read bytes in the buffer should be discarded.
* Setting this lower gives lower memory usage but with the overhead of more memory copies.
* Use {@code 0} to disable it.
*/
void setDiscardThreshold(int discardThreshold);
/**
* Return the threshold in bytes after which read data in the buffer should be discarded.
*/
int getDiscardThreshold();
/**
* This method returns a List of all HttpDatas from the body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return the list of HttpDatas from the body part for the POST method
* @throws HttpPostRequestDecoder.NotEnoughDataDecoderException
* Need more chunks
*/
List<InterfaceHttpData> getBodyHttpDatas() throws HttpPostRequestDecoder.NotEnoughDataDecoderException;
/**
* This method returns a List of all HttpDatas with the given name from the
* body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return All Body HttpDatas with the given name (ignore case)
* @throws HttpPostRequestDecoder.NotEnoughDataDecoderException
* need more chunks
*/
List<InterfaceHttpData> getBodyHttpDatas(String name) throws HttpPostRequestDecoder.NotEnoughDataDecoderException;
/**
* This method returns the first InterfaceHttpData with the given name from the
* body.<br>
*
* If chunked, all chunks must have been offered using the offer() method. If
* not, NotEnoughDataDecoderException will be raised.
*
* @return The first Body InterfaceHttpData with the given name (ignore
* case)
* @throws HttpPostRequestDecoder.NotEnoughDataDecoderException
* need more chunks
*/
InterfaceHttpData getBodyHttpData(String name) throws HttpPostRequestDecoder.NotEnoughDataDecoderException;
/**
* Initializes the internals from a new chunk
*
* @param content
* the new received chunk
* @throws HttpPostRequestDecoder.ErrorDataDecoderException
* if there is a problem with the charset decoding or other
* errors
*/
InterfaceHttpPostRequestDecoder offer(HttpContent content)
throws HttpPostRequestDecoder.ErrorDataDecoderException;
/**
* True if at the current status, there is an available decoded
* InterfaceHttpData from the body.
*
* This method works for chunked and non-chunked requests.
*
* @return True if at the current status, there is a decoded InterfaceHttpData
* @throws HttpPostRequestDecoder.EndOfDataDecoderException
* No more data will be available
*/
boolean hasNext() throws HttpPostRequestDecoder.EndOfDataDecoderException;
/**
* Returns the next available InterfaceHttpData or null if, at the time it
* is called, there is no more available InterfaceHttpData. A subsequent
* call to offer(httpChunk) could enable more data.
*
* Be sure to call {@link InterfaceHttpData#release()} after you are done
* with processing to make sure to not leak any resources
*
* @return the next available InterfaceHttpData or null if none
* @throws HttpPostRequestDecoder.EndOfDataDecoderException
* No more data will be available
*/
InterfaceHttpData next() throws HttpPostRequestDecoder.EndOfDataDecoderException;
/**
* Destroy the {@link InterfaceHttpPostRequestDecoder} and release all its resources. After this method
* has been called it is not possible to operate on it anymore.
*/
void destroy();
/**
* Clean all HttpDatas (on Disk) for the current request.
*/
void cleanFiles();
/**
* Remove the given FileUpload from the list of FileUploads to clean
*/
void removeHttpDataFromClean(InterfaceHttpData data);
}
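Handler code can now be written against this interface regardless of which concrete decoder the facade picked. A hedged sketch of the chunk-by-chunk pattern (the surrounding SimpleChannelInboundHandler&lt;HttpObject&gt; and its factory/decoder fields are assumed):

    void handle(HttpObject msg) throws Exception {
        if (msg instanceof HttpRequest) {
            decoder = new HttpPostRequestDecoder(factory, (HttpRequest) msg);
        } else if (msg instanceof HttpContent) {
            decoder.offer((HttpContent) msg);  // feed each chunk as it arrives
            try {
                while (decoder.hasNext()) {    // drain whatever is fully decoded so far
                    InterfaceHttpData data = decoder.next();
                    // ... process data ...
                    data.release();            // per the next() contract above
                }
            } catch (HttpPostRequestDecoder.EndOfDataDecoderException e) {
                // no more data for this request
            }
            if (msg instanceof LastHttpContent) {
                decoder.destroy();             // MUST be called to release all resources
            }
        }
    }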


@@ -53,6 +53,7 @@ public class MemoryAttribute extends AbstractMemoryHttpData implements Attribute
throw new NullPointerException("value");
}
byte [] bytes = value.getBytes(charset.name());
checkSize(bytes.length);
ByteBuf buffer = wrappedBuffer(bytes);
if (definedSize > 0) {
definedSize = buffer.readableBytes();
@@ -63,6 +64,7 @@ public class MemoryAttribute extends AbstractMemoryHttpData implements Attribute
@Override
public void addContent(ByteBuf buffer, boolean last) throws IOException {
int localsize = buffer.readableBytes();
checkSize(size + localsize);
if (definedSize > 0 && definedSize < size + localsize) {
definedSize = size + localsize;
}


@@ -29,6 +29,7 @@ public class MixedAttribute implements Attribute {
private Attribute attribute;
private final long limitSize;
protected long maxSize = DefaultHttpDataFactory.MAXSIZE;
public MixedAttribute(String name, long limitSize) {
this.limitSize = limitSize;
@@ -57,12 +58,25 @@ public class MixedAttribute implements Attribute {
}
}
public void setMaxSize(long maxSize) {
this.maxSize = maxSize;
attribute.setMaxSize(maxSize);
}
public void checkSize(long newSize) throws IOException {
if (maxSize >= 0 && newSize > maxSize) {
throw new IOException("Size exceed allowed maximum capacity");
}
}
@Override
public void addContent(ByteBuf buffer, boolean last) throws IOException {
if (attribute instanceof MemoryAttribute) {
checkSize(attribute.length() + buffer.readableBytes());
if (attribute.length() + buffer.readableBytes() > limitSize) {
DiskAttribute diskAttribute = new DiskAttribute(attribute
.getName());
diskAttribute.setMaxSize(maxSize);
if (((MemoryAttribute) attribute).getByteBuf() != null) {
diskAttribute.addContent(((MemoryAttribute) attribute)
.getByteBuf(), false);
@@ -130,10 +144,12 @@ public class MixedAttribute implements Attribute {
@Override
public void setContent(ByteBuf buffer) throws IOException {
checkSize(buffer.readableBytes());
if (buffer.readableBytes() > limitSize) {
if (attribute instanceof MemoryAttribute) {
// change to Disk
attribute = new DiskAttribute(attribute.getName());
attribute.setMaxSize(maxSize);
}
}
attribute.setContent(buffer);
@@ -141,10 +157,12 @@ public class MixedAttribute implements Attribute {
@Override
public void setContent(File file) throws IOException {
checkSize(file.length());
if (file.length() > limitSize) {
if (attribute instanceof MemoryAttribute) {
// change to Disk
attribute = new DiskAttribute(attribute.getName());
attribute.setMaxSize(maxSize);
}
}
attribute.setContent(file);
@@ -155,6 +173,7 @@ public class MixedAttribute implements Attribute {
if (attribute instanceof MemoryAttribute) {
// change to Disk even if we don't know the size
attribute = new DiskAttribute(attribute.getName());
attribute.setMaxSize(maxSize);
}
attribute.setContent(inputStream);
}
@@ -186,6 +205,9 @@ public class MixedAttribute implements Attribute {
@Override
public void setValue(String value) throws IOException {
if (value != null) {
checkSize(value.getBytes().length);
}
attribute.setValue(value);
}
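Note the two thresholds now at play: limitSize (pre-existing) only decides when the backing store switches from memory to disk, while maxSize (new here) is a hard cap propagated to whichever attribute is current. A small sketch of the combined behaviour, under those assumptions:

    static void demo() throws IOException {
        MixedAttribute attr = new MixedAttribute("payload", 16); // limitSize: disk above 16 bytes
        attr.setMaxSize(64);                                     // maxSize: hard failure above 64 bytes

        attr.addContent(Unpooled.wrappedBuffer(new byte[32]), false); // 32 > 16: now disk-backed
        try {
            attr.addContent(Unpooled.wrappedBuffer(new byte[64]), true); // total 96 > 64
        } catch (IOException e) {
            // thrown by the DiskAttribute's checkSize, which inherited maxSize above
        }
    }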


@@ -32,6 +32,7 @@ public class MixedFileUpload implements FileUpload {
private final long limitSize;
private final long definedSize;
protected long maxSize = DefaultHttpDataFactory.MAXSIZE;
public MixedFileUpload(String name, String filename, String contentType,
String contentTransferEncoding, Charset charset, long size,
@@ -47,17 +48,29 @@ public class MixedFileUpload implements FileUpload {
definedSize = size;
}
public void setMaxSize(long maxSize) {
this.maxSize = maxSize;
fileUpload.setMaxSize(maxSize);
}
public void checkSize(long newSize) throws IOException {
if (maxSize >= 0 && newSize > maxSize) {
throw new IOException("Size exceed allowed maximum capacity");
}
}
@Override
public void addContent(ByteBuf buffer, boolean last)
throws IOException {
if (fileUpload instanceof MemoryFileUpload) {
checkSize(fileUpload.length() + buffer.readableBytes());
if (fileUpload.length() + buffer.readableBytes() > limitSize) {
DiskFileUpload diskFileUpload = new DiskFileUpload(fileUpload
.getName(), fileUpload.getFilename(), fileUpload
.getContentType(), fileUpload
.getContentTransferEncoding(), fileUpload.getCharset(),
definedSize);
diskFileUpload.setMaxSize(maxSize);
ByteBuf data = fileUpload.getByteBuf();
if (data != null && data.isReadable()) {
diskFileUpload.addContent(data.retain(), false);
@@ -143,6 +156,7 @@ public class MixedFileUpload implements FileUpload {
@Override
public void setContent(ByteBuf buffer) throws IOException {
checkSize(buffer.readableBytes());
if (buffer.readableBytes() > limitSize) {
if (fileUpload instanceof MemoryFileUpload) {
FileUpload memoryUpload = fileUpload;
@@ -152,6 +166,7 @@ public class MixedFileUpload implements FileUpload {
.getContentType(), memoryUpload
.getContentTransferEncoding(), memoryUpload.getCharset(),
definedSize);
fileUpload.setMaxSize(maxSize);
// release old upload
memoryUpload.release();
@@ -162,6 +177,7 @@ public class MixedFileUpload implements FileUpload {
@Override
public void setContent(File file) throws IOException {
checkSize(file.length());
if (file.length() > limitSize) {
if (fileUpload instanceof MemoryFileUpload) {
FileUpload memoryUpload = fileUpload;
@@ -172,6 +188,7 @@ public class MixedFileUpload implements FileUpload {
.getContentType(), memoryUpload
.getContentTransferEncoding(), memoryUpload.getCharset(),
definedSize);
fileUpload.setMaxSize(maxSize);
// release old upload
memoryUpload.release();
@@ -191,6 +208,7 @@ public class MixedFileUpload implements FileUpload {
.getContentType(), fileUpload
.getContentTransferEncoding(), fileUpload.getCharset(),
definedSize);
fileUpload.setMaxSize(maxSize);
// release old upload
memoryUpload.release();


@@ -27,6 +27,7 @@ import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponseStatus;
@@ -43,7 +44,6 @@ import io.netty.handler.codec.http.multipart.HttpDataFactory;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.EndOfDataDecoderException;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.ErrorDataDecoderException;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.IncompatibleDataDecoderException;
import io.netty.handler.codec.http.multipart.InterfaceHttpData;
import io.netty.handler.codec.http.multipart.InterfaceHttpData.HttpDataType;
import io.netty.util.CharsetUtil;
@@ -142,6 +142,13 @@ public class HttpUploadServerHandler extends SimpleChannelInboundHandler<HttpObj
responseContent.append("\r\n\r\n");
if (request.getMethod().equals(HttpMethod.GET)) {
// GET Method: should not try to create a HttpPostRequestDecoder
// So stop here
responseContent.append("\r\n\r\nEND OF GET CONTENT\r\n");
writeResponse(ctx.channel());
return;
}
try {
decoder = new HttpPostRequestDecoder(factory, request);
} catch (ErrorDataDecoderException e1) {
@@ -150,13 +157,6 @@ public class HttpUploadServerHandler extends SimpleChannelInboundHandler<HttpObj
writeResponse(ctx.channel());
ctx.channel().close();
return;
} catch (IncompatibleDataDecoderException e1) {
// GET Method: should not try to create a HttpPostRequestDecoder
// So OK but stop here
responseContent.append(e1.getMessage());
responseContent.append("\r\n\r\nEND OF GET CONTENT\r\n");
writeResponse(ctx.channel());
return;
}
readingChunks = HttpHeaders.isTransferEncodingChunked(request);