* all limit parameters are now mandatory when creating HTTP decoders, for better security

* chunks are no longer merged by the decoder, for better security
* modified HTTP client example to handle HTTP chunks
This commit is contained in:
Trustin Lee 2009-03-10 07:53:15 +00:00
parent 4a72aafd56
commit a5ebbfb111
6 changed files with 59 additions and 56 deletions

View File

@ -43,7 +43,7 @@ public class HttpClientPipelineFactory implements ChannelPipelineFactory {
public ChannelPipeline getPipeline() throws Exception {
// Create a default pipeline implementation.
ChannelPipeline pipeline = pipeline();
pipeline.addLast("decoder", new HttpResponseDecoder());
pipeline.addLast("decoder", new HttpResponseDecoder(8192, 8192, 8192));
pipeline.addLast("encoder", new HttpRequestEncoder());
pipeline.addLast("handler", handler);
return pipeline;

View File

@ -93,7 +93,6 @@ public class HttpRequestHandler extends SimpleChannelHandler {
if (request.isChunked()) {
readingChunks = true;
return;
} else {
ChannelBuffer content = request.getContent();
if (content.readable()) {
@ -107,7 +106,6 @@ public class HttpRequestHandler extends SimpleChannelHandler {
readingChunks = false;
responseContent.append("END OF CONTENT\r\n");
writeResponse(e);
return;
} else {
responseContent.append("CHUNK: " + chunk.getContent().toString("UTF-8") + "\r\n");
}

View File

@ -21,23 +21,59 @@
*/
package org.jboss.netty.example.http;
import java.nio.charset.Charset;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelPipelineCoverage;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
import org.jboss.netty.handler.codec.http.HttpChunk;
import org.jboss.netty.handler.codec.http.HttpResponse;
/**
 * Prints an HTTP response received from the server to stdout: the status
 * line, protocol version and headers first, then either the full content
 * (non-chunked responses) or each chunk as it arrives (chunked responses).
 *
 * NOTE(review): this span is a diff rendering whose +/- markers were
 * stripped; the stale deleted lines (old {@code @ChannelPipelineCoverage("all")}
 * annotation and the old two-line message body) have been removed here to
 * restore the coherent post-commit version of the class.
 *
 * @author The Netty Project (netty-dev@lists.jboss.org)
 * @author Andy Taylor (andy.taylor@jboss.org)
 * @author Trustin Lee (tlee@redhat.com)
 */
@ChannelPipelineCoverage("one")
public class HttpResponseHandler extends SimpleChannelHandler {

    // True between receiving a chunked response header and its last chunk.
    // volatile: successive events may be delivered from different I/O threads
    // — TODO confirm against the Netty 3 threading model.
    private volatile boolean readingChunks;

    @Override
    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
        if (!readingChunks) {
            HttpResponse response = (HttpResponse) e.getMessage();

            System.out.println("STATUS: " + response.getStatus());
            System.out.println("VERSION: " + response.getProtocolVersion());
            System.out.println();

            if (!response.getHeaderNames().isEmpty()) {
                for (String name: response.getHeaderNames()) {
                    for (String value: response.getHeaders(name)) {
                        System.out.println("HEADER: " + name + " = " + value);
                    }
                }
                System.out.println();
            }

            if (response.getStatus().getCode() == 200 && response.isChunked()) {
                // Content will follow as a series of HttpChunk messages.
                readingChunks = true;
                System.out.println("CHUNKED CONTENT:");
            } else {
                ChannelBuffer content = response.getContent();
                if (content.readable()) {
                    System.out.println("CONTENT:");
                    System.out.println(content.toString("UTF-8"));
                }
            }
        } else {
            HttpChunk chunk = (HttpChunk) e.getMessage();
            if (chunk.isLast()) {
                // Trailing (empty) chunk marks the end of the chunked body.
                readingChunks = false;
            } else {
                System.out.println(chunk.getContent().toString("UTF-8"));
            }
        }
    }
}

View File

@ -77,10 +77,6 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<HttpMessageDec
READ_CHUNK_FOOTER;
}
protected HttpMessageDecoder() {
this(8192, 8192, 0);
}
protected HttpMessageDecoder(int maxInitialLineLength, int maxHeaderSize, int maxChunkSize) {
super(State.SKIP_CONTROL_CHARS, true);
if (maxInitialLineLength <= 0) {
@ -95,7 +91,7 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<HttpMessageDec
}
if (maxChunkSize < 0) {
throw new IllegalArgumentException(
"maxChunkSize must not be a negative integer: " +
"maxChunkSize must be a positive integer: " +
maxChunkSize);
}
this.maxInitialLineLength = maxInitialLineLength;
@ -103,10 +99,6 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<HttpMessageDec
this.maxChunkSize = maxChunkSize;
}
private boolean canGenerateChunks() {
return maxChunkSize > 0;
}
@Override
protected Object decode(ChannelHandlerContext ctx, Channel channel, ChannelBuffer buffer, State state) throws Exception {
switch (state) {
@ -135,12 +127,8 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<HttpMessageDec
checkpoint(nextState);
if (nextState == State.READ_CHUNK_SIZE) {
// Chunked encoding
if (canGenerateChunks()) {
// Generate HttpMessage first. HttpChunks will follow.
return message;
} else {
// Merge all chunks.
}
// Generate HttpMessage first. HttpChunks will follow.
return message;
} else {
int contentLength = message.getContentLength(-1);
if (contentLength == 0 || contentLength == -1 && isDecodingRequest()) {
@ -148,10 +136,9 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<HttpMessageDec
return reset();
}
if (canGenerateChunks()) {
// Emulate chunked encoding if the content is too large or
// the content length is indefinite.
if (contentLength > maxChunkSize && nextState == State.READ_FIXED_LENGTH_CONTENT) {
switch (nextState) {
case READ_FIXED_LENGTH_CONTENT:
if (contentLength > maxChunkSize) {
// Generate HttpMessage first. HttpChunks will follow.
checkpoint(State.READ_FIXED_LENGTH_CONTENT_AS_CHUNKS);
message.addHeader(HttpHeaders.Names.TRANSFER_ENCODING, HttpHeaders.Values.CHUNKED);
@ -159,12 +146,16 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<HttpMessageDec
// state reads data chunk by chunk.
chunkSize = message.getContentLength(-1);
return message;
} else if (nextState == State.READ_VARIABLE_LENGTH_CONTENT) {
}
break;
case READ_VARIABLE_LENGTH_CONTENT:
if (buffer.readableBytes() > maxChunkSize) {
// Generate HttpMessage first. HttpChunks will follow.
checkpoint(State.READ_VARIABLE_LENGTH_CONTENT_AS_CHUNKS);
message.addHeader(HttpHeaders.Names.TRANSFER_ENCODING, HttpHeaders.Values.CHUNKED);
return message;
}
break;
}
}
// We return null here, this forces decode to be called again where we will decode the content
@ -231,31 +222,17 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<HttpMessageDec
if (chunkSize == 0) {
checkpoint(State.READ_CHUNK_FOOTER);
return null;
} else if (canGenerateChunks()) {
if (chunkSize <= maxChunkSize) {
checkpoint(State.READ_CHUNKED_CONTENT);
} else {
// A chunk is too large. Split them into multiple chunks again.
checkpoint(State.READ_CHUNKED_CONTENT_AS_CHUNKS);
}
} else if (chunkSize > maxChunkSize) {
// A chunk is too large. Split them into multiple chunks again.
checkpoint(State.READ_CHUNKED_CONTENT_AS_CHUNKS);
} else {
checkpoint(State.READ_CHUNKED_CONTENT);
}
}
case READ_CHUNKED_CONTENT: {
if (canGenerateChunks()) {
HttpChunk chunk = new DefaultHttpChunk(buffer.readBytes(chunkSize));
checkpoint(State.READ_CHUNK_DELIMITER);
return chunk;
} else {
if (content == null) {
content = ChannelBuffers.dynamicBuffer(
chunkSize, channel.getConfig().getBufferFactory());
}
content.writeBytes(buffer, chunkSize);
checkpoint(State.READ_CHUNK_DELIMITER);
return null;
}
HttpChunk chunk = new DefaultHttpChunk(buffer.readBytes(chunkSize));
checkpoint(State.READ_CHUNK_DELIMITER);
return chunk;
}
case READ_CHUNKED_CONTENT_AS_CHUNKS: {
int chunkSize = this.chunkSize;

View File

@ -32,10 +32,6 @@ package org.jboss.netty.handler.codec.http;
*/
public class HttpRequestDecoder extends HttpMessageDecoder {
public HttpRequestDecoder() {
super();
}
public HttpRequestDecoder(
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize) {
super(maxInitialLineLength, maxHeaderSize, maxChunkSize);

View File

@ -32,10 +32,6 @@ package org.jboss.netty.handler.codec.http;
*/
public class HttpResponseDecoder extends HttpMessageDecoder {
public HttpResponseDecoder() {
super();
}
public HttpResponseDecoder(
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize) {
super(maxInitialLineLength, maxHeaderSize, maxChunkSize);