netty5/codec-http/src/main/java/io/netty/handler/codec/http/HttpChunkAggregator.java

/*
 * Copyright 2011 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.handler.codec.http;

import static io.netty.channel.Channels.*;
import static io.netty.handler.codec.http.HttpHeaders.*;

import java.util.List;
import java.util.Map.Entry;

import io.netty.buffer.ChannelBuffer;
import io.netty.buffer.ChannelBuffers;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.Channels;
import io.netty.channel.MessageEvent;
import io.netty.channel.SimpleChannelUpstreamHandler;
import io.netty.handler.codec.frame.TooLongFrameException;
import io.netty.util.CharsetUtil;

/**
 * A {@link ChannelHandler} that aggregates an {@link HttpMessage}
 * and its following {@link HttpChunk}s into a single {@link HttpMessage} with
 * no following {@link HttpChunk}s. It is useful when you don't want to take
 * care of HTTP messages whose transfer encoding is 'chunked'. Insert this
 * handler after {@link HttpMessageDecoder} in the {@link ChannelPipeline}:
 * <pre>
 * {@link ChannelPipeline} p = ...;
 * ...
 * p.addLast("decoder", new {@link HttpRequestDecoder}());
 * p.addLast("aggregator", <b>new {@link HttpChunkAggregator}(1048576)</b>);
 * ...
 * p.addLast("encoder", new {@link HttpResponseEncoder}());
 * p.addLast("handler", new HttpRequestHandler());
 * </pre>
 *
 * @apiviz.landmark
 * @apiviz.has io.netty.handler.codec.http.HttpChunk oneway - - filters out
 */
public class HttpChunkAggregator extends SimpleChannelUpstreamHandler {

    private static final ChannelBuffer CONTINUE = ChannelBuffers.copiedBuffer(
            "HTTP/1.1 100 Continue\r\n\r\n", CharsetUtil.US_ASCII);

    private final int maxContentLength;
    private HttpMessage currentMessage;

    /**
     * Creates a new instance.
     *
     * @param maxContentLength
     *        the maximum length of the aggregated content.
     *        If the length of the aggregated content exceeds this value,
     *        a {@link TooLongFrameException} will be raised.
     */
    public HttpChunkAggregator(int maxContentLength) {
        if (maxContentLength <= 0) {
            throw new IllegalArgumentException(
                    "maxContentLength must be a positive integer: " +
                    maxContentLength);
        }
        this.maxContentLength = maxContentLength;
    }

    @Override
    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
            throws Exception {
        Object msg = e.getMessage();
        HttpMessage currentMessage = this.currentMessage;

        if (msg instanceof HttpMessage) {
            HttpMessage m = (HttpMessage) msg;

            // Handle the 'Expect: 100-continue' header if necessary.
            // TODO: Respond with 413 Request Entity Too Large
            //       and discard the traffic or close the connection.
            //       No need to notify the upstream handlers - just log.
            //       If decoding a response, just throw an exception.
            if (is100ContinueExpected(m)) {
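                // Note: CONTINUE is a single buffer shared by every channel using this
                // handler; duplicate() gives the write its own reader/writer indices so
                // the shared buffer's indices are never consumed.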
                write(ctx, succeededFuture(ctx.getChannel()), CONTINUE.duplicate());
            }

            if (m.isChunked()) {
                // A chunked message - remove 'Transfer-Encoding' header,
                // initialize the cumulative buffer, and wait for incoming chunks.
                List<String> encodings = m.getHeaders(HttpHeaders.Names.TRANSFER_ENCODING);
                encodings.remove(HttpHeaders.Values.CHUNKED);
                if (encodings.isEmpty()) {
                    m.removeHeader(HttpHeaders.Names.TRANSFER_ENCODING);
                }
                m.setChunked(false);
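                // The cumulative buffer grows on demand as chunks arrive; using the
                // channel's ChannelBufferFactory keeps the aggregated content in the
                // channel's preferred buffer type (heap or direct).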
                m.setContent(ChannelBuffers.dynamicBuffer(e.getChannel().getConfig().getBufferFactory()));
                this.currentMessage = m;
            } else {
                // Not a chunked message - pass through.
                this.currentMessage = null;
                ctx.sendUpstream(e);
            }
        } else if (msg instanceof HttpChunk) {
            // Sanity check
            if (currentMessage == null) {
                throw new IllegalStateException(
                        "received " + HttpChunk.class.getSimpleName() +
                        " without " + HttpMessage.class.getSimpleName());
            }

            // Merge the received chunk into the content of the current message.
            HttpChunk chunk = (HttpChunk) msg;
            ChannelBuffer content = currentMessage.getContent();
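
            // The limit check below keeps the chunk size on the right-hand side as a
            // subtraction so the comparison cannot overflow int arithmetic, which a
            // naive 'content + chunk > maxContentLength' sum could.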
            if (content.readableBytes() > maxContentLength - chunk.getContent().readableBytes()) {
                // TODO: Respond with 413 Request Entity Too Large
                //       and discard the traffic or close the connection.
                //       No need to notify the upstream handlers - just log.
                //       If decoding a response, just throw an exception.
                throw new TooLongFrameException(
                        "HTTP content length exceeded " + maxContentLength +
                        " bytes.");
            }

            content.writeBytes(chunk.getContent());
            if (chunk.isLast()) {
                this.currentMessage = null;

                // Merge trailing headers into the message.
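                // Note: setHeader() replaces any header of the same name already present
                // on the message, so a trailing header wins over the initial one.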
                if (chunk instanceof HttpChunkTrailer) {
                    HttpChunkTrailer trailer = (HttpChunkTrailer) chunk;
                    for (Entry<String, String> header: trailer.getHeaders()) {
                        currentMessage.setHeader(header.getKey(), header.getValue());
                    }
                }

                // Set the 'Content-Length' header.
                currentMessage.setHeader(
                        HttpHeaders.Names.CONTENT_LENGTH,
                        String.valueOf(content.readableBytes()));

                // All done - generate the event.
                Channels.fireMessageReceived(ctx, currentMessage, e.getRemoteAddress());
            }
        } else {
            // Neither HttpMessage nor HttpChunk - pass any other message type through.
            ctx.sendUpstream(e);
        }
    }
}
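
/*
 * Illustrative sketch (not part of the original class): a minimal downstream handler
 * that assumes it is added to the pipeline after HttpChunkAggregator, e.g.
 * p.addLast("handler", new ExampleAggregatedHandler()). Because the aggregator has
 * already merged all chunks, every HttpMessage seen here is complete, is not chunked,
 * and carries its full content buffer. The class name and the logging are hypothetical.
 */
class ExampleAggregatedHandler extends SimpleChannelUpstreamHandler {
    @Override
    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
        Object msg = e.getMessage();
        if (msg instanceof HttpMessage) {
            HttpMessage message = (HttpMessage) msg;
            // The aggregated message carries the whole body, so it can be read in one go.
            ChannelBuffer body = message.getContent();
            System.out.println("Aggregated " + body.readableBytes() + " content bytes");
        }
        ctx.sendUpstream(e);
    }
}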