2008-11-19 08:22:15 +01:00
|
|
|
/*
|
2012-06-04 22:31:44 +02:00
|
|
|
* Copyright 2012 The Netty Project
|
2009-06-19 19:48:17 +02:00
|
|
|
*
|
2011-12-09 06:18:34 +01:00
|
|
|
* The Netty Project licenses this file to you under the Apache License,
|
|
|
|
* version 2.0 (the "License"); you may not use this file except in compliance
|
|
|
|
* with the License. You may obtain a copy of the License at:
|
2008-11-19 08:22:15 +01:00
|
|
|
*
|
2012-06-04 22:31:44 +02:00
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
2008-11-19 08:22:15 +01:00
|
|
|
*
|
2009-08-28 09:15:49 +02:00
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
2011-12-09 06:18:34 +01:00
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
2009-08-28 09:15:49 +02:00
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
2008-11-19 08:22:15 +01:00
|
|
|
*/
|
2011-12-09 04:38:59 +01:00
|
|
|
package io.netty.handler.codec.http;
|
2008-11-19 08:22:15 +01:00
|
|
|
|
2012-06-10 04:08:43 +02:00
|
|
|
import io.netty.buffer.ByteBuf;
|
2012-06-11 10:02:00 +02:00
|
|
|
import io.netty.buffer.Unpooled;
|
2012-06-07 07:52:33 +02:00
|
|
|
import io.netty.channel.ChannelHandlerContext;
|
2011-12-09 04:38:59 +01:00
|
|
|
import io.netty.channel.ChannelPipeline;
|
2012-09-28 08:19:08 +02:00
|
|
|
import io.netty.handler.codec.DecoderResult;
|
2012-05-17 05:37:37 +02:00
|
|
|
import io.netty.handler.codec.ReplayingDecoder;
|
2012-05-16 16:02:06 +02:00
|
|
|
import io.netty.handler.codec.TooLongFrameException;
|
2008-11-19 08:22:15 +01:00
|
|
|
|
2012-05-20 07:19:11 +02:00
|
|
|
import java.util.List;
|
|
|
|
|
2008-11-19 08:22:15 +01:00
|
|
|
/**
|
2013-01-16 05:22:50 +01:00
|
|
|
* Decodes {@link ByteBuf}s into {@link HttpMessage}s and
|
2013-01-14 16:52:30 +01:00
|
|
|
* {@link HttpContent}s.
|
2009-06-19 17:35:19 +02:00
|
|
|
*
|
|
|
|
* <h3>Parameters that prevents excessive memory consumption</h3>
|
2009-06-19 18:45:30 +02:00
|
|
|
* <table border="1">
|
2009-06-19 17:35:19 +02:00
|
|
|
* <tr>
|
|
|
|
* <th>Name</th><th>Meaning</th>
|
|
|
|
* </tr>
|
|
|
|
* <tr>
|
|
|
|
* <td>{@code maxInitialLineLength}</td>
|
|
|
|
* <td>The maximum length of the initial line
|
|
|
|
* (e.g. {@code "GET / HTTP/1.0"} or {@code "HTTP/1.0 200 OK"})
|
|
|
|
* If the length of the initial line exceeds this value, a
|
|
|
|
* {@link TooLongFrameException} will be raised.</td>
|
|
|
|
* </tr>
|
|
|
|
* <tr>
|
|
|
|
* <td>{@code maxHeaderSize}</td>
|
|
|
|
* <td>The maximum length of all headers. If the sum of the length of each
|
|
|
|
* header exceeds this value, a {@link TooLongFrameException} will be raised.</td>
|
|
|
|
* </tr>
|
|
|
|
* <tr>
|
|
|
|
* <td>{@code maxChunkSize}</td>
|
|
|
|
* <td>The maximum length of the content or each chunk. If the content length
|
2009-11-17 07:02:42 +01:00
|
|
|
* (or the length of each chunk) exceeds this value, the content or chunk
|
2013-01-14 16:52:30 +01:00
|
|
|
* will be split into multiple {@link HttpContent}s whose length is
|
2009-11-17 07:02:42 +01:00
|
|
|
* {@code maxChunkSize} at maximum.</td>
|
2009-06-19 17:35:19 +02:00
|
|
|
* </tr>
|
|
|
|
* </table>
|
|
|
|
*
|
2009-11-17 07:02:42 +01:00
|
|
|
* <h3>Chunked Content</h3>
|
|
|
|
*
|
|
|
|
* If the content of an HTTP message is greater than {@code maxChunkSize} or
|
|
|
|
* the transfer encoding of the HTTP message is 'chunked', this decoder
|
2013-01-16 05:22:50 +01:00
|
|
|
* generates one {@link HttpMessage} instance and its following
|
2013-01-14 16:52:30 +01:00
|
|
|
* {@link HttpContent}s per single HTTP message to avoid excessive memory
|
2009-11-17 07:02:42 +01:00
|
|
|
* consumption. For example, the following HTTP message:
|
|
|
|
* <pre>
|
|
|
|
* GET / HTTP/1.1
|
|
|
|
* Transfer-Encoding: chunked
|
|
|
|
*
|
|
|
|
* 1a
|
|
|
|
* abcdefghijklmnopqrstuvwxyz
|
|
|
|
* 10
|
|
|
|
* 1234567890abcdef
|
|
|
|
* 0
|
2009-11-17 08:19:28 +01:00
|
|
|
* Content-MD5: ...
|
|
|
|
* <i>[blank line]</i>
|
2010-01-26 10:43:20 +01:00
|
|
|
* </pre>
|
2013-01-14 16:52:30 +01:00
|
|
|
* triggers {@link HttpRequestDecoder} to generate 3 objects:
|
2009-11-17 07:02:42 +01:00
|
|
|
* <ol>
|
2013-01-16 05:22:50 +01:00
|
|
|
* <li>An {@link HttpRequest},</li>
|
2013-01-14 16:52:30 +01:00
|
|
|
* <li>The first {@link HttpContent} whose content is {@code 'abcdefghijklmnopqrstuvwxyz'},</li>
|
|
|
|
* <li>The second {@link LastHttpContent} whose content is {@code '1234567890abcdef'}, which marks
|
|
|
|
* the end of the content.</li>
|
2009-11-17 07:02:42 +01:00
|
|
|
* </ol>
|
|
|
|
*
|
2013-01-14 16:52:30 +01:00
|
|
|
* If you prefer not to handle {@link HttpContent}s by yourself for your
|
|
|
|
* convenience, insert {@link HttpObjectAggregator} after this decoder in the
|
2009-11-17 07:02:42 +01:00
|
|
|
* {@link ChannelPipeline}. However, please note that your server might not
|
|
|
|
* be as memory efficient as without the aggregator.
|
|
|
|
*
|
2009-06-19 17:35:19 +02:00
|
|
|
* <h3>Extensibility</h3>
|
|
|
|
*
|
|
|
|
* Please note that this decoder is designed to be extended to implement
|
|
|
|
* a protocol derived from HTTP, such as
|
|
|
|
* <a href="http://en.wikipedia.org/wiki/Real_Time_Streaming_Protocol">RTSP</a> and
|
|
|
|
* <a href="http://en.wikipedia.org/wiki/Internet_Content_Adaptation_Protocol">ICAP</a>.
|
|
|
|
* To implement the decoder of such a derived protocol, extend this class and
|
|
|
|
* implement all abstract methods properly.
|
2008-11-19 08:22:15 +01:00
|
|
|
*/
|
2013-01-14 16:52:30 +01:00
|
|
|
public abstract class HttpObjectDecoder extends ReplayingDecoder<HttpObjectDecoder.State> {
|
2008-11-19 08:22:15 +01:00
|
|
|
|
2009-03-10 08:20:27 +01:00
|
|
|
private final int maxInitialLineLength;
|
|
|
|
private final int maxHeaderSize;
|
2009-03-09 09:50:24 +01:00
|
|
|
private final int maxChunkSize;
|
2013-01-20 10:40:54 +01:00
|
|
|
private final boolean chunkedSupported;
|
2012-06-10 04:08:43 +02:00
|
|
|
private ByteBuf content;
|
2013-01-16 05:22:50 +01:00
|
|
|
private HttpMessage message;
|
2010-02-19 10:00:00 +01:00
|
|
|
private long chunkSize;
|
2009-06-15 10:45:39 +02:00
|
|
|
private int headerSize;
|
2012-06-05 18:56:01 +02:00
|
|
|
private int contentRead;
|
2009-02-12 07:09:29 +01:00
|
|
|
|
2008-12-03 10:00:29 +01:00
|
|
|
/**
|
2013-01-14 16:52:30 +01:00
|
|
|
* The internal state of {@link HttpObjectDecoder}.
|
2009-06-19 17:35:19 +02:00
|
|
|
* <em>Internal use only</em>.
|
2008-12-03 10:00:29 +01:00
|
|
|
*/
|
2013-02-14 21:09:16 +01:00
|
|
|
    /**
     * The internal state of {@link HttpObjectDecoder}.
     * <em>Internal use only</em>.
     */
    enum State {
        // Skipping leading control/whitespace characters before the initial line.
        SKIP_CONTROL_CHARS,
        // Reading the initial line (request line or status line).
        READ_INITIAL,
        // Reading the header block.
        READ_HEADER,
        // Body delimited by connection close, small enough to emit in one pass.
        READ_VARIABLE_LENGTH_CONTENT,
        // Close-delimited body emitted as multiple chunks (exceeds maxChunkSize).
        READ_VARIABLE_LENGTH_CONTENT_AS_CHUNKS,
        // Body with a known Content-Length, read in one pass.
        READ_FIXED_LENGTH_CONTENT,
        // Content-Length body emitted as multiple chunks (exceeds maxChunkSize).
        READ_FIXED_LENGTH_CONTENT_AS_CHUNKS,
        // Reading the size line of a 'chunked' transfer-encoding chunk.
        READ_CHUNK_SIZE,
        // Reading one chunk's data in a single pass.
        READ_CHUNKED_CONTENT,
        // Reading an oversized chunk as several smaller pieces.
        READ_CHUNKED_CONTENT_AS_CHUNKS,
        // Consuming the CRLF (or bare LF) that follows a chunk's data.
        READ_CHUNK_DELIMITER,
        // Reading the trailing headers after the terminating zero-length chunk.
        READ_CHUNK_FOOTER,
        // Terminal state: everything further is discarded until disconnect.
        BAD_MESSAGE
    }
|
|
|
|
|
2009-06-19 17:35:19 +02:00
|
|
|
    /**
     * Creates a new instance with the default
     * {@code maxInitialLineLength (4096)}, {@code maxHeaderSize (8192)}, and
     * {@code maxChunkSize (8192)}, with chunked transfer-encoding supported.
     */
    protected HttpObjectDecoder() {
        this(4096, 8192, 8192, true);
    }
|
|
|
|
|
2009-06-19 17:35:19 +02:00
|
|
|
/**
|
|
|
|
* Creates a new instance with the specified parameters.
|
|
|
|
*/
|
2013-01-14 16:52:30 +01:00
|
|
|
protected HttpObjectDecoder(
|
2013-01-20 10:40:54 +01:00
|
|
|
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean chunkedSupported) {
|
2009-03-10 09:57:02 +01:00
|
|
|
|
2012-05-20 07:19:11 +02:00
|
|
|
super(State.SKIP_CONTROL_CHARS);
|
2009-03-10 09:57:02 +01:00
|
|
|
|
2009-03-10 08:20:27 +01:00
|
|
|
if (maxInitialLineLength <= 0) {
|
|
|
|
throw new IllegalArgumentException(
|
|
|
|
"maxInitialLineLength must be a positive integer: " +
|
|
|
|
maxInitialLineLength);
|
|
|
|
}
|
|
|
|
if (maxHeaderSize <= 0) {
|
|
|
|
throw new IllegalArgumentException(
|
|
|
|
"maxHeaderSize must be a positive integer: " +
|
2010-08-26 06:18:53 +02:00
|
|
|
maxHeaderSize);
|
2009-03-10 08:20:27 +01:00
|
|
|
}
|
2009-03-09 09:50:24 +01:00
|
|
|
if (maxChunkSize < 0) {
|
|
|
|
throw new IllegalArgumentException(
|
2009-03-10 08:53:15 +01:00
|
|
|
"maxChunkSize must be a positive integer: " +
|
2009-03-09 09:50:24 +01:00
|
|
|
maxChunkSize);
|
|
|
|
}
|
2009-03-10 08:20:27 +01:00
|
|
|
this.maxInitialLineLength = maxInitialLineLength;
|
|
|
|
this.maxHeaderSize = maxHeaderSize;
|
2009-03-09 09:50:24 +01:00
|
|
|
this.maxChunkSize = maxChunkSize;
|
2013-01-20 10:40:54 +01:00
|
|
|
this.chunkedSupported = chunkedSupported;
|
2009-03-09 09:50:24 +01:00
|
|
|
}
|
|
|
|
|
2008-11-19 08:22:15 +01:00
|
|
|
    /**
     * Decodes as much as possible from {@code buffer} according to the current
     * {@link State}, appending the resulting {@link HttpMessage}s and
     * {@link HttpContent}s to {@code out}. This runs inside
     * {@link ReplayingDecoder}: any read past the end of the buffer replays the
     * method from the last {@code checkpoint(...)} once more data arrives.
     *
     * Several cases deliberately fall through to the next one (no break/return):
     * SKIP_CONTROL_CHARS -> READ_INITIAL -> READ_HEADER, and
     * READ_CHUNK_SIZE -> READ_CHUNKED_CONTENT(_AS_CHUNKS), so one invocation
     * makes as much progress as the buffered bytes allow.
     */
    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out) throws Exception {
        switch (state()) {
        case SKIP_CONTROL_CHARS: {
            try {
                skipControlCharacters(buffer);
                checkpoint(State.READ_INITIAL);
            } finally {
                // Commit the reader index even if skipping replayed partway.
                checkpoint();
            }
            // falls through to READ_INITIAL
        }
        case READ_INITIAL: try {
            String[] initialLine = splitInitialLine(readLine(buffer, maxInitialLineLength));
            if (initialLine.length < 3) {
                // Invalid initial line - ignore.
                checkpoint(State.SKIP_CONTROL_CHARS);
                return;
            }

            message = createMessage(initialLine);
            checkpoint(State.READ_HEADER);
            // falls through to READ_HEADER
        } catch (Exception e) {
            out.add(invalidMessage(e));
            return;
        }
        case READ_HEADER: try {
            // readHeaders() decides how the body is delimited and returns the next state.
            State nextState = readHeaders(buffer);
            checkpoint(nextState);
            if (nextState == State.READ_CHUNK_SIZE) {
                if (!chunkedSupported) {
                    throw new IllegalArgumentException("Chunked messages not supported");
                }
                // Chunked encoding - generate HttpMessage first.  HttpChunks will follow.
                out.add(message);
                return;
            }
            if (nextState == State.SKIP_CONTROL_CHARS) {
                // No content is expected.
                reset(out);
                return;
            }
            long contentLength = HttpHeaders.getContentLength(message, -1);
            if (contentLength == 0 || contentLength == -1 && isDecodingRequest()) {
                // Empty body (explicit zero length, or a request without a
                // Content-Length, which cannot be close-delimited).
                content = Unpooled.EMPTY_BUFFER;
                reset(out);
                return;
            }

            switch (nextState) {
            case READ_FIXED_LENGTH_CONTENT:
                if (contentLength > maxChunkSize || HttpHeaders.is100ContinueExpected(message)) {
                    // Generate FullHttpMessage first.  HttpChunks will follow.
                    checkpoint(State.READ_FIXED_LENGTH_CONTENT_AS_CHUNKS);
                    // chunkSize will be decreased as the READ_FIXED_LENGTH_CONTENT_AS_CHUNKS
                    // state reads data chunk by chunk.
                    chunkSize = HttpHeaders.getContentLength(message, -1);
                    out.add(message);
                    return;
                }
                break;
            case READ_VARIABLE_LENGTH_CONTENT:
                if (buffer.readableBytes() > maxChunkSize || HttpHeaders.is100ContinueExpected(message)) {
                    // Generate FullHttpMessage first.  HttpChunks will follow.
                    checkpoint(State.READ_VARIABLE_LENGTH_CONTENT_AS_CHUNKS);
                    out.add(message);
                    return;
                }
                break;
            default:
                throw new IllegalStateException("Unexpected state: " + nextState);
            }
            // We return here, this forces decode to be called again where we will decode the content
            return;
        } catch (Exception e) {
            out.add(invalidMessage(e));
            return;
        }
        case READ_VARIABLE_LENGTH_CONTENT: {
            // Emit the message plus whatever is readable (capped at maxChunkSize).
            int toRead = actualReadableBytes();
            if (toRead > maxChunkSize) {
                toRead = maxChunkSize;
            }
            out.add(message);
            out.add(new DefaultHttpContent(buffer.readBytes(toRead)));
            return;
        }
        case READ_VARIABLE_LENGTH_CONTENT_AS_CHUNKS: {
            // Keep reading data as a chunk until the end of connection is reached.
            int toRead = actualReadableBytes();
            if (toRead > maxChunkSize) {
                toRead = maxChunkSize;
            }
            ByteBuf content = buffer.readBytes(toRead);
            if (!buffer.isReadable()) {
                // Buffer drained: this piece is the last content of the message.
                reset();
                out.add(new DefaultLastHttpContent(content));
                return;
            }
            out.add(new DefaultHttpContent(content));
            return;
        }
        case READ_FIXED_LENGTH_CONTENT: {
            readFixedLengthContent(buffer, out);
            return;
        }
        case READ_FIXED_LENGTH_CONTENT_AS_CHUNKS: {
            long chunkSize = this.chunkSize;
            int readLimit = actualReadableBytes();

            // Check if the buffer is readable first as we use the readable byte count
            // to create the HttpChunk. This is needed as otherwise we may end up with
            // create a HttpChunk instance that contains an empty buffer and so is
            // handled like it is the last HttpChunk.
            //
            // See https://github.com/netty/netty/issues/433
            if (readLimit == 0) {
                return;
            }

            // Read at most: what is buffered, maxChunkSize, and what remains of the body.
            int toRead = readLimit;
            if (toRead > maxChunkSize) {
                toRead = maxChunkSize;
            }
            if (toRead > chunkSize) {
                toRead = (int) chunkSize;
            }
            ByteBuf content = buffer.readBytes(toRead);
            if (chunkSize > toRead) {
                chunkSize -= toRead;
            } else {
                chunkSize = 0;
            }
            this.chunkSize = chunkSize;

            if (chunkSize == 0) {
                // Read all content.
                reset();
                out.add(new DefaultLastHttpContent(content));
                return;
            }
            out.add(new DefaultHttpContent(content));
            return;
        }
        /**
         * everything else after this point takes care of reading chunked content. basically, read chunk size,
         * read chunk, read and ignore the CRLF and repeat until 0
         */
        case READ_CHUNK_SIZE: try {
            String line = readLine(buffer, maxInitialLineLength);
            int chunkSize = getChunkSize(line);
            this.chunkSize = chunkSize;
            if (chunkSize == 0) {
                // Terminating zero-length chunk: trailing headers come next.
                checkpoint(State.READ_CHUNK_FOOTER);
                return;
            } else if (chunkSize > maxChunkSize) {
                // A chunk is too large. Split them into multiple chunks again.
                checkpoint(State.READ_CHUNKED_CONTENT_AS_CHUNKS);
            } else {
                checkpoint(State.READ_CHUNKED_CONTENT);
            }
            // falls through to read the chunk data that follows the size line
        } catch (Exception e) {
            out.add(invalidChunk(e));
            return;
        }
        case READ_CHUNKED_CONTENT: {
            assert chunkSize <= Integer.MAX_VALUE;
            HttpContent chunk = new DefaultHttpContent(buffer.readBytes((int) chunkSize));
            checkpoint(State.READ_CHUNK_DELIMITER);
            out.add(chunk);
            return;
        }
        case READ_CHUNKED_CONTENT_AS_CHUNKS: {
            assert chunkSize <= Integer.MAX_VALUE;
            int chunkSize = (int) this.chunkSize;
            int readLimit = actualReadableBytes();

            // Check if the buffer is readable first as we use the readable byte count
            // to create the HttpChunk. This is needed as otherwise we may end up with
            // create a HttpChunk instance that contains an empty buffer and so is
            // handled like it is the last HttpChunk.
            //
            // See https://github.com/netty/netty/issues/433
            if (readLimit == 0) {
                return;
            }

            int toRead = chunkSize;
            if (toRead > maxChunkSize) {
                toRead = maxChunkSize;
            }
            if (toRead > readLimit) {
                toRead = readLimit;
            }
            HttpContent chunk = new DefaultHttpContent(buffer.readBytes(toRead));
            if (chunkSize > toRead) {
                chunkSize -= toRead;
            } else {
                chunkSize = 0;
            }
            this.chunkSize = chunkSize;

            if (chunkSize == 0) {
                // Read all content.
                checkpoint(State.READ_CHUNK_DELIMITER);
            }

            out.add(chunk);
            return;
        }
        case READ_CHUNK_DELIMITER: {
            // Consume bytes until a CRLF (or a bare LF) ends the chunk data.
            for (;;) {
                byte next = buffer.readByte();
                if (next == HttpConstants.CR) {
                    if (buffer.readByte() == HttpConstants.LF) {
                        checkpoint(State.READ_CHUNK_SIZE);
                        return;
                    }
                } else if (next == HttpConstants.LF) {
                    checkpoint(State.READ_CHUNK_SIZE);
                    return;
                } else {
                    // Not a delimiter byte; commit progress and keep scanning.
                    checkpoint();
                }
            }
        }
        case READ_CHUNK_FOOTER: try {
            LastHttpContent trailer = readTrailingHeaders(buffer);
            if (maxChunkSize == 0) {
                // Chunked encoding disabled.
                reset(out);
                return;
            } else {
                reset();
                // The last chunk, which is empty
                out.add(trailer);
                return;
            }
        } catch (Exception e) {
            out.add(invalidChunk(e));
            return;
        }
        case BAD_MESSAGE: {
            // Keep discarding until disconnection.
            buffer.skipBytes(actualReadableBytes());
            return;
        }
        default: {
            throw new Error("Shouldn't reach here.");
        }
        }
    }
|
|
|
|
|
2013-06-13 03:57:06 +02:00
|
|
|
@Override
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
protected void decodeLast(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
|
2013-06-13 03:57:06 +02:00
|
|
|
decode(ctx, in, out);
|
|
|
|
|
|
|
|
// Handle the last unfinished message.
|
|
|
|
if (message != null) {
|
|
|
|
// Get the length of the content received so far for the last message.
|
|
|
|
HttpMessage message = this.message;
|
|
|
|
int actualContentLength;
|
|
|
|
if (content != null) {
|
|
|
|
actualContentLength = content.readableBytes();
|
|
|
|
} else {
|
|
|
|
actualContentLength = 0;
|
|
|
|
}
|
|
|
|
|
2013-06-13 04:51:03 +02:00
|
|
|
// Check if the closure of the connection signifies the end of the content.
|
2013-06-13 03:57:06 +02:00
|
|
|
boolean prematureClosure;
|
|
|
|
if (isDecodingRequest()) {
|
|
|
|
// The last request did not wait for a response.
|
|
|
|
prematureClosure = true;
|
|
|
|
} else {
|
|
|
|
// Compare the length of the received content and the 'Content-Length' header.
|
|
|
|
// If the 'Content-Length' header is absent, the length of the content is determined by the end of the
|
|
|
|
// connection, so it is perfectly fine.
|
|
|
|
long expectedContentLength = HttpHeaders.getContentLength(message, -1);
|
|
|
|
prematureClosure = expectedContentLength >= 0 && actualContentLength != expectedContentLength;
|
|
|
|
}
|
|
|
|
|
2013-06-13 04:51:03 +02:00
|
|
|
if (!prematureClosure) {
|
|
|
|
if (actualContentLength == 0) {
|
|
|
|
out.add(LastHttpContent.EMPTY_LAST_CONTENT);
|
|
|
|
} else {
|
|
|
|
out.add(new DefaultLastHttpContent(content));
|
|
|
|
}
|
2013-06-13 03:57:06 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-16 05:22:50 +01:00
|
|
|
protected boolean isContentAlwaysEmpty(HttpMessage msg) {
|
|
|
|
if (msg instanceof HttpResponse) {
|
|
|
|
HttpResponse res = (HttpResponse) msg;
|
2013-01-30 07:42:18 +01:00
|
|
|
int code = res.getStatus().code();
|
2012-05-20 07:19:11 +02:00
|
|
|
|
2012-03-20 15:40:34 +01:00
|
|
|
// Correctly handle return codes of 1xx.
|
2012-05-20 07:19:11 +02:00
|
|
|
//
|
|
|
|
// See:
|
2012-03-20 15:40:34 +01:00
|
|
|
// - http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html Section 4.4
|
|
|
|
// - https://github.com/netty/netty/issues/222
|
2012-03-20 15:44:59 +01:00
|
|
|
if (code >= 100 && code < 200) {
|
2013-01-16 05:22:50 +01:00
|
|
|
if (code == 101 && !res.headers().contains(HttpHeaders.Names.SEC_WEBSOCKET_ACCEPT)) {
|
2012-03-21 19:00:32 +01:00
|
|
|
// It's Hixie 76 websocket handshake response
|
|
|
|
return false;
|
2012-06-11 15:54:28 +02:00
|
|
|
}
|
2009-03-30 04:14:11 +02:00
|
|
|
return true;
|
|
|
|
}
|
2012-03-09 03:07:26 +01:00
|
|
|
|
2009-03-30 04:14:11 +02:00
|
|
|
switch (code) {
|
|
|
|
case 204: case 205: case 304:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-05-24 19:58:55 +02:00
|
|
|
    /**
     * Resets the per-message state and returns to the initial decoder state
     * without emitting anything; see {@link #reset(List)}.
     */
    private void reset() {
        reset(null);
    }
|
2013-06-12 09:56:00 +02:00
|
|
|
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
private void reset(List<Object> out) {
|
2013-05-24 19:58:55 +02:00
|
|
|
if (out != null) {
|
|
|
|
HttpMessage message = this.message;
|
|
|
|
ByteBuf content = this.content;
|
|
|
|
LastHttpContent httpContent;
|
|
|
|
|
|
|
|
if (content == null || !content.isReadable()) {
|
|
|
|
httpContent = LastHttpContent.EMPTY_LAST_CONTENT;
|
|
|
|
} else {
|
|
|
|
httpContent = new DefaultLastHttpContent(content);
|
|
|
|
}
|
2009-02-13 09:55:06 +01:00
|
|
|
|
2013-05-24 19:58:55 +02:00
|
|
|
out.add(message);
|
|
|
|
out.add(httpContent);
|
2009-03-10 09:37:45 +01:00
|
|
|
}
|
2013-01-14 16:52:30 +01:00
|
|
|
|
2013-05-24 19:58:55 +02:00
|
|
|
content = null;
|
|
|
|
message = null;
|
2009-03-10 09:37:45 +01:00
|
|
|
|
2009-02-12 06:02:22 +01:00
|
|
|
checkpoint(State.SKIP_CONTROL_CHARS);
|
2008-11-19 08:22:15 +01:00
|
|
|
}
|
|
|
|
|
2013-01-16 05:22:50 +01:00
|
|
|
private HttpMessage invalidMessage(Exception cause) {
|
2012-09-28 08:16:29 +02:00
|
|
|
checkpoint(State.BAD_MESSAGE);
|
|
|
|
if (message != null) {
|
2013-03-12 05:02:50 +01:00
|
|
|
message.setDecoderResult(DecoderResult.failure(cause));
|
2012-09-28 08:16:29 +02:00
|
|
|
} else {
|
|
|
|
message = createInvalidMessage();
|
2013-01-30 07:42:18 +01:00
|
|
|
message.setDecoderResult(DecoderResult.failure(cause));
|
2012-09-28 08:16:29 +02:00
|
|
|
}
|
|
|
|
return message;
|
|
|
|
}
|
|
|
|
|
2013-01-14 16:52:30 +01:00
|
|
|
private HttpContent invalidChunk(Exception cause) {
|
2012-09-28 08:16:29 +02:00
|
|
|
checkpoint(State.BAD_MESSAGE);
|
2013-01-14 16:52:30 +01:00
|
|
|
HttpContent chunk = new DefaultHttpContent(Unpooled.EMPTY_BUFFER);
|
2013-01-30 07:42:18 +01:00
|
|
|
chunk.setDecoderResult(DecoderResult.failure(cause));
|
2012-09-28 08:16:29 +02:00
|
|
|
return chunk;
|
|
|
|
}
|
|
|
|
|
2012-06-10 04:08:43 +02:00
|
|
|
private static void skipControlCharacters(ByteBuf buffer) {
|
2009-02-12 06:02:22 +01:00
|
|
|
for (;;) {
|
|
|
|
char c = (char) buffer.readUnsignedByte();
|
|
|
|
if (!Character.isISOControl(c) &&
|
|
|
|
!Character.isWhitespace(c)) {
|
|
|
|
buffer.readerIndex(buffer.readerIndex() - 1);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Remove MessageList from public API and change ChannelInbound/OutboundHandler accordingly
I must admit MesageList was pain in the ass. Instead of forcing a
handler always loop over the list of messages, this commit splits
messageReceived(ctx, list) into two event handlers:
- messageReceived(ctx, msg)
- mmessageReceivedLast(ctx)
When Netty reads one or more messages, messageReceived(ctx, msg) event
is triggered for each message. Once the current read operation is
finished, messageReceivedLast() is triggered to tell the handler that
the last messageReceived() was the last message in the current batch.
Similarly, for outbound, write(ctx, list) has been split into two:
- write(ctx, msg)
- flush(ctx, promise)
Instead of writing a list of message with a promise, a user is now
supposed to call write(msg) multiple times and then call flush() to
actually flush the buffered messages.
Please note that write() doesn't have a promise with it. You must call
flush() to get notified on completion. (or you can use writeAndFlush())
Other changes:
- Because MessageList is completely hidden, codec framework uses
List<Object> instead of MessageList as an output parameter.
2013-07-08 12:03:40 +02:00
|
|
|
    /**
     * Reads a body whose size is fixed by the 'Content-Length' header,
     * accumulating across invocations via {@code contentRead} until the full
     * length is available, then emits the message and resets.
     */
    private void readFixedLengthContent(ByteBuf buffer, List<Object> out) {
        //we have a content-length so we just read the correct number of bytes
        long length = HttpHeaders.getContentLength(message, -1);
        assert length <= Integer.MAX_VALUE;
        // Remaining bytes of the body, capped at what is actually buffered.
        int toRead = (int) length - contentRead;
        if (toRead > actualReadableBytes()) {
            toRead = actualReadableBytes();
        }
        contentRead += toRead;
        // NOTE(review): given toRead is capped at (length - contentRead) above,
        // contentRead should never exceed length here, so this branch looks
        // unreachable - confirm whether the condition was meant the other way
        // around (i.e. "body not yet complete").
        if (length < contentRead) {
            out.add(message);
            out.add(new DefaultHttpContent(buffer.readBytes(toRead)));
            return;
        }
        if (content == null) {
            content = buffer.readBytes((int) length);
        } else {
            // NOTE(review): writes 'length' bytes rather than 'toRead' - if
            // content already holds earlier bytes this would over-read; verify
            // against how contentRead/content interact across calls.
            content.writeBytes(buffer, (int) length);
        }
        reset(out);
    }
|
|
|
|
|
2012-11-09 23:07:37 +01:00
|
|
|
/**
 * Reads the header section that follows the initial line, populating
 * {@code message.headers()}, and decides which {@link State} the decoder
 * should move to next based on the Transfer-Encoding / Content-Length
 * headers of the message.
 *
 * @param buffer the buffer positioned at the first byte of the headers
 * @return the next decoder state
 */
private State readHeaders(ByteBuf buffer) {
    headerSize = 0;
    final HttpMessage message = this.message;
    final HttpHeaders headers = message.headers();

    String line = readHeader(buffer);
    String name = null;
    String value = null;
    if (!line.isEmpty()) {
        headers.clear();
        do {
            char firstChar = line.charAt(0);
            // A line starting with SP or HT is a folded continuation
            // (obs-fold) of the previous header's value.
            if (name != null && (firstChar == ' ' || firstChar == '\t')) {
                value = value + ' ' + line.trim();
            } else {
                // Starting a new header: flush the one accumulated so far.
                if (name != null) {
                    headers.add(name, value);
                }
                String[] header = splitHeader(line);
                name = header[0];
                value = header[1];
            }

            line = readHeader(buffer);
        } while (!line.isEmpty());

        // Add the last header.
        if (name != null) {
            headers.add(name, value);
        }
    }

    State nextState;

    if (isContentAlwaysEmpty(message)) {
        // This kind of message never carries a body: drop any chunked
        // marker and go straight back to scanning for the next message.
        HttpHeaders.removeTransferEncodingChunked(message);
        nextState = State.SKIP_CONTROL_CHARS;
    } else if (HttpHeaders.isTransferEncodingChunked(message)) {
        nextState = State.READ_CHUNK_SIZE;
    } else if (HttpHeaders.getContentLength(message, -1) >= 0) {
        nextState = State.READ_FIXED_LENGTH_CONTENT;
    } else {
        // No framing information at all: body runs until the connection
        // state says otherwise.
        nextState = State.READ_VARIABLE_LENGTH_CONTENT;
    }
    return nextState;
}
|
|
|
|
|
2013-01-14 16:52:30 +01:00
|
|
|
/**
 * Reads the optional trailing headers that follow the last chunk of a
 * chunked body.
 *
 * @param buffer the buffer positioned at the first trailer byte
 * @return a {@link LastHttpContent} carrying the decoded trailing headers,
 *         or {@link LastHttpContent#EMPTY_LAST_CONTENT} when there are none
 */
private LastHttpContent readTrailingHeaders(ByteBuf buffer) {
    headerSize = 0;
    String line = readHeader(buffer);
    String lastHeader = null;
    if (!line.isEmpty()) {
        LastHttpContent trailer = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER);
        do {
            char firstChar = line.charAt(0);
            if (lastHeader != null && (firstChar == ' ' || firstChar == '\t')) {
                // Folded continuation line: append it to the last stored
                // value of the previously seen header.
                // NOTE(review): this assumes getAll() returns a live list
                // whose set() writes through to the headers — confirm.
                List<String> current = trailer.trailingHeaders().getAll(lastHeader);
                if (!current.isEmpty()) {
                    int lastPos = current.size() - 1;
                    String newString = current.get(lastPos) + line.trim();
                    current.set(lastPos, newString);
                } else {
                    // The previous header was skipped (it was one of
                    // Content-Length, Transfer-Encoding, or Trailer), so its
                    // continuation is dropped as well.
                }
            } else {
                String[] header = splitHeader(line);
                String name = header[0];
                // These headers are not allowed to appear as trailers of a
                // chunked body; ignore them.
                if (!name.equalsIgnoreCase(HttpHeaders.Names.CONTENT_LENGTH) &&
                    !name.equalsIgnoreCase(HttpHeaders.Names.TRANSFER_ENCODING) &&
                    !name.equalsIgnoreCase(HttpHeaders.Names.TRAILER)) {
                    trailer.trailingHeaders().add(name, header[1]);
                }
                // Remembered even when skipped, so continuations fold onto
                // the right (possibly suppressed) header.
                lastHeader = name;
            }

            line = readHeader(buffer);
        } while (!line.isEmpty());

        return trailer;
    }

    return LastHttpContent.EMPTY_LAST_CONTENT;
}
|
|
|
|
|
2012-11-09 23:07:37 +01:00
|
|
|
/**
 * Reads one header line, consuming bytes up to and including the line
 * terminator (CRLF or a bare LF), while charging every byte read against
 * the shared {@code headerSize} budget.
 *
 * @param buffer the buffer to consume header bytes from
 * @return the line without its terminator (may be empty at end of headers)
 * @throws TooLongFrameException if the accumulated header section reaches
 *         {@code maxHeaderSize} bytes
 */
private String readHeader(ByteBuf buffer) {
    StringBuilder sb = new StringBuilder(64);
    // Work on a local copy; the field is written back only once the whole
    // line has been read.
    int headerSize = this.headerSize;

    loop:
    for (;;) {
        char nextByte = (char) buffer.readByte();
        headerSize ++;

        switch (nextByte) {
        case HttpConstants.CR:
            nextByte = (char) buffer.readByte();
            headerSize ++;
            if (nextByte == HttpConstants.LF) {
                break loop;
            }
            // CR not followed by LF: the CR itself is dropped and only the
            // byte read after it is appended below.
            break;
        case HttpConstants.LF:
            break loop;
        }

        // Abort decoding if the header part is too large.
        if (headerSize >= maxHeaderSize) {
            // TODO: Respond with Bad Request and discard the traffic
            //    or close the connection.
            //       No need to notify the upstream handlers - just log.
            //       If decoding a response, just throw an exception.
            throw new TooLongFrameException(
                    "HTTP header is larger than " +
                    maxHeaderSize + " bytes.");
        }

        sb.append(nextByte);
    }

    this.headerSize = headerSize;
    return sb.toString();
}
|
|
|
|
|
2009-02-12 08:32:53 +01:00
|
|
|
/** Whether this decoder handles the request side of HTTP (vs. responses). */
protected abstract boolean isDecodingRequest();

/**
 * Creates a new {@link HttpMessage} from the tokens of the already-split
 * initial line.
 *
 * @param initialLine the split components of the initial line
 * @throws Exception if the message could not be created
 */
protected abstract HttpMessage createMessage(String[] initialLine) throws Exception;

/**
 * Creates a message standing in for one that could not be decoded.
 * NOTE(review): presumably carries a failed {@link io.netty.handler.codec.DecoderResult};
 * confirm with the concrete subclasses.
 */
protected abstract HttpMessage createInvalidMessage();
|
2012-09-28 08:16:29 +02:00
|
|
|
|
2012-05-20 07:19:11 +02:00
|
|
|
private static int getChunkSize(String hex) {
|
2009-02-12 06:23:39 +01:00
|
|
|
hex = hex.trim();
|
|
|
|
for (int i = 0; i < hex.length(); i ++) {
|
|
|
|
char c = hex.charAt(i);
|
|
|
|
if (c == ';' || Character.isWhitespace(c) || Character.isISOControl(c)) {
|
|
|
|
hex = hex.substring(0, i);
|
|
|
|
break;
|
|
|
|
}
|
2009-02-12 06:10:25 +01:00
|
|
|
}
|
2009-02-12 06:23:39 +01:00
|
|
|
|
2009-02-12 06:10:25 +01:00
|
|
|
return Integer.parseInt(hex, 16);
|
2008-11-19 08:22:15 +01:00
|
|
|
}
|
|
|
|
|
2012-11-09 23:07:37 +01:00
|
|
|
private static String readLine(ByteBuf buffer, int maxLineLength) {
|
2009-02-12 06:49:19 +01:00
|
|
|
StringBuilder sb = new StringBuilder(64);
|
2009-03-10 08:20:27 +01:00
|
|
|
int lineLength = 0;
|
2008-11-19 08:22:15 +01:00
|
|
|
while (true) {
|
2009-02-12 06:48:25 +01:00
|
|
|
byte nextByte = buffer.readByte();
|
2012-05-31 20:32:42 +02:00
|
|
|
if (nextByte == HttpConstants.CR) {
|
2009-02-12 06:48:25 +01:00
|
|
|
nextByte = buffer.readByte();
|
2012-05-31 20:32:42 +02:00
|
|
|
if (nextByte == HttpConstants.LF) {
|
2008-11-30 17:22:03 +01:00
|
|
|
return sb.toString();
|
|
|
|
}
|
2012-05-31 20:32:42 +02:00
|
|
|
} else if (nextByte == HttpConstants.LF) {
|
2008-11-19 08:22:15 +01:00
|
|
|
return sb.toString();
|
2012-01-11 12:16:14 +01:00
|
|
|
} else {
|
2009-03-10 08:20:27 +01:00
|
|
|
if (lineLength >= maxLineLength) {
|
2010-10-19 07:40:44 +02:00
|
|
|
// TODO: Respond with Bad Request and discard the traffic
|
|
|
|
// or close the connection.
|
|
|
|
// No need to notify the upstream handlers - just log.
|
|
|
|
// If decoding a response, just throw an exception.
|
2009-03-10 08:20:27 +01:00
|
|
|
throw new TooLongFrameException(
|
|
|
|
"An HTTP line is larger than " + maxLineLength +
|
|
|
|
" bytes.");
|
|
|
|
}
|
|
|
|
lineLength ++;
|
2008-11-19 08:22:15 +01:00
|
|
|
sb.append((char) nextByte);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-05-20 07:19:11 +02:00
|
|
|
private static String[] splitInitialLine(String sb) {
|
2009-06-15 09:30:07 +02:00
|
|
|
int aStart;
|
|
|
|
int aEnd;
|
|
|
|
int bStart;
|
|
|
|
int bEnd;
|
|
|
|
int cStart;
|
|
|
|
int cEnd;
|
|
|
|
|
|
|
|
aStart = findNonWhitespace(sb, 0);
|
|
|
|
aEnd = findWhitespace(sb, aStart);
|
|
|
|
|
|
|
|
bStart = findNonWhitespace(sb, aEnd);
|
|
|
|
bEnd = findWhitespace(sb, bStart);
|
|
|
|
|
|
|
|
cStart = findNonWhitespace(sb, bEnd);
|
|
|
|
cEnd = findEndOfString(sb);
|
|
|
|
|
|
|
|
return new String[] {
|
|
|
|
sb.substring(aStart, aEnd),
|
|
|
|
sb.substring(bStart, bEnd),
|
2010-05-27 14:38:54 +02:00
|
|
|
cStart < cEnd? sb.substring(cStart, cEnd) : "" };
|
2008-11-19 08:22:15 +01:00
|
|
|
}
|
|
|
|
|
2012-05-20 07:19:11 +02:00
|
|
|
private static String[] splitHeader(String sb) {
|
2009-09-10 12:34:49 +02:00
|
|
|
final int length = sb.length();
|
2009-06-15 09:30:07 +02:00
|
|
|
int nameStart;
|
|
|
|
int nameEnd;
|
|
|
|
int colonEnd;
|
|
|
|
int valueStart;
|
|
|
|
int valueEnd;
|
|
|
|
|
|
|
|
nameStart = findNonWhitespace(sb, 0);
|
2009-09-10 12:34:49 +02:00
|
|
|
for (nameEnd = nameStart; nameEnd < length; nameEnd ++) {
|
2009-06-15 09:30:07 +02:00
|
|
|
char ch = sb.charAt(nameEnd);
|
|
|
|
if (ch == ':' || Character.isWhitespace(ch)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-09-10 12:34:49 +02:00
|
|
|
for (colonEnd = nameEnd; colonEnd < length; colonEnd ++) {
|
2009-06-15 09:30:07 +02:00
|
|
|
if (sb.charAt(colonEnd) == ':') {
|
|
|
|
colonEnd ++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
valueStart = findNonWhitespace(sb, colonEnd);
|
2009-09-10 12:34:49 +02:00
|
|
|
if (valueStart == length) {
|
|
|
|
return new String[] {
|
|
|
|
sb.substring(nameStart, nameEnd),
|
|
|
|
""
|
|
|
|
};
|
|
|
|
}
|
2009-06-15 09:30:07 +02:00
|
|
|
|
2009-09-10 12:34:49 +02:00
|
|
|
valueEnd = findEndOfString(sb);
|
2009-06-15 09:30:07 +02:00
|
|
|
return new String[] {
|
|
|
|
sb.substring(nameStart, nameEnd),
|
2009-09-10 12:34:49 +02:00
|
|
|
sb.substring(valueStart, valueEnd)
|
|
|
|
};
|
2009-06-15 09:30:07 +02:00
|
|
|
}
|
|
|
|
|
2012-05-20 07:19:11 +02:00
|
|
|
private static int findNonWhitespace(String sb, int offset) {
|
2009-06-15 09:30:07 +02:00
|
|
|
int result;
|
|
|
|
for (result = offset; result < sb.length(); result ++) {
|
|
|
|
if (!Character.isWhitespace(sb.charAt(result))) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2012-05-20 07:19:11 +02:00
|
|
|
private static int findWhitespace(String sb, int offset) {
|
2009-06-15 09:30:07 +02:00
|
|
|
int result;
|
|
|
|
for (result = offset; result < sb.length(); result ++) {
|
|
|
|
if (Character.isWhitespace(sb.charAt(result))) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2012-05-20 07:19:11 +02:00
|
|
|
private static int findEndOfString(String sb) {
|
2009-06-15 09:30:07 +02:00
|
|
|
int result;
|
|
|
|
for (result = sb.length(); result > 0; result --) {
|
|
|
|
if (!Character.isWhitespace(sb.charAt(result - 1))) {
|
|
|
|
break;
|
|
|
|
}
|
2008-11-19 08:22:15 +01:00
|
|
|
}
|
2009-06-15 09:30:07 +02:00
|
|
|
return result;
|
2008-11-19 08:22:15 +01:00
|
|
|
}
|
|
|
|
}
|