Fix header and initial line length counting
Related: #3445

Motivation:

HttpObjectDecoder.HeaderParser does not reset its counter (the size field) when it fails to find the end of a line. If a header is split into multiple fragments, the counter is incremented once per fragment, resulting in an unexpected TooLongFrameException.

Modifications:

- Add test cases that reproduce the problem
- Reset HeaderParser.size when no EOL is found

Result:

One less bug
parent f230d59148
commit c42ab4bfd1
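The gist of the bug, as a minimal standalone sketch (assumed names; HeaderParserSketch, the byte[]/index buffer model, and the IllegalStateException are illustrative stand-ins for HttpObjectDecoder.HeaderParser, which throws a TooLongFrameException via newException): parse() re-scans the unconsumed bytes on every call until a full line has arrived, so whatever it added to the size counter during a failed attempt must be rolled back.

/*
 * Minimal standalone sketch of the bug and the fix (assumed names; this is an
 * illustrative stand-in for HttpObjectDecoder.HeaderParser, not the Netty class).
 */
final class HeaderParserSketch {
    private final StringBuilder seq = new StringBuilder();
    private final int maxLength;
    private int size; // bytes counted towards the header-size limit, cumulative across lines

    HeaderParserSketch(int maxLength) {
        this.maxLength = maxLength;
    }

    // data[readerIndex..writerIndex) models the unconsumed bytes of the inbound buffer.
    // Returns the parsed line, or null when no LF has arrived yet. The caller does not
    // advance readerIndex on null, so the same bytes are scanned again on the next call.
    String parse(byte[] data, int readerIndex, int writerIndex) {
        final int oldSize = size; // remember the counter (this is what the fix adds)
        seq.setLength(0);
        for (int i = readerIndex; i < writerIndex; i++) {
            char next = (char) data[i];
            if (next == '\r') {
                continue;
            }
            if (next == '\n') {
                return seq.toString();
            }
            if (++size > maxLength) {
                throw new IllegalStateException("HTTP header is larger than " + maxLength + " bytes.");
            }
            seq.append(next);
        }
        // No end of line in this fragment: roll the counter back, because the bytes
        // counted above will be counted again when parse() is retried with more data.
        // Without this rollback, a header split into N fragments is counted N times.
        size = oldSize;
        return null;
    }
}

Without that rollback, the bytes of a header delivered in several writes are counted multiple times, which is exactly what testMaxHeaderSize1 in the diff below reproduces against the real decoder.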
HttpObjectDecoder.java

@@ -214,7 +214,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
             currentState = State.READ_HEADER;
             // fall-through
         } catch (Exception e) {
-            out.add(invalidMessage(e));
+            out.add(invalidMessage(buffer, e));
             return;
         }
         case READ_HEADER: try {
@@ -261,7 +261,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
                 return;
             }
         } catch (Exception e) {
-            out.add(invalidMessage(e));
+            out.add(invalidMessage(buffer, e));
             return;
         }
         case READ_VARIABLE_LENGTH_CONTENT: {
@@ -320,7 +320,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
             currentState = State.READ_CHUNKED_CONTENT;
             // fall-through
         } catch (Exception e) {
-            out.add(invalidChunk(e));
+            out.add(invalidChunk(buffer, e));
             return;
         }
         case READ_CHUNKED_CONTENT: {
@@ -363,7 +363,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
             resetNow();
             return;
         } catch (Exception e) {
-            out.add(invalidChunk(e));
+            out.add(invalidChunk(buffer, e));
             return;
         }
         case BAD_MESSAGE: {
@@ -468,8 +468,13 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
         currentState = State.SKIP_CONTROL_CHARS;
     }

-    private HttpMessage invalidMessage(Exception cause) {
+    private HttpMessage invalidMessage(ByteBuf in, Exception cause) {
         currentState = State.BAD_MESSAGE;
+
+        // Advance the readerIndex so that ByteToMessageDecoder does not complain
+        // when we produced an invalid message without consuming anything.
+        in.skipBytes(in.readableBytes());
+
         if (message != null) {
             message.setDecoderResult(DecoderResult.failure(cause));
         } else {
@@ -482,8 +487,13 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
         return ret;
     }

-    private HttpContent invalidChunk(Exception cause) {
+    private HttpContent invalidChunk(ByteBuf in, Exception cause) {
         currentState = State.BAD_MESSAGE;
+
+        // Advance the readerIndex so that ByteToMessageDecoder does not complain
+        // when we produced an invalid message without consuming anything.
+        in.skipBytes(in.readableBytes());
+
         HttpContent chunk = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER);
         chunk.setDecoderResult(DecoderResult.failure(cause));
         message = null;
@@ -735,9 +745,11 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
         }

         public AppendableCharSequence parse(ByteBuf buffer) {
+            final int oldSize = size;
             seq.reset();
             int i = buffer.forEachByte(this);
             if (i == -1) {
+                size = oldSize;
                 return null;
             }
             buffer.readerIndex(i + 1);
@@ -757,14 +769,15 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
             if (nextByte == HttpConstants.LF) {
                 return false;
             }
-            if (size >= maxLength) {
+
+            if (++ size > maxLength) {
                 // TODO: Respond with Bad Request and discard the traffic
                 //       or close the connection.
                 //       No need to notify the upstream handlers - just log.
                 //       If decoding a response, just throw an exception.
                 throw newException(maxLength);
             }
-            size++;
+
             seq.append(nextByte);
             return true;
         }

HttpResponseDecoderTest.java

@@ -17,9 +17,11 @@ package io.netty.handler.codec.http;

 import io.netty.buffer.Unpooled;
 import io.netty.channel.embedded.EmbeddedChannel;
+import io.netty.handler.codec.TooLongFrameException;
 import io.netty.util.CharsetUtil;
 import org.junit.Test;

+import java.util.Arrays;
 import java.util.List;

 import static org.hamcrest.CoreMatchers.*;
@@ -27,6 +29,69 @@ import static org.junit.Assert.*;

 public class HttpResponseDecoderTest {

+    /**
+     * The size of headers should be calculated correctly even if a single header is split into multiple fragments.
+     * @see <a href="https://github.com/netty/netty/issues/3445">#3445</a>
+     */
+    @Test
+    public void testMaxHeaderSize1() {
+        final int maxHeaderSize = 8192;
+
+        final EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, maxHeaderSize, 8192));
+        final char[] bytes = new char[maxHeaderSize / 2 - 2];
+        Arrays.fill(bytes, 'a');
+
+        ch.writeInbound(Unpooled.copiedBuffer("HTTP/1.1 200 OK\r\n", CharsetUtil.US_ASCII));
+
+        // Write two 4096-byte headers (= 8192 bytes)
+        ch.writeInbound(Unpooled.copiedBuffer("A:", CharsetUtil.US_ASCII));
+        ch.writeInbound(Unpooled.copiedBuffer(bytes, CharsetUtil.US_ASCII));
+        ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
+        assertNull(ch.readInbound());
+        ch.writeInbound(Unpooled.copiedBuffer("B:", CharsetUtil.US_ASCII));
+        ch.writeInbound(Unpooled.copiedBuffer(bytes, CharsetUtil.US_ASCII));
+        ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
+        ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
+
+        HttpResponse res = ch.readInbound();
+        assertNull(res.decoderResult().cause());
+        assertTrue(res.decoderResult().isSuccess());
+
+        assertNull(ch.readInbound());
+        assertTrue(ch.finish());
+        assertThat(ch.readInbound(), instanceOf(LastHttpContent.class));
+    }
+
+    /**
+     * Complementary test case of {@link #testMaxHeaderSize1()}. When it actually exceeds the maximum, it should fail.
+     */
+    @Test
+    public void testMaxHeaderSize2() {
+        final int maxHeaderSize = 8192;
+
+        final EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, maxHeaderSize, 8192));
+        final char[] bytes = new char[maxHeaderSize / 2 - 2];
+        Arrays.fill(bytes, 'a');
+
+        ch.writeInbound(Unpooled.copiedBuffer("HTTP/1.1 200 OK\r\n", CharsetUtil.US_ASCII));
+
+        // Write a 4096-byte header and a 4097-byte header to test an off-by-one case (= 8193 bytes)
+        ch.writeInbound(Unpooled.copiedBuffer("A:", CharsetUtil.US_ASCII));
+        ch.writeInbound(Unpooled.copiedBuffer(bytes, CharsetUtil.US_ASCII));
+        ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
+        assertNull(ch.readInbound());
+        ch.writeInbound(Unpooled.copiedBuffer("B: ", CharsetUtil.US_ASCII)); // Note an extra space.
+        ch.writeInbound(Unpooled.copiedBuffer(bytes, CharsetUtil.US_ASCII));
+        ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
+        ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
+
+        HttpResponse res = ch.readInbound();
+        assertTrue(res.decoderResult().cause() instanceof TooLongFrameException);
+
+        assertFalse(ch.finish());
+        assertNull(ch.readInbound());
+    }
+
     @Test
     public void testResponseChunked() {
         EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder());