Fix header and initial line length counting
Related: #3445 Motivation: HttpObjectDecoder.HeaderParser does not reset its counter (the size field) when it fails to find the end of line. If a header is split into multiple fragments, the counter is increased as many times as the number of fragments, resulting in an unexpected TooLongFrameException. Modifications: - Add test cases that reproduce the problem - Reset the HeaderParser.size field when no EOL is found. Result: One less bug
This commit is contained in:
parent
2cc7466266
commit
2c14406b55
@ -214,7 +214,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
|
||||
currentState = State.READ_HEADER;
|
||||
// fall-through
|
||||
} catch (Exception e) {
|
||||
out.add(invalidMessage(e));
|
||||
out.add(invalidMessage(buffer, e));
|
||||
return;
|
||||
}
|
||||
case READ_HEADER: try {
|
||||
@ -262,7 +262,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
|
||||
return;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
out.add(invalidMessage(e));
|
||||
out.add(invalidMessage(buffer, e));
|
||||
return;
|
||||
}
|
||||
case READ_VARIABLE_LENGTH_CONTENT: {
|
||||
@ -321,7 +321,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
|
||||
currentState = State.READ_CHUNKED_CONTENT;
|
||||
// fall-through
|
||||
} catch (Exception e) {
|
||||
out.add(invalidChunk(e));
|
||||
out.add(invalidChunk(buffer, e));
|
||||
return;
|
||||
}
|
||||
case READ_CHUNKED_CONTENT: {
|
||||
@ -364,7 +364,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
|
||||
resetNow();
|
||||
return;
|
||||
} catch (Exception e) {
|
||||
out.add(invalidChunk(e));
|
||||
out.add(invalidChunk(buffer, e));
|
||||
return;
|
||||
}
|
||||
case BAD_MESSAGE: {
|
||||
@ -469,8 +469,13 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
|
||||
currentState = State.SKIP_CONTROL_CHARS;
|
||||
}
|
||||
|
||||
private HttpMessage invalidMessage(Exception cause) {
|
||||
private HttpMessage invalidMessage(ByteBuf in, Exception cause) {
|
||||
currentState = State.BAD_MESSAGE;
|
||||
|
||||
// Advance the readerIndex so that ByteToMessageDecoder does not complain
|
||||
// when we produced an invalid message without consuming anything.
|
||||
in.skipBytes(in.readableBytes());
|
||||
|
||||
if (message != null) {
|
||||
message.setDecoderResult(DecoderResult.failure(cause));
|
||||
} else {
|
||||
@ -483,8 +488,13 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
|
||||
return ret;
|
||||
}
|
||||
|
||||
private HttpContent invalidChunk(Exception cause) {
|
||||
private HttpContent invalidChunk(ByteBuf in, Exception cause) {
|
||||
currentState = State.BAD_MESSAGE;
|
||||
|
||||
// Advance the readerIndex so that ByteToMessageDecoder does not complain
|
||||
// when we produced an invalid message without consuming anything.
|
||||
in.skipBytes(in.readableBytes());
|
||||
|
||||
HttpContent chunk = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER);
|
||||
chunk.setDecoderResult(DecoderResult.failure(cause));
|
||||
message = null;
|
||||
@ -736,9 +746,11 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
|
||||
}
|
||||
|
||||
public AppendableCharSequence parse(ByteBuf buffer) {
|
||||
final int oldSize = size;
|
||||
seq.reset();
|
||||
int i = buffer.forEachByte(this);
|
||||
if (i == -1) {
|
||||
size = oldSize;
|
||||
return null;
|
||||
}
|
||||
buffer.readerIndex(i + 1);
|
||||
@ -758,14 +770,15 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
|
||||
if (nextByte == HttpConstants.LF) {
|
||||
return false;
|
||||
}
|
||||
if (size >= maxLength) {
|
||||
|
||||
if (++ size > maxLength) {
|
||||
// TODO: Respond with Bad Request and discard the traffic
|
||||
// or close the connection.
|
||||
// No need to notify the upstream handlers - just log.
|
||||
// If decoding a response, just throw an exception.
|
||||
throw newException(maxLength);
|
||||
}
|
||||
size++;
|
||||
|
||||
seq.append(nextByte);
|
||||
return true;
|
||||
}
|
||||
|
@ -17,10 +17,12 @@ package io.netty.handler.codec.http;
|
||||
|
||||
import io.netty.buffer.Unpooled;
|
||||
import io.netty.channel.embedded.EmbeddedChannel;
|
||||
import io.netty.handler.codec.TooLongFrameException;
|
||||
import io.netty.handler.codec.http.HttpHeaders.Names;
|
||||
import io.netty.util.CharsetUtil;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.*;
|
||||
@ -28,6 +30,69 @@ import static org.junit.Assert.*;
|
||||
|
||||
public class HttpResponseDecoderTest {
|
||||
|
||||
/**
|
||||
* The size of headers should be calculated correctly even if a single header is split into multiple fragments.
|
||||
* @see https://github.com/netty/netty/issues/3445
|
||||
*/
|
||||
@Test
|
||||
public void testMaxHeaderSize1() {
|
||||
final int maxHeaderSize = 8192;
|
||||
|
||||
final EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, maxHeaderSize, 8192));
|
||||
final char[] bytes = new char[maxHeaderSize / 2 - 2];
|
||||
Arrays.fill(bytes, 'a');
|
||||
|
||||
ch.writeInbound(Unpooled.copiedBuffer("HTTP/1.1 200 OK\r\n", CharsetUtil.US_ASCII));
|
||||
|
||||
// Write two 4096-byte headers (= 8192 bytes)
|
||||
ch.writeInbound(Unpooled.copiedBuffer("A:", CharsetUtil.US_ASCII));
|
||||
ch.writeInbound(Unpooled.copiedBuffer(bytes, CharsetUtil.US_ASCII));
|
||||
ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
|
||||
assertNull(ch.readInbound());
|
||||
ch.writeInbound(Unpooled.copiedBuffer("B:", CharsetUtil.US_ASCII));
|
||||
ch.writeInbound(Unpooled.copiedBuffer(bytes, CharsetUtil.US_ASCII));
|
||||
ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
|
||||
ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
|
||||
|
||||
HttpResponse res = (HttpResponse) ch.readInbound();
|
||||
assertNull(res.getDecoderResult().cause());
|
||||
assertTrue(res.getDecoderResult().isSuccess());
|
||||
|
||||
assertNull(ch.readInbound());
|
||||
assertTrue(ch.finish());
|
||||
assertThat(ch.readInbound(), instanceOf(LastHttpContent.class));
|
||||
}
|
||||
|
||||
/**
|
||||
* Complementary test case of {@link #testHeaderReplay()}. When it actually exceeds the maximum, it should fail.
|
||||
*/
|
||||
@Test
|
||||
public void testMaxHeaderSize2() {
|
||||
final int maxHeaderSize = 8192;
|
||||
|
||||
final EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, maxHeaderSize, 8192));
|
||||
final char[] bytes = new char[maxHeaderSize / 2 - 2];
|
||||
Arrays.fill(bytes, 'a');
|
||||
|
||||
ch.writeInbound(Unpooled.copiedBuffer("HTTP/1.1 200 OK\r\n", CharsetUtil.US_ASCII));
|
||||
|
||||
// Write a 4096-byte header and a 4097-byte header to test an off-by-one case (= 8193 bytes)
|
||||
ch.writeInbound(Unpooled.copiedBuffer("A:", CharsetUtil.US_ASCII));
|
||||
ch.writeInbound(Unpooled.copiedBuffer(bytes, CharsetUtil.US_ASCII));
|
||||
ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
|
||||
assertNull(ch.readInbound());
|
||||
ch.writeInbound(Unpooled.copiedBuffer("B: ", CharsetUtil.US_ASCII)); // Note an extra space.
|
||||
ch.writeInbound(Unpooled.copiedBuffer(bytes, CharsetUtil.US_ASCII));
|
||||
ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
|
||||
ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII));
|
||||
|
||||
HttpResponse res = (HttpResponse) ch.readInbound();
|
||||
assertTrue(res.getDecoderResult().cause() instanceof TooLongFrameException);
|
||||
|
||||
assertFalse(ch.finish());
|
||||
assertNull(ch.readInbound());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testResponseChunked() {
|
||||
EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder());
|
||||
|
Loading…
Reference in New Issue
Block a user