Support large or variable chunk sizes (#11469)

Motivation:

Chunks are split up into even smaller chunks when the underlying
buffer's readable bytes are less than the chunk size.

The underlying buffer can be smaller than a chunk size if:

- The chunk size is larger than the maximum plaintext chunk allowed by the TLS RFC,
  see: io.netty.handler.ssl.SslHandler.MAX_PLAINTEXT_LENGTH.

- The chunk sizes are variable in size,
  which may cause Netty to guess a buffer size that is smaller than a chunk size.

Modification:

Create a variable in HttpObjectDecoder: ByteBuf chunkedContent

- Initialize chunkedContent in READ_CHUNK_SIZE with chunkSize as buffer size.

- In READ_CHUNKED_CONTENT write bytes into chunkedContent

  - If the remaining chunk size is not 0 and toRead == maxChunkSize,
    create a chunk using the chunkedContent and add it to the output messages
    before re-initializing chunkedContent with the remaining chunkSize as buffer size.

  - If the remaining chunk size is not 0 and toRead != maxChunkSize,
    return without adding any output messages.

  - If the remaining chunk size is 0,
    create a chunk using the chunkedContent and add it to the output messages;
    set chunkedContent = null and fall-through.

Result:

Support chunk sizes higher than the underlying buffer's readable bytes.

Co-authored-by: Nitesh Kant <nitesh_kant@apple.com>
Co-authored-by: Norman Maurer <norman_maurer@apple.com>
This commit is contained in:
Mads Johannessen 2021-07-20 15:21:26 +02:00 committed by GitHub
parent 1ce76e7e99
commit 3a41a97b0e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 177 additions and 4 deletions

View File

@ -29,6 +29,7 @@ import java.util.Queue;
import java.util.concurrent.atomic.AtomicLong;
import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS;
import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_ALLOW_PARTIAL_CHUNKS;
import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_MAX_CHUNK_SIZE;
import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_MAX_HEADER_SIZE;
import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_MAX_INITIAL_LINE_LENGTH;
@ -136,8 +137,20 @@ public final class HttpClientCodec extends CombinedChannelDuplexHandler<HttpResp
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean failOnMissingResponse,
boolean validateHeaders, int initialBufferSize, boolean parseHttpAfterConnectRequest,
boolean allowDuplicateContentLengths) {
this(maxInitialLineLength, maxHeaderSize, maxChunkSize, failOnMissingResponse, validateHeaders,
initialBufferSize, parseHttpAfterConnectRequest, allowDuplicateContentLengths,
DEFAULT_ALLOW_PARTIAL_CHUNKS);
}
/**
* Creates a new instance with the specified decoder options.
*/
public HttpClientCodec(
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean failOnMissingResponse,
boolean validateHeaders, int initialBufferSize, boolean parseHttpAfterConnectRequest,
boolean allowDuplicateContentLengths, boolean allowPartialChunks) {
init(new Decoder(maxInitialLineLength, maxHeaderSize, maxChunkSize, validateHeaders, initialBufferSize,
allowDuplicateContentLengths),
allowDuplicateContentLengths, allowPartialChunks),
new Encoder());
this.parseHttpAfterConnectRequest = parseHttpAfterConnectRequest;
this.failOnMissingResponse = failOnMissingResponse;
@ -204,9 +217,9 @@ public final class HttpClientCodec extends CombinedChannelDuplexHandler<HttpResp
}
Decoder(int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders,
int initialBufferSize, boolean allowDuplicateContentLengths) {
int initialBufferSize, boolean allowDuplicateContentLengths, boolean allowPartialChunks) {
super(maxInitialLineLength, maxHeaderSize, maxChunkSize, validateHeaders, initialBufferSize,
allowDuplicateContentLengths);
allowDuplicateContentLengths, allowPartialChunks);
}
@Override

View File

@ -76,6 +76,15 @@ import java.util.List;
* The duplicated field-values will be replaced with a single valid Content-Length field.
* See <a href="https://tools.ietf.org/html/rfc7230#section-3.3.2">RFC 7230, Section 3.3.2</a>.</td>
* </tr>
* <tr>
* <td>{@code allowPartialChunks}</td>
* <td>{@value #DEFAULT_ALLOW_PARTIAL_CHUNKS}</td>
* <td>If the length of a chunk exceeds the {@link ByteBuf}s readable bytes and {@code allowPartialChunks}
* is set to {@code true}, the chunk will be split into multiple {@link HttpContent}s.
* Otherwise, if the chunk size does not exceed {@code maxChunkSize} and {@code allowPartialChunks}
* is set to {@code false}, the {@link ByteBuf} is not decoded into an {@link HttpContent} until
* the readable bytes are greater or equal to the chunk size.</td>
* </tr>
* </table>
*
* <h3>Chunked Content</h3>
@ -123,6 +132,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
public static final int DEFAULT_MAX_INITIAL_LINE_LENGTH = 4096;
public static final int DEFAULT_MAX_HEADER_SIZE = 8192;
public static final boolean DEFAULT_CHUNKED_SUPPORTED = true;
public static final boolean DEFAULT_ALLOW_PARTIAL_CHUNKS = true;
public static final int DEFAULT_MAX_CHUNK_SIZE = 8192;
public static final boolean DEFAULT_VALIDATE_HEADERS = true;
public static final int DEFAULT_INITIAL_BUFFER_SIZE = 128;
@ -132,6 +142,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
private final int maxChunkSize;
private final boolean chunkedSupported;
private final boolean allowPartialChunks;
protected final boolean validateHeaders;
private final boolean allowDuplicateContentLengths;
private final HeaderParser headerParser;
@ -206,10 +217,24 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS);
}
/**
* Creates a new instance with the specified parameters.
*/
protected HttpObjectDecoder(
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize,
boolean chunkedSupported, boolean validateHeaders, int initialBufferSize,
boolean allowDuplicateContentLengths) {
this(maxInitialLineLength, maxHeaderSize, maxChunkSize, chunkedSupported, validateHeaders, initialBufferSize,
allowDuplicateContentLengths, DEFAULT_ALLOW_PARTIAL_CHUNKS);
}
/**
* Creates a new instance with the specified parameters.
*/
protected HttpObjectDecoder(
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize,
boolean chunkedSupported, boolean validateHeaders, int initialBufferSize,
boolean allowDuplicateContentLengths, boolean allowPartialChunks) {
checkPositive(maxInitialLineLength, "maxInitialLineLength");
checkPositive(maxHeaderSize, "maxHeaderSize");
checkPositive(maxChunkSize, "maxChunkSize");
@ -221,6 +246,7 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
this.chunkedSupported = chunkedSupported;
this.validateHeaders = validateHeaders;
this.allowDuplicateContentLengths = allowDuplicateContentLengths;
this.allowPartialChunks = allowPartialChunks;
}
@Override
@ -366,6 +392,9 @@ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
case READ_CHUNKED_CONTENT: {
assert chunkSize <= Integer.MAX_VALUE;
int toRead = Math.min((int) chunkSize, maxChunkSize);
if (!allowPartialChunks && buffer.readableBytes() < toRead) {
return;
}
toRead = Math.min(toRead, buffer.readableBytes());
if (toRead == 0) {
return;

View File

@ -51,6 +51,30 @@ import io.netty.handler.codec.TooLongFrameException;
* after this decoder in the {@link ChannelPipeline}.</td>
* </tr>
* </table>
*
* <h3>Parameters that control parsing behavior</h3>
* <table border="1">
* <tr>
* <th>Name</th><th>Default value</th><th>Meaning</th>
* </tr>
* <tr>
* <td>{@code allowDuplicateContentLengths}</td>
* <td>{@value #DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS}</td>
* <td>When set to {@code false}, will reject any messages that contain multiple Content-Length header fields.
* When set to {@code true}, will allow multiple Content-Length headers only if they are all the same decimal value.
* The duplicated field-values will be replaced with a single valid Content-Length field.
* See <a href="https://tools.ietf.org/html/rfc7230#section-3.3.2">RFC 7230, Section 3.3.2</a>.</td>
* </tr>
* <tr>
* <td>{@code allowPartialChunks}</td>
* <td>{@value #DEFAULT_ALLOW_PARTIAL_CHUNKS}</td>
* <td>If the length of a chunk exceeds the {@link ByteBuf}s readable bytes and {@code allowPartialChunks}
* is set to {@code true}, the chunk will be split into multiple {@link HttpContent}s.
* Otherwise, if the chunk size does not exceed {@code maxChunkSize} and {@code allowPartialChunks}
* is set to {@code false}, the {@link ByteBuf} is not decoded into an {@link HttpContent} until
* the readable bytes are greater or equal to the chunk size.</td>
* </tr>
* </table>
*/
public class HttpRequestDecoder extends HttpObjectDecoder {
@ -89,6 +113,13 @@ public class HttpRequestDecoder extends HttpObjectDecoder {
initialBufferSize, allowDuplicateContentLengths);
}
/**
 * Creates a new instance with the specified decoder options, including {@code allowPartialChunks},
 * which controls whether a chunk larger than the readable bytes may be emitted as multiple
 * {@link HttpContent}s or buffered until the full chunk is available.
 */
public HttpRequestDecoder(
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders,
int initialBufferSize, boolean allowDuplicateContentLengths, boolean allowPartialChunks) {
// Delegates to HttpObjectDecoder with chunked transfer-encoding support enabled.
super(maxInitialLineLength, maxHeaderSize, maxChunkSize, DEFAULT_CHUNKED_SUPPORTED, validateHeaders,
initialBufferSize, allowDuplicateContentLengths, allowPartialChunks);
}
@Override
protected HttpMessage createMessage(String[] initialLine) throws Exception {
return new DefaultHttpRequest(

View File

@ -53,6 +53,30 @@ import io.netty.handler.codec.TooLongFrameException;
* </tr>
* </table>
*
* <h3>Parameters that control parsing behavior</h3>
* <table border="1">
* <tr>
* <th>Name</th><th>Default value</th><th>Meaning</th>
* </tr>
* <tr>
* <td>{@code allowDuplicateContentLengths}</td>
* <td>{@value #DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS}</td>
* <td>When set to {@code false}, will reject any messages that contain multiple Content-Length header fields.
* When set to {@code true}, will allow multiple Content-Length headers only if they are all the same decimal value.
* The duplicated field-values will be replaced with a single valid Content-Length field.
* See <a href="https://tools.ietf.org/html/rfc7230#section-3.3.2">RFC 7230, Section 3.3.2</a>.</td>
* </tr>
* <tr>
* <td>{@code allowPartialChunks}</td>
* <td>{@value #DEFAULT_ALLOW_PARTIAL_CHUNKS}</td>
* <td>If the length of a chunk exceeds the {@link ByteBuf}s readable bytes and {@code allowPartialChunks}
* is set to {@code true}, the chunk will be split into multiple {@link HttpContent}s.
* Otherwise, if the chunk size does not exceed {@code maxChunkSize} and {@code allowPartialChunks}
* is set to {@code false}, the {@link ByteBuf} is not decoded into an {@link HttpContent} until
* the readable bytes are greater or equal to the chunk size.</td>
* </tr>
* </table>
*
* <h3>Decoding a response for a <tt>HEAD</tt> request</h3>
* <p>
* Unlike other HTTP requests, the successful response of a <tt>HEAD</tt>
@ -120,6 +144,13 @@ public class HttpResponseDecoder extends HttpObjectDecoder {
initialBufferSize, allowDuplicateContentLengths);
}
/**
 * Creates a new instance with the specified decoder options, including {@code allowPartialChunks},
 * which controls whether a chunk larger than the readable bytes may be emitted as multiple
 * {@link HttpContent}s or buffered until the full chunk is available.
 */
public HttpResponseDecoder(
int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders,
int initialBufferSize, boolean allowDuplicateContentLengths, boolean allowPartialChunks) {
// Delegates to HttpObjectDecoder with chunked transfer-encoding support enabled.
super(maxInitialLineLength, maxHeaderSize, maxChunkSize, DEFAULT_CHUNKED_SUPPORTED, validateHeaders,
initialBufferSize, allowDuplicateContentLengths, allowPartialChunks);
}
@Override
protected HttpMessage createMessage(String[] initialLine) {
return new DefaultHttpResponse(

View File

@ -85,6 +85,16 @@ public final class HttpServerCodec extends CombinedChannelDuplexHandler<HttpRequ
new HttpServerResponseEncoder());
}
/**
 * Creates a new instance with the specified decoder options.
 * {@code allowPartialChunks} controls whether a chunk larger than the readable bytes may be
 * emitted as multiple {@link HttpContent}s, or buffered until the full chunk is available;
 * see the {@code HttpObjectDecoder} class documentation for the meaning of the other parameters.
 */
public HttpServerCodec(int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders,
int initialBufferSize, boolean allowDuplicateContentLengths, boolean allowPartialChunks) {
// Wires the request decoder (with the partial-chunk setting) and the response encoder together.
init(new HttpServerRequestDecoder(maxInitialLineLength, maxHeaderSize, maxChunkSize, validateHeaders,
initialBufferSize, allowDuplicateContentLengths, allowPartialChunks),
new HttpServerResponseEncoder());
}
/**
* Upgrades to another protocol from HTTP. Removes the {@link HttpRequestDecoder} and
* {@link HttpResponseEncoder} from the pipeline.
@ -117,6 +127,13 @@ public final class HttpServerCodec extends CombinedChannelDuplexHandler<HttpRequ
allowDuplicateContentLengths);
}
/**
 * Creates a new request decoder, forwarding all options — including
 * {@code allowPartialChunks} — to {@link HttpRequestDecoder}.
 */
HttpServerRequestDecoder(int maxInitialLineLength, int maxHeaderSize, int maxChunkSize,
boolean validateHeaders, int initialBufferSize, boolean allowDuplicateContentLengths,
boolean allowPartialChunks) {
super(maxInitialLineLength, maxHeaderSize, maxChunkSize, validateHeaders, initialBufferSize,
allowDuplicateContentLengths, allowPartialChunks);
}
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out) throws Exception {
int oldSize = out.size();

View File

@ -25,7 +25,7 @@ import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import static io.netty.handler.codec.http.HttpHeadersTestUtils.of;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.is;
@ -146,6 +146,58 @@ public class HttpResponseDecoderTest {
assertNull(ch.readInbound());
}
@Test
public void testResponseDisallowPartialChunks() {
// Decoder configured with allowPartialChunks = false (last constructor argument):
// a chunk must not be emitted until all of its bytes are readable.
HttpResponseDecoder decoder = new HttpResponseDecoder(
HttpObjectDecoder.DEFAULT_MAX_INITIAL_LINE_LENGTH,
HttpObjectDecoder.DEFAULT_MAX_HEADER_SIZE,
HttpObjectDecoder.DEFAULT_MAX_CHUNK_SIZE,
HttpObjectDecoder.DEFAULT_VALIDATE_HEADERS,
HttpObjectDecoder.DEFAULT_INITIAL_BUFFER_SIZE,
HttpObjectDecoder.DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS,
false);
EmbeddedChannel ch = new EmbeddedChannel(decoder);
// A chunked response: the headers alone should decode to an HttpResponse.
String headers = "HTTP/1.1 200 OK\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n";
assertTrue(ch.writeInbound(Unpooled.copiedBuffer(headers, CharsetUtil.US_ASCII)));
HttpResponse res = ch.readInbound();
assertThat(res.protocolVersion(), sameInstance(HttpVersion.HTTP_1_1));
assertThat(res.status(), is(HttpResponseStatus.OK));
// Build a 10-byte chunk and split it into two 5-byte halves.
byte[] chunkBytes = new byte[10];
Random random = new Random();
random.nextBytes(chunkBytes);
final ByteBuf chunk = ch.alloc().buffer().writeBytes(chunkBytes);
final int chunkSize = chunk.readableBytes();
ByteBuf partialChunk1 = chunk.retainedSlice(0, 5);
ByteBuf partialChunk2 = chunk.retainedSlice(5, 5);
// Chunk-size line alone produces no message.
assertFalse(ch.writeInbound(Unpooled.copiedBuffer(Integer.toHexString(chunkSize)
+ "\r\n", CharsetUtil.US_ASCII)));
// First half: still no output, since partial chunks are disallowed.
assertFalse(ch.writeInbound(partialChunk1));
// Second half completes the chunk, so exactly one HttpContent with the full payload is emitted.
assertTrue(ch.writeInbound(partialChunk2));
HttpContent content = ch.readInbound();
assertEquals(chunk, content.content());
content.release();
chunk.release();
// Trailing CRLF of the chunk produces no message.
assertFalse(ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII)));
// Write the last chunk.
assertTrue(ch.writeInbound(Unpooled.copiedBuffer("0\r\n\r\n", CharsetUtil.US_ASCII)));
// Ensure the last chunk was decoded.
HttpContent lastContent = ch.readInbound();
assertFalse(lastContent.content().isReadable());
lastContent.release();
assertFalse(ch.finish());
}
@Test
public void testResponseChunkedExceedMaxChunkSize() {
EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, 8192, 32));