2012-07-03 10:37:05 +02:00
|
|
|
/*
|
|
|
|
* Copyright 2012 The Netty Project
|
|
|
|
*
|
|
|
|
* The Netty Project licenses this file to you under the Apache License,
|
|
|
|
* version 2.0 (the "License"); you may not use this file except in compliance
|
|
|
|
* with the License. You may obtain a copy of the License at:
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
|
|
|
*/
|
|
|
|
package io.netty.handler.codec.http;
|
|
|
|
|
|
|
|
import io.netty.buffer.ByteBuf;
|
|
|
|
import io.netty.buffer.CompositeByteBuf;
|
2012-07-19 13:23:55 +02:00
|
|
|
import io.netty.buffer.Unpooled;
|
2012-07-03 10:37:05 +02:00
|
|
|
import io.netty.channel.ChannelHandlerContext;
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
import io.netty.channel.embedded.EmbeddedChannel;
|
2017-02-05 00:56:40 +01:00
|
|
|
import io.netty.handler.codec.DecoderResult;
|
2014-07-02 12:04:11 +02:00
|
|
|
import io.netty.handler.codec.DecoderResultProvider;
|
2014-02-20 20:46:41 +01:00
|
|
|
import io.netty.handler.codec.TooLongFrameException;
|
2019-02-04 09:57:54 +01:00
|
|
|
import io.netty.util.AsciiString;
|
2012-07-03 10:37:05 +02:00
|
|
|
import io.netty.util.CharsetUtil;
|
2019-02-04 09:57:54 +01:00
|
|
|
import io.netty.util.ReferenceCountUtil;
|
|
|
|
|
2013-01-16 05:22:50 +01:00
|
|
|
import org.junit.Test;
|
2017-01-09 22:04:18 +01:00
|
|
|
import org.mockito.Mockito;
|
2012-07-19 13:23:55 +02:00
|
|
|
|
2014-02-07 17:59:51 +01:00
|
|
|
import java.nio.channels.ClosedChannelException;
|
2012-07-19 13:23:55 +02:00
|
|
|
import java.util.List;
|
|
|
|
|
2015-09-03 00:10:58 +02:00
|
|
|
import static io.netty.handler.codec.http.HttpHeadersTestUtils.of;
|
2015-08-04 00:09:44 +02:00
|
|
|
import static org.hamcrest.CoreMatchers.instanceOf;
|
|
|
|
import static org.hamcrest.CoreMatchers.is;
|
2019-04-08 21:09:06 +02:00
|
|
|
import static org.junit.Assert.assertArrayEquals;
|
2015-08-04 00:09:44 +02:00
|
|
|
import static org.junit.Assert.assertEquals;
|
|
|
|
import static org.junit.Assert.assertFalse;
|
|
|
|
import static org.junit.Assert.assertNotNull;
|
|
|
|
import static org.junit.Assert.assertNull;
|
|
|
|
import static org.junit.Assert.assertThat;
|
|
|
|
import static org.junit.Assert.assertTrue;
|
|
|
|
import static org.junit.Assert.fail;
|
2019-02-04 09:57:54 +01:00
|
|
|
import static org.junit.Assert.assertSame;
|
2012-07-03 10:37:05 +02:00
|
|
|
|
2013-04-30 20:41:50 +02:00
|
|
|
public class HttpObjectAggregatorTest {
|
2012-07-03 10:37:05 +02:00
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testAggregate() {
|
2013-01-14 16:52:30 +01:00
|
|
|
HttpObjectAggregator aggr = new HttpObjectAggregator(1024 * 1024);
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(aggr);
|
2012-07-03 10:37:05 +02:00
|
|
|
|
2014-02-20 20:35:23 +01:00
|
|
|
HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "http://localhost");
|
2015-09-03 00:10:58 +02:00
|
|
|
message.headers().set(of("X-Test"), true);
|
2013-01-14 16:52:30 +01:00
|
|
|
HttpContent chunk1 = new DefaultHttpContent(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
|
|
|
|
HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII));
|
|
|
|
HttpContent chunk3 = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER);
|
2012-07-03 10:37:05 +02:00
|
|
|
assertFalse(embedder.writeInbound(message));
|
|
|
|
assertFalse(embedder.writeInbound(chunk1));
|
|
|
|
assertFalse(embedder.writeInbound(chunk2));
|
2012-07-19 13:23:55 +02:00
|
|
|
|
2013-07-09 16:09:28 +02:00
|
|
|
// this should trigger a channelRead event so return true
|
2012-07-03 10:37:05 +02:00
|
|
|
assertTrue(embedder.writeInbound(chunk3));
|
|
|
|
assertTrue(embedder.finish());
|
2017-04-19 22:37:03 +02:00
|
|
|
FullHttpRequest aggregatedMessage = embedder.readInbound();
|
|
|
|
assertNotNull(aggregatedMessage);
|
2012-07-19 13:23:55 +02:00
|
|
|
|
2013-05-01 10:04:43 +02:00
|
|
|
assertEquals(chunk1.content().readableBytes() + chunk2.content().readableBytes(),
|
2017-04-19 22:37:03 +02:00
|
|
|
HttpUtil.getContentLength(aggregatedMessage));
|
|
|
|
assertEquals(Boolean.TRUE.toString(), aggregatedMessage.headers().get(of("X-Test")));
|
|
|
|
checkContentBuffer(aggregatedMessage);
|
2012-07-03 10:37:05 +02:00
|
|
|
assertNull(embedder.readInbound());
|
|
|
|
}
|
|
|
|
|
2013-04-30 20:41:20 +02:00
|
|
|
private static void checkContentBuffer(FullHttpRequest aggregatedMessage) {
|
2013-05-01 10:04:43 +02:00
|
|
|
CompositeByteBuf buffer = (CompositeByteBuf) aggregatedMessage.content();
|
2012-07-03 10:37:05 +02:00
|
|
|
assertEquals(2, buffer.numComponents());
|
|
|
|
List<ByteBuf> buffers = buffer.decompose(0, buffer.capacity());
|
|
|
|
assertEquals(2, buffers.size());
|
|
|
|
for (ByteBuf buf: buffers) {
|
|
|
|
// This should be false as we decompose the buffer before to not have deep hierarchy
|
|
|
|
assertFalse(buf instanceof CompositeByteBuf);
|
|
|
|
}
|
2013-10-01 09:57:20 +02:00
|
|
|
aggregatedMessage.release();
|
2012-07-03 10:37:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testAggregateWithTrailer() {
|
2013-01-14 16:52:30 +01:00
|
|
|
HttpObjectAggregator aggr = new HttpObjectAggregator(1024 * 1024);
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(aggr);
|
2014-02-20 20:35:23 +01:00
|
|
|
HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "http://localhost");
|
2015-09-03 00:10:58 +02:00
|
|
|
message.headers().set(of("X-Test"), true);
|
2015-08-22 17:25:57 +02:00
|
|
|
HttpUtil.setTransferEncodingChunked(message, true);
|
2013-01-14 16:52:30 +01:00
|
|
|
HttpContent chunk1 = new DefaultHttpContent(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
|
|
|
|
HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII));
|
|
|
|
LastHttpContent trailer = new DefaultLastHttpContent();
|
2015-09-03 00:10:58 +02:00
|
|
|
trailer.trailingHeaders().set(of("X-Trailer"), true);
|
2012-07-19 13:23:55 +02:00
|
|
|
|
2012-07-03 10:37:05 +02:00
|
|
|
assertFalse(embedder.writeInbound(message));
|
|
|
|
assertFalse(embedder.writeInbound(chunk1));
|
|
|
|
assertFalse(embedder.writeInbound(chunk2));
|
2012-07-19 13:23:55 +02:00
|
|
|
|
2013-07-09 16:09:28 +02:00
|
|
|
// this should trigger a channelRead event so return true
|
2012-07-03 10:37:05 +02:00
|
|
|
assertTrue(embedder.writeInbound(trailer));
|
|
|
|
assertTrue(embedder.finish());
|
2017-04-19 22:37:03 +02:00
|
|
|
FullHttpRequest aggregatedMessage = embedder.readInbound();
|
|
|
|
assertNotNull(aggregatedMessage);
|
2012-07-19 13:23:55 +02:00
|
|
|
|
2013-05-01 10:04:43 +02:00
|
|
|
assertEquals(chunk1.content().readableBytes() + chunk2.content().readableBytes(),
|
2017-04-19 22:37:03 +02:00
|
|
|
HttpUtil.getContentLength(aggregatedMessage));
|
|
|
|
assertEquals(Boolean.TRUE.toString(), aggregatedMessage.headers().get(of("X-Test")));
|
|
|
|
assertEquals(Boolean.TRUE.toString(), aggregatedMessage.trailingHeaders().get(of("X-Trailer")));
|
|
|
|
checkContentBuffer(aggregatedMessage);
|
2012-07-03 10:37:05 +02:00
|
|
|
assertNull(embedder.readInbound());
|
|
|
|
}
|
|
|
|
|
2013-05-09 19:44:39 +02:00
|
|
|
@Test
|
2014-02-20 20:35:23 +01:00
|
|
|
public void testOversizedRequest() {
|
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(4));
|
2014-02-20 22:41:54 +01:00
|
|
|
HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "http://localhost");
|
2014-02-20 20:46:41 +01:00
|
|
|
HttpContent chunk1 = new DefaultHttpContent(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
|
|
|
|
HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII));
|
2014-02-07 17:59:51 +01:00
|
|
|
HttpContent chunk3 = LastHttpContent.EMPTY_LAST_CONTENT;
|
2013-05-09 19:44:39 +02:00
|
|
|
|
2012-07-03 10:37:05 +02:00
|
|
|
assertFalse(embedder.writeInbound(message));
|
2014-02-20 20:46:41 +01:00
|
|
|
assertFalse(embedder.writeInbound(chunk1));
|
|
|
|
assertFalse(embedder.writeInbound(chunk2));
|
2014-02-07 17:59:51 +01:00
|
|
|
|
|
|
|
FullHttpResponse response = embedder.readOutbound();
|
2014-06-24 10:39:46 +02:00
|
|
|
assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
|
2014-10-31 08:48:28 +01:00
|
|
|
assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH));
|
2014-02-07 17:59:51 +01:00
|
|
|
assertFalse(embedder.isOpen());
|
|
|
|
|
2013-05-09 19:44:39 +02:00
|
|
|
try {
|
2014-02-20 20:46:41 +01:00
|
|
|
assertFalse(embedder.writeInbound(chunk3));
|
2013-05-09 19:44:39 +02:00
|
|
|
fail();
|
2014-02-07 17:59:51 +01:00
|
|
|
} catch (Exception e) {
|
|
|
|
assertTrue(e instanceof ClosedChannelException);
|
2013-05-09 19:44:39 +02:00
|
|
|
}
|
2014-02-07 17:59:51 +01:00
|
|
|
|
|
|
|
assertFalse(embedder.finish());
|
|
|
|
}
|
|
|
|
|
2019-05-17 21:18:03 +02:00
|
|
|
    @Test
    public void testOversizedRequestWithContentLengthAndDecoder() {
        // Aggregator limit is 4 bytes; closeOnExpectationFailed == false, so the
        // channel stays open after rejecting an oversized request.
        EmbeddedChannel embedder = new EmbeddedChannel(new HttpRequestDecoder(), new HttpObjectAggregator(4, false));
        // Declared Content-Length (5) already exceeds the limit, so the request is
        // rejected from the header alone.
        assertFalse(embedder.writeInbound(Unpooled.copiedBuffer(
                "PUT /upload HTTP/1.1\r\n" +
                        "Content-Length: 5\r\n\r\n", CharsetUtil.US_ASCII)));

        // Nothing was aggregated inbound.
        assertNull(embedder.readInbound());

        // A 413 with an empty body was written back.
        FullHttpResponse response = embedder.readOutbound();
        assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
        assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH));

        assertTrue(embedder.isOpen());

        // The 5 body bytes belonging to the rejected request are swallowed.
        assertFalse(embedder.writeInbound(Unpooled.wrappedBuffer(new byte[] { 1, 2, 3, 4 })));
        assertFalse(embedder.writeInbound(Unpooled.wrappedBuffer(new byte[] { 5 })));

        assertNull(embedder.readOutbound());

        // A follow-up request within the limit (Content-Length: 2) is accepted.
        assertFalse(embedder.writeInbound(Unpooled.copiedBuffer(
                "PUT /upload HTTP/1.1\r\n" +
                        "Content-Length: 2\r\n\r\n", CharsetUtil.US_ASCII)));

        // NOTE(review): these assertions re-check the 413 response object read
        // above (no new readOutbound() call) — presumably intentional, to show no
        // additional response was produced for the second request; confirm.
        assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
        assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH));

        assertThat(response, instanceOf(LastHttpContent.class));
        ReferenceCountUtil.release(response);

        assertTrue(embedder.isOpen());

        // Body of the second request: first byte keeps the message incomplete,
        // the second completes it and triggers aggregation.
        assertFalse(embedder.writeInbound(Unpooled.copiedBuffer(new byte[] { 1 })));
        assertNull(embedder.readOutbound());
        assertTrue(embedder.writeInbound(Unpooled.copiedBuffer(new byte[] { 2 })));
        assertNull(embedder.readOutbound());

        // The second request is aggregated normally.
        FullHttpRequest request = embedder.readInbound();
        assertEquals(HttpVersion.HTTP_1_1, request.protocolVersion());
        assertEquals(HttpMethod.PUT, request.method());
        assertEquals("/upload", request.uri());
        assertEquals(2, HttpUtil.getContentLength(request));

        byte[] actual = new byte[request.content().readableBytes()];
        request.content().readBytes(actual);
        assertArrayEquals(new byte[] { 1, 2 }, actual);
        request.release();

        assertFalse(embedder.finish());
    }
|
|
|
|
|
2014-02-07 17:59:51 +01:00
|
|
|
@Test
|
2014-02-20 20:35:23 +01:00
|
|
|
public void testOversizedRequestWithoutKeepAlive() {
|
2014-02-07 17:59:51 +01:00
|
|
|
// send a HTTP/1.0 request with no keep-alive header
|
2014-02-20 22:41:54 +01:00
|
|
|
HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.PUT, "http://localhost");
|
2015-08-22 17:25:57 +02:00
|
|
|
HttpUtil.setContentLength(message, 5);
|
2014-02-20 20:46:41 +01:00
|
|
|
checkOversizedRequest(message);
|
2014-02-07 17:59:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
2014-02-20 20:35:23 +01:00
|
|
|
public void testOversizedRequestWithContentLength() {
|
2014-02-20 22:41:54 +01:00
|
|
|
HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "http://localhost");
|
2015-08-22 17:25:57 +02:00
|
|
|
HttpUtil.setContentLength(message, 5);
|
2014-02-20 20:46:41 +01:00
|
|
|
checkOversizedRequest(message);
|
2014-02-07 17:59:51 +01:00
|
|
|
}
|
|
|
|
|
2014-02-20 20:46:41 +01:00
|
|
|
private static void checkOversizedRequest(HttpRequest message) {
|
2014-02-20 20:35:23 +01:00
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(4));
|
2014-02-07 17:59:51 +01:00
|
|
|
|
|
|
|
assertFalse(embedder.writeInbound(message));
|
|
|
|
HttpResponse response = embedder.readOutbound();
|
2014-06-24 10:39:46 +02:00
|
|
|
assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
|
2014-10-31 08:48:28 +01:00
|
|
|
assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH));
|
2014-10-07 15:56:15 +02:00
|
|
|
|
2019-04-08 21:09:06 +02:00
|
|
|
assertThat(response, instanceOf(LastHttpContent.class));
|
|
|
|
ReferenceCountUtil.release(response);
|
|
|
|
|
2016-11-04 22:22:16 +01:00
|
|
|
if (serverShouldCloseConnection(message, response)) {
|
2014-10-07 15:56:15 +02:00
|
|
|
assertFalse(embedder.isOpen());
|
2019-04-08 21:09:06 +02:00
|
|
|
|
|
|
|
try {
|
|
|
|
embedder.writeInbound(new DefaultHttpContent(Unpooled.EMPTY_BUFFER));
|
|
|
|
fail();
|
|
|
|
} catch (Exception e) {
|
|
|
|
assertThat(e, instanceOf(ClosedChannelException.class));
|
|
|
|
// expected
|
|
|
|
}
|
2014-10-07 15:56:15 +02:00
|
|
|
assertFalse(embedder.finish());
|
|
|
|
} else {
|
|
|
|
assertTrue(embedder.isOpen());
|
2019-04-08 21:09:06 +02:00
|
|
|
assertFalse(embedder.writeInbound(new DefaultHttpContent(Unpooled.copiedBuffer(new byte[8]))));
|
|
|
|
assertFalse(embedder.writeInbound(new DefaultHttpContent(Unpooled.copiedBuffer(new byte[8]))));
|
|
|
|
|
|
|
|
// Now start a new message and ensure we will not reject it again.
|
|
|
|
HttpRequest message2 = new DefaultHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.PUT, "http://localhost");
|
|
|
|
HttpUtil.setContentLength(message, 2);
|
|
|
|
|
|
|
|
assertFalse(embedder.writeInbound(message2));
|
|
|
|
assertNull(embedder.readOutbound());
|
|
|
|
assertFalse(embedder.writeInbound(new DefaultHttpContent(Unpooled.copiedBuffer(new byte[] { 1 }))));
|
|
|
|
assertNull(embedder.readOutbound());
|
|
|
|
assertTrue(embedder.writeInbound(new DefaultLastHttpContent(Unpooled.copiedBuffer(new byte[] { 2 }))));
|
|
|
|
assertNull(embedder.readOutbound());
|
|
|
|
|
|
|
|
FullHttpRequest request = embedder.readInbound();
|
|
|
|
assertEquals(message2.protocolVersion(), request.protocolVersion());
|
|
|
|
assertEquals(message2.method(), request.method());
|
|
|
|
assertEquals(message2.uri(), request.uri());
|
|
|
|
assertEquals(2, HttpUtil.getContentLength(request));
|
|
|
|
|
|
|
|
byte[] actual = new byte[request.content().readableBytes()];
|
|
|
|
request.content().readBytes(actual);
|
|
|
|
assertArrayEquals(new byte[] { 1, 2 }, actual);
|
|
|
|
request.release();
|
|
|
|
|
|
|
|
assertFalse(embedder.finish());
|
2014-10-07 15:56:15 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-04 22:22:16 +01:00
|
|
|
private static boolean serverShouldCloseConnection(HttpRequest message, HttpResponse response) {
|
|
|
|
// If the response wasn't keep-alive, the server should close the connection.
|
|
|
|
if (!HttpUtil.isKeepAlive(response)) {
|
|
|
|
return true;
|
|
|
|
}
|
2014-10-07 15:56:15 +02:00
|
|
|
// The connection should only be kept open if Expect: 100-continue is set,
|
|
|
|
// or if keep-alive is on.
|
2015-08-22 17:25:57 +02:00
|
|
|
if (HttpUtil.is100ContinueExpected(message)) {
|
2014-10-07 15:56:15 +02:00
|
|
|
return false;
|
|
|
|
}
|
2015-08-22 17:25:57 +02:00
|
|
|
if (HttpUtil.isKeepAlive(message)) {
|
2014-10-07 15:56:15 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
2012-07-03 10:37:05 +02:00
|
|
|
}
|
|
|
|
|
2014-02-20 20:46:41 +01:00
|
|
|
@Test
|
|
|
|
public void testOversizedResponse() {
|
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(4));
|
|
|
|
HttpResponse message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
|
|
|
|
HttpContent chunk1 = new DefaultHttpContent(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
|
|
|
|
HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII));
|
|
|
|
|
|
|
|
assertFalse(embedder.writeInbound(message));
|
|
|
|
assertFalse(embedder.writeInbound(chunk1));
|
|
|
|
|
|
|
|
try {
|
|
|
|
embedder.writeInbound(chunk2);
|
|
|
|
fail();
|
|
|
|
} catch (TooLongFrameException expected) {
|
|
|
|
// Expected
|
|
|
|
}
|
|
|
|
|
|
|
|
assertFalse(embedder.isOpen());
|
|
|
|
assertFalse(embedder.finish());
|
|
|
|
}
|
|
|
|
|
2012-07-03 10:37:05 +02:00
|
|
|
    @Test(expected = IllegalArgumentException.class)
    public void testInvalidConstructorUsage() {
        // A negative maxContentLength must be rejected by the constructor.
        new HttpObjectAggregator(-1);
    }
|
2012-07-19 13:23:55 +02:00
|
|
|
|
2012-07-03 10:37:05 +02:00
|
|
|
@Test(expected = IllegalArgumentException.class)
|
|
|
|
public void testInvalidMaxCumulationBufferComponents() {
|
2013-03-27 08:31:43 +01:00
|
|
|
HttpObjectAggregator aggr = new HttpObjectAggregator(Integer.MAX_VALUE);
|
2012-07-03 10:37:05 +02:00
|
|
|
aggr.setMaxCumulationBufferComponents(1);
|
|
|
|
}
|
2012-07-19 13:23:55 +02:00
|
|
|
|
2012-07-03 10:37:05 +02:00
|
|
|
@Test(expected = IllegalStateException.class)
|
|
|
|
public void testSetMaxCumulationBufferComponentsAfterInit() throws Exception {
|
2013-01-14 16:52:30 +01:00
|
|
|
HttpObjectAggregator aggr = new HttpObjectAggregator(Integer.MAX_VALUE);
|
2017-01-09 22:04:18 +01:00
|
|
|
ChannelHandlerContext ctx = Mockito.mock(ChannelHandlerContext.class);
|
2013-04-05 15:46:18 +02:00
|
|
|
aggr.handlerAdded(ctx);
|
2017-01-09 22:04:18 +01:00
|
|
|
Mockito.verifyNoMoreInteractions(ctx);
|
2012-07-03 10:37:05 +02:00
|
|
|
aggr.setMaxCumulationBufferComponents(10);
|
|
|
|
}
|
2013-04-30 20:41:20 +02:00
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testAggregateTransferEncodingChunked() {
|
|
|
|
HttpObjectAggregator aggr = new HttpObjectAggregator(1024 * 1024);
|
Revamp the core API to reduce memory footprint and consumption
The API changes made so far turned out to increase the memory footprint
and consumption while our intention was actually decreasing them.
Memory consumption issue:
When there are many connections which does not exchange data frequently,
the old Netty 4 API spent a lot more memory than 3 because it always
allocates per-handler buffer for each connection unless otherwise
explicitly stated by a user. In a usual real world load, a client
doesn't always send requests without pausing, so the idea of having a
buffer whose life cycle if bound to the life cycle of a connection
didn't work as expected.
Memory footprint issue:
The old Netty 4 API decreased overall memory footprint by a great deal
in many cases. It was mainly because the old Netty 4 API did not
allocate a new buffer and event object for each read. Instead, it
created a new buffer for each handler in a pipeline. This works pretty
well as long as the number of handlers in a pipeline is only a few.
However, for a highly modular application with many handlers which
handles connections which lasts for relatively short period, it actually
makes the memory footprint issue much worse.
Changes:
All in all, this is about retaining all the good changes we made in 4 so
far such as better thread model and going back to the way how we dealt
with message events in 3.
To fix the memory consumption/footprint issue mentioned above, we made a
hard decision to break the backward compatibility again with the
following changes:
- Remove MessageBuf
- Merge Buf into ByteBuf
- Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler
- Similar changes were made to the adapter classes
- Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler
- Similar changes were made to the adapter classes
- Introduce MessageList which is similar to `MessageEvent` in Netty 3
- Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList)
- Replace flush(ctx, promise) with write(ctx, MessageList, promise)
- Remove ByteToByteEncoder/Decoder/Codec
- Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteMessageCodec<ByteBuf>
- Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel
- Add SimpleChannelInboundHandler which is sometimes more useful than
ChannelInboundHandlerAdapter
- Bring back Channel.isWritable() from Netty 3
- Add ChannelInboundHandler.channelWritabilityChanges() event
- Add RecvByteBufAllocator configuration property
- Similar to ReceiveBufferSizePredictor in Netty 3
- Some existing configuration properties such as
DatagramChannelConfig.receivePacketSize is gone now.
- Remove suspend/resumeIntermediaryDeallocation() in ByteBuf
This change would have been impossible without @normanmaurer's help. He
fixed, ported, and improved many parts of the changes.
2013-05-28 13:40:19 +02:00
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(aggr);
|
2013-04-30 20:41:20 +02:00
|
|
|
|
2014-02-20 22:41:54 +01:00
|
|
|
HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "http://localhost");
|
2015-09-03 00:10:58 +02:00
|
|
|
message.headers().set(of("X-Test"), true);
|
|
|
|
message.headers().set(of("Transfer-Encoding"), of("Chunked"));
|
2013-04-30 20:41:20 +02:00
|
|
|
HttpContent chunk1 = new DefaultHttpContent(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
|
|
|
|
HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII));
|
|
|
|
HttpContent chunk3 = LastHttpContent.EMPTY_LAST_CONTENT;
|
|
|
|
assertFalse(embedder.writeInbound(message));
|
|
|
|
assertFalse(embedder.writeInbound(chunk1));
|
|
|
|
assertFalse(embedder.writeInbound(chunk2));
|
|
|
|
|
2013-07-09 16:09:28 +02:00
|
|
|
// this should trigger a channelRead event so return true
|
2013-04-30 20:41:20 +02:00
|
|
|
assertTrue(embedder.writeInbound(chunk3));
|
|
|
|
assertTrue(embedder.finish());
|
2017-04-19 22:37:03 +02:00
|
|
|
FullHttpRequest aggregatedMessage = embedder.readInbound();
|
|
|
|
assertNotNull(aggregatedMessage);
|
2013-04-30 20:41:20 +02:00
|
|
|
|
2013-05-01 10:04:43 +02:00
|
|
|
assertEquals(chunk1.content().readableBytes() + chunk2.content().readableBytes(),
|
2017-04-19 22:37:03 +02:00
|
|
|
HttpUtil.getContentLength(aggregatedMessage));
|
|
|
|
assertEquals(Boolean.TRUE.toString(), aggregatedMessage.headers().get(of("X-Test")));
|
|
|
|
checkContentBuffer(aggregatedMessage);
|
2013-04-30 20:41:20 +02:00
|
|
|
assertNull(embedder.readInbound());
|
|
|
|
}
|
2014-02-20 00:31:53 +01:00
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testBadRequest() {
|
|
|
|
EmbeddedChannel ch = new EmbeddedChannel(new HttpRequestDecoder(), new HttpObjectAggregator(1024 * 1024));
|
|
|
|
ch.writeInbound(Unpooled.copiedBuffer("GET / HTTP/1.0 with extra\r\n", CharsetUtil.UTF_8));
|
2014-06-28 06:27:21 +02:00
|
|
|
Object inbound = ch.readInbound();
|
|
|
|
assertThat(inbound, is(instanceOf(FullHttpRequest.class)));
|
2014-07-02 12:04:11 +02:00
|
|
|
assertTrue(((DecoderResultProvider) inbound).decoderResult().isFailure());
|
2014-02-20 00:31:53 +01:00
|
|
|
assertNull(ch.readInbound());
|
|
|
|
ch.finish();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testBadResponse() throws Exception {
|
|
|
|
EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(), new HttpObjectAggregator(1024 * 1024));
|
|
|
|
ch.writeInbound(Unpooled.copiedBuffer("HTTP/1.0 BAD_CODE Bad Server\r\n", CharsetUtil.UTF_8));
|
2014-06-28 06:27:21 +02:00
|
|
|
Object inbound = ch.readInbound();
|
|
|
|
assertThat(inbound, is(instanceOf(FullHttpResponse.class)));
|
2014-07-02 12:04:11 +02:00
|
|
|
assertTrue(((DecoderResultProvider) inbound).decoderResult().isFailure());
|
2014-02-20 00:31:53 +01:00
|
|
|
assertNull(ch.readInbound());
|
|
|
|
ch.finish();
|
|
|
|
}
|
2015-08-04 00:09:44 +02:00
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testOversizedRequestWith100Continue() {
|
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(8));
|
|
|
|
|
|
|
|
// Send an oversized request with 100 continue.
|
|
|
|
HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "http://localhost");
|
2015-08-22 17:25:57 +02:00
|
|
|
HttpUtil.set100ContinueExpected(message, true);
|
|
|
|
HttpUtil.setContentLength(message, 16);
|
2015-08-04 00:09:44 +02:00
|
|
|
|
2016-11-03 08:04:57 +01:00
|
|
|
HttpContent chunk1 = new DefaultHttpContent(Unpooled.copiedBuffer("some", CharsetUtil.US_ASCII));
|
|
|
|
HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
|
2015-08-04 00:09:44 +02:00
|
|
|
HttpContent chunk3 = LastHttpContent.EMPTY_LAST_CONTENT;
|
|
|
|
|
|
|
|
// Send a request with 100-continue + large Content-Length header value.
|
|
|
|
assertFalse(embedder.writeInbound(message));
|
|
|
|
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectatation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
// The aggregator should respond with '413.'
|
2016-11-03 08:04:57 +01:00
|
|
|
FullHttpResponse response = embedder.readOutbound();
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectatation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
|
2015-08-04 00:09:44 +02:00
|
|
|
assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH));
|
|
|
|
|
|
|
|
// An ill-behaving client could continue to send data without a respect, and such data should be discarded.
|
|
|
|
assertFalse(embedder.writeInbound(chunk1));
|
|
|
|
|
|
|
|
// The aggregator should not close the connection because keep-alive is on.
|
|
|
|
assertTrue(embedder.isOpen());
|
|
|
|
|
|
|
|
// Now send a valid request.
|
|
|
|
HttpRequest message2 = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "http://localhost");
|
|
|
|
|
|
|
|
assertFalse(embedder.writeInbound(message2));
|
|
|
|
assertFalse(embedder.writeInbound(chunk2));
|
|
|
|
assertTrue(embedder.writeInbound(chunk3));
|
|
|
|
|
2017-03-08 20:16:06 +01:00
|
|
|
FullHttpRequest fullMsg = embedder.readInbound();
|
2015-08-04 00:09:44 +02:00
|
|
|
assertNotNull(fullMsg);
|
|
|
|
|
|
|
|
assertEquals(
|
|
|
|
chunk2.content().readableBytes() + chunk3.content().readableBytes(),
|
2015-08-22 17:25:57 +02:00
|
|
|
HttpUtil.getContentLength(fullMsg));
|
2015-08-04 00:09:44 +02:00
|
|
|
|
2015-08-22 17:25:57 +02:00
|
|
|
assertEquals(HttpUtil.getContentLength(fullMsg), fullMsg.content().readableBytes());
|
2015-08-04 00:09:44 +02:00
|
|
|
|
|
|
|
fullMsg.release();
|
|
|
|
assertFalse(embedder.finish());
|
|
|
|
}
|
|
|
|
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
@Test
|
|
|
|
public void testUnsupportedExpectHeaderExpectation() {
|
|
|
|
runUnsupportedExceptHeaderExceptionTest(true);
|
|
|
|
runUnsupportedExceptHeaderExceptionTest(false);
|
|
|
|
}
|
|
|
|
|
2017-03-08 20:16:06 +01:00
|
|
|
private static void runUnsupportedExceptHeaderExceptionTest(final boolean close) {
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectatation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
final HttpObjectAggregator aggregator;
|
|
|
|
final int maxContentLength = 4;
|
|
|
|
if (close) {
|
|
|
|
aggregator = new HttpObjectAggregator(maxContentLength, true);
|
|
|
|
} else {
|
|
|
|
aggregator = new HttpObjectAggregator(maxContentLength);
|
|
|
|
}
|
|
|
|
final EmbeddedChannel embedder = new EmbeddedChannel(new HttpRequestDecoder(), aggregator);
|
|
|
|
|
|
|
|
assertFalse(embedder.writeInbound(Unpooled.copiedBuffer(
|
|
|
|
"GET / HTTP/1.1\r\n" +
|
|
|
|
"Expect: chocolate=yummy\r\n" +
|
|
|
|
"Content-Length: 100\r\n\r\n", CharsetUtil.US_ASCII)));
|
|
|
|
assertNull(embedder.readInbound());
|
|
|
|
|
2017-03-08 20:16:06 +01:00
|
|
|
final FullHttpResponse response = embedder.readOutbound();
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectatation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
assertEquals(HttpResponseStatus.EXPECTATION_FAILED, response.status());
|
|
|
|
assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH));
|
|
|
|
response.release();
|
|
|
|
|
|
|
|
if (close) {
|
|
|
|
assertFalse(embedder.isOpen());
|
|
|
|
} else {
|
|
|
|
// keep-alive is on by default in HTTP/1.1, so the connection should be still alive
|
|
|
|
assertTrue(embedder.isOpen());
|
|
|
|
|
|
|
|
// the decoder should be reset by the aggregator at this point and be able to decode the next request
|
|
|
|
assertTrue(embedder.writeInbound(Unpooled.copiedBuffer("GET / HTTP/1.1\r\n\r\n", CharsetUtil.US_ASCII)));
|
|
|
|
|
2017-03-08 20:16:06 +01:00
|
|
|
final FullHttpRequest request = embedder.readInbound();
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectatation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
assertThat(request.method(), is(HttpMethod.GET));
|
|
|
|
assertThat(request.uri(), is("/"));
|
|
|
|
assertThat(request.content().readableBytes(), is(0));
|
|
|
|
request.release();
|
|
|
|
}
|
|
|
|
|
|
|
|
assertFalse(embedder.finish());
|
|
|
|
}
|
|
|
|
|
2017-08-11 01:50:00 +02:00
|
|
|
@Test
|
|
|
|
public void testValidRequestWith100ContinueAndDecoder() {
|
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpRequestDecoder(), new HttpObjectAggregator(100));
|
|
|
|
embedder.writeInbound(Unpooled.copiedBuffer(
|
|
|
|
"GET /upload HTTP/1.1\r\n" +
|
|
|
|
"Expect: 100-continue\r\n" +
|
|
|
|
"Content-Length: 0\r\n\r\n", CharsetUtil.US_ASCII));
|
|
|
|
|
|
|
|
FullHttpResponse response = embedder.readOutbound();
|
|
|
|
assertEquals(HttpResponseStatus.CONTINUE, response.status());
|
|
|
|
FullHttpRequest request = embedder.readInbound();
|
|
|
|
assertFalse(request.headers().contains(HttpHeaderNames.EXPECT));
|
|
|
|
request.release();
|
|
|
|
response.release();
|
|
|
|
assertFalse(embedder.finish());
|
|
|
|
}
|
|
|
|
|
2015-08-04 00:09:44 +02:00
|
|
|
@Test
|
|
|
|
public void testOversizedRequestWith100ContinueAndDecoder() {
|
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpRequestDecoder(), new HttpObjectAggregator(4));
|
|
|
|
embedder.writeInbound(Unpooled.copiedBuffer(
|
|
|
|
"PUT /upload HTTP/1.1\r\n" +
|
|
|
|
"Expect: 100-continue\r\n" +
|
|
|
|
"Content-Length: 100\r\n\r\n", CharsetUtil.US_ASCII));
|
|
|
|
|
|
|
|
assertNull(embedder.readInbound());
|
|
|
|
|
2017-03-08 20:16:06 +01:00
|
|
|
FullHttpResponse response = embedder.readOutbound();
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectatation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
|
2015-08-04 00:09:44 +02:00
|
|
|
assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH));
|
|
|
|
|
|
|
|
// Keep-alive is on by default in HTTP/1.1, so the connection should be still alive.
|
|
|
|
assertTrue(embedder.isOpen());
|
|
|
|
|
|
|
|
// The decoder should be reset by the aggregator at this point and be able to decode the next request.
|
|
|
|
embedder.writeInbound(Unpooled.copiedBuffer("GET /max-upload-size HTTP/1.1\r\n\r\n", CharsetUtil.US_ASCII));
|
|
|
|
|
2017-03-08 20:16:06 +01:00
|
|
|
FullHttpRequest request = embedder.readInbound();
|
2015-08-04 00:09:44 +02:00
|
|
|
assertThat(request.method(), is(HttpMethod.GET));
|
|
|
|
assertThat(request.uri(), is("/max-upload-size"));
|
|
|
|
assertThat(request.content().readableBytes(), is(0));
|
|
|
|
request.release();
|
|
|
|
|
|
|
|
assertFalse(embedder.finish());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testOversizedRequestWith100ContinueAndDecoderCloseConnection() {
|
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpRequestDecoder(), new HttpObjectAggregator(4, true));
|
|
|
|
embedder.writeInbound(Unpooled.copiedBuffer(
|
|
|
|
"PUT /upload HTTP/1.1\r\n" +
|
|
|
|
"Expect: 100-continue\r\n" +
|
|
|
|
"Content-Length: 100\r\n\r\n", CharsetUtil.US_ASCII));
|
|
|
|
|
|
|
|
assertNull(embedder.readInbound());
|
|
|
|
|
2017-03-08 20:16:06 +01:00
|
|
|
FullHttpResponse response = embedder.readOutbound();
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectatation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
|
2015-08-04 00:09:44 +02:00
|
|
|
assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH));
|
|
|
|
|
|
|
|
// We are forcing the connection closed if an expectation is exceeded.
|
|
|
|
assertFalse(embedder.isOpen());
|
|
|
|
assertFalse(embedder.finish());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testRequestAfterOversized100ContinueAndDecoder() {
|
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpRequestDecoder(), new HttpObjectAggregator(15));
|
|
|
|
|
|
|
|
// Write first request with Expect: 100-continue.
|
|
|
|
HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "http://localhost");
|
2015-08-22 17:25:57 +02:00
|
|
|
HttpUtil.set100ContinueExpected(message, true);
|
|
|
|
HttpUtil.setContentLength(message, 16);
|
2015-08-04 00:09:44 +02:00
|
|
|
|
2016-11-03 08:04:57 +01:00
|
|
|
HttpContent chunk1 = new DefaultHttpContent(Unpooled.copiedBuffer("some", CharsetUtil.US_ASCII));
|
|
|
|
HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
|
2015-08-04 00:09:44 +02:00
|
|
|
HttpContent chunk3 = LastHttpContent.EMPTY_LAST_CONTENT;
|
|
|
|
|
|
|
|
// Send a request with 100-continue + large Content-Length header value.
|
|
|
|
assertFalse(embedder.writeInbound(message));
|
|
|
|
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
// The aggregator should respond with '413'.
|
2016-11-03 08:04:57 +01:00
|
|
|
FullHttpResponse response = embedder.readOutbound();
|
Correct expect header handling
Motivation:
Today, the HTTP codec in Netty responds to HTTP/1.1 requests containing
an "expect: 100-continue" header and a content-length that exceeds the
max content length for the server with a 417 status (Expectation
Failed). This is a violation of the HTTP specification. The purpose of
this commit is to address this situation by modifying the HTTP codec to
respond in this situation with a 413 status (Request Entity Too
Large). Additionally, the HTTP codec ignores expectations in the expect
header that are currently unsupported. This commit also addresses this
situation by responding with a 417 status.
Handling the expect header is tricky business as the specification (RFC
2616) is more complicated than it needs to be. The specification defines
the legitimate values for this header as "100-continue" and defines the
notion of expectation extensions. Further, the specification defines a
417 status (Expectation Failed) and this is where implementations go
astray. The intent of the specification was for servers to respond with
417 status when they do not support the expectation in the expect
header.
The key sentence from the specification follows:
The server MUST respond with a 417 (Expectation Failed) status if
any of the expectations cannot be met or, if there are other
problems with the request, some other 4xx status.
That is, a server should respond with a 417 status if and only if there
is an expectation that the server does not support (whether it be
100-continue, or another expectation extension), and should respond with
another 4xx status code if the expectation is supported but there is
something else wrong with the request.
Modifications:
This commit modifies the HTTP codec by changing the handling for the
expect header in the HTTP object aggregator. In particular, the codec
will now respond with 417 status if any expectation other than
100-continue is present in the expect header, the codec will respond
with 413 status if the 100-continue expectation is present in the expect
header and the content-length is larger than the max content length for
the aggregator, and otherwise the codec will respond with 100 status.
Result:
The HTTP codec can now be used to correctly reply to clients that send a
100-continue expectation with a content-length that is too large for the
server with a 413 status, and servers that use the HTTP codec will now
no longer ignore expectations that are not supported (any value other
than 100-continue).
2017-02-14 18:09:52 +01:00
|
|
|
assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
|
2015-08-04 00:09:44 +02:00
|
|
|
assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH));
|
|
|
|
|
|
|
|
// An ill-behaving client could continue to send data without respecting the response, and such data should be discarded.
|
|
|
|
assertFalse(embedder.writeInbound(chunk1));
|
|
|
|
|
|
|
|
// The aggregator should not close the connection because keep-alive is on.
|
|
|
|
assertTrue(embedder.isOpen());
|
|
|
|
|
|
|
|
// Now send a valid request.
|
|
|
|
HttpRequest message2 = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "http://localhost");
|
|
|
|
|
|
|
|
assertFalse(embedder.writeInbound(message2));
|
|
|
|
assertFalse(embedder.writeInbound(chunk2));
|
|
|
|
assertTrue(embedder.writeInbound(chunk3));
|
|
|
|
|
2017-03-08 20:16:06 +01:00
|
|
|
FullHttpRequest fullMsg = embedder.readInbound();
|
2015-08-04 00:09:44 +02:00
|
|
|
assertNotNull(fullMsg);
|
|
|
|
|
|
|
|
assertEquals(
|
|
|
|
chunk2.content().readableBytes() + chunk3.content().readableBytes(),
|
2015-08-22 17:25:57 +02:00
|
|
|
HttpUtil.getContentLength(fullMsg));
|
2015-08-04 00:09:44 +02:00
|
|
|
|
2015-08-22 17:25:57 +02:00
|
|
|
assertEquals(HttpUtil.getContentLength(fullMsg), fullMsg.content().readableBytes());
|
2015-08-04 00:09:44 +02:00
|
|
|
|
|
|
|
fullMsg.release();
|
|
|
|
assertFalse(embedder.finish());
|
|
|
|
}
|
2017-02-05 00:56:40 +01:00
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testReplaceAggregatedRequest() {
|
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(1024 * 1024));
|
|
|
|
|
|
|
|
Exception boom = new Exception("boom");
|
|
|
|
HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "http://localhost");
|
|
|
|
req.setDecoderResult(DecoderResult.failure(boom));
|
|
|
|
|
|
|
|
assertTrue(embedder.writeInbound(req) && embedder.finish());
|
|
|
|
|
|
|
|
FullHttpRequest aggregatedReq = embedder.readInbound();
|
|
|
|
FullHttpRequest replacedReq = aggregatedReq.replace(Unpooled.EMPTY_BUFFER);
|
|
|
|
|
|
|
|
assertEquals(replacedReq.decoderResult(), aggregatedReq.decoderResult());
|
|
|
|
aggregatedReq.release();
|
|
|
|
replacedReq.release();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testReplaceAggregatedResponse() {
|
|
|
|
EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(1024 * 1024));
|
|
|
|
|
|
|
|
Exception boom = new Exception("boom");
|
|
|
|
HttpResponse rep = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
|
|
|
|
rep.setDecoderResult(DecoderResult.failure(boom));
|
|
|
|
|
|
|
|
assertTrue(embedder.writeInbound(rep) && embedder.finish());
|
|
|
|
|
|
|
|
FullHttpResponse aggregatedRep = embedder.readInbound();
|
|
|
|
FullHttpResponse replacedRep = aggregatedRep.replace(Unpooled.EMPTY_BUFFER);
|
|
|
|
|
|
|
|
assertEquals(replacedRep.decoderResult(), aggregatedRep.decoderResult());
|
|
|
|
aggregatedRep.release();
|
|
|
|
replacedRep.release();
|
|
|
|
}
|
2019-02-04 09:57:54 +01:00
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testSelectiveRequestAggregation() {
|
|
|
|
HttpObjectAggregator myPostAggregator = new HttpObjectAggregator(1024 * 1024) {
|
|
|
|
@Override
|
|
|
|
protected boolean isStartMessage(HttpObject msg) throws Exception {
|
|
|
|
if (msg instanceof HttpRequest) {
|
|
|
|
HttpRequest request = (HttpRequest) msg;
|
|
|
|
HttpMethod method = request.method();
|
|
|
|
|
|
|
|
if (method.equals(HttpMethod.POST)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
EmbeddedChannel channel = new EmbeddedChannel(myPostAggregator);
|
|
|
|
|
|
|
|
try {
|
|
|
|
// Aggregate: POST
|
|
|
|
HttpRequest request1 = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
|
|
|
|
HttpContent content1 = new DefaultHttpContent(Unpooled.copiedBuffer("Hello, World!", CharsetUtil.UTF_8));
|
|
|
|
request1.headers().set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.TEXT_PLAIN);
|
|
|
|
|
|
|
|
assertTrue(channel.writeInbound(request1, content1, LastHttpContent.EMPTY_LAST_CONTENT));
|
|
|
|
|
|
|
|
// Getting an aggregated response out
|
|
|
|
Object msg1 = channel.readInbound();
|
|
|
|
try {
|
|
|
|
assertTrue(msg1 instanceof FullHttpRequest);
|
|
|
|
} finally {
|
|
|
|
ReferenceCountUtil.release(msg1);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Don't aggregate: non-POST
|
|
|
|
HttpRequest request2 = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "/");
|
|
|
|
HttpContent content2 = new DefaultHttpContent(Unpooled.copiedBuffer("Hello, World!", CharsetUtil.UTF_8));
|
|
|
|
request2.headers().set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.TEXT_PLAIN);
|
|
|
|
|
|
|
|
try {
|
|
|
|
assertTrue(channel.writeInbound(request2, content2, LastHttpContent.EMPTY_LAST_CONTENT));
|
|
|
|
|
|
|
|
// Getting the same response objects out
|
|
|
|
assertSame(request2, channel.readInbound());
|
|
|
|
assertSame(content2, channel.readInbound());
|
|
|
|
assertSame(LastHttpContent.EMPTY_LAST_CONTENT, channel.readInbound());
|
|
|
|
} finally {
|
|
|
|
ReferenceCountUtil.release(request2);
|
|
|
|
ReferenceCountUtil.release(content2);
|
|
|
|
}
|
|
|
|
|
|
|
|
assertFalse(channel.finish());
|
|
|
|
} finally {
|
|
|
|
channel.close();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
@Test
|
|
|
|
public void testSelectiveResponseAggregation() {
|
|
|
|
HttpObjectAggregator myTextAggregator = new HttpObjectAggregator(1024 * 1024) {
|
|
|
|
@Override
|
|
|
|
protected boolean isStartMessage(HttpObject msg) throws Exception {
|
|
|
|
if (msg instanceof HttpResponse) {
|
|
|
|
HttpResponse response = (HttpResponse) msg;
|
|
|
|
HttpHeaders headers = response.headers();
|
|
|
|
|
|
|
|
String contentType = headers.get(HttpHeaderNames.CONTENT_TYPE);
|
|
|
|
if (AsciiString.contentEqualsIgnoreCase(contentType, HttpHeaderValues.TEXT_PLAIN)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
EmbeddedChannel channel = new EmbeddedChannel(myTextAggregator);
|
|
|
|
|
|
|
|
try {
|
|
|
|
// Aggregate: text/plain
|
|
|
|
HttpResponse response1 = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
|
|
|
|
HttpContent content1 = new DefaultHttpContent(Unpooled.copiedBuffer("Hello, World!", CharsetUtil.UTF_8));
|
|
|
|
response1.headers().set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.TEXT_PLAIN);
|
|
|
|
|
|
|
|
assertTrue(channel.writeInbound(response1, content1, LastHttpContent.EMPTY_LAST_CONTENT));
|
|
|
|
|
|
|
|
// Getting an aggregated response out
|
|
|
|
Object msg1 = channel.readInbound();
|
|
|
|
try {
|
|
|
|
assertTrue(msg1 instanceof FullHttpResponse);
|
|
|
|
} finally {
|
|
|
|
ReferenceCountUtil.release(msg1);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Don't aggregate: application/json
|
|
|
|
HttpResponse response2 = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
|
|
|
|
HttpContent content2 = new DefaultHttpContent(Unpooled.copiedBuffer("{key: 'value'}", CharsetUtil.UTF_8));
|
|
|
|
response2.headers().set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_JSON);
|
|
|
|
|
|
|
|
try {
|
|
|
|
assertTrue(channel.writeInbound(response2, content2, LastHttpContent.EMPTY_LAST_CONTENT));
|
|
|
|
|
|
|
|
// Getting the same response objects out
|
|
|
|
assertSame(response2, channel.readInbound());
|
|
|
|
assertSame(content2, channel.readInbound());
|
|
|
|
assertSame(LastHttpContent.EMPTY_LAST_CONTENT, channel.readInbound());
|
|
|
|
} finally {
|
|
|
|
ReferenceCountUtil.release(response2);
|
|
|
|
ReferenceCountUtil.release(content2);
|
|
|
|
}
|
|
|
|
|
|
|
|
assertFalse(channel.finish());
|
|
|
|
} finally {
|
|
|
|
channel.close();
|
|
|
|
}
|
|
|
|
}
|
2012-07-03 10:37:05 +02:00
|
|
|
}
|