The API changes made so far turned out to increase the memory footprint and consumption while our intention was actually to decrease them. Memory consumption issue: When there are many connections that do not exchange data frequently, the old Netty 4 API consumed a lot more memory than Netty 3 because it always allocated a per-handler buffer for each connection unless otherwise explicitly stated by a user. In a usual real-world load, a client doesn't always send requests without pausing, so the idea of having a buffer whose life cycle is bound to the life cycle of a connection didn't work as expected. Memory footprint issue: The old Netty 4 API decreased overall memory footprint by a great deal in many cases. That was mainly because the old Netty 4 API did not allocate a new buffer and event object for each read. Instead, it created a new buffer for each handler in a pipeline. This works pretty well as long as the number of handlers in a pipeline is only a few. However, for a highly modular application with many handlers that handles connections that last for a relatively short period, it actually makes the memory footprint issue much worse. Changes: All in all, this is about retaining all the good changes we made in 4 so far, such as the better thread model, and going back to the way we dealt with message events in 3. 
To fix the memory consumption/footprint issue mentioned above, we made a hard decision to break backward compatibility again with the following changes: - Remove MessageBuf - Merge Buf into ByteBuf - Merge ChannelInboundByte/MessageHandler and ChannelStateHandler into ChannelInboundHandler - Similar changes were made to the adapter classes - Merge ChannelOutboundByte/MessageHandler and ChannelOperationHandler into ChannelOutboundHandler - Similar changes were made to the adapter classes - Introduce MessageList which is similar to `MessageEvent` in Netty 3 - Replace inboundBufferUpdated(ctx) with messageReceived(ctx, MessageList) - Replace flush(ctx, promise) with write(ctx, MessageList, promise) - Remove ByteToByteEncoder/Decoder/Codec - Replaced by MessageToByteEncoder<ByteBuf>, ByteToMessageDecoder<ByteBuf>, and ByteToMessageCodec<ByteBuf> - Merge EmbeddedByteChannel and EmbeddedMessageChannel into EmbeddedChannel - Add SimpleChannelInboundHandler which is sometimes more useful than ChannelInboundHandlerAdapter - Bring back Channel.isWritable() from Netty 3 - Add ChannelInboundHandler.channelWritabilityChanged() event - Add RecvByteBufAllocator configuration property - Similar to ReceiveBufferSizePredictor in Netty 3 - Some existing configuration properties such as DatagramChannelConfig.receivePacketSize are gone now. - Remove suspend/resumeIntermediaryDeallocation() in ByteBuf This change would have been impossible without @normanmaurer's help. He fixed, ported, and improved many parts of the changes.
105 lines
4.5 KiB
Java
105 lines
4.5 KiB
Java
/*
|
|
* Copyright 2012 The Netty Project
|
|
*
|
|
* The Netty Project licenses this file to you under the Apache License,
|
|
* version 2.0 (the "License"); you may not use this file except in compliance
|
|
* with the License. You may obtain a copy of the License at:
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
* License for the specific language governing permissions and limitations
|
|
* under the License.
|
|
*/
|
|
package io.netty.handler.codec.http.websocketx;
|
|
|
|
import io.netty.channel.ChannelFuture;
|
|
import io.netty.channel.ChannelFutureListener;
|
|
import io.netty.channel.ChannelHandlerContext;
|
|
import io.netty.channel.ChannelInboundHandlerAdapter;
|
|
import io.netty.channel.ChannelPipeline;
|
|
import io.netty.channel.MessageList;
|
|
import io.netty.handler.codec.http.DefaultFullHttpResponse;
|
|
import io.netty.handler.codec.http.FullHttpRequest;
|
|
import io.netty.handler.codec.http.HttpHeaders;
|
|
import io.netty.handler.codec.http.HttpRequest;
|
|
import io.netty.handler.codec.http.HttpResponse;
|
|
import io.netty.handler.ssl.SslHandler;
|
|
|
|
import static io.netty.handler.codec.http.HttpHeaders.*;
|
|
import static io.netty.handler.codec.http.HttpMethod.*;
|
|
import static io.netty.handler.codec.http.HttpResponseStatus.*;
|
|
import static io.netty.handler.codec.http.HttpVersion.*;
|
|
|
|
/**
|
|
* Handles the HTTP handshake (the HTTP Upgrade request) for {@link WebSocketServerProtocolHandler}.
|
|
*/
|
|
class WebSocketServerProtocolHandshakeHandler
|
|
extends ChannelInboundHandlerAdapter {
|
|
|
|
private final String websocketPath;
|
|
private final String subprotocols;
|
|
private final boolean allowExtensions;
|
|
|
|
public WebSocketServerProtocolHandshakeHandler(String websocketPath, String subprotocols,
|
|
boolean allowExtensions) {
|
|
this.websocketPath = websocketPath;
|
|
this.subprotocols = subprotocols;
|
|
this.allowExtensions = allowExtensions;
|
|
}
|
|
|
|
@Override
|
|
public void messageReceived(final ChannelHandlerContext ctx, MessageList<Object> msgs) throws Exception {
|
|
MessageList<FullHttpRequest> requests = msgs.cast();
|
|
for (int i = 0; i < requests.size(); i++) {
|
|
FullHttpRequest req = requests.get(i);
|
|
if (req.getMethod() != GET) {
|
|
sendHttpResponse(ctx, req, new DefaultFullHttpResponse(HTTP_1_1, FORBIDDEN));
|
|
return;
|
|
}
|
|
|
|
final WebSocketServerHandshakerFactory wsFactory = new WebSocketServerHandshakerFactory(
|
|
getWebSocketLocation(ctx.pipeline(), req, websocketPath), subprotocols, allowExtensions);
|
|
final WebSocketServerHandshaker handshaker = wsFactory.newHandshaker(req);
|
|
if (handshaker == null) {
|
|
WebSocketServerHandshakerFactory.sendUnsupportedWebSocketVersionResponse(ctx.channel());
|
|
} else {
|
|
final ChannelFuture handshakeFuture = handshaker.handshake(ctx.channel(), req);
|
|
handshakeFuture.addListener(new ChannelFutureListener() {
|
|
@Override
|
|
public void operationComplete(ChannelFuture future) throws Exception {
|
|
if (!future.isSuccess()) {
|
|
ctx.fireExceptionCaught(future.cause());
|
|
} else {
|
|
ctx.fireUserEventTriggered(
|
|
WebSocketServerProtocolHandler.ServerHandshakeStateEvent.HANDSHAKE_COMPLETE);
|
|
}
|
|
}
|
|
});
|
|
WebSocketServerProtocolHandler.setHandshaker(ctx, handshaker);
|
|
ctx.pipeline().replace(this, "WS403Responder",
|
|
WebSocketServerProtocolHandler.forbiddenHttpRequestResponder());
|
|
}
|
|
}
|
|
}
|
|
|
|
private static void sendHttpResponse(ChannelHandlerContext ctx, HttpRequest req, HttpResponse res) {
|
|
ChannelFuture f = ctx.channel().write(res);
|
|
if (!isKeepAlive(req) || res.getStatus().code() != 200) {
|
|
f.addListener(ChannelFutureListener.CLOSE);
|
|
}
|
|
}
|
|
|
|
private static String getWebSocketLocation(ChannelPipeline cp, HttpRequest req, String path) {
|
|
String protocol = "ws";
|
|
if (cp.get(SslHandler.class) != null) {
|
|
// SSL in use so use Secure WebSockets
|
|
protocol = "wss";
|
|
}
|
|
return protocol + "://" + req.headers().get(HttpHeaders.Names.HOST) + path;
|
|
}
|
|
|
|
}
|