diff --git a/pom.xml b/pom.xml index 81531efc91..310357121b 100644 --- a/pom.xml +++ b/pom.xml @@ -221,6 +221,37 @@ + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-antrun-plugin + [1.7,) + + run + + + + + + + + + + + + + @@ -514,135 +545,6 @@ ${project.name} Source Xref (${project.version}) - - org.jboss.maven.plugins - maven-jdocbook-plugin - 2.2.1 - - - generate-docbook - package - - resources - generate - - - - - - org.eclipse.wst.css - core - 1.1.101-v200705302225 - - - org.apache - xerces - - - com.ibm - icu - - - - - org.eclipse.wst.sse - core - 1.1.202-v200709061102 - - - org.apache - xerces - - - com.ibm - icu - - - - - org.jboss - jbossorg-docbook-xslt - 1.1.0 - - - org.eclipse.wst.css - core - - - org.eclipse.wst.sse - core - - - - - org.jboss - jbossorg-jdocbook-style - 1.1.0 - jdocbook-style - - - org.eclipse.wst.css - core - - - org.eclipse.wst.sse - core - - - - - org.jboss - jbossorg-fonts - 1.0.0 - jdocbook-style - - - - master.xml - ${basedir}/src/docbook - - ${basedir}/src/docbook - - css/**/* - - - - ${basedir}/src/docbook - - images/**/* - - - - - html - file:///${basedir}/src/docbook/xslt/xhtml.xsl - index.html - - - html_single - file:///${basedir}/src/docbook/xslt/xhtml-single.xsl - index.html - - - eclipse - file:///${basedir}/src/docbook/xslt/eclipse.xsl - index.html - - - pdf - file:///${basedir}/src/docbook/xslt/pdf.xsl - netty.pdf - - - - true - saxon - 1.72.0 - - - true - - - maven-assembly-plugin 2.2.1 diff --git a/src/assembly/default.xml b/src/assembly/default.xml index 4bb59b634b..4ffabed425 100644 --- a/src/assembly/default.xml +++ b/src/assembly/default.xml @@ -69,15 +69,6 @@ **/** - - - - target/docbook/publish/en-US - doc/guide - - **/** - - diff --git a/src/docbook/css/jbossorg.css b/src/docbook/css/jbossorg.css deleted file mode 100644 index c036504b0e..0000000000 --- a/src/docbook/css/jbossorg.css +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2009 Red Hat, Inc. - * - * Red Hat licenses this file to you under the Apache License, version 2.0 - * (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ - -@import url("documentation.css"); -@import url("docnav.css"); -@import url("reports.css"); -@import url("extensions.css"); -@import url("codehighlight.css"); - -body { - background-image:url(../images/community/bkg_gradient.gif); - background-repeat:repeat-x; - margin:0 auto; - font-family:'Lucida Grande', Geneva, Verdana, Arial, sans-serif; - font-size:12px; - max-width:55em; - padding:0em 2em; - color:#333; - line-height:150%; - text-align:justify; -} - -/* Links */ - -a:link {color:#0066cc;} - -a:visited {color:#6699cc;} - -div.longdesc-link { - float:right; - color:#999; -} - -/* Headings */ - -h1, h2, h3, h4, h5, h6 { - color:#4a5d75; - line-height:130%; - margin-top:0em; - font-family:'Lucida Grande', Geneva, Verdana, Arial, sans-serif; - background-color:transparent; -} - -h1 { - background-image:url(../images/community/title_hdr.png); - background-repeat:no-repeat; - border-top:1px dotted #CCCCCC; - line-height:1.2em; - color:#182737; - font-size:2em; - padding:1.5em; -} - -h2 {font-size:1.6em;} - -h3 { - font-size:1.3em; - padding-top:0em; - padding-bottom:0em; -} - -h4 { - font-size:1.1em; - padding-top:0em; - padding-bottom:0em; -} - -h5.formalpara { - font-size:1em; - margin-top:2em; - margin-bottom:.8em; -} - -/* Element rules */ - -hr { - border-collapse:collapse; - border-style:none; - border-top:1px dotted #ccc; - width:100% !important; -} - -sup {color:#999;} - -/* Custom overrides */ - -tt, tt *, pre, pre *, code, code * { - font-size: 100% !important; - font-family: "Liberation Mono", "DejaVu Sans Mono", Consolas, Monaco, "Vera Sans Mono", "Lucida Console", "Courier New", monospace !important; -} - -pre a:link * {color:#0066cc !important;} - -pre a:visited * {color:#6699cc !important;} - -.programlisting, .programlistingco pre { - line-height: 160%; -} - -.programlisting img { - margin: 0; - padding: 0; - vertical-align: middle; -} - -span.co { - position: relative; - left: 0; - top: 0; - margin: 0 0; - padding: 0 0; - height: 17px; - float: right; -} - -span.co * { - margin: 0 0; - padding: 0 0; -} diff --git a/src/docbook/en-US/custom.dtd b/src/docbook/en-US/custom.dtd deleted file mode 100644 index 202ca02a26..0000000000 --- a/src/docbook/en-US/custom.dtd +++ /dev/null @@ -1,88 +0,0 @@ - - - - - - - - - - - - - -Bootstrap"> -ClientBootstrap"> -ServerBootstrap"> - - - -ChannelBuffer"> -ChannelBuffers"> - - - -Channel"> -ChannelDownstreamHandler"> -ChannelEvent"> -ChannelFactory"> -ChannelFuture"> -ChannelFutureListener"> -ChannelHandler"> -ChannelHandlerContext"> -ChannelPipeline"> -ChannelPipelineCoverage"> -ChannelPipelineFactory"> -Channels"> -ChannelStateEvent"> -ChannelUpstreamHandler"> -ExceptionEvent"> -MessageEvent"> -SimpleChannelHandler"> - - - -ChannelGroup"> -ChannelGroupFuture"> -DefaultChannelGroup"> - - - -ServerSocketChannel"> -SocketChannel"> - - - -NioClientSocketChannelFactory"> -NioServerSocketChannelFactory"> - - - -FrameDecoder"> - - - -ProtobufEncoder"> -ProtobufDecoder"> - - - -ReplayingDecoder"> -VoidEnum"> - - -SslHandler"> diff --git a/src/docbook/en-US/master.xml b/src/docbook/en-US/master.xml deleted file mode 100644 index 3eb0e25f7d..0000000000 --- a/src/docbook/en-US/master.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - -%CustomDTD; -]> - - - The Netty Project 3.2 User Guide - The Proven Approach to Rapid Network Application Development - - - - - - - - - - - - - - - - - diff --git a/src/docbook/en-US/module/appendix.xml b/src/docbook/en-US/module/appendix.xml deleted file mode 100644 index ce1391e086..0000000000 --- 
a/src/docbook/en-US/module/appendix.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - -%CustomDTD; -]> - - Additional Resources - To be written... - diff --git a/src/docbook/en-US/module/architecture.xml b/src/docbook/en-US/module/architecture.xml deleted file mode 100644 index 5e4f883c91..0000000000 --- a/src/docbook/en-US/module/architecture.xml +++ /dev/null @@ -1,346 +0,0 @@ - - - -%CustomDTD; -]> - - Architectural Overview - - - - - - - The Architecture Diagram of Netty - - - - - In this chapter, we will examine what core functionalities are provided in - Netty and how they constitute a complete network application development - stack on top of the core. Please keep this diagram in mind as you read this - chapter. - - -
- Rich Buffer Data Structure - - Netty uses its own buffer API instead of NIO ByteBuffer - to represent a sequence of bytes. This approach has significant advantages - over using ByteBuffer. Netty's new buffer type, - &ChannelBuffer; has been designed from the ground up to address the problems - of ByteBuffer and to meet the daily needs of - network application developers. To list a few cool features: - - - - You can define your own buffer type if necessary. - - - - - Transparent zero copy is achieved by a built-in composite buffer type. - - - - - A dynamic buffer type is provided out-of-the-box, whose capacity is - expanded on demand, just like StringBuffer. - - - - - There's no need to call flip() anymore. - - - - - It is often faster than ByteBuffer. - - - - - - For more information, please refer to the - org.jboss.netty.buffer package description. - -
- -
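For illustration (not part of this patch), the buffer features described above can be shown in a short sketch that uses only the ChannelBuffers helpers named in this section; the class name BufferExamples and the byte values are illustrative.

import static org.jboss.netty.buffer.ChannelBuffers.*;

import org.jboss.netty.buffer.ChannelBuffer;

public class BufferExamples {
    public static void main(String[] args) {
        // A dynamic buffer expands on demand, like StringBuffer, so no capacity planning is needed.
        ChannelBuffer dynamic = dynamicBuffer();
        dynamic.writeInt(42);

        // wrappedBuffer(...) composes existing buffers without copying them,
        // i.e. the built-in zero-copy composite buffer type mentioned above.
        ChannelBuffer header = wrappedBuffer(new byte[] { 0x00, 0x02 });
        ChannelBuffer body = wrappedBuffer(new byte[] { 0x0A, 0x0B });
        ChannelBuffer frame = wrappedBuffer(header, body);

        // Separate reader and writer indexes mean the buffer can be read back
        // right away; there is no flip() call as with NIO ByteBuffer.
        while (frame.readable()) {
            System.out.println(frame.readByte());
        }
    }
}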
- Universal Asynchronous I/O API - - Traditional I/O APIs in Java provide different types and methods for - different transport types. For example, - java.net.Socket and - java.net.DatagramSocket do not have any common - super type and therefore they have very different ways to perform socket - I/O. - - - This mismatch makes porting a network application from one transport to - another tedious and difficult. The lack of portability between - transports becomes a problem when you need to support additional - transports, as this often entails rewriting the network layer of the - application. Logically, many protocols can run on more than one - transport such as TCP/IP, UDP/IP, SCTP, and serial port communication. - - - To make matters worse, Java's New I/O (NIO) API introduced - incompatibilities with the old blocking I/O (OIO) API and will continue - to do so in the next release, NIO.2 (AIO). Because all these APIs are - different from each other in design and performance characteristics, you - are often forced to determine which API your application will depend on - before you even begin the implementation phase. - - - For instance, you might want to start with OIO because the number of - clients you are going to serve will be very small and writing a socket - server using OIO is much easier than using NIO. However, you are going - to be in trouble when your business grows exponentially and your server - needs to serve tens of thousands of clients simultaneously. You could - start with NIO, but doing so may hinder rapid development by greatly - increasing development time due to the complexity of the NIO Selector - API. - - - Netty has a universal asynchronous I/O interface called a &Channel;, which - abstracts away all operations required for point-to-point communication. - That is, once you wrote your application on one Netty transport, your - application can run on other Netty transports. Netty provides a number - of essential transports via one universal API: - - - - NIO-based TCP/IP transport - (See org.jboss.netty.channel.socket.nio), - - - - - OIO-based TCP/IP transport - (See org.jboss.netty.channel.socket.oio), - - - - OIO-based UDP/IP transport, and - - - - Local transport (See org.jboss.netty.channel.local). - - - - Switching from one transport to another usually takes just a couple - lines of changes such as choosing a different &ChannelFactory; - implementation. - - - Also, you are even able to take advantage of new transports which aren't - yet written (such as serial port communication transport), again - by replacing just a couple lines of constructor calls. Moreover, you can - write your own transport by extending the core API. - -
- -
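Because every transport is exposed through the same Channel abstraction, moving between them is mostly a matter of choosing a different ChannelFactory. A minimal sketch (not part of this patch), assuming the NIO and OIO server socket factories referenced in this section; the class name TransportChoice is illustrative.

import java.util.concurrent.Executors;

import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;

public class TransportChoice {
    public static void main(String[] args) {
        // Non-blocking NIO transport.
        ChannelFactory factory = new NioServerSocketChannelFactory(
                Executors.newCachedThreadPool(),   // boss threads
                Executors.newCachedThreadPool());  // worker threads

        // Switching to the blocking OIO transport changes only the factory:
        // ChannelFactory factory = new OioServerSocketChannelFactory(
        //         Executors.newCachedThreadPool(),
        //         Executors.newCachedThreadPool());

        ServerBootstrap bootstrap = new ServerBootstrap(factory);
        // ... the rest of the bootstrap code stays identical for both transports.
    }
}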
- Event Model based on the Interceptor Chain Pattern - - A well-defined and extensible event model is a must for an event-driven - application. Netty has a well-defined event model focused on I/O. It - also allows you to implement your own event type without breaking the - existing code because each event type is distinguished from another by - a strict type hierarchy. This is another differentiator against other - frameworks. Many NIO frameworks have no or a very limited notion of an - event model. If they offer extension at all, they often break the - existing code when you try to add custom event types - - - A &ChannelEvent; is handled by a list of &ChannelHandler;s in a - &ChannelPipeline;. The pipeline implements an advanced form of the - Intercepting Filter - pattern to give a user full control over how an event is handled and how - the handlers in the pipeline interact with each other. For example, - you can define what to do when data is read from a socket: - - public class MyReadHandler implements &SimpleChannelHandler; { - public void messageReceived(&ChannelHandlerContext; ctx, &MessageEvent; evt) { - Object message = evt.getMessage(); - // Do something with the received message. - ... - - // And forward the event to the next handler. - ctx.sendUpstream(evt); - } -} - - You can also define what to do when a handler receives a write request: - - public class MyWriteHandler implements &SimpleChannelHandler; { - public void writeRequested(&ChannelHandlerContext; ctx, &MessageEvent; evt) { - Object message = evt.getMessage(); - // Do something with the message to be written. - ... - - // And forward the event to the next handler. - ctx.sendDownstream(evt); - } -} - - For more information on the event model, please refer to the - API documentation of &ChannelEvent; and &ChannelPipeline;. - -
- -
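The two handlers shown above can be combined in a single pipeline. A minimal sketch (not part of this patch) that reuses MyReadHandler and MyWriteHandler from the listings above; the handler names passed to addLast are illustrative.

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;

public class MyPipelineFactory implements ChannelPipelineFactory {
    public ChannelPipeline getPipeline() {
        ChannelPipeline pipeline = Channels.pipeline();
        // Upstream events such as messageReceived visit the handlers from the
        // first to the last; downstream events such as writeRequested visit
        // them from the last back to the first.
        pipeline.addLast("reader", new MyReadHandler());
        pipeline.addLast("writer", new MyWriteHandler());
        return pipeline;
    }
}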
- Advanced Components for More Rapid Development - - On top of the core components mentioned above, which already enable the - implementation of all types of network applications, Netty provides a set - of advanced features to accelerate the pace of development even more. - -
- Codec framework - - As demonstrated in , it is always a good - idea to separate a protocol codec from business logic. However, there - are some complications when implementing this idea from scratch. You - have to deal with the fragmentation of messages. Some protocols are - multi-layered (i.e. built on top of other lower level protocols). Some - are too complicated to be implemented in a single state machine. - - - Consequently, a good network application framework should provide an - extensible, reusable, unit-testable, and multi-layered codec framework - that generates maintainable user codecs. - - - Netty provides a number of basic and advanced codecs to address most - issues you will encounter when you write a protocol codec regardless - if it is simple or not, binary or text - simply whatever. - -
- -
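As one concrete example of the codecs shipped with Netty, a text line-based protocol can be assembled from a frame decoder plus string codecs so that business handlers never touch raw bytes. A minimal sketch (not part of this patch); the 8192-byte frame limit and the handler names are arbitrary, and MyBusinessLogicHandler is hypothetical.

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.handler.codec.frame.DelimiterBasedFrameDecoder;
import org.jboss.netty.handler.codec.frame.Delimiters;
import org.jboss.netty.handler.codec.string.StringDecoder;
import org.jboss.netty.handler.codec.string.StringEncoder;

public class TextLinePipelineFactory implements ChannelPipelineFactory {
    public ChannelPipeline getPipeline() {
        ChannelPipeline pipeline = Channels.pipeline();
        // Split the inbound byte stream into frames at line endings.
        pipeline.addLast("framer",
                new DelimiterBasedFrameDecoder(8192, Delimiters.lineDelimiter()));
        // Convert frames to and from Strings.
        pipeline.addLast("decoder", new StringDecoder());
        pipeline.addLast("encoder", new StringEncoder());
        // Business logic goes last, e.g.:
        // pipeline.addLast("handler", new MyBusinessLogicHandler());
        return pipeline;
    }
}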
- SSL / TLS Support - - Unlike old blocking I/O, it is a non-trivial task to support SSL in NIO. - You can't simply wrap a stream to encrypt or decrypt data but you have - to use javax.net.ssl.SSLEngine. - SSLEngine is a state machine which is as complex - as SSL itself. You have to manage all possible states such as cipher - suite and encryption key negotiation (or re-negotiation), certificate - exchange, and validation. Moreover, SSLEngine is - not even completely thread-safe, as one would expect. - - - In Netty, &SslHandler; takes care of all the gory details and pitfalls - of SSLEngine. All you need to do is to configure - the &SslHandler; and insert it into your &ChannelPipeline;. It also - allows you to implement advanced features like - StartTLS - very easily. - -
- -
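Enabling TLS therefore boils down to creating an SSLEngine and placing an SslHandler in front of everything else. A minimal sketch (not part of this patch), assuming the SSLContext has already been built from your key material; the class name SslSetup and the handler name are illustrative.

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.handler.ssl.SslHandler;

public final class SslSetup {
    public static void enableSsl(ChannelPipeline pipeline, SSLContext context) {
        SSLEngine engine = context.createSSLEngine();
        engine.setUseClientMode(false); // this side acts as the TLS server
        // The SslHandler should sit in front of the other handlers so that
        // everything behind it only ever sees decrypted data.
        pipeline.addFirst("ssl", new SslHandler(engine));
    }
}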
- HTTP Implementation - - HTTP is definitely the most popular protocol in the Internet. There are - already a number of HTTP implementations such as a Servlet container. - Then why does Netty have HTTP on top of its core? - - - Netty's HTTP support is very different from the existing HTTP libraries. - It gives you complete control over how HTTP messages are exchanged at a - low level. Because it is basically the combination of an HTTP codec and - HTTP message classes, there is no restriction such as an enforced thread - model. That is, you can write your own HTTP client or server that works - exactly the way you want. You have full control over everything that's - in the HTTP specification, including the thread model, connection life - cycle, and chunked encoding. - - - Thanks to its highly customizable nature, you can write a very efficient - HTTP server such as: - - - - Chat server that requires persistent connections and server push - technology (e.g. Comet - and WebSockets) - - - - - Media streaming server that needs to keep the connection open - until the whole media is streamed (e.g. 2 hours of video) - - - - - File server that allows the uploading of large files without - memory pressure (e.g. uploading 1GB per request) - - - - - Scalable mash-up client that connects to tens of thousands of 3rd - party web services asynchronously - - - - -
- -
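Because the HTTP support is just a codec plus message classes, an HTTP server pipeline is assembled like any other. A minimal sketch (not part of this patch), assuming the HTTP codec classes in org.jboss.netty.handler.codec.http; the 1 MiB aggregation limit is arbitrary and MyHttpRequestHandler is hypothetical.

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.codec.http.HttpResponseEncoder;

public class HttpServerPipelineFactory implements ChannelPipelineFactory {
    public ChannelPipeline getPipeline() {
        ChannelPipeline pipeline = Channels.pipeline();
        pipeline.addLast("decoder", new HttpRequestDecoder());            // bytes -> HttpRequest
        pipeline.addLast("aggregator", new HttpChunkAggregator(1048576)); // optional: merge chunked content
        pipeline.addLast("encoder", new HttpResponseEncoder());           // HttpResponse -> bytes
        // pipeline.addLast("handler", new MyHttpRequestHandler());       // hypothetical business handler
        return pipeline;
    }
}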
- Google Protocol Buffer Integration - - Google Protocol Buffers - are an ideal solution for the rapid implementation of a highly efficient - binary protocols that evolve over time. With &ProtobufEncoder; and - &ProtobufDecoder;, you can turn the message classes generated by the - Google Protocol Buffers Compiler (protoc) into Netty codec. Please take - a look into the - 'LocalTime' example - that shows how easily you can create a high-performing binary protocol - client and server from the - sample protocol definition. - -
-
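In practice the Protocol Buffers codecs are paired with a length-prefix framer, since protobuf messages are not self-delimiting. A minimal sketch (not part of this patch), assuming a hypothetical generated message class MyMessage and the length-field frame codecs shipped with Netty; the 4-byte prefix and the size limit are arbitrary choices.

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.handler.codec.frame.LengthFieldBasedFrameDecoder;
import org.jboss.netty.handler.codec.frame.LengthFieldPrepender;
import org.jboss.netty.handler.codec.protobuf.ProtobufDecoder;
import org.jboss.netty.handler.codec.protobuf.ProtobufEncoder;

public class ProtobufPipelineFactory implements ChannelPipelineFactory {
    public ChannelPipeline getPipeline() {
        ChannelPipeline pipeline = Channels.pipeline();
        // Inbound: strip the 4-byte length prefix, then decode the protobuf message.
        pipeline.addLast("frameDecoder", new LengthFieldBasedFrameDecoder(1048576, 0, 4, 0, 4));
        pipeline.addLast("protobufDecoder", new ProtobufDecoder(MyMessage.getDefaultInstance()));
        // Outbound: serialize the message, then prepend the 4-byte length prefix.
        pipeline.addLast("frameEncoder", new LengthFieldPrepender(4));
        pipeline.addLast("protobufEncoder", new ProtobufEncoder());
        return pipeline;
    }
}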
- -
- Summary - - In this chapter, we reviewed the overall architecture of Netty from the - feature standpoint. Netty has a simple, yet powerful architecture. - It is composed of three components - buffer, channel, and event model - - and all advanced features are built on top of the three core components. - Once you understand how these three work together, it should not be - difficult to understand the more advanced features which were covered - briefly in this chapter. - - - You might still have unanswered questions about what the overall - architecture looks like exactly and how each of the features works - together. If so, it is a good idea to - talk to us to improve this guide. -
-
diff --git a/src/docbook/en-US/module/codec.xml b/src/docbook/en-US/module/codec.xml deleted file mode 100644 index 1016148500..0000000000 --- a/src/docbook/en-US/module/codec.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - -%CustomDTD; -]> - - Encoders and Decoders - To be written... - diff --git a/src/docbook/en-US/module/preface.xml b/src/docbook/en-US/module/preface.xml deleted file mode 100644 index 989fc63828..0000000000 --- a/src/docbook/en-US/module/preface.xml +++ /dev/null @@ -1,87 +0,0 @@ - - - -%CustomDTD; -]> - - Preface - -
- The Problem - - Nowadays we use general purpose applications or libraries to communicate - with each other. For example, we often use an HTTP client library to - retrieve information from a web server and to invoke a remote procedure - call via web services. - - - However, a general purpose protocol or its implementation sometimes - does not scale very well. It is like we don't use a general purpose - HTTP server to exchange huge files, e-mail messages, and near-realtime - messages such as financial information and multiplayer game data. - What's required is a highly optimized protocol implementation which is - dedicated to a special purpose. For example, you might want to - implement an HTTP server which is optimized for AJAX-based chat - application, media streaming, or large file transfer. You could even - want to design and implement a whole new protocol which is precisely - tailored to your need. - - - Another inevitable case is when you have to deal with a legacy - proprietary protocol to ensure the interoperability with an old system. - What matters in this case is how quickly we can implement that protocol - while not sacrificing the stability and performance of the resulting - application. - -
- -
- The Solution - - The Netty project is - an effort to provide an asynchronous event-driven network application - framework and tooling for the rapid development of maintainable - high-performance · high-scalability protocol servers and clients. - - - In other words, Netty is a NIO client server framework which enables - quick and easy development of network applications such as protocol - servers and clients. It greatly simplifies and streamlines network - programming such as TCP and UDP socket server development. - - - 'Quick and easy' does not mean that a resulting application will suffer - from a maintainability or a performance issue. Netty has been designed - carefully with the experiences earned from the implementation of a lot - of protocols such as FTP, SMTP, HTTP, and various binary and text-based - legacy protocols. As a result, Netty has succeeded to find a way to - achieve ease of development, performance, stability, and flexibility - without a compromise. - - - Some users might already have found other network application - framework that claims to have the same advantage, and you might want - to ask what makes Netty so different from them. The answer is the - philosophy where it is built on. Netty is designed to give you the most - comfortable experience both in terms of the API and the implementation - from the day one. It is not something tangible but you will realize that - this philosophy will make your life much easier as you read this guide - and play with Netty. - -
-
diff --git a/src/docbook/en-US/module/security.xml b/src/docbook/en-US/module/security.xml deleted file mode 100644 index 039aaf6e94..0000000000 --- a/src/docbook/en-US/module/security.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - -%CustomDTD; -]> - - Securing the Wire - To be written... - diff --git a/src/docbook/en-US/module/start.xml b/src/docbook/en-US/module/start.xml deleted file mode 100644 index 73a86f4aa7..0000000000 --- a/src/docbook/en-US/module/start.xml +++ /dev/null @@ -1,1163 +0,0 @@ - - - -%CustomDTD; -]> - - Getting Started - - This chapter tours around the core constructs of Netty with simple - examples to let you get started quickly. You will be able to write a - client and a server on top of Netty right away when you are at the - end of this chapter. - - - - If you prefer top-down approach in learning something, you might want to - start from and get back here. - - -
- Before Getting Started - - The minimum requirements to run the examples which are introduced in - this chapter are only two; the latest version of Netty and JDK 1.5 or - above. The latest version of Netty is available in - the project download page. To download - the right version of JDK, please refer to your preferred JDK vendor's web - site. - - - As you read, you might have more questions about the classes introduced - in this chapter. Please refer to the API reference whenever you want to - know more about them. All class names in this document are linked to the - online API reference for your convenience. Also, please don't hesitate to - contact the Netty project community and - let us know if there's any incorrect information, errors in grammar and - typo, and if you have a good idea to improve the documentation. - -
- -
- Writing a Discard Server - - The most simplistic protocol in the world is not 'Hello, World!' but - DISCARD. It's - a protocol which discards any received data without any response. - - - To implement the DISCARD protocol, the only thing you need to do is - to ignore all received data. Let us start straight from the handler - implementation, which handles I/O events generated by Netty. - - package org.jboss.netty.example.discard; - -public class DiscardServerHandler extends &SimpleChannelHandler; { - - @Override - public void messageReceived(&ChannelHandlerContext; ctx, &MessageEvent; e) { - } - - @Override - public void exceptionCaught(&ChannelHandlerContext; ctx, &ExceptionEvent; e) { - e.getCause().printStackTrace(); - - &Channel; ch = e.getChannel(); - ch.close(); - } -} - - - - DiscardServerHandler extends - &SimpleChannelHandler;, which is an implementation of - &ChannelHandler;. &SimpleChannelHandler; provides various event - handler methods that you can override. For now, it is just enough - to extend &SimpleChannelHandler; rather than to implement - the handler interfaces by yourself. - - - - - We override the messageReceived event - handler method here. This method is called with a &MessageEvent;, - which contains the received data, whenever new data is received - from a client. In this example, we ignore the received data by doing - nothing to implement the DISCARD protocol. - - - - - exceptionCaught event handler method is - called with an &ExceptionEvent; when an exception was raised by - Netty due to I/O error or by a handler implementation due to the - exception thrown while processing events. In most cases, the - caught exception should be logged and its associated channel - should be closed here, although the implementation of this method - can be different depending on what you want to do to deal with an - exceptional situation. For example, you might want to send a - response message with an error code before closing the connection. - - - - - So far so good. We have implemented the first half of the DISCARD server. - What's left now is to write the main method - which starts the server with the DiscardServerHandler. - - package org.jboss.netty.example.discard; - -import java.net.InetSocketAddress; -import java.util.concurrent.Executors; - -public class DiscardServer { - - public static void main(String[] args) throws Exception { - &ChannelFactory; factory = - new &NioServerSocketChannelFactory;( - Executors.newCachedThreadPool(), - Executors.newCachedThreadPool()); - - &ServerBootstrap; bootstrap = new &ServerBootstrap;(factory); - - bootstrap.setPipelineFactory(new &ChannelPipelineFactory;() { - public &ChannelPipeline; getPipeline() { - return &Channels;.pipeline(new DiscardServerHandler()); - } - }); - - bootstrap.setOption("child.tcpNoDelay", true); - bootstrap.setOption("child.keepAlive", true); - - bootstrap.bind(new InetSocketAddress(8080)); - } -} - - - - &ChannelFactory; is a factory which creates and manages &Channel;s - and its related resources. It processes all I/O requests and - performs I/O to generate &ChannelEvent;s. Netty provides various - &ChannelFactory; implementations. We are implementing a server-side - application in this example, and therefore - &NioServerSocketChannelFactory; was used. Another thing to note is - that it does not create I/O threads by itself. 
It is supposed to - acquire threads from the thread pool you specified in the - constructor, and it gives you more control over how threads should - be managed in the environment where your application runs, such as - an application server with a security manager. - - - - - &ServerBootstrap; is a helper class that sets up a server. You can - set up the server using a &Channel; directly. However, please note - that this is a tedious process and you do not need to do that in most - cases. - - - - - Here, we configure the &ChannelPipelineFactory;. Whenever a new - connection is accepted by the server, a new &ChannelPipeline; will be - created by the specified &ChannelPipelineFactory;. The new pipeline - contains the DiscardServerHandler. As the - application gets complicated, it is likely that you will add more - handlers to the pipeline and extract this anonymous class into a top - level class eventually. - - - - - You can also set the parameters which are specific to the &Channel; - implementation. We are writing a TCP/IP server, so we are allowed - to set the socket options such as tcpNoDelay and - keepAlive. Please note that the - "child." prefix was added to all options. It - means the options will be applied to the accepted &Channel;s instead - of the options of the &ServerSocketChannel;. You could do the - following to set the options of the &ServerSocketChannel;: - bootstrap.setOption("reuseAddress", true); - - - - - We are ready to go now. What's left is to bind to the port and to - start the server. Here, we bind to the port 8080 - of all NICs (network interface cards) in the machine. You can now - call the bind method as many times as - you want (with different bind addresses.) - - - - - Congratulations! You've just finished your first server on top of Netty. - -
- -
- Looking into the Received Data - - Now that we have written our first server, we need to test if it really - works. The easiest way to test it is to use the telnet - command. For example, you could enter "telnet localhost - 8080" in the command line and type something. - - - However, can we say that the server is working fine? We cannot really - know that because it is a discard server. You will not get any response - at all. To prove it is really working, let us modify the server to print - what it has received. - - - We already know that &MessageEvent; is generated whenever data is - received and the messageReceived handler method - will be invoked. Let us put some code into the - messageReceived method of the - DiscardServerHandler: - - @Override -public void messageReceived(&ChannelHandlerContext; ctx, &MessageEvent; e) { - &ChannelBuffer; buf = (ChannelBuffer) e.getMessage(); - while(buf.readable()) { - System.out.println((char) buf.readByte()); - System.out.flush(); - } -} - - - - It is safe to assume the message type in socket transports is always - &ChannelBuffer;. &ChannelBuffer; is a fundamental data structure - which stores a sequence of bytes in Netty. It's similar to NIO - ByteBuffer, but it is easier to use and more - flexible. For example, Netty allows you to create a composite - &ChannelBuffer; which combines multiple &ChannelBuffer;s reducing - the number of unnecessary memory copy. - - - Although it resembles to NIO ByteBuffer a lot, - it is highly recommended to refer to the API reference. Learning how - to use &ChannelBuffer; correctly is a critical step in using Netty - without difficulty. - - - - - If you run the telnet command again, you will see the - server prints what has received. - - - The full source code of the discard server is located in the - org.jboss.netty.example.discard package of the - distribution. - -
-
- Writing an Echo Server - - So far, we have been consuming data without responding at all. A server, - however, is usually supposed to respond to a request. Let us learn how to - write a response message to a client by implementing the - ECHO protocol, - where any received data is sent back. - - - The only difference from the discard server we have implemented in the - previous sections is that it sends the received data back instead of - printing the received data out to the console. Therefore, it is enough - again to modify the messageReceived method: - - @Override -public void messageReceived(&ChannelHandlerContext; ctx, &MessageEvent; e) { - &Channel; ch = e.getChannel(); - ch.write(e.getMessage()); -} - - - - A &ChannelEvent; object has a reference to its associated &Channel;. - Here, the returned &Channel; represents the connection which received - the &MessageEvent;. We can get the &Channel; and call the - write method to write something back to - the remote peer. - - - - - If you run the telnet command again, you will see the - server sends back whatever you have sent to it. - - - The full source code of the echo server is located in the - org.jboss.netty.example.echo package of the - distribution. - -
- -
- Writing a Time Server - - The protocol to implement in this section is the - TIME protocol. - It is different from the previous examples in that it sends a message, - which contains a 32-bit integer, without receiving any requests and - loses the connection once the message is sent. In this example, you - will learn how to construct and send a message, and to close the - connection on completion. - - - Because we are going to ignore any received data but to send a message - as soon as a connection is established, we cannot use the - messageReceived method this time. Instead, - we should override the channelConnected method. - The following is the implementation: - - package org.jboss.netty.example.time; - -public class TimeServerHandler extends &SimpleChannelHandler; { - - @Override - public void channelConnected(&ChannelHandlerContext; ctx, &ChannelStateEvent; e) { - &Channel; ch = e.getChannel(); - - &ChannelBuffer; time = &ChannelBuffers;.buffer(4); - time.writeInt((int) (System.currentTimeMillis() / 1000)); - - &ChannelFuture; f = ch.write(time); - - f.addListener(new &ChannelFutureListener;() { - public void operationComplete(&ChannelFuture; future) { - &Channel; ch = future.getChannel(); - ch.close(); - } - }); - } - - @Override - public void exceptionCaught(&ChannelHandlerContext; ctx, &ExceptionEvent; e) { - e.getCause().printStackTrace(); - e.getChannel().close(); - } -} - - - - As explained, channelConnected method will - be invoked when a connection is established. Let us write the 32-bit - integer that represents the current time in seconds here. - - - - - To send a new message, we need to allocate a new buffer which will - contain the message. We are going to write a 32-bit integer, and - therefore we need a &ChannelBuffer; whose capacity is - 4 bytes. The &ChannelBuffers; helper class is - used to allocate a new buffer. Besides the - buffer method, &ChannelBuffers; provides a - lot of useful methods related to the &ChannelBuffer;. For more - information, please refer to the API reference. - - - On the other hand, it is a good idea to use static imports for - &ChannelBuffers;: - import static org.jboss.netty.buffer.&ChannelBuffers;.*; -... -&ChannelBuffer; dynamicBuf = dynamicBuffer(256); -&ChannelBuffer; ordinaryBuf = buffer(1024); - - - - - As usual, we write the constructed message. - - - But wait, where's the flip? Didn't we used - to call ByteBuffer.flip() before sending a - message in NIO? &ChannelBuffer; does not have such a method because - it has two pointers; one for read operations and the other for write - operations. The writer index increases when you write something to - a &ChannelBuffer; while the reader index does not change. The reader - index and the writer index represents where the message starts and - ends respectively. - - - In contrast, NIO buffer does not provide a clean way to figure out - where the message content starts and ends without calling the - flip method. You will be in trouble when - you forget to flip the buffer because nothing or incorrect data will - be sent. Such an error does not happen in Netty because we have - different pointer for different operation types. You will find it - makes your life much easier as you get used to it -- a life without - flipping out! - - - Another point to note is that the write - method returns a &ChannelFuture;. A &ChannelFuture; represents an - I/O operation which has not yet occurred. 
It means, any requested - operation might not have been performed yet because all operations - are asynchronous in Netty. For example, the following code might - close the connection even before a message is sent: - - &Channel; ch = ...; -ch.write(message); -ch.close(); - - Therefore, you need to call the close - method after the &ChannelFuture;, which was returned by the - write method, notifies you when the write - operation has been done. Please note that, close - also might not close the connection immediately, and it returns a - &ChannelFuture;. - - - - - How do we get notified when the write request is finished then? - This is as simple as adding a &ChannelFutureListener; to the returned - &ChannelFuture;. Here, we created a new anonymous &ChannelFutureListener; - which closes the &Channel; when the operation is done. - - - Alternatively, you could simplify the code using a pre-defined - listener: - f.addListener(&ChannelFutureListener;.CLOSE); - - - -
- -
- Writing a Time Client - - Unlike DISCARD and ECHO servers, we need a client for the TIME protocol - because a human cannot translate a 32-bit binary data into a date on a - calendar. In this section, we discuss how to make sure the server works - correctly and learn how to write a client with Netty. - - - The biggest and only difference between a server and a client in Netty - is that different &Bootstrap; and &ChannelFactory; are required. Please - take a look at the following code: - - package org.jboss.netty.example.time; - -import java.net.InetSocketAddress; -import java.util.concurrent.Executors; - -public class TimeClient { - - public static void main(String[] args) throws Exception { - String host = args[0]; - int port = Integer.parseInt(args[1]); - - &ChannelFactory; factory = - new &NioClientSocketChannelFactory;( - Executors.newCachedThreadPool(), - Executors.newCachedThreadPool()); - - &ClientBootstrap; bootstrap = new &ClientBootstrap;(factory); - - bootstrap.setPipelineFactory(new &ChannelPipelineFactory;() { - public &ChannelPipeline; getPipeline() { - return &Channels;.pipeline(new TimeClientHandler()); - } - }); - - bootstrap.setOption("tcpNoDelay", true); - bootstrap.setOption("keepAlive", true); - - bootstrap.connect(new InetSocketAddress(host, port)); - } -} - - - - &NioClientSocketChannelFactory;, instead of &NioServerSocketChannelFactory; - was used to create a client-side &Channel;. - - - - - &ClientBootstrap; is a client-side counterpart of &ServerBootstrap;. - - - - - Please note that there's no "child." prefix. - A client-side &SocketChannel; does not have a parent. - - - - - We should call the connect method instead of - the bind method. - - - - - As you can see, it is not really different from the server side startup. - What about the &ChannelHandler; implementation? It should receive a - 32-bit integer from the server, translate it into a human readable format, - print the translated time, and close the connection: - - package org.jboss.netty.example.time; - -import java.util.Date; - -public class TimeClientHandler extends &SimpleChannelHandler; { - - @Override - public void messageReceived(&ChannelHandlerContext; ctx, &MessageEvent; e) { - &ChannelBuffer; buf = (&ChannelBuffer;) e.getMessage(); - long currentTimeMillis = buf.readInt() * 1000L; - System.out.println(new Date(currentTimeMillis)); - e.getChannel().close(); - } - - @Override - public void exceptionCaught(&ChannelHandlerContext; ctx, &ExceptionEvent; e) { - e.getCause().printStackTrace(); - e.getChannel().close(); - } -} - - It looks very simple and does not look any different from the server side - example. However, this handler sometimes will refuse to work raising an - IndexOutOfBoundsException. We discuss why - this happens in the next section. - -
- -
- - Dealing with a Stream-based Transport - -
- - One Small Caveat of Socket Buffer - - - In a stream-based transport such as TCP/IP, received data is stored - into a socket receive buffer. Unfortunately, the buffer of a - stream-based transport is not a queue of packets but a queue of bytes. - It means, even if you sent two messages as two independent packets, an - operating system will not treat them as two messages but as just a - bunch of bytes. Therefore, there is no guarantee that what you read - is exactly what your remote peer wrote. For example, let us assume - that the TCP/IP stack of an operating system has received three packets: - - +-----+-----+-----+ -| ABC | DEF | GHI | -+-----+-----+-----+ - - Because of this general property of a stream-based protocol, there's - high chance of reading them in the following fragmented form in your - application: - - +----+-------+---+---+ -| AB | CDEFG | H | I | -+----+-------+---+---+ - - Therefore, a receiving part, regardless it is server-side or - client-side, should defrag the received data into one or more meaningful - frames that could be easily understood by the - application logic. In case of the example above, the received data - should be framed like the following: - - +-----+-----+-----+ -| ABC | DEF | GHI | -+-----+-----+-----+ -
-
- - The First Solution - - - Now let us get back to the TIME client example. We have the same - problem here. A 32-bit integer is a very small amount of data, and it - is not likely to be fragmented often. However, the problem is that it - can be fragmented, and the possibility of - fragmentation will increase as the traffic increases. - - - The simplistic solution is to create an internal cumulative buffer and - wait until all 4 bytes are received into the internal buffer. The - following is the modified TimeClientHandler - implementation that fixes the problem: - - package org.jboss.netty.example.time; - -import static org.jboss.netty.buffer.&ChannelBuffers;.*; - -import java.util.Date; - -public class TimeClientHandler extends &SimpleChannelHandler; { - - private final &ChannelBuffer; buf = dynamicBuffer(); - - @Override - public void messageReceived(&ChannelHandlerContext; ctx, &MessageEvent; e) { - &ChannelBuffer; m = (&ChannelBuffer;) e.getMessage(); - buf.writeBytes(m); - - if (buf.readableBytes() >= 4) { - long currentTimeMillis = buf.readInt() * 1000L; - System.out.println(new Date(currentTimeMillis)); - e.getChannel().close(); - } - } - - @Override - public void exceptionCaught(&ChannelHandlerContext; ctx, &ExceptionEvent; e) { - e.getCause().printStackTrace(); - e.getChannel().close(); - } -} - - - - A dynamic buffer is a &ChannelBuffer; which - increases its capacity on demand. It's very useful when you don't - know the length of the message. - - - - - First, all received data should be cumulated into - buf. - - - - - And then, the handler must check if buf has enough - data, 4 bytes in this example, and proceed to the actual business - logic. Otherwise, Netty will call the - messageReceived method again when more - data arrives, and eventually all 4 bytes will be cumulated. - - - -
-
- - The Second Solution - - - Although the first solution has resolved the problem with the TIME - client, the modified handler does not look that clean. Imagine a more - complicated protocol which is composed of multiple fields such as a - variable length field. Your &ChannelHandler; implementation will - become unmaintainable very quickly. - - - As you may have noticed, you can add more than one &ChannelHandler; to - a &ChannelPipeline;, and therefore, you can split one monolithic - &ChannelHandler; into multiple modular ones to reduce the complexity of - your application. For example, you could split - TimeClientHandler into two handlers: - - - - TimeDecoder which deals with the - fragmentation issue, and - - - - - the initial simple version of TimeClientHandler. - - - - - - Fortunately, Netty provides an extensible class which helps you write - the first one out of the box: - - package org.jboss.netty.example.time; - -public class TimeDecoder extends &FrameDecoder; { - - @Override - protected Object decode( - &ChannelHandlerContext; ctx, &Channel; channel, &ChannelBuffer; buffer) { - - if (buffer.readableBytes() < 4) { - return null; - } - - return buffer.readBytes(4); - } -} - - - - &FrameDecoder; is an implementation of &ChannelHandler; which - makes it easy to which deals with the fragmentation issue. - - - - - &FrameDecoder; calls decode method with - an internally maintained cumulative buffer whenever new data is - received. - - - - - If null is returned, it means there's not - enough data yet. &FrameDecoder; will call again when there is a - sufficient amount of data. - - - - - If non-null is returned, it means the - decode method has decoded a message - successfully. &FrameDecoder; will discard the read part of its - internal cumulative buffer. Please remember that you don't need - to decode multiple messages. &FrameDecoder; will keep calling - the decoder method until it returns - null. - - - - - Now that we have another handler to insert into the &ChannelPipeline;, - we should modify the &ChannelPipelineFactory; implementation in the - TimeClient: - - bootstrap.setPipelineFactory(new &ChannelPipelineFactory;() { - public &ChannelPipeline; getPipeline() { - return &Channels;.pipeline( - new TimeDecoder(), - new TimeClientHandler()); - } - }); - - If you are an adventurous person, you might want to try the - &ReplayingDecoder; which simplifies the decoder even more. You will - need to consult the API reference for more information though. - - package org.jboss.netty.example.time; - -public class TimeDecoder extends &ReplayingDecoder;<&VoidEnum;> { - - @Override - protected Object decode( - &ChannelHandlerContext; ctx, &Channel; channel, - &ChannelBuffer; buffer, &VoidEnum; state) { - - return buffer.readBytes(4); - } -} - - Additionally, Netty provides out-of-the-box decoders which enables - you to implement most protocols very easily and helps you avoid from - ending up with a monolithic unmaintainable handler implementation. - Please refer to the following packages for more detailed examples: - - - - org.jboss.netty.example.factorial for - a binary protocol, and - - - - - org.jboss.netty.example.telnet for - a text line-based protocol. - - - - -
-
- -
- - Speaking in POJO instead of ChannelBuffer - - - All the examples we have reviewed so far used a &ChannelBuffer; as a - primary data structure of a protocol message. In this section, we will - improve the TIME protocol client and server example to use a - POJO instead of a - &ChannelBuffer;. - - - The advantage of using a POJO in your &ChannelHandler; is obvious; - your handler becomes more maintainable and reusable by separating the - code which extracts information from &ChannelBuffer; out from the - handler. In the TIME client and server examples, we read only one - 32-bit integer and it is not a major issue to use &ChannelBuffer; directly. - However, you will find it is necessary to make the separation as you - implement a real world protocol. - - - First, let us define a new type called UnixTime. - - package org.jboss.netty.example.time; - -import java.util.Date; - -public class UnixTime { - private final int value; - - public UnixTime(int value) { - this.value = value; - } - - public int getValue() { - return value; - } - - @Override - public String toString() { - return new Date(value * 1000L).toString(); - } -} - - We can now revise the TimeDecoder to return - a UnixTime instead of a &ChannelBuffer;. - - @Override -protected Object decode( - &ChannelHandlerContext; ctx, &Channel; channel, &ChannelBuffer; buffer) { - if (buffer.readableBytes() < 4) { - return null; - } - - return new UnixTime(buffer.readInt()); -} - - - - &FrameDecoder; and &ReplayingDecoder; allow you to return an object - of any type. If they were restricted to return only a - &ChannelBuffer;, we would have to insert another &ChannelHandler; - which transforms a &ChannelBuffer; into a - UnixTime. - - - - - With the updated decoder, the TimeClientHandler - does not use &ChannelBuffer; anymore: - - @Override -public void messageReceived(&ChannelHandlerContext; ctx, &MessageEvent; e) { - UnixTime m = (UnixTime) e.getMessage(); - System.out.println(m); - e.getChannel().close(); -} - - Much simpler and elegant, right? The same technique can be applied on - the server side. Let us update the - TimeServerHandler first this time: - - @Override -public void channelConnected(&ChannelHandlerContext; ctx, &ChannelStateEvent; e) { - UnixTime time = new UnixTime(System.currentTimeMillis() / 1000); - &ChannelFuture; f = e.getChannel().write(time); - f.addListener(&ChannelFutureListener;.CLOSE); -} - - Now, the only missing piece is an encoder, which is an implementation of - &ChannelHandler; that translates a UnixTime back - into a &ChannelBuffer;. It's much simpler than writing a decoder because - there's no need to deal with packet fragmentation and assembly when - encoding a message. - - package org.jboss.netty.example.time; - -import static org.jboss.netty.buffer.&ChannelBuffers;.*; - -public class TimeEncoder extends &SimpleChannelHandler; { - - public void writeRequested(&ChannelHandlerContext; ctx, &MessageEvent; e) { - UnixTime time = (UnixTime) e.getMessage(); - - &ChannelBuffer; buf = buffer(4); - buf.writeInt(time.getValue()); - - &Channels;.write(ctx, e.getFuture(), buf); - } -} - - - - An encoder overrides the writeRequested - method to intercept a write request. Please note that the - &MessageEvent; parameter here is the same type which was specified - in messageReceived but they are interpreted - differently. A &ChannelEvent; can be either an - upstream or downstream - event depending on the direction where the event flows. 
- For instance, a &MessageEvent; can be an upstream event when called - for messageReceived or a downstream event - when called for writeRequested. - Please refer to the API reference to learn more about the difference - between a upstream event and a downstream event. - - - - - Once done with transforming a POJO into a &ChannelBuffer;, you should - forward the new buffer to the previous &ChannelDownstreamHandler; in - the &ChannelPipeline;. &Channels; provides various helper methods - which generates and sends a &ChannelEvent;. In this example, - &Channels;.write(...) method creates a new - &MessageEvent; and sends it to the previous &ChannelDownstreamHandler; - in the &ChannelPipeline;. - - - On the other hand, it is a good idea to use static imports for - &Channels;: - import static org.jboss.netty.channel.&Channels;.*; -... -&ChannelPipeline; pipeline = pipeline(); -write(ctx, e.getFuture(), buf); -fireChannelDisconnected(ctx); - - - - - The last task left is to insert a TimeEncoder - into the &ChannelPipeline; on the server side, and it is left as a - trivial exercise. - -
- -
- - Shutting Down Your Application - - - If you ran the TimeClient, you must have noticed - that the application doesn't exit but just keep running doing nothing. - Looking from the full stack trace, you will also find a couple I/O threads - are running. To shut down the I/O threads and let the application exit - gracefully, you need to release the resources allocated by &ChannelFactory;. - - - The shutdown process of a typical network application is composed of the - following three steps: - - - - Close all server sockets if there are any, - - - - - Close all non-server sockets (i.e. client sockets and accepted - sockets) if there are any, and - - - - - Release all resources used by &ChannelFactory;. - - - - - - To apply the three steps above to the TimeClient, - TimeClient.main() could shut itself down - gracefully by closing the only one client connection and releasing all - resources used by &ChannelFactory;: - - package org.jboss.netty.example.time; - -public class TimeClient { - public static void main(String[] args) throws Exception { - ... - &ChannelFactory; factory = ...; - &ClientBootstrap; bootstrap = ...; - ... - &ChannelFuture; future = bootstrap.connect(...); - future.awaitUninterruptibly(); - if (!future.isSuccess()) { - future.getCause().printStackTrace(); - } - future.getChannel().getCloseFuture().awaitUninterruptibly(); - factory.releaseExternalResources(); - } -} - - - - The connect method of &ClientBootstrap; - returns a &ChannelFuture; which notifies when a connection attempt - succeeds or fails. It also has a reference to the &Channel; which - is associated with the connection attempt. - - - - - Wait for the returned &ChannelFuture; to determine if the connection - attempt was successful or not. - - - - - If failed, we print the cause of the failure to know why it failed. - the getCause() method of &ChannelFuture; will - return the cause of the failure if the connection attempt was neither - successful nor cancelled. - - - - - Now that the connection attempt is over, we need to wait until the - connection is closed by waiting for the closeFuture - of the &Channel;. Every &Channel; has its own closeFuture - so that you are notified and can perform a certain action on closure. - - - Even if the connection attempt has failed the closeFuture - will be notified because the &Channel; will be closed automatically - when the connection attempt fails. - - - - - All connections have been closed at this point. The only task left - is to release the resources being used by &ChannelFactory;. It is as - simple as calling its releaseExternalResources() - method. All resources including the NIO Selectors - and thread pools will be shut down and terminated automatically. - - - - - Shutting down a client was pretty easy, but how about shutting down a - server? You need to unbind from the port and close all open accepted - connections. To do this, you need a data structure that keeps track of - the list of active connections, and it's not a trivial task. Fortunately, - there is a solution, &ChannelGroup;. - - - &ChannelGroup; is a special extension of Java collections API which - represents a set of open &Channel;s. If a &Channel; is added to a - &ChannelGroup; and the added &Channel; is closed, the closed &Channel; - is removed from its &ChannelGroup; automatically. You can also perform - an operation on all &Channel;s in the same group. For instance, you can - close all &Channel;s in a &ChannelGroup; when you shut down your server. 
- - - To keep track of open sockets, you need to modify the - TimeServerHandler to add a new open &Channel; to - the global &ChannelGroup;, TimeServer.allChannels: - - @Override -public void channelOpen(&ChannelHandlerContext; ctx, &ChannelStateEvent; e) { - TimeServer.allChannels.add(e.getChannel()); -} - - - - Yes, &ChannelGroup; is thread-safe. - - - - - Now that the list of all active &Channel;s are maintained automatically, - shutting down a server is as easy as shutting down a client: - - package org.jboss.netty.example.time; - -public class TimeServer { - - static final &ChannelGroup; allChannels = new &DefaultChannelGroup;("time-server"); - - public static void main(String[] args) throws Exception { - ... - &ChannelFactory; factory = ...; - &ServerBootstrap; bootstrap = ...; - ... - &Channel; channel = bootstrap.bind(...); - allChannels.add(channel); - waitForShutdownCommand(); - &ChannelGroupFuture; future = allChannels.close(); - future.awaitUninterruptibly(); - factory.releaseExternalResources(); - } -} - - - - &DefaultChannelGroup; requires the name of the group as a constructor - parameter. The group name is solely used to distinguish one group - from others. - - - - - The bind method of &ServerBootstrap; - returns a server side &Channel; which is bound to the specified - local address. Calling the close() method - of the returned &Channel; will make the &Channel; unbind from the - bound local address. - - - - - Any type of &Channel;s can be added to a &ChannelGroup; regardless if - it is either server side, client-side, or accepted. Therefore, - you can close the bound &Channel; along with the accepted &Channel;s - in one shot when the server shuts down. - - - - - waitForShutdownCommand() is an imaginary - method that waits for the shutdown signal. You could wait for a - message from a privileged client or the JVM shutdown hook. - - - - - You can perform the same operation on all channels in the same - &ChannelGroup;. In this case, we close all channels, which means - the bound server-side &Channel; will be unbound and all accepted - connections will be closed asynchronously. To notify when all - connections were closed successfully, it returns a &ChannelGroupFuture; - which has a similar role with &ChannelFuture;. - - - -
- -
- - Summary - - In this chapter, we had a quick tour of Netty with a demonstration on how - to write a fully working network application on top of Netty. More - questions you may have will be covered in the upcoming chapters and the - revised version of this chapter. Please also note that the - community is always waiting for your - questions and ideas to help you and keep improving Netty based on your - feedback. -
-
diff --git a/src/docbook/en-US/module/state-mgmt.xml b/src/docbook/en-US/module/state-mgmt.xml deleted file mode 100644 index 81625dfee1..0000000000 --- a/src/docbook/en-US/module/state-mgmt.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - -%CustomDTD; -]> - - State Management - To be written... - diff --git a/src/docbook/en-US/module/template.xml b/src/docbook/en-US/module/template.xml deleted file mode 100644 index a0da7b554e..0000000000 --- a/src/docbook/en-US/module/template.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - -%CustomDTD; -]> - - Chapter title - To be written... - diff --git a/src/docbook/en-US/module/threading.xml b/src/docbook/en-US/module/threading.xml deleted file mode 100644 index 94a2111c0a..0000000000 --- a/src/docbook/en-US/module/threading.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - -%CustomDTD; -]> - - Thread Management - To be written... - diff --git a/src/docbook/en-US/module/transport.xml b/src/docbook/en-US/module/transport.xml deleted file mode 100644 index 7bda3673e8..0000000000 --- a/src/docbook/en-US/module/transport.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - -%CustomDTD; -]> - - Transports - To be written... - diff --git a/src/docbook/images/architecture.odg b/src/docbook/images/architecture.odg deleted file mode 100644 index f68b75909f..0000000000 Binary files a/src/docbook/images/architecture.odg and /dev/null differ diff --git a/src/docbook/images/architecture.png b/src/docbook/images/architecture.png deleted file mode 100644 index d8b2c22e7e..0000000000 Binary files a/src/docbook/images/architecture.png and /dev/null differ diff --git a/src/docbook/xslt/eclipse.xsl b/src/docbook/xslt/eclipse.xsl deleted file mode 100644 index 482f138e00..0000000000 --- a/src/docbook/xslt/eclipse.xsl +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - 1 - diff --git a/src/docbook/xslt/pdf.xsl b/src/docbook/xslt/pdf.xsl deleted file mode 100644 index f3a0b1c67e..0000000000 --- a/src/docbook/xslt/pdf.xsl +++ /dev/null @@ -1,153 +0,0 @@ - - - - - - - - - - - - - - Liberation Serif,serif - - - - - - - Setting 'title.font.family' param= - - - - - - - - - Setting 'body.font.family' param= - - - - - - - - - Setting 'monospace.font.family' param= - - - - - - - - - Setting 'sans.font.family' param= - - - - - - - - - Setting 'programlisting.font' param= - - - - - - - - - - - - - - - - - - normal - - - - - 1em - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - url( - - ) - - - - - - - - - diff --git a/src/docbook/xslt/xhtml-single.xsl b/src/docbook/xslt/xhtml-single.xsl deleted file mode 100644 index a158fab37b..0000000000 --- a/src/docbook/xslt/xhtml-single.xsl +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - 1 - diff --git a/src/docbook/xslt/xhtml.xsl b/src/docbook/xslt/xhtml.xsl deleted file mode 100644 index 2b261a5b4e..0000000000 --- a/src/docbook/xslt/xhtml.xsl +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - 1 - diff --git a/src/main/java/org/jboss/netty/channel/AbstractChannel.java b/src/main/java/org/jboss/netty/channel/AbstractChannel.java index 862884df3c..f8ffaec603 100644 --- a/src/main/java/org/jboss/netty/channel/AbstractChannel.java +++ b/src/main/java/org/jboss/netty/channel/AbstractChannel.java @@ -57,7 +57,8 @@ public abstract class AbstractChannel implements Channel { /** Cache for the string representation of this channel */ private boolean strValConnected; private String strVal; - + private volatile Object attachment; + /** * Creates a new instance. 
* @@ -272,6 +273,16 @@ public abstract class AbstractChannel implements Channel { return Channels.write(this, message, remoteAddress); } + @Override + public Object getAttachment() { + return attachment; + } + + @Override + public void setAttachment(Object attachment) { + this.attachment = attachment; + } + /** * Returns the {@link String} representation of this channel. The returned * string contains the {@linkplain #getId() ID}, {@linkplain #getLocalAddress() local address}, diff --git a/src/main/java/org/jboss/netty/channel/Channel.java b/src/main/java/org/jboss/netty/channel/Channel.java index c0e6e51597..f7ed787d0a 100644 --- a/src/main/java/org/jboss/netty/channel/Channel.java +++ b/src/main/java/org/jboss/netty/channel/Channel.java @@ -362,4 +362,19 @@ public interface Channel extends Comparable { * {@code interestOps} change request succeeds or fails */ ChannelFuture setReadable(boolean readable); + + /** + * Retrieves an object which is {@link #setAttachment(Object) attached} to + * this {@link Channel}. + * + * @return {@code null} if no object was attached or + * {@code null} was attached + */ + Object getAttachment(); + + /** + * Attaches an object to this {@link Channel} to store a stateful information + * + */ + void setAttachment(Object attachment); } diff --git a/src/main/java/org/jboss/netty/channel/ChannelLocal.java b/src/main/java/org/jboss/netty/channel/ChannelLocal.java index 928d4fcdf3..91062feada 100644 --- a/src/main/java/org/jboss/netty/channel/ChannelLocal.java +++ b/src/main/java/org/jboss/netty/channel/ChannelLocal.java @@ -32,8 +32,12 @@ import org.jboss.netty.util.internal.ConcurrentIdentityWeakKeyHashMap; * @author The Netty Project * @author Trustin Lee * + * @deprecated Use {@link Channel#setAttachment(Object)} and {@link Channel#getAttachment()} + * * @apiviz.stereotype utility + * */ +@Deprecated public class ChannelLocal { private final ConcurrentMap map = diff --git a/src/main/java/org/jboss/netty/channel/local/DefaultLocalChannel.java b/src/main/java/org/jboss/netty/channel/local/DefaultLocalChannel.java index 81c6355668..97fc9334fc 100644 --- a/src/main/java/org/jboss/netty/channel/local/DefaultLocalChannel.java +++ b/src/main/java/org/jboss/netty/channel/local/DefaultLocalChannel.java @@ -32,7 +32,7 @@ import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelSink; import org.jboss.netty.channel.DefaultChannelConfig; import org.jboss.netty.channel.MessageEvent; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; import org.jboss.netty.util.internal.ThreadLocalBoolean; /** @@ -52,7 +52,7 @@ final class DefaultLocalChannel extends AbstractChannel implements LocalChannel private final ChannelConfig config; private final ThreadLocalBoolean delivering = new ThreadLocalBoolean(); - final Queue writeBuffer = new LinkedTransferQueue(); + final Queue writeBuffer = QueueFactory.createQueue(MessageEvent.class); volatile DefaultLocalChannel pairedChannel; volatile LocalAddress localAddress; diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/AbstractWriteRequestQueue.java b/src/main/java/org/jboss/netty/channel/socket/nio/AbstractWriteRequestQueue.java new file mode 100644 index 0000000000..a73678cdeb --- /dev/null +++ b/src/main/java/org/jboss/netty/channel/socket/nio/AbstractWriteRequestQueue.java @@ -0,0 +1,158 @@ +/* + * Copyright 2011 Red Hat, Inc. 
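Note on the attachment accessors added above: together with the deprecation of ChannelLocal, per-channel state can now live directly on the Channel via the new volatile field. A minimal usage sketch; the handler and the "session" object are illustrative only and not part of this change:

// Illustrative only: per-channel state via the new getAttachment()/setAttachment(Object)
// accessors instead of the now-deprecated ChannelLocal. Names are hypothetical.
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;

public class SessionTrackingHandler extends SimpleChannelUpstreamHandler {
    @Override
    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
        Channel channel = e.getChannel();
        // Previously: ChannelLocal<Object> sessions = ...; sessions.get(channel)
        Object session = channel.getAttachment();
        if (session == null) {
            session = new Object();            // e.g. a per-connection session object
            channel.setAttachment(session);    // stored in the volatile field added above
        }
        ctx.sendUpstream(e);
    }
}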
+ * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package org.jboss.netty.channel.socket.nio; + +import java.util.Collection; +import java.util.Iterator; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; + +import org.jboss.netty.channel.MessageEvent; +import org.jboss.netty.util.internal.QueueFactory; + +/** + * + * + * + * + * @author The Netty Project + * @author Norman Maurer + * + */ +abstract class AbstractWriteRequestQueue implements BlockingQueue{ + + protected final BlockingQueue queue; + + public AbstractWriteRequestQueue() { + this.queue = QueueFactory.createQueue(MessageEvent.class); + } + + @Override + public MessageEvent remove() { + return queue.remove(); + } + + @Override + public MessageEvent element() { + return queue.element(); + } + + @Override + public MessageEvent peek() { + return queue.peek(); + } + + @Override + public int size() { + return queue.size(); + } + + @Override + public boolean isEmpty() { + return queue.isEmpty(); + } + + @Override + public Iterator iterator() { + return queue.iterator(); + } + + @Override + public Object[] toArray() { + return queue.toArray(); + } + + @Override + public T[] toArray(T[] a) { + return queue.toArray(a); + } + + @Override + public boolean containsAll(Collection c) { + return queue.containsAll(c); + } + + @Override + public boolean addAll(Collection c) { + return queue.addAll(c); + } + + @Override + public boolean removeAll(Collection c) { + return queue.removeAll(c); + } + + @Override + public boolean retainAll(Collection c) { + return queue.retainAll(c); + } + + @Override + public void clear() { + queue.clear(); + } + + @Override + public boolean add(MessageEvent e) { + return queue.add(e); + } + + @Override + public void put(MessageEvent e) throws InterruptedException { + queue.put(e); + } + + @Override + public boolean offer(MessageEvent e, long timeout, TimeUnit unit) throws InterruptedException { + return queue.offer(e, timeout, unit); + } + + @Override + public MessageEvent take() throws InterruptedException { + return queue.take(); + } + + @Override + public MessageEvent poll(long timeout, TimeUnit unit) throws InterruptedException { + return queue.poll(timeout, unit); + } + + @Override + public int remainingCapacity() { + return queue.remainingCapacity(); + } + + @Override + public boolean remove(Object o) { + return queue.remove(o); + } + + @Override + public boolean contains(Object o) { + return queue.contains(o); + } + + @Override + public int drainTo(Collection c) { + return queue.drainTo(c); + } + + @Override + public int drainTo(Collection c, int maxElements) { + return queue.drainTo(c, maxElements); + } + +} diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketPipelineSink.java b/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketPipelineSink.java index f7c2d3862f..4a042dfe44 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketPipelineSink.java +++ 
b/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketPipelineSink.java @@ -43,7 +43,7 @@ import org.jboss.netty.channel.MessageEvent; import org.jboss.netty.logging.InternalLogger; import org.jboss.netty.logging.InternalLoggerFactory; import org.jboss.netty.util.internal.DeadLockProofWorker; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; /** * @@ -183,7 +183,7 @@ class NioClientSocketPipelineSink extends AbstractChannelSink { private boolean started; private final AtomicBoolean wakenUp = new AtomicBoolean(); private final Object startStopLock = new Object(); - private final Queue registerTaskQueue = new LinkedTransferQueue(); + private final Queue registerTaskQueue = QueueFactory.createQueue(Runnable.class); Boss() { } diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/NioDatagramChannel.java b/src/main/java/org/jboss/netty/channel/socket/nio/NioDatagramChannel.java index 0403766d54..c2728d4784 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/NioDatagramChannel.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/NioDatagramChannel.java @@ -38,7 +38,7 @@ import org.jboss.netty.channel.ChannelSink; import org.jboss.netty.channel.MessageEvent; import org.jboss.netty.channel.socket.DatagramChannelConfig; import org.jboss.netty.channel.socket.nio.SocketSendBufferPool.SendBuffer; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.LegacyLinkedTransferQueue; import org.jboss.netty.util.internal.ThreadLocalBoolean; /** @@ -247,26 +247,22 @@ class NioDatagramChannel extends AbstractChannel } /** - * {@link WriteRequestQueue} is an extension of {@link LinkedTransferQueue} + * {@link WriteRequestQueue} is an extension of {@link AbstractWriteRequestQueue} * that adds support for highWaterMark checking of the write buffer size. */ private final class WriteRequestQueue extends - LinkedTransferQueue { - - private static final long serialVersionUID = 5057413071460766376L; + AbstractWriteRequestQueue { private final ThreadLocalBoolean notifying = new ThreadLocalBoolean(); - WriteRequestQueue() { - } - + /** - * This method first delegates to {@link LinkedTransferQueue#offer(Object)} and + * This method first delegates to {@link LegacyLinkedTransferQueue#offer(Object)} and * adds support for keeping track of the size of the this write buffer. */ @Override public boolean offer(MessageEvent e) { - boolean success = super.offer(e); + boolean success = queue.offer(e); assert success; int messageSize = getMessageSize(e); @@ -287,12 +283,12 @@ class NioDatagramChannel extends AbstractChannel } /** - * This method first delegates to {@link LinkedTransferQueue#poll()} and + * This method first delegates to {@link LegacyLinkedTransferQueue#poll()} and * adds support for keeping track of the size of the this writebuffers queue. 
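The WriteRequestQueue changes above (and the matching ones in NioSocketChannel further down) replace super calls with delegation to the wrapped queue while keeping the high/low water mark bookkeeping: offer() adds the estimated message size to writeBufferSize, poll() subtracts it again. A condensed sketch of that bookkeeping, assuming hypothetical threshold values and a stand-in sizeOf() helper in place of the channel's real getMessageSize():

// Condensed sketch of the write-buffer size tracking delegated to a wrapped queue
// (e.g. one created by QueueFactory). highWaterMark, lowWaterMark and sizeOf()
// are stand-ins for the channel's actual config and helpers.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;

final class WriteBufferBookkeeping<E> {
    private final BlockingQueue<E> queue;
    private final AtomicInteger writeBufferSize = new AtomicInteger();
    private final int highWaterMark = 64 * 1024;   // hypothetical thresholds
    private final int lowWaterMark  = 32 * 1024;

    WriteBufferBookkeeping(BlockingQueue<E> queue) { this.queue = queue; }

    boolean offer(E e) {
        boolean success = queue.offer(e);                 // delegate instead of super.offer(e)
        int newSize = writeBufferSize.addAndGet(sizeOf(e));
        if (newSize >= highWaterMark) {
            // the real WriteRequestQueue marks the channel unwritable here
        }
        return success;
    }

    E poll() {
        E e = queue.poll();                               // delegate instead of super.poll()
        if (e != null) {
            int newSize = writeBufferSize.addAndGet(-sizeOf(e));
            if (newSize <= lowWaterMark) {
                // ...and marks it writable again here
            }
        }
        return e;
    }

    private int sizeOf(E e) { return 1; }                 // stands in for getMessageSize(e)
}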
*/ @Override public MessageEvent poll() { - MessageEvent e = super.poll(); + MessageEvent e = queue.poll(); if (e != null) { int messageSize = getMessageSize(e); int newWriteBufferSize = writeBufferSize.addAndGet(-messageSize); diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/NioDatagramWorker.java b/src/main/java/org/jboss/netty/channel/socket/nio/NioDatagramWorker.java index 5839cfe2ab..4ce2983610 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/NioDatagramWorker.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/NioDatagramWorker.java @@ -45,7 +45,7 @@ import org.jboss.netty.channel.ReceiveBufferSizePredictor; import org.jboss.netty.channel.socket.nio.SocketSendBufferPool.SendBuffer; import org.jboss.netty.logging.InternalLogger; import org.jboss.netty.logging.InternalLoggerFactory; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; /** * A class responsible for registering channels with {@link Selector}. @@ -105,12 +105,12 @@ class NioDatagramWorker implements Runnable { /** * Queue of {@link ChannelRegistionTask}s */ - private final Queue registerTaskQueue = new LinkedTransferQueue(); + private final Queue registerTaskQueue = QueueFactory.createQueue(Runnable.class); /** * Queue of WriteTasks */ - private final Queue writeTaskQueue = new LinkedTransferQueue(); + private final Queue writeTaskQueue = QueueFactory.createQueue(Runnable.class); private volatile int cancelledKeys; // should use AtomicInteger but we just need approximation diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/NioSocketChannel.java b/src/main/java/org/jboss/netty/channel/socket/nio/NioSocketChannel.java index 37d6af9e53..16b91f16a9 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/NioSocketChannel.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/NioSocketChannel.java @@ -33,7 +33,6 @@ import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelSink; import org.jboss.netty.channel.MessageEvent; import org.jboss.netty.channel.socket.nio.SocketSendBufferPool.SendBuffer; -import org.jboss.netty.util.internal.LinkedTransferQueue; import org.jboss.netty.util.internal.ThreadLocalBoolean; /** @@ -196,9 +195,7 @@ class NioSocketChannel extends AbstractChannel } } - private final class WriteRequestQueue extends LinkedTransferQueue { - - private static final long serialVersionUID = -246694024103520626L; + private final class WriteRequestQueue extends AbstractWriteRequestQueue { private final ThreadLocalBoolean notifying = new ThreadLocalBoolean(); @@ -207,7 +204,7 @@ class NioSocketChannel extends AbstractChannel @Override public boolean offer(MessageEvent e) { - boolean success = super.offer(e); + boolean success = queue.offer(e); assert success; int messageSize = getMessageSize(e); @@ -229,7 +226,7 @@ class NioSocketChannel extends AbstractChannel @Override public MessageEvent poll() { - MessageEvent e = super.poll(); + MessageEvent e = queue.poll(); if (e != null) { int messageSize = getMessageSize(e); int newWriteBufferSize = writeBufferSize.addAndGet(-messageSize); diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/NioWorker.java b/src/main/java/org/jboss/netty/channel/socket/nio/NioWorker.java index 3e7a2fa2a0..4032db0a33 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/NioWorker.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/NioWorker.java @@ -47,7 +47,7 @@ import 
org.jboss.netty.channel.socket.nio.SocketSendBufferPool.SendBuffer; import org.jboss.netty.logging.InternalLogger; import org.jboss.netty.logging.InternalLoggerFactory; import org.jboss.netty.util.internal.DeadLockProofWorker; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; /** * @@ -70,8 +70,8 @@ class NioWorker implements Runnable { private final AtomicBoolean wakenUp = new AtomicBoolean(); private final ReadWriteLock selectorGuard = new ReentrantReadWriteLock(); private final Object startStopLock = new Object(); - private final Queue registerTaskQueue = new LinkedTransferQueue(); - private final Queue writeTaskQueue = new LinkedTransferQueue(); + private final Queue registerTaskQueue = QueueFactory.createQueue(Runnable.class); + private final Queue writeTaskQueue = QueueFactory.createQueue(Runnable.class); private volatile int cancelledKeys; // should use AtomicInteger but we just need approximation private final SocketReceiveBufferPool recvBufferPool = new SocketReceiveBufferPool(); diff --git a/src/main/java/org/jboss/netty/handler/codec/http/HttpClientCodec.java b/src/main/java/org/jboss/netty/handler/codec/http/HttpClientCodec.java index 656d5365d2..6a4f8056a6 100644 --- a/src/main/java/org/jboss/netty/handler/codec/http/HttpClientCodec.java +++ b/src/main/java/org/jboss/netty/handler/codec/http/HttpClientCodec.java @@ -23,7 +23,7 @@ import org.jboss.netty.channel.ChannelDownstreamHandler; import org.jboss.netty.channel.ChannelEvent; import org.jboss.netty.channel.ChannelHandlerContext; import org.jboss.netty.channel.ChannelUpstreamHandler; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; /** * A combination of {@link HttpRequestEncoder} and {@link HttpResponseDecoder} @@ -46,7 +46,7 @@ public class HttpClientCodec implements ChannelUpstreamHandler, ChannelDownstreamHandler { /** A queue that is used for correlating a request and a response. */ - final Queue queue = new LinkedTransferQueue(); + final Queue queue = QueueFactory.createQueue(HttpMethod.class); /** If true, decoding stops (i.e. pass-through) */ volatile boolean done; diff --git a/src/main/java/org/jboss/netty/handler/codec/http/HttpContentEncoder.java b/src/main/java/org/jboss/netty/handler/codec/http/HttpContentEncoder.java index b471cdc948..6cac360ce5 100644 --- a/src/main/java/org/jboss/netty/handler/codec/http/HttpContentEncoder.java +++ b/src/main/java/org/jboss/netty/handler/codec/http/HttpContentEncoder.java @@ -24,7 +24,7 @@ import org.jboss.netty.channel.Channels; import org.jboss.netty.channel.MessageEvent; import org.jboss.netty.channel.SimpleChannelHandler; import org.jboss.netty.handler.codec.embedder.EncoderEmbedder; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; /** * Encodes the content of the outbound {@link HttpResponse} and {@link HttpChunk}. 
@@ -53,7 +53,7 @@ import org.jboss.netty.util.internal.LinkedTransferQueue; */ public abstract class HttpContentEncoder extends SimpleChannelHandler { - private final Queue acceptEncodingQueue = new LinkedTransferQueue(); + private final Queue acceptEncodingQueue = QueueFactory.createQueue(String.class); private volatile EncoderEmbedder encoder; /** diff --git a/src/main/java/org/jboss/netty/handler/execution/MemoryAwareThreadPoolExecutor.java b/src/main/java/org/jboss/netty/handler/execution/MemoryAwareThreadPoolExecutor.java index 9a5c09f664..38d04e6821 100644 --- a/src/main/java/org/jboss/netty/handler/execution/MemoryAwareThreadPoolExecutor.java +++ b/src/main/java/org/jboss/netty/handler/execution/MemoryAwareThreadPoolExecutor.java @@ -37,6 +37,7 @@ import org.jboss.netty.channel.WriteCompletionEvent; import org.jboss.netty.util.DefaultObjectSizeEstimator; import org.jboss.netty.util.ObjectSizeEstimator; import org.jboss.netty.util.internal.ConcurrentIdentityHashMap; +import org.jboss.netty.util.internal.QueueFactory; import org.jboss.netty.util.internal.SharedResourceMisuseDetector; /** @@ -212,7 +213,7 @@ public class MemoryAwareThreadPoolExecutor extends ThreadPoolExecutor { ThreadFactory threadFactory) { super(corePoolSize, corePoolSize, keepAliveTime, unit, - new LinkedTransferQueue(), threadFactory, new NewThreadRunsPolicy()); + QueueFactory.createQueue(Runnable.class), threadFactory, new NewThreadRunsPolicy()); if (objectSizeEstimator == null) { throw new NullPointerException("objectSizeEstimator"); diff --git a/src/main/java/org/jboss/netty/handler/execution/OrderedMemoryAwareThreadPoolExecutor.java b/src/main/java/org/jboss/netty/handler/execution/OrderedMemoryAwareThreadPoolExecutor.java index c583addcc2..f69a5cda25 100644 --- a/src/main/java/org/jboss/netty/handler/execution/OrderedMemoryAwareThreadPoolExecutor.java +++ b/src/main/java/org/jboss/netty/handler/execution/OrderedMemoryAwareThreadPoolExecutor.java @@ -31,7 +31,7 @@ import org.jboss.netty.channel.ChannelState; import org.jboss.netty.channel.ChannelStateEvent; import org.jboss.netty.util.ObjectSizeEstimator; import org.jboss.netty.util.internal.ConcurrentIdentityWeakKeyHashMap; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; /** * A {@link MemoryAwareThreadPoolExecutor} which makes sure the events from the @@ -284,7 +284,7 @@ public class OrderedMemoryAwareThreadPoolExecutor extends } private final class ChildExecutor implements Executor, Runnable { - private final Queue tasks = new LinkedTransferQueue(); + private final Queue tasks = QueueFactory.createQueue(Runnable.class); private final AtomicBoolean isRunning = new AtomicBoolean(false); ChildExecutor() { diff --git a/src/main/java/org/jboss/netty/handler/queue/BlockingReadHandler.java b/src/main/java/org/jboss/netty/handler/queue/BlockingReadHandler.java index aa2cb9f835..509dd44724 100644 --- a/src/main/java/org/jboss/netty/handler/queue/BlockingReadHandler.java +++ b/src/main/java/org/jboss/netty/handler/queue/BlockingReadHandler.java @@ -29,7 +29,7 @@ import org.jboss.netty.channel.ExceptionEvent; import org.jboss.netty.channel.MessageEvent; import org.jboss.netty.channel.SimpleChannelUpstreamHandler; import org.jboss.netty.util.internal.DeadLockProofWorker; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; /** * Emulates blocking read operation. 
This handler stores all received messages @@ -84,7 +84,7 @@ public class BlockingReadHandler extends SimpleChannelUpstreamHandler { * implementation. */ public BlockingReadHandler() { - this(new LinkedTransferQueue()); + this(QueueFactory.createQueue(ChannelEvent.class)); } /** diff --git a/src/main/java/org/jboss/netty/handler/queue/BufferedWriteHandler.java b/src/main/java/org/jboss/netty/handler/queue/BufferedWriteHandler.java index f9d7a8d8bd..4c43f01562 100644 --- a/src/main/java/org/jboss/netty/handler/queue/BufferedWriteHandler.java +++ b/src/main/java/org/jboss/netty/handler/queue/BufferedWriteHandler.java @@ -33,7 +33,7 @@ import org.jboss.netty.channel.MessageEvent; import org.jboss.netty.channel.SimpleChannelHandler; import org.jboss.netty.channel.socket.nio.NioSocketChannelConfig; import org.jboss.netty.util.HashedWheelTimer; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; /** * Emulates buffered write operation. This handler stores all write requests @@ -193,7 +193,7 @@ public class BufferedWriteHandler extends SimpleChannelHandler { * into a single write request on {@link #flush()} */ public BufferedWriteHandler(boolean consolidateOnFlush) { - this(new LinkedTransferQueue(), consolidateOnFlush); + this(QueueFactory.createQueue(MessageEvent.class), consolidateOnFlush); } /** diff --git a/src/main/java/org/jboss/netty/handler/ssl/SslHandler.java b/src/main/java/org/jboss/netty/handler/ssl/SslHandler.java index 968da0e049..972e6707e4 100644 --- a/src/main/java/org/jboss/netty/handler/ssl/SslHandler.java +++ b/src/main/java/org/jboss/netty/handler/ssl/SslHandler.java @@ -50,8 +50,8 @@ import org.jboss.netty.channel.MessageEvent; import org.jboss.netty.handler.codec.frame.FrameDecoder; import org.jboss.netty.logging.InternalLogger; import org.jboss.netty.logging.InternalLoggerFactory; -import org.jboss.netty.util.internal.LinkedTransferQueue; import org.jboss.netty.util.internal.NonReentrantLock; +import org.jboss.netty.util.internal.QueueFactory; /** * Adds SSL @@ -196,7 +196,7 @@ public class SslHandler extends FrameDecoder int ignoreClosedChannelException; final Object ignoreClosedChannelExceptionLock = new Object(); private final Queue pendingUnencryptedWrites = new LinkedList(); - private final Queue pendingEncryptedWrites = new LinkedTransferQueue(); + private final Queue pendingEncryptedWrites = QueueFactory.createQueue(MessageEvent.class); private final NonReentrantLock pendingEncryptedWritesLock = new NonReentrantLock(); private volatile boolean issueHandshake = false; diff --git a/src/main/java/org/jboss/netty/handler/stream/ChunkedWriteHandler.java b/src/main/java/org/jboss/netty/handler/stream/ChunkedWriteHandler.java index 81259816a4..d3bbe7aec0 100644 --- a/src/main/java/org/jboss/netty/handler/stream/ChunkedWriteHandler.java +++ b/src/main/java/org/jboss/netty/handler/stream/ChunkedWriteHandler.java @@ -35,7 +35,7 @@ import org.jboss.netty.channel.Channels; import org.jboss.netty.channel.MessageEvent; import org.jboss.netty.logging.InternalLogger; import org.jboss.netty.logging.InternalLoggerFactory; -import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.jboss.netty.util.internal.QueueFactory; /** * A {@link ChannelHandler} that adds support for writing a large data stream @@ -79,8 +79,7 @@ public class ChunkedWriteHandler implements ChannelUpstreamHandler, ChannelDowns private static final InternalLogger logger = InternalLoggerFactory.getInstance(ChunkedWriteHandler.class); - private 
final Queue queue = - new LinkedTransferQueue(); + private final Queue queue = QueueFactory.createQueue(MessageEvent.class); private ChannelHandlerContext ctx; private MessageEvent currentEvent; diff --git a/src/main/java/org/jboss/netty/util/UnsafeDetectUtil.java b/src/main/java/org/jboss/netty/util/UnsafeDetectUtil.java new file mode 100644 index 0000000000..caf4da2233 --- /dev/null +++ b/src/main/java/org/jboss/netty/util/UnsafeDetectUtil.java @@ -0,0 +1,51 @@ +/* + * Copyright 2011 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package org.jboss.netty.util; + +import java.util.concurrent.atomic.AtomicInteger; + + +/** + * Utility which checks if {@value #UNSAFE} class can be found in the classpath + * + * + * + * @author The Netty Project + * @author Norman Maurer + * + */ +public class UnsafeDetectUtil { + + private static final String UNSAFE = "sun.misc.Unsafe"; + private static final boolean UNSAFE_FOUND = isUnsafeFound(AtomicInteger.class.getClassLoader()); + + public static boolean isUnsafeFound(ClassLoader loader) { + try { + Class.forName(UNSAFE, true, loader); + return true; + } catch (ClassNotFoundException e) { + return false; + } + } + + public static boolean isUnsafeFound() { + return UNSAFE_FOUND; + } + + private UnsafeDetectUtil() { + // only static method supported + } +} diff --git a/src/main/java/org/jboss/netty/util/internal/LegacyLinkedTransferQueue.java b/src/main/java/org/jboss/netty/util/internal/LegacyLinkedTransferQueue.java new file mode 100644 index 0000000000..5146c2b632 --- /dev/null +++ b/src/main/java/org/jboss/netty/util/internal/LegacyLinkedTransferQueue.java @@ -0,0 +1,1370 @@ +/* + * Copyright 2009 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
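UnsafeDetectUtil above only reports whether sun.misc.Unsafe can be loaded; QueueFactory itself is referenced throughout this change but not included in this excerpt. Purely as an assumption about its likely shape, based on that utility and on the LegacyLinkedTransferQueue javadoc below, a factory along these lines would pick an Unsafe-backed queue when possible and fall back to the legacy implementation otherwise (the stand-in queue type below is not the one the real factory uses):

// Hedged sketch only: the real QueueFactory is not shown in this diff excerpt.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

import org.jboss.netty.util.UnsafeDetectUtil;
import org.jboss.netty.util.internal.LegacyLinkedTransferQueue;

final class QueueFactorySketch {
    private static final boolean USE_UNSAFE = UnsafeDetectUtil.isUnsafeFound();

    static <T> BlockingQueue<T> createQueue(Class<T> itemClass) {
        if (USE_UNSAFE) {
            // stand-in for whatever Unsafe-optimised queue the real factory returns
            return new LinkedBlockingQueue<T>();
        }
        // works without sun.misc.Unsafe, as stated in its class javadoc
        return new LegacyLinkedTransferQueue<T>();
    }

    private QueueFactorySketch() { }
}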
+ */ + +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.jboss.netty.util.internal; + +import java.util.AbstractQueue; +import java.util.Collection; +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import java.util.concurrent.locks.LockSupport; +/** + * + * This version does work even if sun.misc.Unsafe is not found in the classpath. So this is kept for compatibility reasons. + * Please use {@link QueueFactory} to create a Queue as it will use the "optimal" implementation depending on the JVM + * + *
+ *
+ * + * An unbounded {@link BlockingQueue} based on linked nodes. + * This queue orders elements FIFO (first-in-first-out) with respect + * to any given producer. The head of the queue is that + * element that has been on the queue the longest time for some + * producer. The tail of the queue is that element that has + * been on the queue the shortest time for some producer. + * + *

Beware that, unlike in most collections, the {@code size} + * method is NOT a constant-time operation. Because of the + * asynchronous nature of these queues, determining the current number + * of elements requires a traversal of the elements. + * + *

This class and its iterator implement all of the + * optional methods of the {@link Collection} and {@link + * Iterator} interfaces. + * + *

Memory consistency effects: As with other concurrent + * collections, actions in a thread prior to placing an object into a + * {@code LinkedTransferQueue} + * happen-before + * actions subsequent to the access or removal of that element from + * the {@code LinkedTransferQueue} in another thread. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @author The Netty Project + * @author Doug Lea + * @author Trustin Lee + * + * @param the type of elements held in this collection + */ +public class LegacyLinkedTransferQueue extends AbstractQueue + implements BlockingQueue, java.io.Serializable { + private static final long serialVersionUID = -3223113410248163686L; + + /* + * *** Overview of Dual Queues with Slack *** + * + * Dual Queues, introduced by Scherer and Scott + * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are + * (linked) queues in which nodes may represent either data or + * requests. When a thread tries to enqueue a data node, but + * encounters a request node, it instead "matches" and removes it; + * and vice versa for enqueuing requests. Blocking Dual Queues + * arrange that threads enqueuing unmatched requests block until + * other threads provide the match. Dual Synchronous Queues (see + * Scherer, Lea, & Scott + * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf) + * additionally arrange that threads enqueuing unmatched data also + * block. Dual Transfer Queues support all of these modes, as + * dictated by callers. + * + * A FIFO dual queue may be implemented using a variation of the + * Michael & Scott (M&S) lock-free queue algorithm + * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf). + * It maintains two pointer fields, "head", pointing to a + * (matched) node that in turn points to the first actual + * (unmatched) queue node (or null if empty); and "tail" that + * points to the last node on the queue (or again null if + * empty). For example, here is a possible queue with four data + * elements: + * + * head tail + * | | + * v v + * M -> U -> U -> U -> U + * + * The M&S queue algorithm is known to be prone to scalability and + * overhead limitations when maintaining (via CAS) these head and + * tail pointers. This has led to the development of + * contention-reducing variants such as elimination arrays (see + * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and + * optimistic back pointers (see Ladan-Mozes & Shavit + * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf). + * However, the nature of dual queues enables a simpler tactic for + * improving M&S-style implementations when dual-ness is needed. + * + * In a dual queue, each node must atomically maintain its match + * status. While there are other possible variants, we implement + * this here as: for a data-mode node, matching entails CASing an + * "item" field from a non-null data value to null upon match, and + * vice-versa for request nodes, CASing from null to a data + * value. (Note that the linearization properties of this style of + * queue are easy to verify -- elements are made available by + * linking, and unavailable by matching.) Compared to plain M&S + * queues, this property of dual queues requires one additional + * successful atomic operation per enq/deq pair. But it also + * enables lower cost variants of queue maintenance mechanics. (A + * variation of this idea applies even for non-dual queues that + * support deletion of interior elements, such as + * j.u.c.ConcurrentLinkedQueue.) + * + * Once a node is matched, its match status can never again + * change. We may thus arrange that the linked list of them + * contain a prefix of zero or more matched nodes, followed by a + * suffix of zero or more unmatched nodes. 
(Note that we allow + * both the prefix and suffix to be zero length, which in turn + * means that we do not use a dummy header.) If we were not + * concerned with either time or space efficiency, we could + * correctly perform enqueue and dequeue operations by traversing + * from a pointer to the initial node; CASing the item of the + * first unmatched node on match and CASing the next field of the + * trailing node on appends. (Plus some special-casing when + * initially empty). While this would be a terrible idea in + * itself, it does have the benefit of not requiring ANY atomic + * updates on head/tail fields. + * + * We introduce here an approach that lies between the extremes of + * never versus always updating queue (head and tail) pointers. + * This offers a tradeoff between sometimes requiring extra + * traversal steps to locate the first and/or last unmatched + * nodes, versus the reduced overhead and contention of fewer + * updates to queue pointers. For example, a possible snapshot of + * a queue is: + * + * head tail + * | | + * v v + * M -> M -> U -> U -> U -> U + * + * The best value for this "slack" (the targeted maximum distance + * between the value of "head" and the first unmatched node, and + * similarly for "tail") is an empirical matter. We have found + * that using very small constants in the range of 1-3 work best + * over a range of platforms. Larger values introduce increasing + * costs of cache misses and risks of long traversal chains, while + * smaller values increase CAS contention and overhead. + * + * Dual queues with slack differ from plain M&S dual queues by + * virtue of only sometimes updating head or tail pointers when + * matching, appending, or even traversing nodes; in order to + * maintain a targeted slack. The idea of "sometimes" may be + * operationalized in several ways. The simplest is to use a + * per-operation counter incremented on each traversal step, and + * to try (via CAS) to update the associated queue pointer + * whenever the count exceeds a threshold. Another, that requires + * more overhead, is to use random number generators to update + * with a given probability per traversal step. + * + * In any strategy along these lines, because CASes updating + * fields may fail, the actual slack may exceed targeted + * slack. However, they may be retried at any time to maintain + * targets. Even when using very small slack values, this + * approach works well for dual queues because it allows all + * operations up to the point of matching or appending an item + * (hence potentially allowing progress by another thread) to be + * read-only, thus not introducing any further contention. As + * described below, we implement this by performing slack + * maintenance retries only after these points. + * + * As an accompaniment to such techniques, traversal overhead can + * be further reduced without increasing contention of head + * pointer updates: Threads may sometimes shortcut the "next" link + * path from the current "head" node to be closer to the currently + * known first unmatched node, and similarly for tail. Again, this + * may be triggered with using thresholds or randomization. 
+ * + * These ideas must be further extended to avoid unbounded amounts + * of costly-to-reclaim garbage caused by the sequential "next" + * links of nodes starting at old forgotten head nodes: As first + * described in detail by Boehm + * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC + * delays noticing that any arbitrarily old node has become + * garbage, all newer dead nodes will also be unreclaimed. + * (Similar issues arise in non-GC environments.) To cope with + * this in our implementation, upon CASing to advance the head + * pointer, we set the "next" link of the previous head to point + * only to itself; thus limiting the length of connected dead lists. + * (We also take similar care to wipe out possibly garbage + * retaining values held in other Node fields.) However, doing so + * adds some further complexity to traversal: If any "next" + * pointer links to itself, it indicates that the current thread + * has lagged behind a head-update, and so the traversal must + * continue from the "head". Traversals trying to find the + * current tail starting from "tail" may also encounter + * self-links, in which case they also continue at "head". + * + * It is tempting in slack-based scheme to not even use CAS for + * updates (similarly to Ladan-Mozes & Shavit). However, this + * cannot be done for head updates under the above link-forgetting + * mechanics because an update may leave head at a detached node. + * And while direct writes are possible for tail updates, they + * increase the risk of long retraversals, and hence long garbage + * chains, which can be much more costly than is worthwhile + * considering that the cost difference of performing a CAS vs + * write is smaller when they are not triggered on each operation + * (especially considering that writes and CASes equally require + * additional GC bookkeeping ("write barriers") that are sometimes + * more costly than the writes themselves because of contention). + * + * *** Overview of implementation *** + * + * We use a threshold-based approach to updates, with a slack + * threshold of two -- that is, we update head/tail when the + * current pointer appears to be two or more steps away from the + * first/last node. The slack value is hard-wired: a path greater + * than one is naturally implemented by checking equality of + * traversal pointers except when the list has only one element, + * in which case we keep slack threshold at one. Avoiding tracking + * explicit counts across method calls slightly simplifies an + * already-messy implementation. Using randomization would + * probably work better if there were a low-quality dirt-cheap + * per-thread one available, but even ThreadLocalRandom is too + * heavy for these purposes. + * + * With such a small slack threshold value, it is not worthwhile + * to augment this with path short-circuiting (i.e., unsplicing + * interior nodes) except in the case of cancellation/removal (see + * below). + * + * We allow both the head and tail fields to be null before any + * nodes are enqueued; initializing upon first append. This + * simplifies some other logic, as well as providing more + * efficient explicit control paths instead of letting JVMs insert + * implicit NullPointerExceptions when they are null. While not + * currently fully implemented, we also leave open the possibility + * of re-nulling these fields when empty (which is complicated to + * arrange, for little benefit.) 
+ * + * All enqueue/dequeue operations are handled by the single method + * "xfer" with parameters indicating whether to act as some form + * of offer, put, poll, take, or transfer (each possibly with + * timeout). The relative complexity of using one monolithic + * method outweighs the code bulk and maintenance problems of + * using separate methods for each case. + * + * Operation consists of up to three phases. The first is + * implemented within method xfer, the second in tryAppend, and + * the third in method awaitMatch. + * + * 1. Try to match an existing node + * + * Starting at head, skip already-matched nodes until finding + * an unmatched node of opposite mode, if one exists, in which + * case matching it and returning, also if necessary updating + * head to one past the matched node (or the node itself if the + * list has no other unmatched nodes). If the CAS misses, then + * a loop retries advancing head by two steps until either + * success or the slack is at most two. By requiring that each + * attempt advances head by two (if applicable), we ensure that + * the slack does not grow without bound. Traversals also check + * if the initial head is now off-list, in which case they + * start at the new head. + * + * If no candidates are found and the call was untimed + * poll/offer, (argument "how" is NOW) return. + * + * 2. Try to append a new node (method tryAppend) + * + * Starting at current tail pointer, find the actual last node + * and try to append a new node (or if head was null, establish + * the first node). Nodes can be appended only if their + * predecessors are either already matched or are of the same + * mode. If we detect otherwise, then a new node with opposite + * mode must have been appended during traversal, so we must + * restart at phase 1. The traversal and update steps are + * otherwise similar to phase 1: Retrying upon CAS misses and + * checking for staleness. In particular, if a self-link is + * encountered, then we can safely jump to a node on the list + * by continuing the traversal at current head. + * + * On successful append, if the call was ASYNC, return. + * + * 3. Await match or cancellation (method awaitMatch) + * + * Wait for another thread to match node; instead cancelling if + * the current thread was interrupted or the wait timed out. On + * multiprocessors, we use front-of-queue spinning: If a node + * appears to be the first unmatched node in the queue, it + * spins a bit before blocking. In either case, before blocking + * it tries to unsplice any nodes between the current "head" + * and the first unmatched node. + * + * Front-of-queue spinning vastly improves performance of + * heavily contended queues. And so long as it is relatively + * brief and "quiet", spinning does not much impact performance + * of less-contended queues. During spins threads check their + * interrupt status and generate a thread-local random number + * to decide to occasionally perform a Thread.yield. While + * yield has underdefined specs, we assume that might it help, + * and will not hurt in limiting impact of spinning on busy + * systems. We also use smaller (1/2) spins for nodes that are + * not known to be front but whose predecessors have not + * blocked -- these "chained" spins avoid artifacts of + * front-of-queue rules which otherwise lead to alternating + * nodes spinning vs blocking. 
Further, front threads that + * represent phase changes (from data to request node or vice + * versa) compared to their predecessors receive additional + * chained spins, reflecting longer paths typically required to + * unblock threads during phase changes. + * + * + * ** Unlinking removed interior nodes ** + * + * In addition to minimizing garbage retention via self-linking + * described above, we also unlink removed interior nodes. These + * may arise due to timed out or interrupted waits, or calls to + * remove(x) or Iterator.remove. Normally, given a node that was + * at one time known to be the predecessor of some node s that is + * to be removed, we can unsplice s by CASing the next field of + * its predecessor if it still points to s (otherwise s must + * already have been removed or is now offlist). But there are two + * situations in which we cannot guarantee to make node s + * unreachable in this way: (1) If s is the trailing node of list + * (i.e., with null next), then it is pinned as the target node + * for appends, so can only be removed later after other nodes are + * appended. (2) We cannot necessarily unlink s given a + * predecessor node that is matched (including the case of being + * cancelled): the predecessor may already be unspliced, in which + * case some previous reachable node may still point to s. + * (For further explanation see Herlihy & Shavit "The Art of + * Multiprocessor Programming" chapter 9). Although, in both + * cases, we can rule out the need for further action if either s + * or its predecessor are (or can be made to be) at, or fall off + * from, the head of list. + * + * Without taking these into account, it would be possible for an + * unbounded number of supposedly removed nodes to remain + * reachable. Situations leading to such buildup are uncommon but + * can occur in practice; for example when a series of short timed + * calls to poll repeatedly time out but never otherwise fall off + * the list because of an untimed call to take at the front of the + * queue. + * + * When these cases arise, rather than always retraversing the + * entire list to find an actual predecessor to unlink (which + * won't help for case (1) anyway), we record a conservative + * estimate of possible unsplice failures (in "sweepVotes"). + * We trigger a full sweep when the estimate exceeds a threshold + * ("SWEEP_THRESHOLD") indicating the maximum number of estimated + * removal failures to tolerate before sweeping through, unlinking + * cancelled nodes that were not unlinked upon initial removal. + * We perform sweeps by the thread hitting threshold (rather than + * background threads or by spreading work to other threads) + * because in the main contexts in which removal occurs, the + * caller is already timed-out, cancelled, or performing a + * potentially O(n) operation (e.g. remove(x)), none of which are + * time-critical enough to warrant the overhead that alternatives + * would impose on other threads. + * + * Because the sweepVotes estimate is conservative, and because + * nodes become unlinked "naturally" as they fall off the head of + * the queue, and because we allow votes to accumulate even while + * sweeps are in progress, there are typically significantly fewer + * such nodes than estimated. Choice of a threshold value + * balances the likelihood of wasted effort and contention, versus + * providing a worst-case bound on retention of interior nodes in + * quiescent queues. 
The value defined below was chosen + * empirically to balance these under various timeout scenarios. + * + * Note that we cannot self-link unlinked interior nodes during + * sweeps. However, the associated garbage chains terminate when + * some successor ultimately falls off the head of the list and is + * self-linked. + */ + + /** True if on multiprocessor */ + private static final boolean MP = + Runtime.getRuntime().availableProcessors() > 1; + + /** + * The number of times to spin (with randomly interspersed calls + * to Thread.yield) on multiprocessor before blocking when a node + * is apparently the first waiter in the queue. See above for + * explanation. Must be a power of two. The value is empirically + * derived -- it works pretty well across a variety of processors, + * numbers of CPUs, and OSes. + */ + private static final int FRONT_SPINS = 1 << 7; + + /** + * The number of times to spin before blocking when a node is + * preceded by another node that is apparently spinning. Also + * serves as an increment to FRONT_SPINS on phase changes, and as + * base average frequency for yielding during spins. Must be a + * power of two. + */ + private static final int CHAINED_SPINS = FRONT_SPINS >>> 1; + + /** + * The maximum number of estimated removal failures (sweepVotes) + * to tolerate before sweeping through the queue unlinking + * cancelled nodes that were not unlinked upon initial + * removal. See above for explanation. The value must be at least + * two to avoid useless sweeps when removing trailing nodes. + */ + static final int SWEEP_THRESHOLD = 32; + + /** + * Queue nodes. Uses Object, not E, for items to allow forgetting + * them after use. Relies heavily on Unsafe mechanics to minimize + * unnecessary ordering constraints: Writes that are intrinsically + * ordered wrt other accesses or CASes use simple relaxed forms. + */ + static final class Node { + final boolean isData; // false if this is a request node + volatile Object item; // initially non-null if isData; CASed to match + volatile Node next; + volatile Thread waiter; // null until waiting + + // CAS methods for fields + boolean casNext(Node cmp, Node val) { + if (AtomicFieldUpdaterUtil.isAvailable()) { + return nextUpdater.compareAndSet(this, cmp, val); + } else { + synchronized (this) { + if (next == cmp) { + next = val; + return true; + } else { + return false; + } + } + } + } + + boolean casItem(Object cmp, Object val) { + // assert cmp == null || cmp.getClass() != Node.class; + if (AtomicFieldUpdaterUtil.isAvailable()) { + return itemUpdater.compareAndSet(this, cmp, val); + } else { + synchronized (this) { + if (item == cmp) { + item = val; + return true; + } else { + return false; + } + } + } + } + + /** + * Constructs a new node. Uses relaxed write because item can + * only be seen after publication via casNext. + */ + Node(Object item, boolean isData) { + this.item = item; + this.isData = isData; + } + + /** + * Links node to itself to avoid garbage retention. Called + * only after CASing head field, so uses relaxed write. + */ + void forgetNext() { + this.next = this; + } + + /** + * Sets item to self and waiter to null, to avoid garbage + * retention after matching or cancelling. Uses relaxed writes + * bacause order is already constrained in the only calling + * contexts: item is forgotten only after volatile/atomic + * mechanics that extract items. Similarly, clearing waiter + * follows either CAS or return from park (if ever parked; + * else we don't care). 
+ */ + void forgetContents() { + this.item = this; + this.waiter = null; + } + + /** + * Returns true if this node has been matched, including the + * case of artificial matches due to cancellation. + */ + boolean isMatched() { + Object x = item; + return x == this || x == null == isData; + } + + /** + * Returns true if this is an unmatched request node. + */ + boolean isUnmatchedRequest() { + return !isData && item == null; + } + + /** + * Returns true if a node with the given mode cannot be + * appended to this node because this node is unmatched and + * has opposite data mode. + */ + boolean cannotPrecede(boolean haveData) { + boolean d = isData; + Object x; + return d != haveData && (x = item) != this && x != null == d; + } + + /** + * Tries to artificially match a data node -- used by remove. + */ + boolean tryMatchData() { + // assert isData; + Object x = item; + if (x != null && x != this && casItem(x, null)) { + LockSupport.unpark(waiter); + return true; + } + return false; + } + + private static final AtomicReferenceFieldUpdater nextUpdater = + AtomicFieldUpdaterUtil.newRefUpdater(Node.class, Node.class, "next"); + private static final AtomicReferenceFieldUpdater itemUpdater = + AtomicFieldUpdaterUtil.newRefUpdater(Node.class, Object.class, "item"); + + } + + /** head of the queue; null until first enqueue */ + transient volatile Node head; + + /** tail of the queue; null until first append */ + transient volatile Node tail; + + /** The number of apparent failures to unsplice removed nodes */ + transient volatile int sweepVotes; + + // CAS methods for fields + private boolean casTail(Node cmp, Node val) { + if (AtomicFieldUpdaterUtil.isAvailable()) { + return tailUpdater.compareAndSet(this, cmp, val); + } else { + synchronized (this) { + if (tail == cmp) { + tail = val; + return true; + } else { + return false; + } + } + } + } + + private boolean casHead(Node cmp, Node val) { + if (AtomicFieldUpdaterUtil.isAvailable()) { + return headUpdater.compareAndSet(this, cmp, val); + } else { + synchronized (this) { + if (head == cmp) { + head = val; + return true; + } else { + return false; + } + } + } + } + + private boolean casSweepVotes(int cmp, int val) { + if (AtomicFieldUpdaterUtil.isAvailable()) { + return sweepVotesUpdater.compareAndSet(this, cmp, val); + } else { + synchronized (this) { + if (sweepVotes == cmp) { + sweepVotes = val; + return true; + } else { + return false; + } + } + } + } + + /* + * Possible values for "how" argument in xfer method. + */ + private static final int NOW = 0; // for untimed poll, tryTransfer + private static final int ASYNC = 1; // for offer, put, add + private static final int SYNC = 2; // for transfer, take + private static final int TIMED = 3; // for timed poll, tryTransfer + + @SuppressWarnings("unchecked") + static E cast(Object item) { + // assert item == null || item.getClass() != Node.class; + return (E) item; + } + + /** + * Implements all queuing methods. See above for explanation. 
+ * + * @param e the item or null for take + * @param haveData true if this is a put, else a take + * @param how NOW, ASYNC, SYNC, or TIMED + * @param nanos timeout in nanosecs, used only if mode is TIMED + * @return an item if matched, else e + * @throws NullPointerException if haveData mode but e is null + */ + private E xfer(E e, boolean haveData, int how, long nanos) { + if (haveData && e == null) { + throw new NullPointerException(); + } + Node s = null; // the node to append, if needed + + retry: for (;;) { // restart on append race + + for (Node h = head, p = h; p != null;) { // find & match first node + boolean isData = p.isData; + Object item = p.item; + if (item != p && item != null == isData) { // unmatched + if (isData == haveData) { // can't match + break; + } + if (p.casItem(item, e)) { // match + for (Node q = p; q != h;) { + Node n = q.next; // update by 2 unless singleton + if (head == h && casHead(h, n == null? q : n)) { + h.forgetNext(); + break; + } // advance and retry + if ((h = head) == null || + (q = h.next) == null || !q.isMatched()) { + break; // unless slack < 2 + } + } + LockSupport.unpark(p.waiter); + return LegacyLinkedTransferQueue.cast(item); + } + } + Node n = p.next; + p = p != n ? n : (h = head); // Use head if p offlist + } + + if (how != NOW) { // No matches available + if (s == null) { + s = new Node(e, haveData); + } + Node pred = tryAppend(s, haveData); + if (pred == null) { + continue retry; // lost race vs opposite mode + } + if (how != ASYNC) { + return awaitMatch(s, pred, e, (how == TIMED), nanos); + } + } + return e; // not waiting + } + } + + /** + * Tries to append node s as tail. + * + * @param s the node to append + * @param haveData true if appending in data mode + * @return null on failure due to losing race with append in + * different mode, else s's predecessor, or s itself if no + * predecessor + */ + private Node tryAppend(Node s, boolean haveData) { + for (Node t = tail, p = t;;) { // move p to last node and append + Node n, u; // temps for reads of next & tail + if (p == null && (p = head) == null) { + if (casHead(null, s)) { + return s; // initialize + } + } + else if (p.cannotPrecede(haveData)) { + return null; // lost race vs opposite mode + } else if ((n = p.next) != null) { // not last; keep traversing + p = p != t && t != (u = tail) ? (t = u) : // stale tail + p != n ? n : null; // restart if off list + } else if (!p.casNext(null, s)) { + p = p.next; // re-read on CAS failure + } else { + if (p != t) { // update if slack now >= 2 + while ((tail != t || !casTail(t, s)) && + (t = tail) != null && + (s = t.next) != null && // advance and retry + (s = s.next) != null && s != t) { + continue; + } + } + return p; + } + } + } + + /** + * Spins/yields/blocks until node s is matched or caller gives up. + * + * @param s the waiting node + * @param pred the predecessor of s, or s itself if it has no + * predecessor, or null if unknown (the null case does not occur + * in any current calls but may in possible future extensions) + * @param e the comparison value for checking match + * @param timed if true, wait only until timeout elapses + * @param nanos timeout in nanosecs, used only if timed is true + * @return matched item, or e if unmatched on interrupt or timeout + */ + private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) { + long lastTime = timed ? 
System.nanoTime() : 0L; + Thread w = Thread.currentThread(); + int spins = -1; // initialized after first item and cancel checks + ThreadLocalRandom randomYields = null; // bound if needed + + for (;;) { + Object item = s.item; + if (item != e) { // matched + // assert item != s; + s.forgetContents(); // avoid garbage + return LegacyLinkedTransferQueue.cast(item); + } + if ((w.isInterrupted() || timed && nanos <= 0) && + s.casItem(e, s)) { // cancel + unsplice(pred, s); + return e; + } + + if (spins < 0) { // establish spins at/near front + if ((spins = spinsFor(pred, s.isData)) > 0) { + randomYields = ThreadLocalRandom.current(); + } + } + else if (spins > 0) { // spin + --spins; + if (randomYields.nextInt(CHAINED_SPINS) == 0) { + Thread.yield(); // occasionally yield + } + } + else if (s.waiter == null) { + s.waiter = w; // request unpark then recheck + } + else if (timed) { + long now = System.nanoTime(); + if ((nanos -= now - lastTime) > 0) { + LockSupport.parkNanos(nanos); + } + lastTime = now; + } + else { + LockSupport.park(); + } + } + } + + /** + * Returns spin/yield value for a node with given predecessor and + * data mode. See above for explanation. + */ + private static int spinsFor(Node pred, boolean haveData) { + if (MP && pred != null) { + if (pred.isData != haveData) { // phase change + return FRONT_SPINS + CHAINED_SPINS; + } + if (pred.isMatched()) { // probably at front + return FRONT_SPINS; + } + if (pred.waiter == null) { // pred apparently spinning + return CHAINED_SPINS; + } + } + return 0; + } + + /* -------------- Traversal methods -------------- */ + + /** + * Returns the successor of p, or the head node if p.next has been + * linked to self, which will only be true if traversing with a + * stale pointer that is now off the list. + */ + final Node succ(Node p) { + Node next = p.next; + return p == next ? head : next; + } + + /** + * Returns the first unmatched node of the given mode, or null if + * none. Used by methods isEmpty, hasWaitingConsumer. + */ + private Node firstOfMode(boolean isData) { + for (Node p = head; p != null; p = succ(p)) { + if (!p.isMatched()) { + return p.isData == isData ? p : null; + } + } + return null; + } + + /** + * Returns the item in the first unmatched node with isData; or + * null if none. Used by peek. + */ + private E firstDataItem() { + for (Node p = head; p != null; p = succ(p)) { + Object item = p.item; + if (p.isData) { + if (item != null && item != p) { + return LegacyLinkedTransferQueue.cast(item); + } + } + else if (item == null) { + return null; + } + } + return null; + } + + /** + * Traverses and counts unmatched nodes of the given mode. + * Used by methods size and getWaitingConsumerCount. + */ + private int countOfMode(boolean data) { + int count = 0; + for (Node p = head; p != null; ) { + if (!p.isMatched()) { + if (p.isData != data) { + return 0; + } + if (++count == Integer.MAX_VALUE) { // saturated + break; + } + } + Node n = p.next; + if (n != p) { + p = n; + } else { + count = 0; + p = head; + } + } + return count; + } + + final class Itr implements Iterator { + private Node nextNode; // next node to return item for + private E nextItem; // the corresponding item + private Node lastRet; // last returned node, to support remove + private Node lastPred; // predecessor to unlink lastRet + + /** + * Moves to next node after prev, or first node if prev null. + */ + private void advance(Node prev) { + lastPred = lastRet; + lastRet = prev; + for (Node p = prev == null ? 
head : succ(prev); + p != null; p = succ(p)) { + Object item = p.item; + if (p.isData) { + if (item != null && item != p) { + nextItem = LegacyLinkedTransferQueue.cast(item); + nextNode = p; + return; + } + } + else if (item == null) { + break; + } + } + nextNode = null; + } + + Itr() { + advance(null); + } + + @Override + public boolean hasNext() { + return nextNode != null; + } + + @Override + public E next() { + Node p = nextNode; + if (p == null) { + throw new NoSuchElementException(); + } + E e = nextItem; + advance(p); + return e; + } + + @Override + public void remove() { + Node p = lastRet; + if (p == null) { + throw new IllegalStateException(); + } + if (p.tryMatchData()) { + unsplice(lastPred, p); + } + } + } + + /* -------------- Removal methods -------------- */ + + /** + * Unsplices (now or later) the given deleted/cancelled node with + * the given predecessor. + * + * @param pred a node that was at one time known to be the + * predecessor of s, or null or s itself if s is/was at head + * @param s the node to be unspliced + */ + final void unsplice(Node pred, Node s) { + s.forgetContents(); // forget unneeded fields + /* + * See above for rationale. Briefly: if pred still points to + * s, try to unlink s. If s cannot be unlinked, because it is + * trailing node or pred might be unlinked, and neither pred + * nor s are head or offlist, add to sweepVotes, and if enough + * votes have accumulated, sweep. + */ + if (pred != null && pred != s && pred.next == s) { + Node n = s.next; + if (n == null || + n != s && pred.casNext(s, n) && pred.isMatched()) { + for (;;) { // check if at, or could be, head + Node h = head; + if (h == pred || h == s || h == null) { + return; // at head or list empty + } + if (!h.isMatched()) { + break; + } + Node hn = h.next; + if (hn == null) { + return; // now empty + } + if (hn != h && casHead(h, hn)) { + h.forgetNext(); // advance head + } + } + if (pred.next != pred && s.next != s) { // recheck if offlist + for (;;) { // sweep now if enough votes + int v = sweepVotes; + if (v < SWEEP_THRESHOLD) { + if (casSweepVotes(v, v + 1)) { + break; + } + } + else if (casSweepVotes(v, 0)) { + sweep(); + break; + } + } + } + } + } + } + + /** + * Unlinks matched (typically cancelled) nodes encountered in a + * traversal from head. + */ + private void sweep() { + for (Node p = head, s, n; p != null && (s = p.next) != null; ) { + if (!s.isMatched()) { + // Unmatched nodes are never self-linked + p = s; + } else if ((n = s.next) == null) { // trailing node is pinned + break; + } else if (s == n) { // stale + // No need to also check for p == s, since that implies s == n + p = head; + } else { + p.casNext(s, n); + } + } + } + + /** + * Main implementation of remove(Object) + */ + private boolean findAndRemove(Object e) { + if (e != null) { + for (Node pred = null, p = head; p != null; ) { + Object item = p.item; + if (p.isData) { + if (item != null && item != p && e.equals(item) && + p.tryMatchData()) { + unsplice(pred, p); + return true; + } + } + else if (item == null) { + break; + } + pred = p; + if ((p = p.next) == pred) { // stale + pred = null; + p = head; + } + } + } + return false; + } + + + /** + * Creates an initially empty {@code LinkedTransferQueue}. + */ + public LegacyLinkedTransferQueue() { + } + + /** + * Creates a {@code LinkedTransferQueue} + * initially containing the elements of the given collection, + * added in traversal order of the collection's iterator. 
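The vote-then-sweep policy that unsplice applies above is worth reading in isolation: an unlink that cannot be completed only bumps a shared counter, and the thread whose increment reaches the threshold pays for the full cleanup pass. The standalone sketch below mirrors that CAS loop; SweepVotes, recordFailedUnlink and the sweep callback are illustrative names rather than code from this patch.

    import java.util.concurrent.atomic.AtomicInteger;

    final class SweepVotes {
        static final int SWEEP_THRESHOLD = 32;            // same threshold as the queue above
        private final AtomicInteger votes = new AtomicInteger();

        // Called where an unlink attempt failed; at most one sweep runs per
        // SWEEP_THRESHOLD recorded failures, on the thread that crosses it.
        void recordFailedUnlink(Runnable sweep) {
            for (;;) {
                int v = votes.get();
                if (v < SWEEP_THRESHOLD) {
                    if (votes.compareAndSet(v, v + 1)) {
                        return;                            // vote recorded, no cleanup yet
                    }
                } else if (votes.compareAndSet(v, 0)) {
                    sweep.run();                           // this caller performs the sweep
                    return;
                }
            }
        }
    }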
+ * + * @param c the collection of elements to initially contain + * @throws NullPointerException if the specified collection or any + * of its elements are null + */ + public LegacyLinkedTransferQueue(Collection c) { + this(); + addAll(c); + } + + /** + * Inserts the specified element at the tail of this queue. + * As the queue is unbounded, this method will never block. + * + * @throws NullPointerException if the specified element is null + */ + @Override + public void put(E e) { + xfer(e, true, ASYNC, 0); + } + + /** + * Inserts the specified element at the tail of this queue. + * As the queue is unbounded, this method will never block or + * return {@code false}. + * + * @return {@code true} (as specified by + * {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer}) + * @throws NullPointerException if the specified element is null + */ + @Override + public boolean offer(E e, long timeout, TimeUnit unit) { + xfer(e, true, ASYNC, 0); + return true; + } + + /** + * Inserts the specified element at the tail of this queue. + * As the queue is unbounded, this method will never return {@code false}. + * + * @return {@code true} (as specified by + * {@link BlockingQueue#offer(Object) BlockingQueue.offer}) + * @throws NullPointerException if the specified element is null + */ + @Override + public boolean offer(E e) { + xfer(e, true, ASYNC, 0); + return true; + } + + /** + * Inserts the specified element at the tail of this queue. + * As the queue is unbounded, this method will never throw + * {@link IllegalStateException} or return {@code false}. + * + * @return {@code true} (as specified by {@link Collection#add}) + * @throws NullPointerException if the specified element is null + */ + @Override + public boolean add(E e) { + xfer(e, true, ASYNC, 0); + return true; + } + + /** + * Transfers the element to a waiting consumer immediately, if possible. + * + *
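Since the queue is unbounded, the insert methods just shown (put, add and both offer variants) all enqueue with ASYNC semantics and return immediately; the timed offer never consults its timeout. A minimal sketch of that behaviour, written against java.util.concurrent.LinkedTransferQueue, which exposes the same methods as this internal copy:

    import java.util.concurrent.LinkedTransferQueue;
    import java.util.concurrent.TimeUnit;

    class UnboundedInsertDemo {
        public static void main(String[] args) throws InterruptedException {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            q.put("a");                                          // never blocks: the queue is unbounded
            boolean accepted = q.offer("b", 5, TimeUnit.SECONDS);
            System.out.println(accepted + " size=" + q.size());  // prints "true size=2" immediately
        }
    }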
More precisely, transfers the specified element immediately + * if there exists a consumer already waiting to receive it (in + * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), + * otherwise returning {@code false} without enqueuing the element. + * + * @throws NullPointerException if the specified element is null + */ + public boolean tryTransfer(E e) { + return xfer(e, true, NOW, 0) == null; + } + + /** + * Transfers the element to a consumer, waiting if necessary to do so. + * + *
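As the javadoc above states, tryTransfer(E) either hands the element to a consumer that is already waiting or returns false without enqueuing anything. A small sketch, again using the equivalent java.util.concurrent.LinkedTransferQueue; the retry loop is only there because the consumer thread may not have parked yet:

    import java.util.concurrent.LinkedTransferQueue;

    class TryTransferDemo {
        public static void main(String[] args) throws InterruptedException {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();

            // No waiting consumer: nothing is enqueued.
            System.out.println(q.tryTransfer("x") + " " + q.isEmpty());   // false true

            // A consumer parked in take() lets the hand-off succeed.
            Thread consumer = new Thread(() -> {
                try {
                    System.out.println("received " + q.take());
                } catch (InterruptedException ignored) {
                    Thread.currentThread().interrupt();
                }
            });
            consumer.start();
            while (!q.tryTransfer("x")) {       // retry until the consumer is actually waiting
                Thread.yield();
            }
            consumer.join();
        }
    }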
More precisely, transfers the specified element immediately + * if there exists a consumer already waiting to receive it (in + * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), + * else inserts the specified element at the tail of this queue + * and waits until the element is received by a consumer. + * + * @throws NullPointerException if the specified element is null + */ + public void transfer(E e) throws InterruptedException { + if (xfer(e, true, SYNC, 0) != null) { + Thread.interrupted(); // failure possible only due to interrupt + throw new InterruptedException(); + } + } + + /** + * Transfers the element to a consumer if it is possible to do so + * before the timeout elapses. + * + *
More precisely, transfers the specified element immediately + * if there exists a consumer already waiting to receive it (in + * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), + * else inserts the specified element at the tail of this queue + * and waits until the element is received by a consumer, + * returning {@code false} if the specified wait time elapses + * before the element can be transferred. + * + * @throws NullPointerException if the specified element is null + */ + public boolean tryTransfer(E e, long timeout, TimeUnit unit) + throws InterruptedException { + if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) { + return true; + } + if (!Thread.interrupted()) { + return false; + } + throw new InterruptedException(); + } + + @Override + public E take() throws InterruptedException { + E e = xfer(null, false, SYNC, 0); + if (e != null) { + return e; + } + Thread.interrupted(); + throw new InterruptedException(); + } + + @Override + public E poll(long timeout, TimeUnit unit) throws InterruptedException { + E e = xfer(null, false, TIMED, unit.toNanos(timeout)); + if (e != null || !Thread.interrupted()) { + return e; + } + throw new InterruptedException(); + } + + @Override + public E poll() { + return xfer(null, false, NOW, 0); + } + + /** + * @throws NullPointerException {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + */ + @Override + public int drainTo(Collection c) { + if (c == null) { + throw new NullPointerException(); + } + if (c == this) { + throw new IllegalArgumentException(); + } + int n = 0; + E e; + while ( (e = poll()) != null) { + c.add(e); + ++n; + } + return n; + } + + /** + * @throws NullPointerException {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + */ + @Override + public int drainTo(Collection c, int maxElements) { + if (c == null) { + throw new NullPointerException(); + } + if (c == this) { + throw new IllegalArgumentException(); + } + int n = 0; + E e; + while (n < maxElements && (e = poll()) != null) { + c.add(e); + ++n; + } + return n; + } + + /** + * Returns an iterator over the elements in this queue in proper + * sequence, from head to tail. + * + *
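drainTo is simply the bounded poll() loop shown above, so it moves whatever is immediately available and never blocks. A short sketch using java.util.concurrent.LinkedTransferQueue:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.LinkedTransferQueue;

    class DrainToDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<Integer> q =
                    new LinkedTransferQueue<Integer>(Arrays.asList(1, 2, 3, 4));
            List<Integer> batch = new ArrayList<Integer>();
            int moved = q.drainTo(batch, 3);                    // at most 3 elements, no blocking
            System.out.println(moved + " " + batch + " " + q);  // prints "3 [1, 2, 3] [4]"
        }
    }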
The returned iterator is a "weakly consistent" iterator that + * will never throw + * {@link ConcurrentModificationException ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed + * to) reflect any modifications subsequent to construction. + * + * @return an iterator over the elements in this queue in proper sequence + */ + @Override + public Iterator iterator() { + return new Itr(); + } + + @Override + public E peek() { + return firstDataItem(); + } + + /** + * Returns {@code true} if this queue contains no elements. + * + * @return {@code true} if this queue contains no elements + */ + @Override + public boolean isEmpty() { + for (Node p = head; p != null; p = succ(p)) { + if (!p.isMatched()) { + return !p.isData; + } + } + return true; + } + + public boolean hasWaitingConsumer() { + return firstOfMode(false) != null; + } + + /** + * Returns the number of elements in this queue. If this queue + * contains more than {@code Integer.MAX_VALUE} elements, returns + * {@code Integer.MAX_VALUE}. + * + *
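The iterator contract described above (weakly consistent, never throws ConcurrentModificationException, remove() going through tryMatchData and unsplice) can be exercised as follows, once more with java.util.concurrent.LinkedTransferQueue:

    import java.util.Iterator;
    import java.util.concurrent.LinkedTransferQueue;

    class WeaklyConsistentIteratorDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
            q.add("a"); q.add("b"); q.add("c");

            Iterator<String> it = q.iterator();
            q.offer("d");                        // modifying during iteration is allowed;
                                                 // no ConcurrentModificationException
            while (it.hasNext()) {
                if (it.next().equals("b")) {
                    it.remove();                 // unsplices the matched node
                }
            }
            System.out.println(q);               // [a, c, d]; the iterator may or may not have seen "d"
        }
    }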
Beware that, unlike in most collections, this method is + * NOT a constant-time operation. Because of the + * asynchronous nature of these queues, determining the current + * number of elements requires an O(n) traversal. + * + * @return the number of elements in this queue + */ + @Override + public int size() { + return countOfMode(true); + } + + public int getWaitingConsumerCount() { + return countOfMode(false); + } + + /** + * Removes a single instance of the specified element from this queue, + * if it is present. More formally, removes an element {@code e} such + * that {@code o.equals(e)}, if this queue contains one or more such + * elements. + * Returns {@code true} if this queue contained the specified element + * (or equivalently, if this queue changed as a result of the call). + * + * @param o element to be removed from this queue, if present + * @return {@code true} if this queue changed as a result of the call + */ + @Override + public boolean remove(Object o) { + return findAndRemove(o); + } + + /** + * Always returns {@code Integer.MAX_VALUE} because a + * {@code LinkedTransferQueue} is not capacity constrained. + * + * @return {@code Integer.MAX_VALUE} (as specified by + * {@link BlockingQueue#remainingCapacity()}) + */ + @Override + public int remainingCapacity() { + return Integer.MAX_VALUE; + } + + /** + * Saves the state to a stream (that is, serializes it). + * + * @serialData All of the elements (each an {@code E}) in + * the proper order, followed by a null + * @param s the stream + */ + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + for (E e : this) { + s.writeObject(e); + } + // Use trailing null as sentinel + s.writeObject(null); + } + + /** + * Reconstitutes the Queue instance from a stream (that is, + * deserializes it). + * + * @param s the stream + */ + private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + for (;;) { + @SuppressWarnings("unchecked") E item = (E) s.readObject(); + if (item == null) { + break; + } else { + offer(item); + } + } + } + + @SuppressWarnings("rawtypes") + private static final AtomicReferenceFieldUpdater headUpdater = + AtomicFieldUpdaterUtil.newRefUpdater(LegacyLinkedTransferQueue.class, Node.class, "head"); + @SuppressWarnings("rawtypes") + private static final AtomicReferenceFieldUpdater tailUpdater = + AtomicFieldUpdaterUtil.newRefUpdater(LegacyLinkedTransferQueue.class, Node.class, "tail"); + @SuppressWarnings("rawtypes") + private static final AtomicIntegerFieldUpdater sweepVotesUpdater = + AtomicFieldUpdaterUtil.newIntUpdater(LegacyLinkedTransferQueue.class, "sweepVotes"); +} + diff --git a/src/main/java/org/jboss/netty/util/internal/LinkedTransferQueue.java b/src/main/java/org/jboss/netty/util/internal/LinkedTransferQueue.java index d7c821f061..9612592377 100644 --- a/src/main/java/org/jboss/netty/util/internal/LinkedTransferQueue.java +++ b/src/main/java/org/jboss/netty/util/internal/LinkedTransferQueue.java @@ -1,1363 +1,1364 @@ -/* - * Copyright 2009 Red Hat, Inc. - * - * Red Hat licenses this file to you under the Apache License, version 2.0 - * (the "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ - -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/licenses/publicdomain - */ - -package org.jboss.netty.util.internal; - -import java.util.AbstractQueue; -import java.util.Collection; -import java.util.ConcurrentModificationException; -import java.util.Iterator; -import java.util.NoSuchElementException; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; -import java.util.concurrent.locks.LockSupport; -/** - * An unbounded {@link BlockingQueue} based on linked nodes. - * This queue orders elements FIFO (first-in-first-out) with respect - * to any given producer. The head of the queue is that - * element that has been on the queue the longest time for some - * producer. The tail of the queue is that element that has - * been on the queue the shortest time for some producer. - * - *
Beware that, unlike in most collections, the {@code size} - * method is NOT a constant-time operation. Because of the - * asynchronous nature of these queues, determining the current number - * of elements requires a traversal of the elements. - * - *
This class and its iterator implement all of the - * optional methods of the {@link Collection} and {@link - * Iterator} interfaces. - * - *
Memory consistency effects: As with other concurrent - * collections, actions in a thread prior to placing an object into a - * {@code LinkedTransferQueue} - * happen-before - * actions subsequent to the access or removal of that element from - * the {@code LinkedTransferQueue} in another thread. - * - *
This class is a member of the - * - * Java Collections Framework. - * - * @author The Netty Project - * @author Doug Lea - * @author Trustin Lee - * - * @param the type of elements held in this collection - */ -public class LinkedTransferQueue extends AbstractQueue - implements BlockingQueue, java.io.Serializable { - private static final long serialVersionUID = -3223113410248163686L; - - /* - * *** Overview of Dual Queues with Slack *** - * - * Dual Queues, introduced by Scherer and Scott - * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are - * (linked) queues in which nodes may represent either data or - * requests. When a thread tries to enqueue a data node, but - * encounters a request node, it instead "matches" and removes it; - * and vice versa for enqueuing requests. Blocking Dual Queues - * arrange that threads enqueuing unmatched requests block until - * other threads provide the match. Dual Synchronous Queues (see - * Scherer, Lea, & Scott - * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf) - * additionally arrange that threads enqueuing unmatched data also - * block. Dual Transfer Queues support all of these modes, as - * dictated by callers. - * - * A FIFO dual queue may be implemented using a variation of the - * Michael & Scott (M&S) lock-free queue algorithm - * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf). - * It maintains two pointer fields, "head", pointing to a - * (matched) node that in turn points to the first actual - * (unmatched) queue node (or null if empty); and "tail" that - * points to the last node on the queue (or again null if - * empty). For example, here is a possible queue with four data - * elements: - * - * head tail - * | | - * v v - * M -> U -> U -> U -> U - * - * The M&S queue algorithm is known to be prone to scalability and - * overhead limitations when maintaining (via CAS) these head and - * tail pointers. This has led to the development of - * contention-reducing variants such as elimination arrays (see - * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and - * optimistic back pointers (see Ladan-Mozes & Shavit - * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf). - * However, the nature of dual queues enables a simpler tactic for - * improving M&S-style implementations when dual-ness is needed. - * - * In a dual queue, each node must atomically maintain its match - * status. While there are other possible variants, we implement - * this here as: for a data-mode node, matching entails CASing an - * "item" field from a non-null data value to null upon match, and - * vice-versa for request nodes, CASing from null to a data - * value. (Note that the linearization properties of this style of - * queue are easy to verify -- elements are made available by - * linking, and unavailable by matching.) Compared to plain M&S - * queues, this property of dual queues requires one additional - * successful atomic operation per enq/deq pair. But it also - * enables lower cost variants of queue maintenance mechanics. (A - * variation of this idea applies even for non-dual queues that - * support deletion of interior elements, such as - * j.u.c.ConcurrentLinkedQueue.) - * - * Once a node is matched, its match status can never again - * change. We may thus arrange that the linked list of them - * contain a prefix of zero or more matched nodes, followed by a - * suffix of zero or more unmatched nodes. 
(Note that we allow - * both the prefix and suffix to be zero length, which in turn - * means that we do not use a dummy header.) If we were not - * concerned with either time or space efficiency, we could - * correctly perform enqueue and dequeue operations by traversing - * from a pointer to the initial node; CASing the item of the - * first unmatched node on match and CASing the next field of the - * trailing node on appends. (Plus some special-casing when - * initially empty). While this would be a terrible idea in - * itself, it does have the benefit of not requiring ANY atomic - * updates on head/tail fields. - * - * We introduce here an approach that lies between the extremes of - * never versus always updating queue (head and tail) pointers. - * This offers a tradeoff between sometimes requiring extra - * traversal steps to locate the first and/or last unmatched - * nodes, versus the reduced overhead and contention of fewer - * updates to queue pointers. For example, a possible snapshot of - * a queue is: - * - * head tail - * | | - * v v - * M -> M -> U -> U -> U -> U - * - * The best value for this "slack" (the targeted maximum distance - * between the value of "head" and the first unmatched node, and - * similarly for "tail") is an empirical matter. We have found - * that using very small constants in the range of 1-3 work best - * over a range of platforms. Larger values introduce increasing - * costs of cache misses and risks of long traversal chains, while - * smaller values increase CAS contention and overhead. - * - * Dual queues with slack differ from plain M&S dual queues by - * virtue of only sometimes updating head or tail pointers when - * matching, appending, or even traversing nodes; in order to - * maintain a targeted slack. The idea of "sometimes" may be - * operationalized in several ways. The simplest is to use a - * per-operation counter incremented on each traversal step, and - * to try (via CAS) to update the associated queue pointer - * whenever the count exceeds a threshold. Another, that requires - * more overhead, is to use random number generators to update - * with a given probability per traversal step. - * - * In any strategy along these lines, because CASes updating - * fields may fail, the actual slack may exceed targeted - * slack. However, they may be retried at any time to maintain - * targets. Even when using very small slack values, this - * approach works well for dual queues because it allows all - * operations up to the point of matching or appending an item - * (hence potentially allowing progress by another thread) to be - * read-only, thus not introducing any further contention. As - * described below, we implement this by performing slack - * maintenance retries only after these points. - * - * As an accompaniment to such techniques, traversal overhead can - * be further reduced without increasing contention of head - * pointer updates: Threads may sometimes shortcut the "next" link - * path from the current "head" node to be closer to the currently - * known first unmatched node, and similarly for tail. Again, this - * may be triggered with using thresholds or randomization. 
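The matching rule described here, CASing a data node's item from its datum to null and a request node's item from null to the incoming datum, reduces to a few lines. DualNode below is an illustrative reduction, not code from this patch, and it ignores the cancellation case in which item is CASed to the node itself:

    import java.util.concurrent.atomic.AtomicReference;

    final class DualNode<E> {
        final boolean isData;                         // false for a request node
        final AtomicReference<Object> item;           // datum for data nodes, null for requests

        DualNode(E e, boolean isData) {
            this.isData = isData;
            this.item = new AtomicReference<Object>(e);
        }

        // Attempts to match this node with the given value (null when a taker
        // matches a data node, the offered datum when a producer matches a request).
        boolean tryMatch(Object e) {
            Object current = item.get();
            boolean unmatched = isData ? current != null : current == null;
            return unmatched && item.compareAndSet(current, e);
        }
    }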
- * - * These ideas must be further extended to avoid unbounded amounts - * of costly-to-reclaim garbage caused by the sequential "next" - * links of nodes starting at old forgotten head nodes: As first - * described in detail by Boehm - * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC - * delays noticing that any arbitrarily old node has become - * garbage, all newer dead nodes will also be unreclaimed. - * (Similar issues arise in non-GC environments.) To cope with - * this in our implementation, upon CASing to advance the head - * pointer, we set the "next" link of the previous head to point - * only to itself; thus limiting the length of connected dead lists. - * (We also take similar care to wipe out possibly garbage - * retaining values held in other Node fields.) However, doing so - * adds some further complexity to traversal: If any "next" - * pointer links to itself, it indicates that the current thread - * has lagged behind a head-update, and so the traversal must - * continue from the "head". Traversals trying to find the - * current tail starting from "tail" may also encounter - * self-links, in which case they also continue at "head". - * - * It is tempting in slack-based scheme to not even use CAS for - * updates (similarly to Ladan-Mozes & Shavit). However, this - * cannot be done for head updates under the above link-forgetting - * mechanics because an update may leave head at a detached node. - * And while direct writes are possible for tail updates, they - * increase the risk of long retraversals, and hence long garbage - * chains, which can be much more costly than is worthwhile - * considering that the cost difference of performing a CAS vs - * write is smaller when they are not triggered on each operation - * (especially considering that writes and CASes equally require - * additional GC bookkeeping ("write barriers") that are sometimes - * more costly than the writes themselves because of contention). - * - * *** Overview of implementation *** - * - * We use a threshold-based approach to updates, with a slack - * threshold of two -- that is, we update head/tail when the - * current pointer appears to be two or more steps away from the - * first/last node. The slack value is hard-wired: a path greater - * than one is naturally implemented by checking equality of - * traversal pointers except when the list has only one element, - * in which case we keep slack threshold at one. Avoiding tracking - * explicit counts across method calls slightly simplifies an - * already-messy implementation. Using randomization would - * probably work better if there were a low-quality dirt-cheap - * per-thread one available, but even ThreadLocalRandom is too - * heavy for these purposes. - * - * With such a small slack threshold value, it is not worthwhile - * to augment this with path short-circuiting (i.e., unsplicing - * interior nodes) except in the case of cancellation/removal (see - * below). - * - * We allow both the head and tail fields to be null before any - * nodes are enqueued; initializing upon first append. This - * simplifies some other logic, as well as providing more - * efficient explicit control paths instead of letting JVMs insert - * implicit NullPointerExceptions when they are null. While not - * currently fully implemented, we also leave open the possibility - * of re-nulling these fields when empty (which is complicated to - * arrange, for little benefit.) 
- * - * All enqueue/dequeue operations are handled by the single method - * "xfer" with parameters indicating whether to act as some form - * of offer, put, poll, take, or transfer (each possibly with - * timeout). The relative complexity of using one monolithic - * method outweighs the code bulk and maintenance problems of - * using separate methods for each case. - * - * Operation consists of up to three phases. The first is - * implemented within method xfer, the second in tryAppend, and - * the third in method awaitMatch. - * - * 1. Try to match an existing node - * - * Starting at head, skip already-matched nodes until finding - * an unmatched node of opposite mode, if one exists, in which - * case matching it and returning, also if necessary updating - * head to one past the matched node (or the node itself if the - * list has no other unmatched nodes). If the CAS misses, then - * a loop retries advancing head by two steps until either - * success or the slack is at most two. By requiring that each - * attempt advances head by two (if applicable), we ensure that - * the slack does not grow without bound. Traversals also check - * if the initial head is now off-list, in which case they - * start at the new head. - * - * If no candidates are found and the call was untimed - * poll/offer, (argument "how" is NOW) return. - * - * 2. Try to append a new node (method tryAppend) - * - * Starting at current tail pointer, find the actual last node - * and try to append a new node (or if head was null, establish - * the first node). Nodes can be appended only if their - * predecessors are either already matched or are of the same - * mode. If we detect otherwise, then a new node with opposite - * mode must have been appended during traversal, so we must - * restart at phase 1. The traversal and update steps are - * otherwise similar to phase 1: Retrying upon CAS misses and - * checking for staleness. In particular, if a self-link is - * encountered, then we can safely jump to a node on the list - * by continuing the traversal at current head. - * - * On successful append, if the call was ASYNC, return. - * - * 3. Await match or cancellation (method awaitMatch) - * - * Wait for another thread to match node; instead cancelling if - * the current thread was interrupted or the wait timed out. On - * multiprocessors, we use front-of-queue spinning: If a node - * appears to be the first unmatched node in the queue, it - * spins a bit before blocking. In either case, before blocking - * it tries to unsplice any nodes between the current "head" - * and the first unmatched node. - * - * Front-of-queue spinning vastly improves performance of - * heavily contended queues. And so long as it is relatively - * brief and "quiet", spinning does not much impact performance - * of less-contended queues. During spins threads check their - * interrupt status and generate a thread-local random number - * to decide to occasionally perform a Thread.yield. While - * yield has underdefined specs, we assume that might it help, - * and will not hurt in limiting impact of spinning on busy - * systems. We also use smaller (1/2) spins for nodes that are - * not known to be front but whose predecessors have not - * blocked -- these "chained" spins avoid artifacts of - * front-of-queue rules which otherwise lead to alternating - * nodes spinning vs blocking. 
Further, front threads that - * represent phase changes (from data to request node or vice - * versa) compared to their predecessors receive additional - * chained spins, reflecting longer paths typically required to - * unblock threads during phase changes. - * - * - * ** Unlinking removed interior nodes ** - * - * In addition to minimizing garbage retention via self-linking - * described above, we also unlink removed interior nodes. These - * may arise due to timed out or interrupted waits, or calls to - * remove(x) or Iterator.remove. Normally, given a node that was - * at one time known to be the predecessor of some node s that is - * to be removed, we can unsplice s by CASing the next field of - * its predecessor if it still points to s (otherwise s must - * already have been removed or is now offlist). But there are two - * situations in which we cannot guarantee to make node s - * unreachable in this way: (1) If s is the trailing node of list - * (i.e., with null next), then it is pinned as the target node - * for appends, so can only be removed later after other nodes are - * appended. (2) We cannot necessarily unlink s given a - * predecessor node that is matched (including the case of being - * cancelled): the predecessor may already be unspliced, in which - * case some previous reachable node may still point to s. - * (For further explanation see Herlihy & Shavit "The Art of - * Multiprocessor Programming" chapter 9). Although, in both - * cases, we can rule out the need for further action if either s - * or its predecessor are (or can be made to be) at, or fall off - * from, the head of list. - * - * Without taking these into account, it would be possible for an - * unbounded number of supposedly removed nodes to remain - * reachable. Situations leading to such buildup are uncommon but - * can occur in practice; for example when a series of short timed - * calls to poll repeatedly time out but never otherwise fall off - * the list because of an untimed call to take at the front of the - * queue. - * - * When these cases arise, rather than always retraversing the - * entire list to find an actual predecessor to unlink (which - * won't help for case (1) anyway), we record a conservative - * estimate of possible unsplice failures (in "sweepVotes"). - * We trigger a full sweep when the estimate exceeds a threshold - * ("SWEEP_THRESHOLD") indicating the maximum number of estimated - * removal failures to tolerate before sweeping through, unlinking - * cancelled nodes that were not unlinked upon initial removal. - * We perform sweeps by the thread hitting threshold (rather than - * background threads or by spreading work to other threads) - * because in the main contexts in which removal occurs, the - * caller is already timed-out, cancelled, or performing a - * potentially O(n) operation (e.g. remove(x)), none of which are - * time-critical enough to warrant the overhead that alternatives - * would impose on other threads. - * - * Because the sweepVotes estimate is conservative, and because - * nodes become unlinked "naturally" as they fall off the head of - * the queue, and because we allow votes to accumulate even while - * sweeps are in progress, there are typically significantly fewer - * such nodes than estimated. Choice of a threshold value - * balances the likelihood of wasted effort and contention, versus - * providing a worst-case bound on retention of interior nodes in - * quiescent queues. 
The value defined below was chosen - * empirically to balance these under various timeout scenarios. - * - * Note that we cannot self-link unlinked interior nodes during - * sweeps. However, the associated garbage chains terminate when - * some successor ultimately falls off the head of the list and is - * self-linked. - */ - - /** True if on multiprocessor */ - private static final boolean MP = - Runtime.getRuntime().availableProcessors() > 1; - - /** - * The number of times to spin (with randomly interspersed calls - * to Thread.yield) on multiprocessor before blocking when a node - * is apparently the first waiter in the queue. See above for - * explanation. Must be a power of two. The value is empirically - * derived -- it works pretty well across a variety of processors, - * numbers of CPUs, and OSes. - */ - private static final int FRONT_SPINS = 1 << 7; - - /** - * The number of times to spin before blocking when a node is - * preceded by another node that is apparently spinning. Also - * serves as an increment to FRONT_SPINS on phase changes, and as - * base average frequency for yielding during spins. Must be a - * power of two. - */ - private static final int CHAINED_SPINS = FRONT_SPINS >>> 1; - - /** - * The maximum number of estimated removal failures (sweepVotes) - * to tolerate before sweeping through the queue unlinking - * cancelled nodes that were not unlinked upon initial - * removal. See above for explanation. The value must be at least - * two to avoid useless sweeps when removing trailing nodes. - */ - static final int SWEEP_THRESHOLD = 32; - - /** - * Queue nodes. Uses Object, not E, for items to allow forgetting - * them after use. Relies heavily on Unsafe mechanics to minimize - * unnecessary ordering constraints: Writes that are intrinsically - * ordered wrt other accesses or CASes use simple relaxed forms. - */ - static final class Node { - final boolean isData; // false if this is a request node - volatile Object item; // initially non-null if isData; CASed to match - volatile Node next; - volatile Thread waiter; // null until waiting - - // CAS methods for fields - boolean casNext(Node cmp, Node val) { - if (AtomicFieldUpdaterUtil.isAvailable()) { - return nextUpdater.compareAndSet(this, cmp, val); - } else { - synchronized (this) { - if (next == cmp) { - next = val; - return true; - } else { - return false; - } - } - } - } - - boolean casItem(Object cmp, Object val) { - // assert cmp == null || cmp.getClass() != Node.class; - if (AtomicFieldUpdaterUtil.isAvailable()) { - return itemUpdater.compareAndSet(this, cmp, val); - } else { - synchronized (this) { - if (item == cmp) { - item = val; - return true; - } else { - return false; - } - } - } - } - - /** - * Constructs a new node. Uses relaxed write because item can - * only be seen after publication via casNext. - */ - Node(Object item, boolean isData) { - this.item = item; - this.isData = isData; - } - - /** - * Links node to itself to avoid garbage retention. Called - * only after CASing head field, so uses relaxed write. - */ - void forgetNext() { - this.next = this; - } - - /** - * Sets item to self and waiter to null, to avoid garbage - * retention after matching or cancelling. Uses relaxed writes - * bacause order is already constrained in the only calling - * contexts: item is forgotten only after volatile/atomic - * mechanics that extract items. Similarly, clearing waiter - * follows either CAS or return from park (if ever parked; - * else we don't care). 
- */ - void forgetContents() { - this.item = this; - this.waiter = null; - } - - /** - * Returns true if this node has been matched, including the - * case of artificial matches due to cancellation. - */ - boolean isMatched() { - Object x = item; - return x == this || x == null == isData; - } - - /** - * Returns true if this is an unmatched request node. - */ - boolean isUnmatchedRequest() { - return !isData && item == null; - } - - /** - * Returns true if a node with the given mode cannot be - * appended to this node because this node is unmatched and - * has opposite data mode. - */ - boolean cannotPrecede(boolean haveData) { - boolean d = isData; - Object x; - return d != haveData && (x = item) != this && x != null == d; - } - - /** - * Tries to artificially match a data node -- used by remove. - */ - boolean tryMatchData() { - // assert isData; - Object x = item; - if (x != null && x != this && casItem(x, null)) { - LockSupport.unpark(waiter); - return true; - } - return false; - } - - private static final AtomicReferenceFieldUpdater nextUpdater = - AtomicFieldUpdaterUtil.newRefUpdater(Node.class, Node.class, "next"); - private static final AtomicReferenceFieldUpdater itemUpdater = - AtomicFieldUpdaterUtil.newRefUpdater(Node.class, Object.class, "item"); - - } - - /** head of the queue; null until first enqueue */ - transient volatile Node head; - - /** tail of the queue; null until first append */ - transient volatile Node tail; - - /** The number of apparent failures to unsplice removed nodes */ - transient volatile int sweepVotes; - - // CAS methods for fields - private boolean casTail(Node cmp, Node val) { - if (AtomicFieldUpdaterUtil.isAvailable()) { - return tailUpdater.compareAndSet(this, cmp, val); - } else { - synchronized (this) { - if (tail == cmp) { - tail = val; - return true; - } else { - return false; - } - } - } - } - - private boolean casHead(Node cmp, Node val) { - if (AtomicFieldUpdaterUtil.isAvailable()) { - return headUpdater.compareAndSet(this, cmp, val); - } else { - synchronized (this) { - if (head == cmp) { - head = val; - return true; - } else { - return false; - } - } - } - } - - private boolean casSweepVotes(int cmp, int val) { - if (AtomicFieldUpdaterUtil.isAvailable()) { - return sweepVotesUpdater.compareAndSet(this, cmp, val); - } else { - synchronized (this) { - if (sweepVotes == cmp) { - sweepVotes = val; - return true; - } else { - return false; - } - } - } - } - - /* - * Possible values for "how" argument in xfer method. - */ - private static final int NOW = 0; // for untimed poll, tryTransfer - private static final int ASYNC = 1; // for offer, put, add - private static final int SYNC = 2; // for transfer, take - private static final int TIMED = 3; // for timed poll, tryTransfer - - @SuppressWarnings("unchecked") - static E cast(Object item) { - // assert item == null || item.getClass() != Node.class; - return (E) item; - } - - /** - * Implements all queuing methods. See above for explanation. 
- * - * @param e the item or null for take - * @param haveData true if this is a put, else a take - * @param how NOW, ASYNC, SYNC, or TIMED - * @param nanos timeout in nanosecs, used only if mode is TIMED - * @return an item if matched, else e - * @throws NullPointerException if haveData mode but e is null - */ - private E xfer(E e, boolean haveData, int how, long nanos) { - if (haveData && e == null) { - throw new NullPointerException(); - } - Node s = null; // the node to append, if needed - - retry: for (;;) { // restart on append race - - for (Node h = head, p = h; p != null;) { // find & match first node - boolean isData = p.isData; - Object item = p.item; - if (item != p && item != null == isData) { // unmatched - if (isData == haveData) { // can't match - break; - } - if (p.casItem(item, e)) { // match - for (Node q = p; q != h;) { - Node n = q.next; // update by 2 unless singleton - if (head == h && casHead(h, n == null? q : n)) { - h.forgetNext(); - break; - } // advance and retry - if ((h = head) == null || - (q = h.next) == null || !q.isMatched()) { - break; // unless slack < 2 - } - } - LockSupport.unpark(p.waiter); - return LinkedTransferQueue.cast(item); - } - } - Node n = p.next; - p = p != n ? n : (h = head); // Use head if p offlist - } - - if (how != NOW) { // No matches available - if (s == null) { - s = new Node(e, haveData); - } - Node pred = tryAppend(s, haveData); - if (pred == null) { - continue retry; // lost race vs opposite mode - } - if (how != ASYNC) { - return awaitMatch(s, pred, e, (how == TIMED), nanos); - } - } - return e; // not waiting - } - } - - /** - * Tries to append node s as tail. - * - * @param s the node to append - * @param haveData true if appending in data mode - * @return null on failure due to losing race with append in - * different mode, else s's predecessor, or s itself if no - * predecessor - */ - private Node tryAppend(Node s, boolean haveData) { - for (Node t = tail, p = t;;) { // move p to last node and append - Node n, u; // temps for reads of next & tail - if (p == null && (p = head) == null) { - if (casHead(null, s)) { - return s; // initialize - } - } - else if (p.cannotPrecede(haveData)) { - return null; // lost race vs opposite mode - } else if ((n = p.next) != null) { // not last; keep traversing - p = p != t && t != (u = tail) ? (t = u) : // stale tail - p != n ? n : null; // restart if off list - } else if (!p.casNext(null, s)) { - p = p.next; // re-read on CAS failure - } else { - if (p != t) { // update if slack now >= 2 - while ((tail != t || !casTail(t, s)) && - (t = tail) != null && - (s = t.next) != null && // advance and retry - (s = s.next) != null && s != t) { - continue; - } - } - return p; - } - } - } - - /** - * Spins/yields/blocks until node s is matched or caller gives up. - * - * @param s the waiting node - * @param pred the predecessor of s, or s itself if it has no - * predecessor, or null if unknown (the null case does not occur - * in any current calls but may in possible future extensions) - * @param e the comparison value for checking match - * @param timed if true, wait only until timeout elapses - * @param nanos timeout in nanosecs, used only if timed is true - * @return matched item, or e if unmatched on interrupt or timeout - */ - private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) { - long lastTime = timed ? 
System.nanoTime() : 0L; - Thread w = Thread.currentThread(); - int spins = -1; // initialized after first item and cancel checks - ThreadLocalRandom randomYields = null; // bound if needed - - for (;;) { - Object item = s.item; - if (item != e) { // matched - // assert item != s; - s.forgetContents(); // avoid garbage - return LinkedTransferQueue.cast(item); - } - if ((w.isInterrupted() || timed && nanos <= 0) && - s.casItem(e, s)) { // cancel - unsplice(pred, s); - return e; - } - - if (spins < 0) { // establish spins at/near front - if ((spins = spinsFor(pred, s.isData)) > 0) { - randomYields = ThreadLocalRandom.current(); - } - } - else if (spins > 0) { // spin - --spins; - if (randomYields.nextInt(CHAINED_SPINS) == 0) { - Thread.yield(); // occasionally yield - } - } - else if (s.waiter == null) { - s.waiter = w; // request unpark then recheck - } - else if (timed) { - long now = System.nanoTime(); - if ((nanos -= now - lastTime) > 0) { - LockSupport.parkNanos(nanos); - } - lastTime = now; - } - else { - LockSupport.park(); - } - } - } - - /** - * Returns spin/yield value for a node with given predecessor and - * data mode. See above for explanation. - */ - private static int spinsFor(Node pred, boolean haveData) { - if (MP && pred != null) { - if (pred.isData != haveData) { // phase change - return FRONT_SPINS + CHAINED_SPINS; - } - if (pred.isMatched()) { // probably at front - return FRONT_SPINS; - } - if (pred.waiter == null) { // pred apparently spinning - return CHAINED_SPINS; - } - } - return 0; - } - - /* -------------- Traversal methods -------------- */ - - /** - * Returns the successor of p, or the head node if p.next has been - * linked to self, which will only be true if traversing with a - * stale pointer that is now off the list. - */ - final Node succ(Node p) { - Node next = p.next; - return p == next ? head : next; - } - - /** - * Returns the first unmatched node of the given mode, or null if - * none. Used by methods isEmpty, hasWaitingConsumer. - */ - private Node firstOfMode(boolean isData) { - for (Node p = head; p != null; p = succ(p)) { - if (!p.isMatched()) { - return p.isData == isData ? p : null; - } - } - return null; - } - - /** - * Returns the item in the first unmatched node with isData; or - * null if none. Used by peek. - */ - private E firstDataItem() { - for (Node p = head; p != null; p = succ(p)) { - Object item = p.item; - if (p.isData) { - if (item != null && item != p) { - return LinkedTransferQueue.cast(item); - } - } - else if (item == null) { - return null; - } - } - return null; - } - - /** - * Traverses and counts unmatched nodes of the given mode. - * Used by methods size and getWaitingConsumerCount. - */ - private int countOfMode(boolean data) { - int count = 0; - for (Node p = head; p != null; ) { - if (!p.isMatched()) { - if (p.isData != data) { - return 0; - } - if (++count == Integer.MAX_VALUE) { // saturated - break; - } - } - Node n = p.next; - if (n != p) { - p = n; - } else { - count = 0; - p = head; - } - } - return count; - } - - final class Itr implements Iterator { - private Node nextNode; // next node to return item for - private E nextItem; // the corresponding item - private Node lastRet; // last returned node, to support remove - private Node lastPred; // predecessor to unlink lastRet - - /** - * Moves to next node after prev, or first node if prev null. - */ - private void advance(Node prev) { - lastPred = lastRet; - lastRet = prev; - for (Node p = prev == null ? 
head : succ(prev); - p != null; p = succ(p)) { - Object item = p.item; - if (p.isData) { - if (item != null && item != p) { - nextItem = LinkedTransferQueue.cast(item); - nextNode = p; - return; - } - } - else if (item == null) { - break; - } - } - nextNode = null; - } - - Itr() { - advance(null); - } - - @Override - public boolean hasNext() { - return nextNode != null; - } - - @Override - public E next() { - Node p = nextNode; - if (p == null) { - throw new NoSuchElementException(); - } - E e = nextItem; - advance(p); - return e; - } - - @Override - public void remove() { - Node p = lastRet; - if (p == null) { - throw new IllegalStateException(); - } - if (p.tryMatchData()) { - unsplice(lastPred, p); - } - } - } - - /* -------------- Removal methods -------------- */ - - /** - * Unsplices (now or later) the given deleted/cancelled node with - * the given predecessor. - * - * @param pred a node that was at one time known to be the - * predecessor of s, or null or s itself if s is/was at head - * @param s the node to be unspliced - */ - final void unsplice(Node pred, Node s) { - s.forgetContents(); // forget unneeded fields - /* - * See above for rationale. Briefly: if pred still points to - * s, try to unlink s. If s cannot be unlinked, because it is - * trailing node or pred might be unlinked, and neither pred - * nor s are head or offlist, add to sweepVotes, and if enough - * votes have accumulated, sweep. - */ - if (pred != null && pred != s && pred.next == s) { - Node n = s.next; - if (n == null || - n != s && pred.casNext(s, n) && pred.isMatched()) { - for (;;) { // check if at, or could be, head - Node h = head; - if (h == pred || h == s || h == null) { - return; // at head or list empty - } - if (!h.isMatched()) { - break; - } - Node hn = h.next; - if (hn == null) { - return; // now empty - } - if (hn != h && casHead(h, hn)) { - h.forgetNext(); // advance head - } - } - if (pred.next != pred && s.next != s) { // recheck if offlist - for (;;) { // sweep now if enough votes - int v = sweepVotes; - if (v < SWEEP_THRESHOLD) { - if (casSweepVotes(v, v + 1)) { - break; - } - } - else if (casSweepVotes(v, 0)) { - sweep(); - break; - } - } - } - } - } - } - - /** - * Unlinks matched (typically cancelled) nodes encountered in a - * traversal from head. - */ - private void sweep() { - for (Node p = head, s, n; p != null && (s = p.next) != null; ) { - if (!s.isMatched()) { - // Unmatched nodes are never self-linked - p = s; - } else if ((n = s.next) == null) { // trailing node is pinned - break; - } else if (s == n) { // stale - // No need to also check for p == s, since that implies s == n - p = head; - } else { - p.casNext(s, n); - } - } - } - - /** - * Main implementation of remove(Object) - */ - private boolean findAndRemove(Object e) { - if (e != null) { - for (Node pred = null, p = head; p != null; ) { - Object item = p.item; - if (p.isData) { - if (item != null && item != p && e.equals(item) && - p.tryMatchData()) { - unsplice(pred, p); - return true; - } - } - else if (item == null) { - break; - } - pred = p; - if ((p = p.next) == pred) { // stale - pred = null; - p = head; - } - } - } - return false; - } - - - /** - * Creates an initially empty {@code LinkedTransferQueue}. - */ - public LinkedTransferQueue() { - } - - /** - * Creates a {@code LinkedTransferQueue} - * initially containing the elements of the given collection, - * added in traversal order of the collection's iterator. 
- * - * @param c the collection of elements to initially contain - * @throws NullPointerException if the specified collection or any - * of its elements are null - */ - public LinkedTransferQueue(Collection c) { - this(); - addAll(c); - } - - /** - * Inserts the specified element at the tail of this queue. - * As the queue is unbounded, this method will never block. - * - * @throws NullPointerException if the specified element is null - */ - @Override - public void put(E e) { - xfer(e, true, ASYNC, 0); - } - - /** - * Inserts the specified element at the tail of this queue. - * As the queue is unbounded, this method will never block or - * return {@code false}. - * - * @return {@code true} (as specified by - * {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer}) - * @throws NullPointerException if the specified element is null - */ - @Override - public boolean offer(E e, long timeout, TimeUnit unit) { - xfer(e, true, ASYNC, 0); - return true; - } - - /** - * Inserts the specified element at the tail of this queue. - * As the queue is unbounded, this method will never return {@code false}. - * - * @return {@code true} (as specified by - * {@link BlockingQueue#offer(Object) BlockingQueue.offer}) - * @throws NullPointerException if the specified element is null - */ - @Override - public boolean offer(E e) { - xfer(e, true, ASYNC, 0); - return true; - } - - /** - * Inserts the specified element at the tail of this queue. - * As the queue is unbounded, this method will never throw - * {@link IllegalStateException} or return {@code false}. - * - * @return {@code true} (as specified by {@link Collection#add}) - * @throws NullPointerException if the specified element is null - */ - @Override - public boolean add(E e) { - xfer(e, true, ASYNC, 0); - return true; - } - - /** - * Transfers the element to a waiting consumer immediately, if possible. - * - *
More precisely, transfers the specified element immediately - * if there exists a consumer already waiting to receive it (in - * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), - * otherwise returning {@code false} without enqueuing the element. - * - * @throws NullPointerException if the specified element is null - */ - public boolean tryTransfer(E e) { - return xfer(e, true, NOW, 0) == null; - } - - /** - * Transfers the element to a consumer, waiting if necessary to do so. - * - *
More precisely, transfers the specified element immediately - * if there exists a consumer already waiting to receive it (in - * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), - * else inserts the specified element at the tail of this queue - * and waits until the element is received by a consumer. - * - * @throws NullPointerException if the specified element is null - */ - public void transfer(E e) throws InterruptedException { - if (xfer(e, true, SYNC, 0) != null) { - Thread.interrupted(); // failure possible only due to interrupt - throw new InterruptedException(); - } - } - - /** - * Transfers the element to a consumer if it is possible to do so - * before the timeout elapses. - * - *
More precisely, transfers the specified element immediately - * if there exists a consumer already waiting to receive it (in - * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), - * else inserts the specified element at the tail of this queue - * and waits until the element is received by a consumer, - * returning {@code false} if the specified wait time elapses - * before the element can be transferred. - * - * @throws NullPointerException if the specified element is null - */ - public boolean tryTransfer(E e, long timeout, TimeUnit unit) - throws InterruptedException { - if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) { - return true; - } - if (!Thread.interrupted()) { - return false; - } - throw new InterruptedException(); - } - - @Override - public E take() throws InterruptedException { - E e = xfer(null, false, SYNC, 0); - if (e != null) { - return e; - } - Thread.interrupted(); - throw new InterruptedException(); - } - - @Override - public E poll(long timeout, TimeUnit unit) throws InterruptedException { - E e = xfer(null, false, TIMED, unit.toNanos(timeout)); - if (e != null || !Thread.interrupted()) { - return e; - } - throw new InterruptedException(); - } - - @Override - public E poll() { - return xfer(null, false, NOW, 0); - } - - /** - * @throws NullPointerException {@inheritDoc} - * @throws IllegalArgumentException {@inheritDoc} - */ - @Override - public int drainTo(Collection c) { - if (c == null) { - throw new NullPointerException(); - } - if (c == this) { - throw new IllegalArgumentException(); - } - int n = 0; - E e; - while ( (e = poll()) != null) { - c.add(e); - ++n; - } - return n; - } - - /** - * @throws NullPointerException {@inheritDoc} - * @throws IllegalArgumentException {@inheritDoc} - */ - @Override - public int drainTo(Collection c, int maxElements) { - if (c == null) { - throw new NullPointerException(); - } - if (c == this) { - throw new IllegalArgumentException(); - } - int n = 0; - E e; - while (n < maxElements && (e = poll()) != null) { - c.add(e); - ++n; - } - return n; - } - - /** - * Returns an iterator over the elements in this queue in proper - * sequence, from head to tail. - * - *
The returned iterator is a "weakly consistent" iterator that - * will never throw - * {@link ConcurrentModificationException ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed - * to) reflect any modifications subsequent to construction. - * - * @return an iterator over the elements in this queue in proper sequence - */ - @Override - public Iterator iterator() { - return new Itr(); - } - - @Override - public E peek() { - return firstDataItem(); - } - - /** - * Returns {@code true} if this queue contains no elements. - * - * @return {@code true} if this queue contains no elements - */ - @Override - public boolean isEmpty() { - for (Node p = head; p != null; p = succ(p)) { - if (!p.isMatched()) { - return !p.isData; - } - } - return true; - } - - public boolean hasWaitingConsumer() { - return firstOfMode(false) != null; - } - - /** - * Returns the number of elements in this queue. If this queue - * contains more than {@code Integer.MAX_VALUE} elements, returns - * {@code Integer.MAX_VALUE}. - * - *
Beware that, unlike in most collections, this method is - * NOT a constant-time operation. Because of the - * asynchronous nature of these queues, determining the current - * number of elements requires an O(n) traversal. - * - * @return the number of elements in this queue - */ - @Override - public int size() { - return countOfMode(true); - } - - public int getWaitingConsumerCount() { - return countOfMode(false); - } - - /** - * Removes a single instance of the specified element from this queue, - * if it is present. More formally, removes an element {@code e} such - * that {@code o.equals(e)}, if this queue contains one or more such - * elements. - * Returns {@code true} if this queue contained the specified element - * (or equivalently, if this queue changed as a result of the call). - * - * @param o element to be removed from this queue, if present - * @return {@code true} if this queue changed as a result of the call - */ - @Override - public boolean remove(Object o) { - return findAndRemove(o); - } - - /** - * Always returns {@code Integer.MAX_VALUE} because a - * {@code LinkedTransferQueue} is not capacity constrained. - * - * @return {@code Integer.MAX_VALUE} (as specified by - * {@link BlockingQueue#remainingCapacity()}) - */ - @Override - public int remainingCapacity() { - return Integer.MAX_VALUE; - } - - /** - * Saves the state to a stream (that is, serializes it). - * - * @serialData All of the elements (each an {@code E}) in - * the proper order, followed by a null - * @param s the stream - */ - private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { - s.defaultWriteObject(); - for (E e : this) { - s.writeObject(e); - } - // Use trailing null as sentinel - s.writeObject(null); - } - - /** - * Reconstitutes the Queue instance from a stream (that is, - * deserializes it). - * - * @param s the stream - */ - private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { - s.defaultReadObject(); - for (;;) { - @SuppressWarnings("unchecked") E item = (E) s.readObject(); - if (item == null) { - break; - } else { - offer(item); - } - } - } - - @SuppressWarnings("rawtypes") - private static final AtomicReferenceFieldUpdater headUpdater = - AtomicFieldUpdaterUtil.newRefUpdater(LinkedTransferQueue.class, Node.class, "head"); - @SuppressWarnings("rawtypes") - private static final AtomicReferenceFieldUpdater tailUpdater = - AtomicFieldUpdaterUtil.newRefUpdater(LinkedTransferQueue.class, Node.class, "tail"); - @SuppressWarnings("rawtypes") - private static final AtomicIntegerFieldUpdater sweepVotesUpdater = - AtomicFieldUpdaterUtil.newIntUpdater(LinkedTransferQueue.class, "sweepVotes"); -} - +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package org.jboss.netty.util.internal; + +import java.util.AbstractQueue; +import java.util.Collection; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.Queue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.LockSupport; + +/** + * This class is a copied from URL revision 1.91 + *
+ * The only difference is that it implements {@link BlockingQueue} in place of TransferQueue, and every remaining reference to the TransferQueue interface was removed +
+ * + * + * Please use {@link QueueFactory} to create a Queue, as it will pick the "optimal" implementation for the running JVM. + * + *
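+ * A minimal usage sketch (the {@code String} element type is illustrative only; any element type works):
+ * <pre>{@code
+ * BlockingQueue<String> queue = QueueFactory.createQueue(String.class);
+ * queue.offer("hello");        // never blocks, as the queue is unbounded
+ * String head = queue.poll();  // "hello"; poll() returns null when the queue is empty
+ * }</pre>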
+ *
+ * + * An unbounded {@link BlockingQueue} based on linked nodes. + * This queue orders elements FIFO (first-in-first-out) with respect + * to any given producer. The head of the queue is that + * element that has been on the queue the longest time for some + * producer. The tail of the queue is that element that has + * been on the queue the shortest time for some producer. + * + *

Beware that, unlike in most collections, the {@code size} method + * is NOT a constant-time operation. Because of the + * asynchronous nature of these queues, determining the current number + * of elements requires a traversal of the elements, and so may report + * inaccurate results if this collection is modified during traversal. + * Additionally, the bulk operations {@code addAll}, + * {@code removeAll}, {@code retainAll}, {@code containsAll}, + * {@code equals}, and {@code toArray} are not guaranteed + * to be performed atomically. For example, an iterator operating + * concurrently with an {@code addAll} operation might view only some + * of the added elements. + * + *

This class and its iterator implement all of the + * optional methods of the {@link Collection} and {@link + * Iterator} interfaces. + * + *

Memory consistency effects: As with other concurrent + * collections, actions in a thread prior to placing an object into a + * {@code LinkedTransferQueue} + * happen-before + * actions subsequent to the access or removal of that element from + * the {@code LinkedTransferQueue} in another thread. + * + *
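+ * For example (a sketch; the {@code Message} type and its {@code ready} field are illustrative only):
+ * <pre>{@code
+ * // producer thread
+ * message.ready = true;        // plain, non-volatile write
+ * queue.put(message);
+ *
+ * // consumer thread
+ * Message m = queue.take();
+ * assert m.ready;              // guaranteed to observe the write made before put()
+ * }</pre>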

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.7 + * @author Doug Lea + * @param the type of elements held in this collection + */ +public class LinkedTransferQueue extends AbstractQueue + implements BlockingQueue, java.io.Serializable { + private static final long serialVersionUID = -3223113410248163686L; + + /* + * *** Overview of Dual Queues with Slack *** + * + * Dual Queues, introduced by Scherer and Scott + * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are + * (linked) queues in which nodes may represent either data or + * requests. When a thread tries to enqueue a data node, but + * encounters a request node, it instead "matches" and removes it; + * and vice versa for enqueuing requests. Blocking Dual Queues + * arrange that threads enqueuing unmatched requests block until + * other threads provide the match. Dual Synchronous Queues (see + * Scherer, Lea, & Scott + * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf) + * additionally arrange that threads enqueuing unmatched data also + * block. Dual Transfer Queues support all of these modes, as + * dictated by callers. + * + * A FIFO dual queue may be implemented using a variation of the + * Michael & Scott (M&S) lock-free queue algorithm + * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf). + * It maintains two pointer fields, "head", pointing to a + * (matched) node that in turn points to the first actual + * (unmatched) queue node (or null if empty); and "tail" that + * points to the last node on the queue (or again null if + * empty). For example, here is a possible queue with four data + * elements: + * + * head tail + * | | + * v v + * M -> U -> U -> U -> U + * + * The M&S queue algorithm is known to be prone to scalability and + * overhead limitations when maintaining (via CAS) these head and + * tail pointers. This has led to the development of + * contention-reducing variants such as elimination arrays (see + * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and + * optimistic back pointers (see Ladan-Mozes & Shavit + * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf). + * However, the nature of dual queues enables a simpler tactic for + * improving M&S-style implementations when dual-ness is needed. + * + * In a dual queue, each node must atomically maintain its match + * status. While there are other possible variants, we implement + * this here as: for a data-mode node, matching entails CASing an + * "item" field from a non-null data value to null upon match, and + * vice-versa for request nodes, CASing from null to a data + * value. (Note that the linearization properties of this style of + * queue are easy to verify -- elements are made available by + * linking, and unavailable by matching.) Compared to plain M&S + * queues, this property of dual queues requires one additional + * successful atomic operation per enq/deq pair. But it also + * enables lower cost variants of queue maintenance mechanics. (A + * variation of this idea applies even for non-dual queues that + * support deletion of interior elements, such as + * j.u.c.ConcurrentLinkedQueue.) + * + * Once a node is matched, its match status can never again + * change. We may thus arrange that the linked list of them + * contain a prefix of zero or more matched nodes, followed by a + * suffix of zero or more unmatched nodes. 
(Note that we allow + * both the prefix and suffix to be zero length, which in turn + * means that we do not use a dummy header.) If we were not + * concerned with either time or space efficiency, we could + * correctly perform enqueue and dequeue operations by traversing + * from a pointer to the initial node; CASing the item of the + * first unmatched node on match and CASing the next field of the + * trailing node on appends. (Plus some special-casing when + * initially empty). While this would be a terrible idea in + * itself, it does have the benefit of not requiring ANY atomic + * updates on head/tail fields. + * + * We introduce here an approach that lies between the extremes of + * never versus always updating queue (head and tail) pointers. + * This offers a tradeoff between sometimes requiring extra + * traversal steps to locate the first and/or last unmatched + * nodes, versus the reduced overhead and contention of fewer + * updates to queue pointers. For example, a possible snapshot of + * a queue is: + * + * head tail + * | | + * v v + * M -> M -> U -> U -> U -> U + * + * The best value for this "slack" (the targeted maximum distance + * between the value of "head" and the first unmatched node, and + * similarly for "tail") is an empirical matter. We have found + * that using very small constants in the range of 1-3 work best + * over a range of platforms. Larger values introduce increasing + * costs of cache misses and risks of long traversal chains, while + * smaller values increase CAS contention and overhead. + * + * Dual queues with slack differ from plain M&S dual queues by + * virtue of only sometimes updating head or tail pointers when + * matching, appending, or even traversing nodes; in order to + * maintain a targeted slack. The idea of "sometimes" may be + * operationalized in several ways. The simplest is to use a + * per-operation counter incremented on each traversal step, and + * to try (via CAS) to update the associated queue pointer + * whenever the count exceeds a threshold. Another, that requires + * more overhead, is to use random number generators to update + * with a given probability per traversal step. + * + * In any strategy along these lines, because CASes updating + * fields may fail, the actual slack may exceed targeted + * slack. However, they may be retried at any time to maintain + * targets. Even when using very small slack values, this + * approach works well for dual queues because it allows all + * operations up to the point of matching or appending an item + * (hence potentially allowing progress by another thread) to be + * read-only, thus not introducing any further contention. As + * described below, we implement this by performing slack + * maintenance retries only after these points. + * + * As an accompaniment to such techniques, traversal overhead can + * be further reduced without increasing contention of head + * pointer updates: Threads may sometimes shortcut the "next" link + * path from the current "head" node to be closer to the currently + * known first unmatched node, and similarly for tail. Again, this + * may be triggered with using thresholds or randomization. 
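+ * For instance (an illustrative snapshot in the style of the ones above): with a
+ * slack target of two, "head" is only CASed forward once it lags the first
+ * unmatched node by two or more steps:
+ *
+ *  head                           head
+ *   |                               |
+ *   v                               v
+ *   M -> M -> U -> U   ==CAS==>     U -> U
+ *
+ * after which the old head node is self-linked to cut the garbage chain.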
+ * + * These ideas must be further extended to avoid unbounded amounts + * of costly-to-reclaim garbage caused by the sequential "next" + * links of nodes starting at old forgotten head nodes: As first + * described in detail by Boehm + * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC + * delays noticing that any arbitrarily old node has become + * garbage, all newer dead nodes will also be unreclaimed. + * (Similar issues arise in non-GC environments.) To cope with + * this in our implementation, upon CASing to advance the head + * pointer, we set the "next" link of the previous head to point + * only to itself; thus limiting the length of connected dead lists. + * (We also take similar care to wipe out possibly garbage + * retaining values held in other Node fields.) However, doing so + * adds some further complexity to traversal: If any "next" + * pointer links to itself, it indicates that the current thread + * has lagged behind a head-update, and so the traversal must + * continue from the "head". Traversals trying to find the + * current tail starting from "tail" may also encounter + * self-links, in which case they also continue at "head". + * + * It is tempting in slack-based scheme to not even use CAS for + * updates (similarly to Ladan-Mozes & Shavit). However, this + * cannot be done for head updates under the above link-forgetting + * mechanics because an update may leave head at a detached node. + * And while direct writes are possible for tail updates, they + * increase the risk of long retraversals, and hence long garbage + * chains, which can be much more costly than is worthwhile + * considering that the cost difference of performing a CAS vs + * write is smaller when they are not triggered on each operation + * (especially considering that writes and CASes equally require + * additional GC bookkeeping ("write barriers") that are sometimes + * more costly than the writes themselves because of contention). + * + * *** Overview of implementation *** + * + * We use a threshold-based approach to updates, with a slack + * threshold of two -- that is, we update head/tail when the + * current pointer appears to be two or more steps away from the + * first/last node. The slack value is hard-wired: a path greater + * than one is naturally implemented by checking equality of + * traversal pointers except when the list has only one element, + * in which case we keep slack threshold at one. Avoiding tracking + * explicit counts across method calls slightly simplifies an + * already-messy implementation. Using randomization would + * probably work better if there were a low-quality dirt-cheap + * per-thread one available, but even ThreadLocalRandom is too + * heavy for these purposes. + * + * With such a small slack threshold value, it is not worthwhile + * to augment this with path short-circuiting (i.e., unsplicing + * interior nodes) except in the case of cancellation/removal (see + * below). + * + * We allow both the head and tail fields to be null before any + * nodes are enqueued; initializing upon first append. This + * simplifies some other logic, as well as providing more + * efficient explicit control paths instead of letting JVMs insert + * implicit NullPointerExceptions when they are null. While not + * currently fully implemented, we also leave open the possibility + * of re-nulling these fields when empty (which is complicated to + * arrange, for little benefit.) 
+ * + * All enqueue/dequeue operations are handled by the single method + * "xfer" with parameters indicating whether to act as some form + * of offer, put, poll, take, or transfer (each possibly with + * timeout). The relative complexity of using one monolithic + * method outweighs the code bulk and maintenance problems of + * using separate methods for each case. + * + * Operation consists of up to three phases. The first is + * implemented within method xfer, the second in tryAppend, and + * the third in method awaitMatch. + * + * 1. Try to match an existing node + * + * Starting at head, skip already-matched nodes until finding + * an unmatched node of opposite mode, if one exists, in which + * case matching it and returning, also if necessary updating + * head to one past the matched node (or the node itself if the + * list has no other unmatched nodes). If the CAS misses, then + * a loop retries advancing head by two steps until either + * success or the slack is at most two. By requiring that each + * attempt advances head by two (if applicable), we ensure that + * the slack does not grow without bound. Traversals also check + * if the initial head is now off-list, in which case they + * start at the new head. + * + * If no candidates are found and the call was untimed + * poll/offer, (argument "how" is NOW) return. + * + * 2. Try to append a new node (method tryAppend) + * + * Starting at current tail pointer, find the actual last node + * and try to append a new node (or if head was null, establish + * the first node). Nodes can be appended only if their + * predecessors are either already matched or are of the same + * mode. If we detect otherwise, then a new node with opposite + * mode must have been appended during traversal, so we must + * restart at phase 1. The traversal and update steps are + * otherwise similar to phase 1: Retrying upon CAS misses and + * checking for staleness. In particular, if a self-link is + * encountered, then we can safely jump to a node on the list + * by continuing the traversal at current head. + * + * On successful append, if the call was ASYNC, return. + * + * 3. Await match or cancellation (method awaitMatch) + * + * Wait for another thread to match node; instead cancelling if + * the current thread was interrupted or the wait timed out. On + * multiprocessors, we use front-of-queue spinning: If a node + * appears to be the first unmatched node in the queue, it + * spins a bit before blocking. In either case, before blocking + * it tries to unsplice any nodes between the current "head" + * and the first unmatched node. + * + * Front-of-queue spinning vastly improves performance of + * heavily contended queues. And so long as it is relatively + * brief and "quiet", spinning does not much impact performance + * of less-contended queues. During spins threads check their + * interrupt status and generate a thread-local random number + * to decide to occasionally perform a Thread.yield. While + * yield has underdefined specs, we assume that it might help, + * and will not hurt, in limiting impact of spinning on busy + * systems. We also use smaller (1/2) spins for nodes that are + * not known to be front but whose predecessors have not + * blocked -- these "chained" spins avoid artifacts of + * front-of-queue rules which otherwise lead to alternating + * nodes spinning vs blocking. 
Further, front threads that + * represent phase changes (from data to request node or vice + * versa) compared to their predecessors receive additional + * chained spins, reflecting longer paths typically required to + * unblock threads during phase changes. + * + * + * ** Unlinking removed interior nodes ** + * + * In addition to minimizing garbage retention via self-linking + * described above, we also unlink removed interior nodes. These + * may arise due to timed out or interrupted waits, or calls to + * remove(x) or Iterator.remove. Normally, given a node that was + * at one time known to be the predecessor of some node s that is + * to be removed, we can unsplice s by CASing the next field of + * its predecessor if it still points to s (otherwise s must + * already have been removed or is now offlist). But there are two + * situations in which we cannot guarantee to make node s + * unreachable in this way: (1) If s is the trailing node of list + * (i.e., with null next), then it is pinned as the target node + * for appends, so can only be removed later after other nodes are + * appended. (2) We cannot necessarily unlink s given a + * predecessor node that is matched (including the case of being + * cancelled): the predecessor may already be unspliced, in which + * case some previous reachable node may still point to s. + * (For further explanation see Herlihy & Shavit "The Art of + * Multiprocessor Programming" chapter 9). Although, in both + * cases, we can rule out the need for further action if either s + * or its predecessor are (or can be made to be) at, or fall off + * from, the head of list. + * + * Without taking these into account, it would be possible for an + * unbounded number of supposedly removed nodes to remain + * reachable. Situations leading to such buildup are uncommon but + * can occur in practice; for example when a series of short timed + * calls to poll repeatedly time out but never otherwise fall off + * the list because of an untimed call to take at the front of the + * queue. + * + * When these cases arise, rather than always retraversing the + * entire list to find an actual predecessor to unlink (which + * won't help for case (1) anyway), we record a conservative + * estimate of possible unsplice failures (in "sweepVotes"). + * We trigger a full sweep when the estimate exceeds a threshold + * ("SWEEP_THRESHOLD") indicating the maximum number of estimated + * removal failures to tolerate before sweeping through, unlinking + * cancelled nodes that were not unlinked upon initial removal. + * We perform sweeps by the thread hitting threshold (rather than + * background threads or by spreading work to other threads) + * because in the main contexts in which removal occurs, the + * caller is already timed-out, cancelled, or performing a + * potentially O(n) operation (e.g. remove(x)), none of which are + * time-critical enough to warrant the overhead that alternatives + * would impose on other threads. + * + * Because the sweepVotes estimate is conservative, and because + * nodes become unlinked "naturally" as they fall off the head of + * the queue, and because we allow votes to accumulate even while + * sweeps are in progress, there are typically significantly fewer + * such nodes than estimated. Choice of a threshold value + * balances the likelihood of wasted effort and contention, versus + * providing a worst-case bound on retention of interior nodes in + * quiescent queues. 
The value defined below was chosen + * empirically to balance these under various timeout scenarios. + * + * Note that we cannot self-link unlinked interior nodes during + * sweeps. However, the associated garbage chains terminate when + * some successor ultimately falls off the head of the list and is + * self-linked. + */ + + /** True if on multiprocessor */ + private static final boolean MP = + Runtime.getRuntime().availableProcessors() > 1; + + /** + * The number of times to spin (with randomly interspersed calls + * to Thread.yield) on multiprocessor before blocking when a node + * is apparently the first waiter in the queue. See above for + * explanation. Must be a power of two. The value is empirically + * derived -- it works pretty well across a variety of processors, + * numbers of CPUs, and OSes. + */ + private static final int FRONT_SPINS = 1 << 7; + + /** + * The number of times to spin before blocking when a node is + * preceded by another node that is apparently spinning. Also + * serves as an increment to FRONT_SPINS on phase changes, and as + * base average frequency for yielding during spins. Must be a + * power of two. + */ + private static final int CHAINED_SPINS = FRONT_SPINS >>> 1; + + /** + * The maximum number of estimated removal failures (sweepVotes) + * to tolerate before sweeping through the queue unlinking + * cancelled nodes that were not unlinked upon initial + * removal. See above for explanation. The value must be at least + * two to avoid useless sweeps when removing trailing nodes. + */ + static final int SWEEP_THRESHOLD = 32; + + /** + * Queue nodes. Uses Object, not E, for items to allow forgetting + * them after use. Relies heavily on Unsafe mechanics to minimize + * unnecessary ordering constraints: Writes that are intrinsically + * ordered wrt other accesses or CASes use simple relaxed forms. + */ + static final class Node { + final boolean isData; // false if this is a request node + volatile Object item; // initially non-null if isData; CASed to match + volatile Node next; + volatile Thread waiter; // null until waiting + + // CAS methods for fields + final boolean casNext(Node cmp, Node val) { + return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val); + } + + final boolean casItem(Object cmp, Object val) { + // assert cmp == null || cmp.getClass() != Node.class; + return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val); + } + + /** + * Constructs a new node. Uses relaxed write because item can + * only be seen after publication via casNext. + */ + Node(Object item, boolean isData) { + UNSAFE.putObject(this, itemOffset, item); // relaxed write + this.isData = isData; + } + + /** + * Links node to itself to avoid garbage retention. Called + * only after CASing head field, so uses relaxed write. + */ + final void forgetNext() { + UNSAFE.putObject(this, nextOffset, this); + } + + /** + * Sets item to self and waiter to null, to avoid garbage + * retention after matching or cancelling. Uses relaxed writes + * because order is already constrained in the only calling + * contexts: item is forgotten only after volatile/atomic + * mechanics that extract items. Similarly, clearing waiter + * follows either CAS or return from park (if ever parked; + * else we don't care). + */ + final void forgetContents() { + UNSAFE.putObject(this, itemOffset, this); + UNSAFE.putObject(this, waiterOffset, null); + } + + /** + * Returns true if this node has been matched, including the + * case of artificial matches due to cancellation. 
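+ * Concretely, per the check below: the node is matched once its item points to the
+ * node itself (forgotten after a match or cancellation), once a data node's item has
+ * become null, or once a request node's item has become non-null.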
+ */ + final boolean isMatched() { + Object x = item; + return (x == this) || ((x == null) == isData); + } + + /** + * Returns true if this is an unmatched request node. + */ + final boolean isUnmatchedRequest() { + return !isData && item == null; + } + + /** + * Returns true if a node with the given mode cannot be + * appended to this node because this node is unmatched and + * has opposite data mode. + */ + final boolean cannotPrecede(boolean haveData) { + boolean d = isData; + Object x; + return d != haveData && (x = item) != this && (x != null) == d; + } + + /** + * Tries to artificially match a data node -- used by remove. + */ + final boolean tryMatchData() { + // assert isData; + Object x = item; + if (x != null && x != this && casItem(x, null)) { + LockSupport.unpark(waiter); + return true; + } + return false; + } + + private static final long serialVersionUID = -3375979862319811754L; + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long itemOffset; + private static final long nextOffset; + private static final long waiterOffset; + static { + try { + UNSAFE = getUnsafe(); + Class k = Node.class; + itemOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("item")); + nextOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("next")); + waiterOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("waiter")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /** head of the queue; null until first enqueue */ + transient volatile Node head; + + /** tail of the queue; null until first append */ + private transient volatile Node tail; + + /** The number of apparent failures to unsplice removed nodes */ + private transient volatile int sweepVotes; + + // CAS methods for fields + private boolean casTail(Node cmp, Node val) { + return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val); + } + + private boolean casHead(Node cmp, Node val) { + return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val); + } + + private boolean casSweepVotes(int cmp, int val) { + return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val); + } + + /* + * Possible values for "how" argument in xfer method. + */ + private static final int NOW = 0; // for untimed poll, tryTransfer + private static final int ASYNC = 1; // for offer, put, add + private static final int SYNC = 2; // for transfer, take + private static final int TIMED = 3; // for timed poll, tryTransfer + + @SuppressWarnings("unchecked") + static E cast(Object item) { + // assert item == null || item.getClass() != Node.class; + return (E) item; + } + + /** + * Implements all queuing methods. See above for explanation. 
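+ * The "how" argument selects the flavour: NOW backs untimed poll and tryTransfer,
+ * ASYNC backs offer/put/add, SYNC backs transfer and take, and TIMED backs the
+ * timed poll and tryTransfer variants.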
+ * + * @param e the item or null for take + * @param haveData true if this is a put, else a take + * @param how NOW, ASYNC, SYNC, or TIMED + * @param nanos timeout in nanosecs, used only if mode is TIMED + * @return an item if matched, else e + * @throws NullPointerException if haveData mode but e is null + */ + private E xfer(E e, boolean haveData, int how, long nanos) { + if (haveData && (e == null)) + throw new NullPointerException(); + Node s = null; // the node to append, if needed + + retry: + for (;;) { // restart on append race + + for (Node h = head, p = h; p != null;) { // find & match first node + boolean isData = p.isData; + Object item = p.item; + if (item != p && (item != null) == isData) { // unmatched + if (isData == haveData) // can't match + break; + if (p.casItem(item, e)) { // match + for (Node q = p; q != h;) { + Node n = q.next; // update by 2 unless singleton + if (head == h && casHead(h, n == null ? q : n)) { + h.forgetNext(); + break; + } // advance and retry + if ((h = head) == null || + (q = h.next) == null || !q.isMatched()) + break; // unless slack < 2 + } + LockSupport.unpark(p.waiter); + return LinkedTransferQueue.cast(item); + } + } + Node n = p.next; + p = (p != n) ? n : (h = head); // Use head if p offlist + } + + if (how != NOW) { // No matches available + if (s == null) + s = new Node(e, haveData); + Node pred = tryAppend(s, haveData); + if (pred == null) + continue retry; // lost race vs opposite mode + if (how != ASYNC) + return awaitMatch(s, pred, e, (how == TIMED), nanos); + } + return e; // not waiting + } + } + + /** + * Tries to append node s as tail. + * + * @param s the node to append + * @param haveData true if appending in data mode + * @return null on failure due to losing race with append in + * different mode, else s's predecessor, or s itself if no + * predecessor + */ + private Node tryAppend(Node s, boolean haveData) { + for (Node t = tail, p = t;;) { // move p to last node and append + Node n, u; // temps for reads of next & tail + if (p == null && (p = head) == null) { + if (casHead(null, s)) + return s; // initialize + } + else if (p.cannotPrecede(haveData)) + return null; // lost race vs opposite mode + else if ((n = p.next) != null) // not last; keep traversing + p = p != t && t != (u = tail) ? (t = u) : // stale tail + (p != n) ? n : null; // restart if off list + else if (!p.casNext(null, s)) + p = p.next; // re-read on CAS failure + else { + if (p != t) { // update if slack now >= 2 + while ((tail != t || !casTail(t, s)) && + (t = tail) != null && + (s = t.next) != null && // advance and retry + (s = s.next) != null && s != t); + } + return p; + } + } + } + + /** + * Spins/yields/blocks until node s is matched or caller gives up. + * + * @param s the waiting node + * @param pred the predecessor of s, or s itself if it has no + * predecessor, or null if unknown (the null case does not occur + * in any current calls but may in possible future extensions) + * @param e the comparison value for checking match + * @param timed if true, wait only until timeout elapses + * @param nanos timeout in nanosecs, used only if timed is true + * @return matched item, or e if unmatched on interrupt or timeout + */ + private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) { + long lastTime = timed ? 
System.nanoTime() : 0L; + Thread w = Thread.currentThread(); + int spins = -1; // initialized after first item and cancel checks + ThreadLocalRandom randomYields = null; // bound if needed + + for (;;) { + Object item = s.item; + if (item != e) { // matched + // assert item != s; + s.forgetContents(); // avoid garbage + return LinkedTransferQueue.cast(item); + } + if ((w.isInterrupted() || (timed && nanos <= 0)) && + s.casItem(e, s)) { // cancel + unsplice(pred, s); + return e; + } + + if (spins < 0) { // establish spins at/near front + if ((spins = spinsFor(pred, s.isData)) > 0) + randomYields = ThreadLocalRandom.current(); + } + else if (spins > 0) { // spin + --spins; + if (randomYields.nextInt(CHAINED_SPINS) == 0) + Thread.yield(); // occasionally yield + } + else if (s.waiter == null) { + s.waiter = w; // request unpark then recheck + } + else if (timed) { + long now = System.nanoTime(); + if ((nanos -= now - lastTime) > 0) + LockSupport.parkNanos(this, nanos); + lastTime = now; + } + else { + LockSupport.park(this); + } + } + } + + /** + * Returns spin/yield value for a node with given predecessor and + * data mode. See above for explanation. + */ + private static int spinsFor(Node pred, boolean haveData) { + if (MP && pred != null) { + if (pred.isData != haveData) // phase change + return FRONT_SPINS + CHAINED_SPINS; + if (pred.isMatched()) // probably at front + return FRONT_SPINS; + if (pred.waiter == null) // pred apparently spinning + return CHAINED_SPINS; + } + return 0; + } + + /* -------------- Traversal methods -------------- */ + + /** + * Returns the successor of p, or the head node if p.next has been + * linked to self, which will only be true if traversing with a + * stale pointer that is now off the list. + */ + final Node succ(Node p) { + Node next = p.next; + return (p == next) ? head : next; + } + + /** + * Returns the first unmatched node of the given mode, or null if + * none. Used by methods isEmpty, hasWaitingConsumer. + */ + private Node firstOfMode(boolean isData) { + for (Node p = head; p != null; p = succ(p)) { + if (!p.isMatched()) + return (p.isData == isData) ? p : null; + } + return null; + } + + /** + * Returns the item in the first unmatched node with isData; or + * null if none. Used by peek. + */ + private E firstDataItem() { + for (Node p = head; p != null; p = succ(p)) { + Object item = p.item; + if (p.isData) { + if (item != null && item != p) + return LinkedTransferQueue.cast(item); + } + else if (item == null) + return null; + } + return null; + } + + /** + * Traverses and counts unmatched nodes of the given mode. + * Used by methods size and getWaitingConsumerCount. + */ + private int countOfMode(boolean data) { + int count = 0; + for (Node p = head; p != null; ) { + if (!p.isMatched()) { + if (p.isData != data) + return 0; + if (++count == Integer.MAX_VALUE) // saturated + break; + } + Node n = p.next; + if (n != p) + p = n; + else { + count = 0; + p = head; + } + } + return count; + } + + final class Itr implements Iterator { + private Node nextNode; // next node to return item for + private E nextItem; // the corresponding item + private Node lastRet; // last returned node, to support remove + private Node lastPred; // predecessor to unlink lastRet + + /** + * Moves to next node after prev, or first node if prev null. 
+ */ + private void advance(Node prev) { + /* + * To track and avoid buildup of deleted nodes in the face + * of calls to both Queue.remove and Itr.remove, we must + * include variants of unsplice and sweep upon each + * advance: Upon Itr.remove, we may need to catch up links + * from lastPred, and upon other removes, we might need to + * skip ahead from stale nodes and unsplice deleted ones + * found while advancing. + */ + + Node r, b; // reset lastPred upon possible deletion of lastRet + if ((r = lastRet) != null && !r.isMatched()) + lastPred = r; // next lastPred is old lastRet + else if ((b = lastPred) == null || b.isMatched()) + lastPred = null; // at start of list + else { + Node s, n; // help with removal of lastPred.next + while ((s = b.next) != null && + s != b && s.isMatched() && + (n = s.next) != null && n != s) + b.casNext(s, n); + } + + this.lastRet = prev; + + for (Node p = prev, s, n;;) { + s = (p == null) ? head : p.next; + if (s == null) + break; + else if (s == p) { + p = null; + continue; + } + Object item = s.item; + if (s.isData) { + if (item != null && item != s) { + nextItem = LinkedTransferQueue.cast(item); + nextNode = s; + return; + } + } + else if (item == null) + break; + // assert s.isMatched(); + if (p == null) + p = s; + else if ((n = s.next) == null) + break; + else if (s == n) + p = null; + else + p.casNext(s, n); + } + nextNode = null; + nextItem = null; + } + + Itr() { + advance(null); + } + + public final boolean hasNext() { + return nextNode != null; + } + + public final E next() { + Node p = nextNode; + if (p == null) throw new NoSuchElementException(); + E e = nextItem; + advance(p); + return e; + } + + public final void remove() { + final Node lastRet = this.lastRet; + if (lastRet == null) + throw new IllegalStateException(); + this.lastRet = null; + if (lastRet.tryMatchData()) + unsplice(lastPred, lastRet); + } + } + + /* -------------- Removal methods -------------- */ + + /** + * Unsplices (now or later) the given deleted/cancelled node with + * the given predecessor. + * + * @param pred a node that was at one time known to be the + * predecessor of s, or null or s itself if s is/was at head + * @param s the node to be unspliced + */ + final void unsplice(Node pred, Node s) { + s.forgetContents(); // forget unneeded fields + /* + * See above for rationale. Briefly: if pred still points to + * s, try to unlink s. If s cannot be unlinked, because it is + * trailing node or pred might be unlinked, and neither pred + * nor s are head or offlist, add to sweepVotes, and if enough + * votes have accumulated, sweep. + */ + if (pred != null && pred != s && pred.next == s) { + Node n = s.next; + if (n == null || + (n != s && pred.casNext(s, n) && pred.isMatched())) { + for (;;) { // check if at, or could be, head + Node h = head; + if (h == pred || h == s || h == null) + return; // at head or list empty + if (!h.isMatched()) + break; + Node hn = h.next; + if (hn == null) + return; // now empty + if (hn != h && casHead(h, hn)) + h.forgetNext(); // advance head + } + if (pred.next != pred && s.next != s) { // recheck if offlist + for (;;) { // sweep now if enough votes + int v = sweepVotes; + if (v < SWEEP_THRESHOLD) { + if (casSweepVotes(v, v + 1)) + break; + } + else if (casSweepVotes(v, 0)) { + sweep(); + break; + } + } + } + } + } + } + + /** + * Unlinks matched (typically cancelled) nodes encountered in a + * traversal from head. 
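+ * Invoked from unsplice once the count of apparent unsplice failures recorded in
+ * sweepVotes reaches SWEEP_THRESHOLD, at which point the vote count is reset.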
+ */ + private void sweep() { + for (Node p = head, s, n; p != null && (s = p.next) != null; ) { + if (!s.isMatched()) + // Unmatched nodes are never self-linked + p = s; + else if ((n = s.next) == null) // trailing node is pinned + break; + else if (s == n) // stale + // No need to also check for p == s, since that implies s == n + p = head; + else + p.casNext(s, n); + } + } + + /** + * Main implementation of remove(Object) + */ + private boolean findAndRemove(Object e) { + if (e != null) { + for (Node pred = null, p = head; p != null; ) { + Object item = p.item; + if (p.isData) { + if (item != null && item != p && e.equals(item) && + p.tryMatchData()) { + unsplice(pred, p); + return true; + } + } + else if (item == null) + break; + pred = p; + if ((p = p.next) == pred) { // stale + pred = null; + p = head; + } + } + } + return false; + } + + + /** + * Creates an initially empty {@code LinkedTransferQueue}. + */ + public LinkedTransferQueue() { + } + + /** + * Creates a {@code LinkedTransferQueue} + * initially containing the elements of the given collection, + * added in traversal order of the collection's iterator. + * + * @param c the collection of elements to initially contain + * @throws NullPointerException if the specified collection or any + * of its elements are null + */ + public LinkedTransferQueue(Collection c) { + this(); + addAll(c); + } + + /** + * Inserts the specified element at the tail of this queue. + * As the queue is unbounded, this method will never block. + * + * @throws NullPointerException if the specified element is null + */ + public void put(E e) { + xfer(e, true, ASYNC, 0); + } + + /** + * Inserts the specified element at the tail of this queue. + * As the queue is unbounded, this method will never block or + * return {@code false}. + * + * @return {@code true} (as specified by + * {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit) + * BlockingQueue.offer}) + * @throws NullPointerException if the specified element is null + */ + public boolean offer(E e, long timeout, TimeUnit unit) { + xfer(e, true, ASYNC, 0); + return true; + } + + /** + * Inserts the specified element at the tail of this queue. + * As the queue is unbounded, this method will never return {@code false}. + * + * @return {@code true} (as specified by {@link Queue#offer}) + * @throws NullPointerException if the specified element is null + */ + public boolean offer(E e) { + xfer(e, true, ASYNC, 0); + return true; + } + + /** + * Inserts the specified element at the tail of this queue. + * As the queue is unbounded, this method will never throw + * {@link IllegalStateException} or return {@code false}. + * + * @return {@code true} (as specified by {@link Collection#add}) + * @throws NullPointerException if the specified element is null + */ + public boolean add(E e) { + xfer(e, true, ASYNC, 0); + return true; + } + + /** + * Transfers the element to a waiting consumer immediately, if possible. + * + *

More precisely, transfers the specified element immediately + * if there exists a consumer already waiting to receive it (in + * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), + * otherwise returning {@code false} without enqueuing the element. + * + * @throws NullPointerException if the specified element is null + */ + public boolean tryTransfer(E e) { + return xfer(e, true, NOW, 0) == null; + } + + /** + * Transfers the element to a consumer, waiting if necessary to do so. + * + *

More precisely, transfers the specified element immediately + * if there exists a consumer already waiting to receive it (in + * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), + * else inserts the specified element at the tail of this queue + * and waits until the element is received by a consumer. + * + * @throws NullPointerException if the specified element is null + */ + public void transfer(E e) throws InterruptedException { + if (xfer(e, true, SYNC, 0) != null) { + Thread.interrupted(); // failure possible only due to interrupt + throw new InterruptedException(); + } + } + + /** + * Transfers the element to a consumer if it is possible to do so + * before the timeout elapses. + * + *

More precisely, transfers the specified element immediately + * if there exists a consumer already waiting to receive it (in + * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), + * else inserts the specified element at the tail of this queue + * and waits until the element is received by a consumer, + * returning {@code false} if the specified wait time elapses + * before the element can be transferred. + * + * @throws NullPointerException if the specified element is null + */ + public boolean tryTransfer(E e, long timeout, TimeUnit unit) + throws InterruptedException { + if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) + return true; + if (!Thread.interrupted()) + return false; + throw new InterruptedException(); + } + + public E take() throws InterruptedException { + E e = xfer(null, false, SYNC, 0); + if (e != null) + return e; + Thread.interrupted(); + throw new InterruptedException(); + } + + public E poll(long timeout, TimeUnit unit) throws InterruptedException { + E e = xfer(null, false, TIMED, unit.toNanos(timeout)); + if (e != null || !Thread.interrupted()) + return e; + throw new InterruptedException(); + } + + public E poll() { + return xfer(null, false, NOW, 0); + } + + /** + * @throws NullPointerException {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + */ + public int drainTo(Collection c) { + if (c == null) + throw new NullPointerException(); + if (c == this) + throw new IllegalArgumentException(); + int n = 0; + for (E e; (e = poll()) != null;) { + c.add(e); + ++n; + } + return n; + } + + /** + * @throws NullPointerException {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + */ + public int drainTo(Collection c, int maxElements) { + if (c == null) + throw new NullPointerException(); + if (c == this) + throw new IllegalArgumentException(); + int n = 0; + for (E e; n < maxElements && (e = poll()) != null;) { + c.add(e); + ++n; + } + return n; + } + + /** + * Returns an iterator over the elements in this queue in proper sequence. + * The elements will be returned in order from first (head) to last (tail). + * + *

The returned iterator is a "weakly consistent" iterator that + * will never throw {@link java.util.ConcurrentModificationException + * ConcurrentModificationException}, and guarantees to traverse + * elements as they existed upon construction of the iterator, and + * may (but is not guaranteed to) reflect any modifications + * subsequent to construction. + * + * @return an iterator over the elements in this queue in proper sequence + */ + public Iterator iterator() { + return new Itr(); + } + + public E peek() { + return firstDataItem(); + } + + /** + * Returns {@code true} if this queue contains no elements. + * + * @return {@code true} if this queue contains no elements + */ + public boolean isEmpty() { + for (Node p = head; p != null; p = succ(p)) { + if (!p.isMatched()) + return !p.isData; + } + return true; + } + + public boolean hasWaitingConsumer() { + return firstOfMode(false) != null; + } + + /** + * Returns the number of elements in this queue. If this queue + * contains more than {@code Integer.MAX_VALUE} elements, returns + * {@code Integer.MAX_VALUE}. + * + *

Beware that, unlike in most collections, this method is + * NOT a constant-time operation. Because of the + * asynchronous nature of these queues, determining the current + * number of elements requires an O(n) traversal. + * + * @return the number of elements in this queue + */ + public int size() { + return countOfMode(true); + } + + public int getWaitingConsumerCount() { + return countOfMode(false); + } + + /** + * Removes a single instance of the specified element from this queue, + * if it is present. More formally, removes an element {@code e} such + * that {@code o.equals(e)}, if this queue contains one or more such + * elements. + * Returns {@code true} if this queue contained the specified element + * (or equivalently, if this queue changed as a result of the call). + * + * @param o element to be removed from this queue, if present + * @return {@code true} if this queue changed as a result of the call + */ + public boolean remove(Object o) { + return findAndRemove(o); + } + + /** + * Returns {@code true} if this queue contains the specified element. + * More formally, returns {@code true} if and only if this queue contains + * at least one element {@code e} such that {@code o.equals(e)}. + * + * @param o object to be checked for containment in this queue + * @return {@code true} if this queue contains the specified element + */ + public boolean contains(Object o) { + if (o == null) return false; + for (Node p = head; p != null; p = succ(p)) { + Object item = p.item; + if (p.isData) { + if (item != null && item != p && o.equals(item)) + return true; + } + else if (item == null) + break; + } + return false; + } + + /** + * Always returns {@code Integer.MAX_VALUE} because a + * {@code LinkedTransferQueue} is not capacity constrained. + * + * @return {@code Integer.MAX_VALUE} (as specified by + * {@link java.util.concurrent.BlockingQueue#remainingCapacity() + * BlockingQueue.remainingCapacity}) + */ + public int remainingCapacity() { + return Integer.MAX_VALUE; + } + + /** + * Saves the state to a stream (that is, serializes it). + * + * @serialData All of the elements (each an {@code E}) in + * the proper order, followed by a null + * @param s the stream + */ + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + for (E e : this) + s.writeObject(e); + // Use trailing null as sentinel + s.writeObject(null); + } + + /** + * Reconstitutes the Queue instance from a stream (that is, + * deserializes it). + * + * @param s the stream + */ + private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + for (;;) { + @SuppressWarnings("unchecked") + E item = (E) s.readObject(); + if (item == null) + break; + else + offer(item); + } + } + + // Unsafe mechanics + + private static final sun.misc.Unsafe UNSAFE; + private static final long headOffset; + private static final long tailOffset; + private static final long sweepVotesOffset; + static { + try { + UNSAFE = getUnsafe(); + Class k = LinkedTransferQueue.class; + headOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("head")); + tailOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("tail")); + sweepVotesOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("sweepVotes")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. 
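+ * When a SecurityManager denies the direct Unsafe.getUnsafe() call, this method
+ * falls back to reading the "theUnsafe" field reflectively inside a privileged action.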
+ * + * @return a sun.misc.Unsafe + */ + static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } + +} \ No newline at end of file diff --git a/src/main/java/org/jboss/netty/util/internal/QueueFactory.java b/src/main/java/org/jboss/netty/util/internal/QueueFactory.java new file mode 100644 index 0000000000..c506ca4943 --- /dev/null +++ b/src/main/java/org/jboss/netty/util/internal/QueueFactory.java @@ -0,0 +1,69 @@ +/* + * Copyright 2011 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package org.jboss.netty.util.internal; + +import java.util.Collection; +import java.util.concurrent.BlockingQueue; + +import org.jboss.netty.util.UnsafeDetectUtil; + +/** + * This factory should be used to create the "optimal" {@link BlockingQueue} instance for the running JVM. + * + * + * + * @author The Netty Project + * @author Norman Maurer + * + */ +public class QueueFactory { + + private static final boolean useUnsafe = UnsafeDetectUtil.isUnsafeFound(QueueFactory.class.getClassLoader()); + + private QueueFactory() { + // only use static methods! 
+ } + + + /** + * Create a new unbound {@link BlockingQueue} + * + * @param itemClass the {@link Class} type which will be used as {@link BlockingQueue} items + * @return queue the {@link BlockingQueue} implementation + */ + public static final BlockingQueue createQueue(Class itemClass) { + if (useUnsafe) { + return new LinkedTransferQueue(); + } else { + return new LegacyLinkedTransferQueue(); + } + } + + /** + * Create a new unbound {@link BlockingQueue} + * + * @param collection the collection which should get copied to the newly created {@link BlockingQueue} + * @param itemClass the {@link Class} type which will be used as {@link BlockingQueue} items + * @return queue the {@link BlockingQueue} implementation + */ + public static final BlockingQueue createQueue(Collection collection, Class itemClass) { + if (useUnsafe) { + return new LinkedTransferQueue(collection); + } else { + return new LegacyLinkedTransferQueue(collection); + } + } +} diff --git a/src/test/java/org/jboss/netty/handler/ipfilter/IpFilterRuleTest.java b/src/test/java/org/jboss/netty/handler/ipfilter/IpFilterRuleTest.java index a4df5f3aea..4f6bc64305 100644 --- a/src/test/java/org/jboss/netty/handler/ipfilter/IpFilterRuleTest.java +++ b/src/test/java/org/jboss/netty/handler/ipfilter/IpFilterRuleTest.java @@ -265,6 +265,16 @@ public class IpFilterRuleTest extends TestCase // TODO Auto-generated method stub return 0; } + + @Override + public Object getAttachment() { + return null; + } + + @Override + public void setAttachment(Object attachment) { + + } }, h, addr), addr);