merged with master

Jestan Nirojan 2011-12-07 23:17:11 +05:30
commit 724ca7a913
44 changed files with 3121 additions and 3821 deletions

pom.xml

@@ -221,6 +221,37 @@
</resource>
</resources>
<pluginManagement>
<plugins>
<!--This plugin's configuration is used to store Eclipse m2e settings only.
It has no influence on the Maven build itself. -->
<plugin>
<groupId>org.eclipse.m2e</groupId>
<artifactId>lifecycle-mapping</artifactId>
<version>1.0.0</version>
<configuration>
<lifecycleMappingMetadata>
<pluginExecutions>
<pluginExecution>
<pluginExecutionFilter>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<versionRange>[1.7,)</versionRange>
<goals>
<goal>run</goal>
</goals>
</pluginExecutionFilter>
<action>
<ignore />
</action>
</pluginExecution>
</pluginExecutions>
</lifecycleMappingMetadata>
</configuration>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<!-- See org.jboss:jboss-parent -->
@@ -514,135 +545,6 @@
<windowTitle>${project.name} Source Xref (${project.version})</windowTitle>
</configuration>
</plugin>
<plugin>
<groupId>org.jboss.maven.plugins</groupId>
<artifactId>maven-jdocbook-plugin</artifactId>
<version>2.2.1</version>
<executions>
<execution>
<id>generate-docbook</id>
<phase>package</phase>
<goals>
<goal>resources</goal>
<goal>generate</goal>
</goals>
</execution>
</executions>
<dependencies>
<dependency>
<groupId>org.eclipse.wst.css</groupId>
<artifactId>core</artifactId>
<version>1.1.101-v200705302225</version>
<exclusions>
<exclusion>
<groupId>org.apache</groupId>
<artifactId>xerces</artifactId>
</exclusion>
<exclusion>
<groupId>com.ibm</groupId>
<artifactId>icu</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.eclipse.wst.sse</groupId>
<artifactId>core</artifactId>
<version>1.1.202-v200709061102</version>
<exclusions>
<exclusion>
<groupId>org.apache</groupId>
<artifactId>xerces</artifactId>
</exclusion>
<exclusion>
<groupId>com.ibm</groupId>
<artifactId>icu</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.jboss</groupId>
<artifactId>jbossorg-docbook-xslt</artifactId>
<version>1.1.0</version>
<exclusions>
<exclusion>
<groupId>org.eclipse.wst.css</groupId>
<artifactId>core</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.wst.sse</groupId>
<artifactId>core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.jboss</groupId>
<artifactId>jbossorg-jdocbook-style</artifactId>
<version>1.1.0</version>
<type>jdocbook-style</type>
<exclusions>
<exclusion>
<groupId>org.eclipse.wst.css</groupId>
<artifactId>core</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.wst.sse</groupId>
<artifactId>core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.jboss</groupId>
<artifactId>jbossorg-fonts</artifactId>
<version>1.0.0</version>
<type>jdocbook-style</type>
</dependency>
</dependencies>
<configuration>
<sourceDocumentName>master.xml</sourceDocumentName>
<sourceDirectory>${basedir}/src/docbook</sourceDirectory>
<cssResource>
<directory>${basedir}/src/docbook</directory>
<includes>
<include>css/**/*</include>
</includes>
</cssResource>
<imageResource>
<directory>${basedir}/src/docbook</directory>
<includes>
<include>images/**/*</include>
</includes>
</imageResource>
<formats>
<format>
<formatName>html</formatName>
<stylesheetResource>file:///${basedir}/src/docbook/xslt/xhtml.xsl</stylesheetResource>
<finalName>index.html</finalName>
</format>
<format>
<formatName>html_single</formatName>
<stylesheetResource>file:///${basedir}/src/docbook/xslt/xhtml-single.xsl</stylesheetResource>
<finalName>index.html</finalName>
</format>
<format>
<formatName>eclipse</formatName>
<stylesheetResource>file:///${basedir}/src/docbook/xslt/eclipse.xsl</stylesheetResource>
<finalName>index.html</finalName>
</format>
<format>
<formatName>pdf</formatName>
<stylesheetResource>file:///${basedir}/src/docbook/xslt/pdf.xsl</stylesheetResource>
<finalName>netty.pdf</finalName>
</format>
</formats>
<options>
<xincludeSupported>true</xincludeSupported>
<xmlTransformerType>saxon</xmlTransformerType>
<docbookVersion>1.72.0</docbookVersion>
<localeSeparator>-</localeSeparator>
<autoDetectFonts>true</autoDetectFonts>
</options>
</configuration>
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<version>2.2.1</version>


@@ -69,15 +69,6 @@
<include>**/**</include>
</includes>
</fileSet>
<!-- Documentation (Docbook) -->
<fileSet>
<directory>target/docbook/publish/en-US</directory>
<outputDirectory>doc/guide</outputDirectory>
<includes>
<include>**/**</include>
</includes>
</fileSet>
</fileSets>
</assembly>


@@ -1,132 +0,0 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
@import url("documentation.css");
@import url("docnav.css");
@import url("reports.css");
@import url("extensions.css");
@import url("codehighlight.css");
body {
background-image:url(../images/community/bkg_gradient.gif);
background-repeat:repeat-x;
margin:0 auto;
font-family:'Lucida Grande', Geneva, Verdana, Arial, sans-serif;
font-size:12px;
max-width:55em;
padding:0em 2em;
color:#333;
line-height:150%;
text-align:justify;
}
/* Links */
a:link {color:#0066cc;}
a:visited {color:#6699cc;}
div.longdesc-link {
float:right;
color:#999;
}
/* Headings */
h1, h2, h3, h4, h5, h6 {
color:#4a5d75;
line-height:130%;
margin-top:0em;
font-family:'Lucida Grande', Geneva, Verdana, Arial, sans-serif;
background-color:transparent;
}
h1 {
background-image:url(../images/community/title_hdr.png);
background-repeat:no-repeat;
border-top:1px dotted #CCCCCC;
line-height:1.2em;
color:#182737;
font-size:2em;
padding:1.5em;
}
h2 {font-size:1.6em;}
h3 {
font-size:1.3em;
padding-top:0em;
padding-bottom:0em;
}
h4 {
font-size:1.1em;
padding-top:0em;
padding-bottom:0em;
}
h5.formalpara {
font-size:1em;
margin-top:2em;
margin-bottom:.8em;
}
/* Element rules */
hr {
border-collapse:collapse;
border-style:none;
border-top:1px dotted #ccc;
width:100% !important;
}
sup {color:#999;}
/* Custom overrides */
tt, tt *, pre, pre *, code, code * {
font-size: 100% !important;
font-family: "Liberation Mono", "DejaVu Sans Mono", Consolas, Monaco, "Vera Sans Mono", "Lucida Console", "Courier New", monospace !important;
}
pre a:link * {color:#0066cc !important;}
pre a:visited * {color:#6699cc !important;}
.programlisting, .programlistingco pre {
line-height: 160%;
}
.programlisting img {
margin: 0;
padding: 0;
vertical-align: middle;
}
span.co {
position: relative;
left: 0;
top: 0;
margin: 0 0;
padding: 0 0;
height: 17px;
float: right;
}
span.co * {
margin: 0 0;
padding: 0 0;
}


@@ -1,88 +0,0 @@
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!-- Frequently used URLs -->
<!ENTITY Home "http://www.jboss.org/netty/">
<!ENTITY Downloads "&Home;downloads.html">
<!ENTITY Community "&Home;community.html">
<!ENTITY DocHome "http://docs.jboss.org/netty/3.2/">
<!ENTITY API "&DocHome;api/org/jboss/netty/">
<!ENTITY XRef "&DocHome;xref/org/jboss/netty/">
<!-- Types in the bootstrap package -->
<!ENTITY Bootstrap "<ulink url='&API;bootstrap/Bootstrap.html'><classname>Bootstrap</classname></ulink>">
<!ENTITY ClientBootstrap "<ulink url='&API;bootstrap/ClientBootstrap.html'><classname>ClientBootstrap</classname></ulink>">
<!ENTITY ServerBootstrap "<ulink url='&API;bootstrap/ServerBootstrap.html'><classname>ServerBootstrap</classname></ulink>">
<!-- Types in the buffer package -->
<!ENTITY ChannelBuffer "<ulink url='&API;buffer/ChannelBuffer.html'><interfacename>ChannelBuffer</interfacename></ulink>">
<!ENTITY ChannelBuffers "<ulink url='&API;buffer/ChannelBuffers.html'><classname>ChannelBuffers</classname></ulink>">
<!-- Types in the channel package -->
<!ENTITY Channel "<ulink url='&API;channel/Channel.html'><interfacename>Channel</interfacename></ulink>">
<!ENTITY ChannelDownstreamHandler "<ulink url='&API;channel/ChannelDownstreamHandler.html'><interfacename>ChannelDownstreamHandler</interfacename></ulink>">
<!ENTITY ChannelEvent "<ulink url='&API;channel/ChannelEvent.html'><interfacename>ChannelEvent</interfacename></ulink>">
<!ENTITY ChannelFactory "<ulink url='&API;channel/ChannelFactory.html'><interfacename>ChannelFactory</interfacename></ulink>">
<!ENTITY ChannelFuture "<ulink url='&API;channel/ChannelFuture.html'><interfacename>ChannelFuture</interfacename></ulink>">
<!ENTITY ChannelFutureListener "<ulink url='&API;channel/ChannelFutureListener.html'><interfacename>ChannelFutureListener</interfacename></ulink>">
<!ENTITY ChannelHandler "<ulink url='&API;channel/ChannelHandler.html'><interfacename>ChannelHandler</interfacename></ulink>">
<!ENTITY ChannelHandlerContext "<ulink url='&API;channel/ChannelHandlerContext.html'><interfacename>ChannelHandlerContext</interfacename></ulink>">
<!ENTITY ChannelPipeline "<ulink url='&API;channel/ChannelPipeline.html'><interfacename>ChannelPipeline</interfacename></ulink>">
<!ENTITY ChannelPipelineCoverage "<ulink url='&API;channel/ChannelPipelineCoverage.html'><interfacename>ChannelPipelineCoverage</interfacename></ulink>">
<!ENTITY ChannelPipelineFactory "<ulink url='&API;channel/ChannelPipelineFactory.html'><interfacename>ChannelPipelineFactory</interfacename></ulink>">
<!ENTITY Channels "<ulink url='&API;channel/Channels.html'><classname>Channels</classname></ulink>">
<!ENTITY ChannelStateEvent "<ulink url='&API;channel/ChannelStateEvent.html'><interfacename>ChannelStateEvent</interfacename></ulink>">
<!ENTITY ChannelUpstreamHandler "<ulink url='&API;channel/ChannelUpstreamHandler.html'><interfacename>ChannelUpstreamHandler</interfacename></ulink>">
<!ENTITY ExceptionEvent "<ulink url='&API;channel/ExceptionEvent.html'><interfacename>ExceptionEvent</interfacename></ulink>">
<!ENTITY MessageEvent "<ulink url='&API;channel/MessageEvent.html'><interfacename>MessageEvent</interfacename></ulink>">
<!ENTITY SimpleChannelHandler "<ulink url='&API;channel/SimpleChannelHandler.html'><classname>SimpleChannelHandler</classname></ulink>">
<!-- Types in the channel.group package -->
<!ENTITY ChannelGroup "<ulink url='&API;channel/group/ChannelGroup.html'><interfacename>ChannelGroup</interfacename></ulink>">
<!ENTITY ChannelGroupFuture "<ulink url='&API;channel/group/ChannelGroupFuture.html'><interfacename>ChannelGroupFuture</interfacename></ulink>">
<!ENTITY DefaultChannelGroup "<ulink url='&API;channel/group/DefaultChannelGroup.html'><classname>DefaultChannelGroup</classname></ulink>">
<!-- Types in the channel.socket package -->
<!ENTITY ServerSocketChannel "<ulink url='&API;channel/socket/ServerSocketChannel.html'><interfacename>ServerSocketChannel</interfacename></ulink>">
<!ENTITY SocketChannel "<ulink url='&API;channel/socket/SocketChannel.html'><interfacename>SocketChannel</interfacename></ulink>">
<!-- Types in the channel.socket.nio package -->
<!ENTITY NioClientSocketChannelFactory "<ulink url='&API;channel/socket/nio/NioClientSocketChannelFactory.html'><classname>NioClientSocketChannelFactory</classname></ulink>">
<!ENTITY NioServerSocketChannelFactory "<ulink url='&API;channel/socket/nio/NioServerSocketChannelFactory.html'><classname>NioServerSocketChannelFactory</classname></ulink>">
<!-- Types in the handler.codec.frame package -->
<!ENTITY FrameDecoder "<ulink url='&API;handler/codec/frame/FrameDecoder.html'><classname>FrameDecoder</classname></ulink>">
<!-- Types in the handler.codec.protobuf package -->
<!ENTITY ProtobufEncoder "<ulink url='&API;handler/codec/protobuf/ProtobufEncoder.html'><classname>ProtobufEncoder</classname></ulink>">
<!ENTITY ProtobufDecoder "<ulink url='&API;handler/codec/protobuf/ProtobufDecoder.html'><classname>ProtobufDecoder</classname></ulink>">
<!-- Types in the handler.codec.replay package -->
<!ENTITY ReplayingDecoder "<ulink url='&API;handler/codec/replay/ReplayingDecoder.html'><classname>ReplayingDecoder</classname></ulink>">
<!ENTITY VoidEnum "<ulink url='&API;handler/codec/replay/VoidEnum.html'><classname>VoidEnum</classname></ulink>">
<!-- Types in the handler.ssl package -->
<!ENTITY SslHandler "<ulink url='&API;handler/ssl/SslHandler.html'><classname>SslHandler</classname></ulink>">


@@ -1,63 +0,0 @@
<?xml version="1.0" encoding="ISO-8859-1"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "custom.dtd">
%CustomDTD;
]>
<book lang="en">
<bookinfo>
<title>The Netty Project 3.2 User Guide</title>
<subtitle>The Proven Approach to Rapid Network Application Development</subtitle>
<releaseinfo>
<!-- The version.txt file is generated by maven-antrun-plugin. -->
<xi:include href="../../../target/version.txt" parse="text"
xmlns:xi="http://www.w3.org/2001/XInclude"/>
</releaseinfo>
</bookinfo>
<toc/>
<xi:include href="module/preface.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="module/start.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="module/architecture.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<!-- The following chapters are not written yet. -->
<!--
<xi:include href="module/state-mgmt.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="module/codec.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="module/threading.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="module/security.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="module/transport.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="module/appendix.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
-->
</book>


@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE appendix PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "../custom.dtd">
%CustomDTD;
]>
<appendix id="appendix">
<title>Additional Resources</title>
<para>To be written...</para>
</appendix>


@@ -1,346 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "../custom.dtd">
%CustomDTD;
]>
<chapter id="architecture">
<title>Architectural Overview</title>
<mediaobject>
<imageobject>
<imagedata fileref="images/architecture.png" format="PNG" scale="50" scalefit="1" align="center" />
</imageobject>
<textobject>
<phrase>The Architecture Diagram of Netty</phrase>
</textobject>
</mediaobject>
<para>
In this chapter, we will examine what core functionalities are provided in
Netty and how they constitute a complete network application development
stack on top of the core. Please keep this diagram in mind as you read this
chapter.
</para>
<section>
<title>Rich Buffer Data Structure</title>
<para>
Netty uses its own buffer API instead of NIO <classname>ByteBuffer</classname>
to represent a sequence of bytes. This approach has significant advantages
over using <classname>ByteBuffer</classname>. Netty's new buffer type,
&ChannelBuffer;, has been designed from the ground up to address the problems
of <classname>ByteBuffer</classname> and to meet the daily needs of
network application developers. To list a few cool features:
<itemizedlist>
<listitem>
<para>
You can define your own buffer type if necessary.
</para>
</listitem>
<listitem>
<para>
Transparent zero copy is achieved by a built-in composite buffer type.
</para>
</listitem>
<listitem>
<para>
A dynamic buffer type is provided out-of-the-box, whose capacity is
expanded on demand, just like <classname>StringBuffer</classname>.
</para>
</listitem>
<listitem>
<para>
There's no need to call <methodname>flip()</methodname> anymore.
</para>
</listitem>
<listitem>
<para>
It is often faster than <classname>ByteBuffer</classname>.
</para>
</listitem>
</itemizedlist>
</para>
<para>
For more information, please refer to the
<ulink url="&API;buffer/package-summary.html#package_description"><literal>org.jboss.netty.buffer</literal> package description</ulink>.
</para>
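<para>
For illustration only, here is a minimal sketch of the dynamic and composite
buffer types described above; the class name is arbitrary and only a handful
of the available methods are shown:
</para>
<programlisting>import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.util.CharsetUtil;

public class BufferExample {
    public static void main(String[] args) {
        // A dynamic buffer grows on demand; there is no need to call flip()
        // between writing and reading.
        ChannelBuffer dynamic = ChannelBuffers.dynamicBuffer(16);
        dynamic.writeInt(42);
        dynamic.writeBytes(ChannelBuffers.copiedBuffer("Hello", CharsetUtil.UTF_8));
        System.out.println(dynamic.readInt()); // prints 42

        // A composite buffer wraps existing buffers without copying them
        // (transparent zero copy).
        ChannelBuffer header = ChannelBuffers.copiedBuffer("HEADER", CharsetUtil.UTF_8);
        ChannelBuffer message = ChannelBuffers.wrappedBuffer(header, dynamic);
        System.out.println(message.readableBytes()); // 6 + 5 remaining bytes
    }
}</programlisting>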
</section>
<section>
<title>Universal Asynchronous I/O API</title>
<para>
Traditional I/O APIs in Java provide different types and methods for
different transport types. For example,
<classname>java.net.Socket</classname> and
<classname>java.net.DatagramSocket</classname> do not have any common
super type and therefore they have very different ways to perform socket
I/O.
</para>
<para>
This mismatch makes porting a network application from one transport to
another tedious and difficult. The lack of portability between
transports becomes a problem when you need to support additional
transports, as this often entails rewriting the network layer of the
application. Logically, many protocols can run on more than one
transport such as TCP/IP, UDP/IP, SCTP, and serial port communication.
</para>
<para>
To make matters worse, Java's New I/O (NIO) API introduced
incompatibilities with the old blocking I/O (OIO) API and will continue
to do so in the next release, NIO.2 (AIO). Because all these APIs are
different from each other in design and performance characteristics, you
are often forced to determine which API your application will depend on
before you even begin the implementation phase.
</para>
<para>
For instance, you might want to start with OIO because the number of
clients you are going to serve will be very small and writing a socket
server using OIO is much easier than using NIO. However, you are going
to be in trouble when your business grows exponentially and your server
needs to serve tens of thousands of clients simultaneously. You could
start with NIO, but doing so may hinder rapid development by greatly
increasing development time due to the complexity of the NIO Selector
API.
</para>
<para>
Netty has a universal asynchronous I/O interface called a &Channel;, which
abstracts away all operations required for point-to-point communication.
That is, once you have written your application for one Netty transport, your
application can run on other Netty transports. Netty provides a number
of essential transports via one universal API:
<itemizedlist>
<listitem>
<para>
NIO-based TCP/IP transport
(See <literal>org.jboss.netty.channel.socket.nio</literal>),
</para>
</listitem>
<listitem>
<para>
OIO-based TCP/IP transport
(See <literal>org.jboss.netty.channel.socket.oio</literal>),
</para>
</listitem>
<listitem>
<para>OIO-based UDP/IP transport, and</para>
</listitem>
<listitem>
<para>
Local transport (See <literal>org.jboss.netty.channel.local</literal>).
</para>
</listitem>
</itemizedlist>
Switching from one transport to another usually takes just a couple of
lines of changes, such as choosing a different &ChannelFactory;
implementation.
</para>
<para>
Also, you will even be able to take advantage of new transports which have
not been written yet (such as a serial port communication transport), again
by replacing just a couple of lines of constructor calls. Moreover, you can
write your own transport by extending the core API.
</para>
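<para>
For example, a rough sketch of choosing between the stock NIO and OIO TCP
client transports could look like the following; the helper class and method
names are illustrative only:
</para>
<programlisting>import java.util.concurrent.Executors;

import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
import org.jboss.netty.channel.socket.oio.OioClientSocketChannelFactory;

public class TransportChooser {
    public static ClientBootstrap newBootstrap(boolean useNio) {
        // The factory choice is the only transport-specific part; the rest of
        // the application keeps using the universal Channel API.
        ChannelFactory factory;
        if (useNio) {
            factory = new NioClientSocketChannelFactory(
                    Executors.newCachedThreadPool(), Executors.newCachedThreadPool());
        } else {
            factory = new OioClientSocketChannelFactory(Executors.newCachedThreadPool());
        }
        return new ClientBootstrap(factory);
    }
}</programlisting>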
</section>
<section>
<title>Event Model based on the Interceptor Chain Pattern</title>
<para>
A well-defined and extensible event model is a must for an event-driven
application. Netty has a well-defined event model focused on I/O. It
also allows you to implement your own event type without breaking the
existing code because each event type is distinguished from another by
a strict type hierarchy. This is another differentiator against other
frameworks. Many NIO frameworks have no notion of an event model, or only
a very limited one. If they offer extension at all, they often break the
existing code when you try to add custom event types.
</para>
<para>
A &ChannelEvent; is handled by a list of &ChannelHandler;s in a
&ChannelPipeline;. The pipeline implements an advanced form of the
<ulink url="http://java.sun.com/blueprints/corej2eepatterns/Patterns/InterceptingFilter.html">Intercepting Filter</ulink>
pattern to give a user full control over how an event is handled and how
the handlers in the pipeline interact with each other. For example,
you can define what to do when data is read from a socket:
</para>
<programlisting>public class MyReadHandler extends &SimpleChannelHandler; {
public void messageReceived(&ChannelHandlerContext; ctx, &MessageEvent; evt) {
Object message = evt.getMessage();
// Do something with the received message.
...
// And forward the event to the next handler.
ctx.sendUpstream(evt);
}
}</programlisting>
<para>
You can also define what to do when a handler receives a write request:
</para>
<programlisting>public class MyWriteHandler extends &SimpleChannelHandler; {
public void writeRequested(&ChannelHandlerContext; ctx, &MessageEvent; evt) {
Object message = evt.getMessage();
// Do something with the message to be written.
...
// And forward the event to the next handler.
ctx.sendDownstream(evt);
}
}</programlisting>
<para>
For more information on the event model, please refer to the
API documentation of &ChannelEvent; and &ChannelPipeline;.
</para>
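<para>
To show how the two handlers above fit together, here is a minimal pipeline
factory; it is only a sketch, and the handler names are arbitrary:
</para>
<programlisting>import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;

public class MyPipelineFactory implements ChannelPipelineFactory {
    public ChannelPipeline getPipeline() throws Exception {
        ChannelPipeline pipeline = Channels.pipeline();
        // Upstream events (e.g. messageReceived) travel from the first handler
        // to the last; downstream events (e.g. writeRequested) travel from the
        // last handler to the first.
        pipeline.addLast("reader", new MyReadHandler());
        pipeline.addLast("writer", new MyWriteHandler());
        return pipeline;
    }
}</programlisting>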
</section>
<section>
<title>Advanced Components for More Rapid Development</title>
<para>
On top of the core components mentioned above, which already enable the
implementation of all types of network applications, Netty provides a set
of advanced features to accelerate the pace of development even more.
</para>
<section>
<title>Codec framework</title>
<para>
As demonstrated in <xref linkend="start.pojo"/>, it is always a good
idea to separate a protocol codec from business logic. However, there
are some complications when implementing this idea from scratch. You
have to deal with the fragmentation of messages. Some protocols are
multi-layered (i.e. built on top of other lower level protocols). Some
are too complicated to be implemented in a single state machine.
</para>
<para>
Consequently, a good network application framework should provide an
extensible, reusable, unit-testable, and multi-layered codec framework
that generates maintainable user codecs.
</para>
<para>
Netty provides a number of basic and advanced codecs to address most
issues you will encounter when you write a protocol codec, regardless of
whether it is simple or complex, binary or text-based.
</para>
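<para>
For illustration only, the following sketch shows a &FrameDecoder; that splits
the inbound stream into frames prefixed by a 4-byte length field; the class
name and the frame format are assumptions made for this example:
</para>
<programlisting>import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.handler.codec.frame.FrameDecoder;

public class LengthPrefixedFrameDecoder extends FrameDecoder {
    @Override
    protected Object decode(ChannelHandlerContext ctx, Channel channel,
                            ChannelBuffer buffer) throws Exception {
        // Wait until the 4-byte length header has arrived.
        if (4 > buffer.readableBytes()) {
            return null;
        }
        buffer.markReaderIndex();
        int length = buffer.readInt();
        // Wait until the whole frame has arrived; decode() is called again
        // when more data is received.
        if (length > buffer.readableBytes()) {
            buffer.resetReaderIndex();
            return null;
        }
        // Return one complete frame; it is passed to the next upstream handler.
        return buffer.readBytes(length);
    }
}</programlisting>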
</section>
<section>
<title>SSL / TLS Support</title>
<para>
Unlike old blocking I/O, it is a non-trivial task to support SSL in NIO.
You can't simply wrap a stream to encrypt or decrypt data but you have
to use <classname>javax.net.ssl.SSLEngine</classname>.
<classname>SSLEngine</classname> is a state machine which is as complex
as SSL itself. You have to manage all possible states such as cipher
suite and encryption key negotiation (or re-negotiation), certificate
exchange, and validation. Moreover, <classname>SSLEngine</classname> is
not even completely thread-safe, contrary to what one might expect.
</para>
<para>
In Netty, &SslHandler; takes care of all the gory details and pitfalls
of <classname>SSLEngine</classname>. All you need to do is to configure
the &SslHandler; and insert it into your &ChannelPipeline;. It also
allows you to implement advanced features like
<ulink url="http://en.wikipedia.org/wiki/Starttls">StartTLS</ulink>
very easily.
</para>
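<para>
A minimal sketch of that configuration, assuming an
<classname>SSLContext</classname> has already been initialized elsewhere in
the application, might be:
</para>
<programlisting>import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.handler.ssl.SslHandler;

public class SecurePipeline {
    public static ChannelPipeline newServerPipeline(SSLContext sslContext) {
        ChannelPipeline pipeline = Channels.pipeline();
        SSLEngine engine = sslContext.createSSLEngine();
        engine.setUseClientMode(false);
        // The SslHandler comes first so that it sees the raw bytes of the
        // connection before any other handler does.
        pipeline.addFirst("ssl", new SslHandler(engine));
        // Add the protocol handlers behind the SslHandler here.
        return pipeline;
    }
}</programlisting>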
</section>
<section>
<title>HTTP Implementation</title>
<para>
HTTP is definitely the most popular protocol on the Internet. There are
already a number of HTTP implementations, such as Servlet containers.
So why does Netty have HTTP support on top of its core?
</para>
<para>
Netty's HTTP support is very different from the existing HTTP libraries.
It gives you complete control over how HTTP messages are exchanged at a
low level. Because it is basically the combination of an HTTP codec and
HTTP message classes, there is no restriction such as an enforced thread
model. That is, you can write your own HTTP client or server that works
exactly the way you want. You have full control over everything that's
in the HTTP specification, including the thread model, connection life
cycle, and chunked encoding.
</para>
<para>
Thanks to its highly customizable nature, you can write very efficient
HTTP servers such as the following (a minimal codec setup is sketched
after this list):
<itemizedlist>
<listitem>
<para>
Chat server that requires persistent connections and server push
technology (e.g. <ulink url="http://en.wikipedia.org/wiki/Comet_%28programming%29">Comet</ulink>
and <ulink url="http://en.wikipedia.org/wiki/WebSockets">WebSockets</ulink>)
</para>
</listitem>
<listitem>
<para>
Media streaming server that needs to keep the connection open
until the whole media is streamed (e.g. 2 hours of video)
</para>
</listitem>
<listitem>
<para>
File server that allows the uploading of large files without
memory pressure (e.g. uploading 1GB per request)
</para>
</listitem>
<listitem>
<para>
Scalable mash-up client that connects to tens of thousands of 3rd
party web services asynchronously
</para>
</listitem>
</itemizedlist>
</para>
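<para>
As a rough sketch (not a complete server), the codec setup shared by all of
the servers above could look like this; the business-logic handler is left
out because it depends entirely on the application:
</para>
<programlisting>import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.codec.http.HttpResponseEncoder;

public class HttpServerPipelineFactory implements ChannelPipelineFactory {
    public ChannelPipeline getPipeline() throws Exception {
        ChannelPipeline pipeline = Channels.pipeline();
        pipeline.addLast("decoder", new HttpRequestDecoder());
        // Optional: aggregate a request and its chunks into a single message
        // (1 MiB limit here). A streaming or Comet server would omit this
        // handler to keep full control over chunked transfers.
        pipeline.addLast("aggregator", new HttpChunkAggregator(1048576));
        pipeline.addLast("encoder", new HttpResponseEncoder());
        // Add your own business-logic handler last, e.g. a
        // SimpleChannelUpstreamHandler that builds and writes the responses.
        return pipeline;
    }
}</programlisting>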
</section>
<section>
<title>Google Protocol Buffer Integration</title>
<para>
<ulink url="http://code.google.com/apis/protocolbuffers/docs/overview.html">Google Protocol Buffers</ulink>
are an ideal solution for the rapid implementation of highly efficient
binary protocols that evolve over time. With &ProtobufEncoder; and
&ProtobufDecoder;, you can turn the message classes generated by the
Google Protocol Buffers Compiler (protoc) into a Netty codec. Please take
a look at the
<ulink url="&XRef;example/localtime/package-summary.html">'LocalTime' example</ulink>
that shows how easily you can create a high-performing binary protocol
client and server from the
<ulink url="http://anonsvn.jboss.org/repos/netty/trunk/src/main/java/org/jboss/netty/example/localtime/LocalTimeProtocol.proto">sample protocol definition</ulink>.
</para>
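<para>
A sketch of the resulting pipeline is shown below; it assumes the varint32
framing helpers that ship with Netty and takes the protoc-generated prototype
message as a constructor argument, since its name depends on your own
<literal>.proto</literal> definition:
</para>
<programlisting>import com.google.protobuf.MessageLite;

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.handler.codec.protobuf.ProtobufDecoder;
import org.jboss.netty.handler.codec.protobuf.ProtobufEncoder;
import org.jboss.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
import org.jboss.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;

public class ProtobufPipelineFactory implements ChannelPipelineFactory {
    // The default instance of a protoc-generated message,
    // e.g. MyRequest.getDefaultInstance() (a hypothetical name).
    private final MessageLite prototype;

    public ProtobufPipelineFactory(MessageLite prototype) {
        this.prototype = prototype;
    }

    public ChannelPipeline getPipeline() throws Exception {
        ChannelPipeline pipeline = Channels.pipeline();
        // Frame the stream first, then decode each frame into a message.
        pipeline.addLast("frameDecoder", new ProtobufVarint32FrameDecoder());
        pipeline.addLast("protobufDecoder", new ProtobufDecoder(prototype));
        pipeline.addLast("frameEncoder", new ProtobufVarint32LengthFieldPrepender());
        pipeline.addLast("protobufEncoder", new ProtobufEncoder());
        return pipeline;
    }
}</programlisting>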
</section>
</section>
<section>
<title>Summary</title>
<para>
In this chapter, we reviewed the overall architecture of Netty from the
feature standpoint. Netty has a simple, yet powerful architecture.
It is composed of three components - buffer, channel, and event model -
and all advanced features are built on top of the three core components.
Once you understand how these three work together, it should not be
difficult to understand the more advanced features which were covered
briefly in this chapter.
</para>
<para>
You might still have unanswered questions about what the overall
architecture looks like exactly and how each of the features works
with the others. If so, it is a good idea to
<ulink url="&Community;">talk to us</ulink> to improve this guide.
</para>
</section>
</chapter>


@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "../custom.dtd">
%CustomDTD;
]>
<chapter id="codec">
<title>Encoders and Decoders</title>
<para>To be written...</para>
</chapter>


@@ -1,87 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE preface PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "../custom.dtd">
%CustomDTD;
]>
<preface id="preface">
<title>Preface</title>
<section>
<title>The Problem</title>
<para>
Nowadays we use general-purpose applications or libraries to communicate
with each other. For example, we often use an HTTP client library to
retrieve information from a web server and to invoke remote procedure
calls via web services.
</para>
<para>
However, a general-purpose protocol or its implementation sometimes
does not scale very well. Just as we don't use a general-purpose
HTTP server to exchange huge files, e-mail messages, and near-realtime
messages such as financial information and multiplayer game data,
what is required is a highly optimized protocol implementation
dedicated to a special purpose. For example, you might want to
implement an HTTP server which is optimized for AJAX-based chat
applications, media streaming, or large file transfers. You might even
want to design and implement a whole new protocol which is precisely
tailored to your needs.
</para>
<para>
Another inevitable case is when you have to deal with a legacy
proprietary protocol to ensure interoperability with an old system.
What matters in this case is how quickly we can implement that protocol
without sacrificing the stability and performance of the resulting
application.
</para>
</section>
<section>
<title>The Solution</title>
<para>
<firstterm><ulink url="&Home;">The Netty project</ulink></firstterm> is
an effort to provide an asynchronous event-driven network application
framework and tooling for the rapid development of maintainable
high-performance &middot; high-scalability protocol servers and clients.
</para>
<para>
In other words, Netty is a NIO client server framework which enables
quick and easy development of network applications such as protocol
servers and clients. It greatly simplifies and streamlines network
programming such as TCP and UDP socket server development.
</para>
<para>
'Quick and easy' does not mean that a resulting application will suffer
from maintainability or performance issues. Netty has been designed
carefully with the experience gained from the implementation of many
protocols such as FTP, SMTP, HTTP, and various binary and text-based
legacy protocols. As a result, Netty has succeeded in finding a way to
achieve ease of development, performance, stability, and flexibility
without compromise.
</para>
<para>
Some users might already have found other network application
frameworks that claim to have the same advantages, and you might want
to ask what makes Netty so different from them. The answer is the
philosophy it is built on. Netty is designed to give you the most
comfortable experience both in terms of the API and the implementation
from day one. It is not something tangible, but you will realize that
this philosophy makes your life much easier as you read this guide
and play with Netty.
</para>
</section>
</preface>


@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "../custom.dtd">
%CustomDTD;
]>
<chapter id="security">
<title>Securing the Wire</title>
<para>To be written...</para>
</chapter>

File diff suppressed because it is too large


@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "../custom.dtd">
%CustomDTD;
]>
<chapter id="state-mgmt">
<title>State Management</title>
<para>To be written...</para>
</chapter>


@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "../custom.dtd">
%CustomDTD;
]>
<chapter id="chapter-id">
<title>Chapter title</title>
<para>To be written...</para>
</chapter>


@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "../custom.dtd">
%CustomDTD;
]>
<chapter id="threading">
<title>Thread Management</title>
<para>To be written...</para>
</chapter>


@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd" [
<!ENTITY % CustomDTD SYSTEM "../custom.dtd">
%CustomDTD;
]>
<chapter id="transport">
<title>Transports</title>
<para>To be written...</para>
</chapter>


@@ -1,25 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:import href="classpath:/xslt/org/jboss/eclipse.xsl" />
<xsl:param name="siteHref" select="'http://www.jboss.org/netty/'"/>
<xsl:param name="docHref" select="'http://www.jboss.org/netty/documentation.html'"/>
<xsl:param name="siteLinkText" select="'JBoss.org: Netty - The Client Server Framework and Tools'"/>
<xsl:param name="callout.defaultcolumn">1</xsl:param>
</xsl:stylesheet>


@@ -1,153 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:fo="http://www.w3.org/1999/XSL/Format"
version="1.0">
<xsl:import href="classpath:/xslt/org/jboss/pdf.xsl" />
<xsl:import href="classpath:/xslt/org/jboss/xslt/fonts/pdf/fonts.xsl" />
<!-- Override the default font settings -->
<xsl:template name="pickfont-serif">
<xsl:variable name="font">
<xsl:call-template name="pickfont"/>
</xsl:variable>
<xsl:copy-of select="$font"/>
<xsl:text>Liberation Serif,serif</xsl:text>
</xsl:template>
<xsl:param name="title.font.family">
<xsl:variable name="font">
<xsl:call-template name="pickfont-serif"/>
</xsl:variable>
<xsl:message>
<xsl:text>Setting 'title.font.family' param=</xsl:text><xsl:copy-of select="$font"/>
</xsl:message>
<xsl:copy-of select="$font"/>
</xsl:param>
<xsl:param name="body.font.family">
<xsl:variable name="font">
<xsl:call-template name="pickfont-serif"/>
</xsl:variable>
<xsl:message>
<xsl:text>Setting 'body.font.family' param=</xsl:text><xsl:copy-of select="$font"/>
</xsl:message>
<xsl:copy-of select="$font"/>
</xsl:param>
<xsl:param name="monospace.font.family">
<xsl:variable name="font">
<xsl:call-template name="pickfont-mono"/>
</xsl:variable>
<xsl:message>
<xsl:text>Setting 'monospace.font.family' param=</xsl:text><xsl:copy-of select="$font"/>
</xsl:message>
<xsl:copy-of select="$font"/>
</xsl:param>
<xsl:param name="sans.font.family">
<xsl:variable name="font">
<xsl:call-template name="pickfont-sans"/>
</xsl:variable>
<xsl:message>
<xsl:text>Setting 'sans.font.family' param=</xsl:text><xsl:copy-of select="$font"/>
</xsl:message>
<xsl:copy-of select="$font"/>
</xsl:param>
<xsl:param name="programlisting.font">
<xsl:variable name="font">
<xsl:call-template name="pickfont-mono"/>
</xsl:variable>
<xsl:message>
<xsl:text>Setting 'programlisting.font' param=</xsl:text><xsl:copy-of select="$font"/>
</xsl:message>
<xsl:copy-of select="$font"/>
</xsl:param>
<xsl:param name="programlisting.font.size" select="'85%'" />
<!-- Remove the blank pages between the chapters -->
<xsl:param name="double.sided" select="0" />
<!-- Use SVG for callout images instead of PNG -->
<xsl:param name="callout.graphics" select="1" />
<xsl:param name="callout.graphics.extension" select="'.svg'" />
<!-- Hide URL -->
<xsl:param name="ulink.show" select="0"/>
<!-- Don't use italic font for links -->
<xsl:attribute-set name="xref.properties">
<xsl:attribute name="font-style">normal</xsl:attribute>
</xsl:attribute-set>
<!-- Decrease the link font size in the program listing -->
<xsl:attribute-set name="monospace.properties">
<xsl:attribute name="font-size">1em</xsl:attribute>
<xsl:attribute name="font-family">
<xsl:value-of select="$monospace.font.family"/>
</xsl:attribute>
</xsl:attribute-set>
<!-- Add some spacing between callout listing items -->
<xsl:template match="callout">
<xsl:variable name="id"><xsl:call-template name="object.id"/></xsl:variable>
<fo:list-item id="{$id}" space-before="1em">
<fo:list-item-label end-indent="label-end()">
<fo:block>
<xsl:call-template name="callout.arearefs">
<xsl:with-param name="arearefs" select="@arearefs"/>
</xsl:call-template>
</fo:block>
</fo:list-item-label>
<fo:list-item-body start-indent="body-start()">
<fo:block padding-top="0.2em">
<xsl:apply-templates/>
</fo:block>
</fo:list-item-body>
</fo:list-item>
</xsl:template>
<!-- Slight baseline-shift for callouts in the program listing -->
<xsl:template name="callout-bug">
<xsl:param name="conum" select='1'/>
<xsl:choose>
<xsl:when test="$conum &lt;= $callout.graphics.number.limit">
<xsl:variable name="filename"
select="concat($callout.graphics.path, $conum,
$callout.graphics.extension)"/>
<fo:external-graphic content-width="{$callout.icon.size}"
width="{$callout.icon.size}"
padding="0.0em" margin="0.0em"
baseline-shift="-0.375em">
<xsl:attribute name="src">
<xsl:choose>
<xsl:when test="$passivetex.extensions != 0
or $fop.extensions != 0
or $arbortext.extensions != 0">
<xsl:value-of select="$filename"/>
</xsl:when>
<xsl:otherwise>
<xsl:text>url(</xsl:text>
<xsl:value-of select="$filename"/>
<xsl:text>)</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:attribute>
</fo:external-graphic>
</xsl:when>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>


@@ -1,25 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:import href="classpath:/xslt/org/jboss/xhtml-single.xsl" />
<xsl:param name="siteHref" select="'http://www.jboss.org/netty/'"/>
<xsl:param name="docHref" select="'http://www.jboss.org/netty/documentation.html'"/>
<xsl:param name="siteLinkText" select="'JBoss.org: Netty - The Client Server Framework and Tools'"/>
<xsl:param name="callout.defaultcolumn">1</xsl:param>
</xsl:stylesheet>


@@ -1,25 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:import href="classpath:/xslt/org/jboss/xhtml.xsl" />
<xsl:param name="siteHref" select="'http://www.jboss.org/netty/'"/>
<xsl:param name="docHref" select="'http://www.jboss.org/netty/documentation.html'"/>
<xsl:param name="siteLinkText" select="'JBoss.org: Netty - The Client Server Framework and Tools'"/>
<xsl:param name="callout.defaultcolumn">1</xsl:param>
</xsl:stylesheet>


@@ -57,6 +57,7 @@ public abstract class AbstractChannel implements Channel {
/** Cache for the string representation of this channel */
private boolean strValConnected;
private String strVal;
private volatile Object attachment;
/**
* Creates a new instance.
@@ -272,6 +273,16 @@ public abstract class AbstractChannel implements Channel {
return Channels.write(this, message, remoteAddress);
}
@Override
public Object getAttachment() {
return attachment;
}
@Override
public void setAttachment(Object attachment) {
this.attachment = attachment;
}
/**
* Returns the {@link String} representation of this channel. The returned
* string contains the {@linkplain #getId() ID}, {@linkplain #getLocalAddress() local address},


@@ -362,4 +362,19 @@ public interface Channel extends Comparable<Channel> {
* {@code interestOps} change request succeeds or fails
*/
ChannelFuture setReadable(boolean readable);
/**
* Retrieves an object which is {@link #setAttachment(Object) attached} to
* this {@link Channel}.
*
* @return {@code null} if no object was attached or
* {@code null} was attached
*/
Object getAttachment();
/**
* Attaches an object to this {@link Channel} to store stateful information.
*
*/
void setAttachment(Object attachment);
}
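
For context, a minimal sketch of how a handler might use the new attachment accessors in place of the now-deprecated ChannelLocal; the handler name and the stored state are illustrative only:

import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;

public class SessionStateHandler extends SimpleChannelHandler {
    @Override
    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
        // Store per-channel state directly on the Channel instead of in a ChannelLocal.
        Object state = ctx.getChannel().getAttachment();
        if (state == null) {
            state = new Object(); // placeholder for real per-session state
            ctx.getChannel().setAttachment(state);
        }
        super.messageReceived(ctx, e);
    }
}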


@@ -32,8 +32,12 @@ import org.jboss.netty.util.internal.ConcurrentIdentityWeakKeyHashMap;
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @deprecated Use {@link Channel#setAttachment(Object)} and {@link Channel#getAttachment()}
*
* @apiviz.stereotype utility
*
*/
@Deprecated
public class ChannelLocal<T> {
private final ConcurrentMap<Channel, T> map =


@@ -32,7 +32,7 @@ import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.DefaultChannelConfig;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
import org.jboss.netty.util.internal.ThreadLocalBoolean;
/**
@@ -52,7 +52,7 @@ final class DefaultLocalChannel extends AbstractChannel implements LocalChannel
private final ChannelConfig config;
private final ThreadLocalBoolean delivering = new ThreadLocalBoolean();
final Queue<MessageEvent> writeBuffer = new LinkedTransferQueue<MessageEvent>();
final Queue<MessageEvent> writeBuffer = QueueFactory.createQueue(MessageEvent.class);
volatile DefaultLocalChannel pairedChannel;
volatile LocalAddress localAddress;


@@ -0,0 +1,158 @@
/*
* Copyright 2011 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.nio;
import java.util.Collection;
import java.util.Iterator;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.util.internal.QueueFactory;
/**
*
*
*
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://www.murkycloud.com/">Norman Maurer</a>
*
*/
abstract class AbstractWriteRequestQueue implements BlockingQueue<MessageEvent> {
protected final BlockingQueue<MessageEvent> queue;
public AbstractWriteRequestQueue() {
this.queue = QueueFactory.createQueue(MessageEvent.class);
}
@Override
public MessageEvent remove() {
return queue.remove();
}
@Override
public MessageEvent element() {
return queue.element();
}
@Override
public MessageEvent peek() {
return queue.peek();
}
@Override
public int size() {
return queue.size();
}
@Override
public boolean isEmpty() {
return queue.isEmpty();
}
@Override
public Iterator<MessageEvent> iterator() {
return queue.iterator();
}
@Override
public Object[] toArray() {
return queue.toArray();
}
@Override
public <T> T[] toArray(T[] a) {
return queue.toArray(a);
}
@Override
public boolean containsAll(Collection<?> c) {
return queue.containsAll(c);
}
@Override
public boolean addAll(Collection<? extends MessageEvent> c) {
return queue.addAll(c);
}
@Override
public boolean removeAll(Collection<?> c) {
return queue.removeAll(c);
}
@Override
public boolean retainAll(Collection<?> c) {
return queue.retainAll(c);
}
@Override
public void clear() {
queue.clear();
}
@Override
public boolean add(MessageEvent e) {
return queue.add(e);
}
@Override
public void put(MessageEvent e) throws InterruptedException {
queue.put(e);
}
@Override
public boolean offer(MessageEvent e, long timeout, TimeUnit unit) throws InterruptedException {
return queue.offer(e, timeout, unit);
}
@Override
public MessageEvent take() throws InterruptedException {
return queue.take();
}
@Override
public MessageEvent poll(long timeout, TimeUnit unit) throws InterruptedException {
return queue.poll(timeout, unit);
}
@Override
public int remainingCapacity() {
return queue.remainingCapacity();
}
@Override
public boolean remove(Object o) {
return queue.remove(o);
}
@Override
public boolean contains(Object o) {
return queue.contains(o);
}
@Override
public int drainTo(Collection<? super MessageEvent> c) {
return queue.drainTo(c);
}
@Override
public int drainTo(Collection<? super MessageEvent> c, int maxElements) {
return queue.drainTo(c, maxElements);
}
}
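
For context, a minimal sketch of how callers use the new QueueFactory indirection instead of instantiating LinkedTransferQueue directly; the class name below is illustrative and the factory's selection logic is not part of this diff:

import java.util.concurrent.BlockingQueue;

import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.util.internal.QueueFactory;

public class QueueFactoryUsage {
    public static void main(String[] args) {
        // QueueFactory picks a suitable BlockingQueue implementation at runtime,
        // so callers no longer hard-code LinkedTransferQueue.
        BlockingQueue<MessageEvent> queue = QueueFactory.createQueue(MessageEvent.class);
        System.out.println(queue.isEmpty());
    }
}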


@@ -43,7 +43,7 @@ import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.DeadLockProofWorker;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
/**
*
@@ -183,7 +183,7 @@ class NioClientSocketPipelineSink extends AbstractChannelSink {
private boolean started;
private final AtomicBoolean wakenUp = new AtomicBoolean();
private final Object startStopLock = new Object();
private final Queue<Runnable> registerTaskQueue = new LinkedTransferQueue<Runnable>();
private final Queue<Runnable> registerTaskQueue = QueueFactory.createQueue(Runnable.class);
Boss() {
}


@@ -38,7 +38,7 @@ import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.socket.DatagramChannelConfig;
import org.jboss.netty.channel.socket.nio.SocketSendBufferPool.SendBuffer;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.LegacyLinkedTransferQueue;
import org.jboss.netty.util.internal.ThreadLocalBoolean;
/**
@@ -247,26 +247,22 @@ class NioDatagramChannel extends AbstractChannel
}
/**
* {@link WriteRequestQueue} is an extension of {@link LinkedTransferQueue}
* {@link WriteRequestQueue} is an extension of {@link AbstractWriteRequestQueue}
* that adds support for highWaterMark checking of the write buffer size.
*/
private final class WriteRequestQueue extends
LinkedTransferQueue<MessageEvent> {
private static final long serialVersionUID = 5057413071460766376L;
AbstractWriteRequestQueue {
private final ThreadLocalBoolean notifying = new ThreadLocalBoolean();
WriteRequestQueue() {
}
/**
* This method first delegates to {@link LinkedTransferQueue#offer(Object)} and
* This method first delegates to {@link LegacyLinkedTransferQueue#offer(Object)} and
adds support for keeping track of the size of this write buffer.
*/
@Override
public boolean offer(MessageEvent e) {
boolean success = super.offer(e);
boolean success = queue.offer(e);
assert success;
int messageSize = getMessageSize(e);
@@ -287,12 +283,12 @@ class NioDatagramChannel extends AbstractChannel
}
/**
* This method first delegates to {@link LinkedTransferQueue#poll()} and
* This method first delegates to {@link LegacyLinkedTransferQueue#poll()} and
adds support for keeping track of the size of this write buffer queue.
*/
@Override
public MessageEvent poll() {
MessageEvent e = super.poll();
MessageEvent e = queue.poll();
if (e != null) {
int messageSize = getMessageSize(e);
int newWriteBufferSize = writeBufferSize.addAndGet(-messageSize);


@@ -45,7 +45,7 @@ import org.jboss.netty.channel.ReceiveBufferSizePredictor;
import org.jboss.netty.channel.socket.nio.SocketSendBufferPool.SendBuffer;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
/**
* A class responsible for registering channels with {@link Selector}.
@@ -105,12 +105,12 @@ class NioDatagramWorker implements Runnable {
/**
* Queue of {@link ChannelRegistionTask}s
*/
private final Queue<Runnable> registerTaskQueue = new LinkedTransferQueue<Runnable>();
private final Queue<Runnable> registerTaskQueue = QueueFactory.createQueue(Runnable.class);
/**
* Queue of WriteTasks
*/
private final Queue<Runnable> writeTaskQueue = new LinkedTransferQueue<Runnable>();
private final Queue<Runnable> writeTaskQueue = QueueFactory.createQueue(Runnable.class);
private volatile int cancelledKeys; // should use AtomicInteger but we just need approximation


@@ -33,7 +33,6 @@ import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.socket.nio.SocketSendBufferPool.SendBuffer;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.ThreadLocalBoolean;
/**
@ -196,9 +195,7 @@ class NioSocketChannel extends AbstractChannel
}
}
private final class WriteRequestQueue extends LinkedTransferQueue<MessageEvent> {
private static final long serialVersionUID = -246694024103520626L;
private final class WriteRequestQueue extends AbstractWriteRequestQueue {
private final ThreadLocalBoolean notifying = new ThreadLocalBoolean();
@ -207,7 +204,7 @@ class NioSocketChannel extends AbstractChannel
@Override
public boolean offer(MessageEvent e) {
boolean success = super.offer(e);
boolean success = queue.offer(e);
assert success;
int messageSize = getMessageSize(e);
@ -229,7 +226,7 @@ class NioSocketChannel extends AbstractChannel
@Override
public MessageEvent poll() {
MessageEvent e = super.poll();
MessageEvent e = queue.poll();
if (e != null) {
int messageSize = getMessageSize(e);
int newWriteBufferSize = writeBufferSize.addAndGet(-messageSize);

View File

@ -47,7 +47,7 @@ import org.jboss.netty.channel.socket.nio.SocketSendBufferPool.SendBuffer;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.DeadLockProofWorker;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
/**
*
@ -70,8 +70,8 @@ class NioWorker implements Runnable {
private final AtomicBoolean wakenUp = new AtomicBoolean();
private final ReadWriteLock selectorGuard = new ReentrantReadWriteLock();
private final Object startStopLock = new Object();
private final Queue<Runnable> registerTaskQueue = new LinkedTransferQueue<Runnable>();
private final Queue<Runnable> writeTaskQueue = new LinkedTransferQueue<Runnable>();
private final Queue<Runnable> registerTaskQueue = QueueFactory.createQueue(Runnable.class);
private final Queue<Runnable> writeTaskQueue = QueueFactory.createQueue(Runnable.class);
private volatile int cancelledKeys; // should use AtomicInteger but we just need approximation
private final SocketReceiveBufferPool recvBufferPool = new SocketReceiveBufferPool();

View File

@ -23,7 +23,7 @@ import org.jboss.netty.channel.ChannelDownstreamHandler;
import org.jboss.netty.channel.ChannelEvent;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelUpstreamHandler;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
/**
* A combination of {@link HttpRequestEncoder} and {@link HttpResponseDecoder}
@ -46,7 +46,7 @@ public class HttpClientCodec implements ChannelUpstreamHandler,
ChannelDownstreamHandler {
/** A queue that is used for correlating a request and a response. */
final Queue<HttpMethod> queue = new LinkedTransferQueue<HttpMethod>();
final Queue<HttpMethod> queue = QueueFactory.createQueue(HttpMethod.class);
/** If true, decoding stops (i.e. pass-through) */
volatile boolean done;
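For context, a small usage sketch (not part of this commit): HttpClientCodec is installed once per connection, and the queue shown above records the HttpMethod of every outgoing request so the decoder can interpret the matching response correctly.

    import org.jboss.netty.channel.ChannelPipeline;
    import org.jboss.netty.channel.Channels;
    import org.jboss.netty.handler.codec.http.HttpClientCodec;

    public class HttpClientPipelineExample {
        public static ChannelPipeline newPipeline() {
            ChannelPipeline pipeline = Channels.pipeline();
            // One codec instance per connection; it combines the request encoder and
            // the response decoder and correlates them through the method queue.
            pipeline.addLast("codec", new HttpClientCodec());
            return pipeline;
        }
    }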

View File

@ -24,7 +24,7 @@ import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
import org.jboss.netty.handler.codec.embedder.EncoderEmbedder;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
/**
* Encodes the content of the outbound {@link HttpResponse} and {@link HttpChunk}.
@ -53,7 +53,7 @@ import org.jboss.netty.util.internal.LinkedTransferQueue;
*/
public abstract class HttpContentEncoder extends SimpleChannelHandler {
private final Queue<String> acceptEncodingQueue = new LinkedTransferQueue<String>();
private final Queue<String> acceptEncodingQueue = QueueFactory.createQueue(String.class);
private volatile EncoderEmbedder<ChannelBuffer> encoder;
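A hedged sketch of how an HttpContentEncoder subclass is typically installed; HttpContentCompressor is the concrete subclass shipped with Netty, and the acceptEncodingQueue above remembers each request's Accept-Encoding value so the matching response can be encoded accordingly.

    import org.jboss.netty.channel.ChannelPipeline;
    import org.jboss.netty.channel.Channels;
    import org.jboss.netty.handler.codec.http.HttpContentCompressor;
    import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
    import org.jboss.netty.handler.codec.http.HttpResponseEncoder;

    public class HttpServerPipelineExample {
        public static ChannelPipeline newPipeline() {
            ChannelPipeline pipeline = Channels.pipeline();
            pipeline.addLast("decoder", new HttpRequestDecoder());
            pipeline.addLast("encoder", new HttpResponseEncoder());
            // Compresses response content based on the queued Accept-Encoding values.
            pipeline.addLast("deflater", new HttpContentCompressor());
            return pipeline;
        }
    }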
/**

View File

@ -37,6 +37,7 @@ import org.jboss.netty.channel.WriteCompletionEvent;
import org.jboss.netty.util.DefaultObjectSizeEstimator;
import org.jboss.netty.util.ObjectSizeEstimator;
import org.jboss.netty.util.internal.ConcurrentIdentityHashMap;
import org.jboss.netty.util.internal.QueueFactory;
import org.jboss.netty.util.internal.SharedResourceMisuseDetector;
/**
@ -212,7 +213,7 @@ public class MemoryAwareThreadPoolExecutor extends ThreadPoolExecutor {
ThreadFactory threadFactory) {
super(corePoolSize, corePoolSize, keepAliveTime, unit,
new LinkedTransferQueue<Runnable>(), threadFactory, new NewThreadRunsPolicy());
QueueFactory.createQueue(Runnable.class), threadFactory, new NewThreadRunsPolicy());
if (objectSizeEstimator == null) {
throw new NullPointerException("objectSizeEstimator");
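The constructor change above only swaps the work queue handed to the ThreadPoolExecutor super-constructor. The same pattern in isolation looks roughly like this; class and task names are made up for illustration.

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    import org.jboss.netty.util.internal.QueueFactory;

    public class QueueFactoryExecutorExample {
        public static void main(String[] args) {
            // QueueFactory decides between the Unsafe-based and the legacy queue.
            BlockingQueue<Runnable> workQueue = QueueFactory.createQueue(Runnable.class);
            ThreadPoolExecutor executor = new ThreadPoolExecutor(
                    2, 2, 30, TimeUnit.SECONDS, workQueue);
            executor.execute(new Runnable() {
                public void run() {
                    System.out.println("task ran");
                }
            });
            executor.shutdown();
        }
    }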

View File

@ -31,7 +31,7 @@ import org.jboss.netty.channel.ChannelState;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.util.ObjectSizeEstimator;
import org.jboss.netty.util.internal.ConcurrentIdentityWeakKeyHashMap;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
/**
* A {@link MemoryAwareThreadPoolExecutor} which makes sure the events from the
@ -284,7 +284,7 @@ public class OrderedMemoryAwareThreadPoolExecutor extends
}
private final class ChildExecutor implements Executor, Runnable {
private final Queue<Runnable> tasks = new LinkedTransferQueue<Runnable>();
private final Queue<Runnable> tasks = QueueFactory.createQueue(Runnable.class);
private final AtomicBoolean isRunning = new AtomicBoolean(false);
ChildExecutor() {
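Each ChildExecutor above keeps a per-channel task queue from QueueFactory so events for one channel run in order. A typical, hedged way to put this executor to work is through an ExecutionHandler; the thread count and memory limits below are arbitrary example values.

    import java.util.concurrent.Executor;

    import org.jboss.netty.handler.execution.ExecutionHandler;
    import org.jboss.netty.handler.execution.OrderedMemoryAwareThreadPoolExecutor;

    public class ExecutionHandlerExample {
        public static ExecutionHandler newExecutionHandler() {
            // 16 worker threads, 1 MiB per-channel and 64 MiB total event memory limits.
            Executor executor =
                    new OrderedMemoryAwareThreadPoolExecutor(16, 1048576, 67108864);
            return new ExecutionHandler(executor);
        }
    }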

View File

@ -29,7 +29,7 @@ import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.util.internal.DeadLockProofWorker;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
/**
* Emulates blocking read operation. This handler stores all received messages
@ -84,7 +84,7 @@ public class BlockingReadHandler<E> extends SimpleChannelUpstreamHandler {
* implementation.
*/
public BlockingReadHandler() {
this(new LinkedTransferQueue<ChannelEvent>());
this(QueueFactory.createQueue(ChannelEvent.class));
}
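A hedged usage sketch for BlockingReadHandler: received messages land in the factory-created event queue, and the handler's blocking read accessor (assumed to be read() here) dequeues them. ChannelBuffer as the message type is only an example.

    import org.jboss.netty.buffer.ChannelBuffer;
    import org.jboss.netty.handler.queue.BlockingReadHandler;

    public class BlockingReadExample {
        public static ChannelBuffer readOne(BlockingReadHandler<ChannelBuffer> reader)
                throws Exception {
            // Assumes the handler has already been added to a channel's pipeline,
            // e.g. pipeline.addLast("reader", reader);
            return reader.read(); // blocks until a message event has been queued
        }
    }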
/**

View File

@ -33,7 +33,7 @@ import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
import org.jboss.netty.channel.socket.nio.NioSocketChannelConfig;
import org.jboss.netty.util.HashedWheelTimer;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
/**
* Emulates buffered write operation. This handler stores all write requests
@ -193,7 +193,7 @@ public class BufferedWriteHandler extends SimpleChannelHandler {
* into a single write request on {@link #flush()}
*/
public BufferedWriteHandler(boolean consolidateOnFlush) {
this(new LinkedTransferQueue<MessageEvent>(), consolidateOnFlush);
this(QueueFactory.createQueue(MessageEvent.class), consolidateOnFlush);
}
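A brief, hedged sketch of the buffering pattern described above: write requests accumulate in the factory-created queue until flush() drains them, and with consolidateOnFlush enabled they are merged into a single write.

    import org.jboss.netty.channel.ChannelPipeline;
    import org.jboss.netty.handler.queue.BufferedWriteHandler;

    public class BufferedWriteExample {
        public static BufferedWriteHandler addBufferedWrites(ChannelPipeline pipeline) {
            // true = consolidate all buffered writes into one write request on flush()
            BufferedWriteHandler bufferedWriter = new BufferedWriteHandler(true);
            pipeline.addLast("bufferedWriter", bufferedWriter);
            return bufferedWriter;
        }

        public static void flushBufferedWrites(BufferedWriteHandler bufferedWriter) {
            // Writes issued on the channel since the last flush are sent out here.
            bufferedWriter.flush();
        }
    }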
/**

View File

@ -50,8 +50,8 @@ import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.handler.codec.frame.FrameDecoder;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.NonReentrantLock;
import org.jboss.netty.util.internal.QueueFactory;
/**
* Adds <a href="http://en.wikipedia.org/wiki/Transport_Layer_Security">SSL
@ -196,7 +196,7 @@ public class SslHandler extends FrameDecoder
int ignoreClosedChannelException;
final Object ignoreClosedChannelExceptionLock = new Object();
private final Queue<PendingWrite> pendingUnencryptedWrites = new LinkedList<PendingWrite>();
private final Queue<MessageEvent> pendingEncryptedWrites = new LinkedTransferQueue<MessageEvent>();
private final Queue<MessageEvent> pendingEncryptedWrites = QueueFactory.createQueue(MessageEvent.class);
private final NonReentrantLock pendingEncryptedWritesLock = new NonReentrantLock();
private volatile boolean issueHandshake = false;
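For orientation, a standard, hedged way to install SslHandler; writes issued before the handshake completes are parked in the pending write queues shown above until the engine can process them. The default SSLContext is used here purely as an example.

    import javax.net.ssl.SSLContext;
    import javax.net.ssl.SSLEngine;

    import org.jboss.netty.channel.ChannelPipeline;
    import org.jboss.netty.channel.Channels;
    import org.jboss.netty.handler.ssl.SslHandler;

    public class SslPipelineExample {
        public static ChannelPipeline newClientPipeline() throws Exception {
            SSLEngine engine = SSLContext.getDefault().createSSLEngine();
            engine.setUseClientMode(true);
            ChannelPipeline pipeline = Channels.pipeline();
            // SslHandler should usually be the first handler in the pipeline.
            pipeline.addFirst("ssl", new SslHandler(engine));
            return pipeline;
        }
    }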

View File

@ -35,7 +35,7 @@ import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.QueueFactory;
/**
* A {@link ChannelHandler} that adds support for writing a large data stream
@ -79,8 +79,7 @@ public class ChunkedWriteHandler implements ChannelUpstreamHandler, ChannelDowns
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(ChunkedWriteHandler.class);
private final Queue<MessageEvent> queue =
new LinkedTransferQueue<MessageEvent>();
private final Queue<MessageEvent> queue = QueueFactory.createQueue(MessageEvent.class);
private ChannelHandlerContext ctx;
private MessageEvent currentEvent;
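A hedged usage sketch for ChunkedWriteHandler: the handler streams chunked messages piece by piece, and the factory-created queue above holds write requests that arrive while a previous transfer is still in progress.

    import java.io.File;

    import org.jboss.netty.channel.Channel;
    import org.jboss.netty.channel.ChannelFuture;
    import org.jboss.netty.channel.ChannelPipeline;
    import org.jboss.netty.handler.stream.ChunkedFile;
    import org.jboss.netty.handler.stream.ChunkedWriteHandler;

    public class ChunkedWriteExample {
        public static void addChunkedWriter(ChannelPipeline pipeline) {
            pipeline.addLast("chunkedWriter", new ChunkedWriteHandler());
        }

        public static ChannelFuture writeFile(Channel channel, File file) throws Exception {
            // The file is read and written in chunks instead of being loaded at once.
            return channel.write(new ChunkedFile(file));
        }
    }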

View File

@ -0,0 +1,51 @@
/*
* Copyright 2011 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.util;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Utility which checks if the {@value #UNSAFE} class can be found in the classpath.
*
*
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://www.murkycloud.com/">Norman Maurer</a>
*
*/
public class UnsafeDetectUtil {
private static final String UNSAFE = "sun.misc.Unsafe";
private static final boolean UNSAFE_FOUND = isUnsafeFound(AtomicInteger.class.getClassLoader());
public static boolean isUnsafeFound(ClassLoader loader) {
try {
Class.forName(UNSAFE, true, loader);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
public static boolean isUnsafeFound() {
return UNSAFE_FOUND;
}
private UnsafeDetectUtil() {
// only static method supported
}
}
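A minimal usage sketch for the detector above, mirroring how QueueFactory later in this commit decides which queue implementation to instantiate.

    import org.jboss.netty.util.UnsafeDetectUtil;

    public class UnsafeCheckExample {
        public static void main(String[] args) {
            // Same check QueueFactory performs once with its own class loader.
            boolean unsafeAvailable =
                    UnsafeDetectUtil.isUnsafeFound(UnsafeCheckExample.class.getClassLoader());
            System.out.println("sun.misc.Unsafe available: " + unsafeAvailable);
        }
    }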

File diff suppressed because it is too large

View File

@ -1,38 +1,32 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/licenses/publicdomain
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package org.jboss.netty.util.internal;
import java.util.AbstractQueue;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.concurrent.locks.LockSupport;
/**
* This class is copied from <a href="http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166y/LinkedTransferQueue.java"> URL revision 1.91 </a>
* <br>
* The only difference is that any reference to the TransferQueue interface was removed and it implements {@link BlockingQueue} instead
* <br>
*
* <strong>
* Please use {@link QueueFactory} to create a Queue as it will use the "optimal" implementation depending on the JVM
* </strong>
* <br>
* <br>
*
* An unbounded {@link BlockingQueue} based on linked nodes.
* This queue orders elements FIFO (first-in-first-out) with respect
* to any given producer. The <em>head</em> of the queue is that
@ -40,10 +34,17 @@ import java.util.concurrent.locks.LockSupport;
* producer. The <em>tail</em> of the queue is that element that has
* been on the queue the shortest time for some producer.
*
* <p>Beware that, unlike in most collections, the {@code size}
* method is <em>NOT</em> a constant-time operation. Because of the
* <p>Beware that, unlike in most collections, the {@code size} method
* is <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these queues, determining the current number
* of elements requires a traversal of the elements.
* of elements requires a traversal of the elements, and so may report
* inaccurate results if this collection is modified during traversal.
* Additionally, the bulk operations {@code addAll},
* {@code removeAll}, {@code retainAll}, {@code containsAll},
* {@code equals}, and {@code toArray} are <em>not</em> guaranteed
* to be performed atomically. For example, an iterator operating
* concurrently with an {@code addAll} operation might view only some
* of the added elements.
*
* <p>This class and its iterator implement all of the
* <em>optional</em> methods of the {@link Collection} and {@link
@ -60,10 +61,8 @@ import java.util.concurrent.locks.LockSupport;
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
* Java Collections Framework</a>.
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @since 1.7
* @author Doug Lea
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @param <E> the type of elements held in this collection
*/
public class LinkedTransferQueue<E> extends AbstractQueue<E>
@ -314,8 +313,8 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* of less-contended queues. During spins threads check their
* interrupt status and generate a thread-local random number
* to decide to occasionally perform a Thread.yield. While
* yield has underdefined specs, we assume that might it help,
* and will not hurt in limiting impact of spinning on busy
* yield has underdefined specs, we assume that it might help,
* and will not hurt, in limiting impact of spinning on busy
* systems. We also use smaller (1/2) spins for nodes that are
* not known to be front but whose predecessors have not
* blocked -- these "chained" spins avoid artifacts of
@ -436,35 +435,13 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
volatile Thread waiter; // null until waiting
// CAS methods for fields
boolean casNext(Node cmp, Node val) {
if (AtomicFieldUpdaterUtil.isAvailable()) {
return nextUpdater.compareAndSet(this, cmp, val);
} else {
synchronized (this) {
if (next == cmp) {
next = val;
return true;
} else {
return false;
}
}
}
final boolean casNext(Node cmp, Node val) {
return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
}
boolean casItem(Object cmp, Object val) {
final boolean casItem(Object cmp, Object val) {
// assert cmp == null || cmp.getClass() != Node.class;
if (AtomicFieldUpdaterUtil.isAvailable()) {
return itemUpdater.compareAndSet(this, cmp, val);
} else {
synchronized (this) {
if (item == cmp) {
item = val;
return true;
} else {
return false;
}
}
}
return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
}
/**
@ -472,7 +449,7 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* only be seen after publication via casNext.
*/
Node(Object item, boolean isData) {
this.item = item;
UNSAFE.putObject(this, itemOffset, item); // relaxed write
this.isData = isData;
}
@ -480,37 +457,37 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* Links node to itself to avoid garbage retention. Called
* only after CASing head field, so uses relaxed write.
*/
void forgetNext() {
this.next = this;
final void forgetNext() {
UNSAFE.putObject(this, nextOffset, this);
}
/**
* Sets item to self and waiter to null, to avoid garbage
* retention after matching or cancelling. Uses relaxed writes
* bacause order is already constrained in the only calling
* because order is already constrained in the only calling
* contexts: item is forgotten only after volatile/atomic
* mechanics that extract items. Similarly, clearing waiter
* follows either CAS or return from park (if ever parked;
* else we don't care).
*/
void forgetContents() {
this.item = this;
this.waiter = null;
final void forgetContents() {
UNSAFE.putObject(this, itemOffset, this);
UNSAFE.putObject(this, waiterOffset, null);
}
/**
* Returns true if this node has been matched, including the
* case of artificial matches due to cancellation.
*/
boolean isMatched() {
final boolean isMatched() {
Object x = item;
return x == this || x == null == isData;
return (x == this) || ((x == null) == isData);
}
/**
* Returns true if this is an unmatched request node.
*/
boolean isUnmatchedRequest() {
final boolean isUnmatchedRequest() {
return !isData && item == null;
}
@ -519,16 +496,16 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* appended to this node because this node is unmatched and
* has opposite data mode.
*/
boolean cannotPrecede(boolean haveData) {
final boolean cannotPrecede(boolean haveData) {
boolean d = isData;
Object x;
return d != haveData && (x = item) != this && x != null == d;
return d != haveData && (x = item) != this && (x != null) == d;
}
/**
* Tries to artificially match a data node -- used by remove.
*/
boolean tryMatchData() {
final boolean tryMatchData() {
// assert isData;
Object x = item;
if (x != null && x != this && casItem(x, null)) {
@ -538,66 +515,49 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
return false;
}
private static final AtomicReferenceFieldUpdater<Node, Node> nextUpdater =
AtomicFieldUpdaterUtil.newRefUpdater(Node.class, Node.class, "next");
private static final AtomicReferenceFieldUpdater<Node, Object> itemUpdater =
AtomicFieldUpdaterUtil.newRefUpdater(Node.class, Object.class, "item");
private static final long serialVersionUID = -3375979862319811754L;
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long itemOffset;
private static final long nextOffset;
private static final long waiterOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = Node.class;
itemOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("item"));
nextOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("next"));
waiterOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("waiter"));
} catch (Exception e) {
throw new Error(e);
}
}
}
/** head of the queue; null until first enqueue */
transient volatile Node head;
/** tail of the queue; null until first append */
transient volatile Node tail;
private transient volatile Node tail;
/** The number of apparent failures to unsplice removed nodes */
transient volatile int sweepVotes;
private transient volatile int sweepVotes;
// CAS methods for fields
private boolean casTail(Node cmp, Node val) {
if (AtomicFieldUpdaterUtil.isAvailable()) {
return tailUpdater.compareAndSet(this, cmp, val);
} else {
synchronized (this) {
if (tail == cmp) {
tail = val;
return true;
} else {
return false;
}
}
}
return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
}
private boolean casHead(Node cmp, Node val) {
if (AtomicFieldUpdaterUtil.isAvailable()) {
return headUpdater.compareAndSet(this, cmp, val);
} else {
synchronized (this) {
if (head == cmp) {
head = val;
return true;
} else {
return false;
}
}
}
return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
}
private boolean casSweepVotes(int cmp, int val) {
if (AtomicFieldUpdaterUtil.isAvailable()) {
return sweepVotesUpdater.compareAndSet(this, cmp, val);
} else {
synchronized (this) {
if (sweepVotes == cmp) {
sweepVotes = val;
return true;
} else {
return false;
}
}
}
return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
}
/*
@ -625,20 +585,19 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* @throws NullPointerException if haveData mode but e is null
*/
private E xfer(E e, boolean haveData, int how, long nanos) {
if (haveData && e == null) {
if (haveData && (e == null))
throw new NullPointerException();
}
Node s = null; // the node to append, if needed
retry: for (;;) { // restart on append race
retry:
for (;;) { // restart on append race
for (Node h = head, p = h; p != null;) { // find & match first node
boolean isData = p.isData;
Object item = p.item;
if (item != p && item != null == isData) { // unmatched
if (isData == haveData) { // can't match
if (item != p && (item != null) == isData) { // unmatched
if (isData == haveData) // can't match
break;
}
if (p.casItem(item, e)) { // match
for (Node q = p; q != h;) {
Node n = q.next; // update by 2 unless singleton
@ -647,30 +606,26 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
break;
} // advance and retry
if ((h = head) == null ||
(q = h.next) == null || !q.isMatched()) {
(q = h.next) == null || !q.isMatched())
break; // unless slack < 2
}
}
LockSupport.unpark(p.waiter);
return LinkedTransferQueue.cast(item);
return LinkedTransferQueue.<E>cast(item);
}
}
Node n = p.next;
p = p != n ? n : (h = head); // Use head if p offlist
p = (p != n) ? n : (h = head); // Use head if p offlist
}
if (how != NOW) { // No matches available
if (s == null) {
if (s == null)
s = new Node(e, haveData);
}
Node pred = tryAppend(s, haveData);
if (pred == null) {
if (pred == null)
continue retry; // lost race vs opposite mode
}
if (how != ASYNC) {
if (how != ASYNC)
return awaitMatch(s, pred, e, (how == TIMED), nanos);
}
}
return e; // not waiting
}
}
@ -688,25 +643,22 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
for (Node t = tail, p = t;;) { // move p to last node and append
Node n, u; // temps for reads of next & tail
if (p == null && (p = head) == null) {
if (casHead(null, s)) {
if (casHead(null, s))
return s; // initialize
}
}
else if (p.cannotPrecede(haveData)) {
else if (p.cannotPrecede(haveData))
return null; // lost race vs opposite mode
} else if ((n = p.next) != null) { // not last; keep traversing
else if ((n = p.next) != null) // not last; keep traversing
p = p != t && t != (u = tail) ? (t = u) : // stale tail
p != n ? n : null; // restart if off list
} else if (!p.casNext(null, s)) {
(p != n) ? n : null; // restart if off list
else if (!p.casNext(null, s))
p = p.next; // re-read on CAS failure
} else {
else {
if (p != t) { // update if slack now >= 2
while ((tail != t || !casTail(t, s)) &&
(t = tail) != null &&
(s = t.next) != null && // advance and retry
(s = s.next) != null && s != t) {
continue;
}
(s = s.next) != null && s != t);
}
return p;
}
@ -736,37 +688,34 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
if (item != e) { // matched
// assert item != s;
s.forgetContents(); // avoid garbage
return LinkedTransferQueue.cast(item);
return LinkedTransferQueue.<E>cast(item);
}
if ((w.isInterrupted() || timed && nanos <= 0) &&
if ((w.isInterrupted() || (timed && nanos <= 0)) &&
s.casItem(e, s)) { // cancel
unsplice(pred, s);
return e;
}
if (spins < 0) { // establish spins at/near front
if ((spins = spinsFor(pred, s.isData)) > 0) {
if ((spins = spinsFor(pred, s.isData)) > 0)
randomYields = ThreadLocalRandom.current();
}
}
else if (spins > 0) { // spin
--spins;
if (randomYields.nextInt(CHAINED_SPINS) == 0) {
if (randomYields.nextInt(CHAINED_SPINS) == 0)
Thread.yield(); // occasionally yield
}
}
else if (s.waiter == null) {
s.waiter = w; // request unpark then recheck
}
else if (timed) {
long now = System.nanoTime();
if ((nanos -= now - lastTime) > 0) {
LockSupport.parkNanos(nanos);
}
if ((nanos -= now - lastTime) > 0)
LockSupport.parkNanos(this, nanos);
lastTime = now;
}
else {
LockSupport.park();
LockSupport.park(this);
}
}
}
@ -777,16 +726,13 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
*/
private static int spinsFor(Node pred, boolean haveData) {
if (MP && pred != null) {
if (pred.isData != haveData) { // phase change
if (pred.isData != haveData) // phase change
return FRONT_SPINS + CHAINED_SPINS;
}
if (pred.isMatched()) { // probably at front
if (pred.isMatched()) // probably at front
return FRONT_SPINS;
}
if (pred.waiter == null) { // pred apparently spinning
if (pred.waiter == null) // pred apparently spinning
return CHAINED_SPINS;
}
}
return 0;
}
@ -799,7 +745,7 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
*/
final Node succ(Node p) {
Node next = p.next;
return p == next ? head : next;
return (p == next) ? head : next;
}
/**
@ -808,9 +754,8 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
*/
private Node firstOfMode(boolean isData) {
for (Node p = head; p != null; p = succ(p)) {
if (!p.isMatched()) {
return p.isData == isData ? p : null;
}
if (!p.isMatched())
return (p.isData == isData) ? p : null;
}
return null;
}
@ -823,14 +768,12 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
for (Node p = head; p != null; p = succ(p)) {
Object item = p.item;
if (p.isData) {
if (item != null && item != p) {
return LinkedTransferQueue.cast(item);
if (item != null && item != p)
return LinkedTransferQueue.<E>cast(item);
}
}
else if (item == null) {
else if (item == null)
return null;
}
}
return null;
}
@ -842,17 +785,15 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
int count = 0;
for (Node p = head; p != null; ) {
if (!p.isMatched()) {
if (p.isData != data) {
if (p.isData != data)
return 0;
}
if (++count == Integer.MAX_VALUE) { // saturated
if (++count == Integer.MAX_VALUE) // saturated
break;
}
}
Node n = p.next;
if (n != p) {
if (n != p)
p = n;
} else {
else {
count = 0;
p = head;
}
@ -870,54 +811,86 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* Moves to next node after prev, or first node if prev null.
*/
private void advance(Node prev) {
lastPred = lastRet;
lastRet = prev;
for (Node p = prev == null ? head : succ(prev);
p != null; p = succ(p)) {
Object item = p.item;
if (p.isData) {
if (item != null && item != p) {
nextItem = LinkedTransferQueue.cast(item);
nextNode = p;
/*
* To track and avoid buildup of deleted nodes in the face
* of calls to both Queue.remove and Itr.remove, we must
* include variants of unsplice and sweep upon each
* advance: Upon Itr.remove, we may need to catch up links
* from lastPred, and upon other removes, we might need to
* skip ahead from stale nodes and unsplice deleted ones
* found while advancing.
*/
Node r, b; // reset lastPred upon possible deletion of lastRet
if ((r = lastRet) != null && !r.isMatched())
lastPred = r; // next lastPred is old lastRet
else if ((b = lastPred) == null || b.isMatched())
lastPred = null; // at start of list
else {
Node s, n; // help with removal of lastPred.next
while ((s = b.next) != null &&
s != b && s.isMatched() &&
(n = s.next) != null && n != s)
b.casNext(s, n);
}
this.lastRet = prev;
for (Node p = prev, s, n;;) {
s = (p == null) ? head : p.next;
if (s == null)
break;
else if (s == p) {
p = null;
continue;
}
Object item = s.item;
if (s.isData) {
if (item != null && item != s) {
nextItem = LinkedTransferQueue.<E>cast(item);
nextNode = s;
return;
}
}
else if (item == null) {
else if (item == null)
break;
}
// assert s.isMatched();
if (p == null)
p = s;
else if ((n = s.next) == null)
break;
else if (s == n)
p = null;
else
p.casNext(s, n);
}
nextNode = null;
nextItem = null;
}
Itr() {
advance(null);
}
@Override
public boolean hasNext() {
public final boolean hasNext() {
return nextNode != null;
}
@Override
public E next() {
public final E next() {
Node p = nextNode;
if (p == null) {
throw new NoSuchElementException();
}
if (p == null) throw new NoSuchElementException();
E e = nextItem;
advance(p);
return e;
}
@Override
public void remove() {
Node p = lastRet;
if (p == null) {
public final void remove() {
final Node lastRet = this.lastRet;
if (lastRet == null)
throw new IllegalStateException();
}
if (p.tryMatchData()) {
unsplice(lastPred, p);
}
this.lastRet = null;
if (lastRet.tryMatchData())
unsplice(lastPred, lastRet);
}
}
@ -943,31 +916,26 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
if (pred != null && pred != s && pred.next == s) {
Node n = s.next;
if (n == null ||
n != s && pred.casNext(s, n) && pred.isMatched()) {
(n != s && pred.casNext(s, n) && pred.isMatched())) {
for (;;) { // check if at, or could be, head
Node h = head;
if (h == pred || h == s || h == null) {
if (h == pred || h == s || h == null)
return; // at head or list empty
}
if (!h.isMatched()) {
if (!h.isMatched())
break;
}
Node hn = h.next;
if (hn == null) {
if (hn == null)
return; // now empty
}
if (hn != h && casHead(h, hn)) {
if (hn != h && casHead(h, hn))
h.forgetNext(); // advance head
}
}
if (pred.next != pred && s.next != s) { // recheck if offlist
for (;;) { // sweep now if enough votes
int v = sweepVotes;
if (v < SWEEP_THRESHOLD) {
if (casSweepVotes(v, v + 1)) {
if (casSweepVotes(v, v + 1))
break;
}
}
else if (casSweepVotes(v, 0)) {
sweep();
break;
@ -984,19 +952,18 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
*/
private void sweep() {
for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
if (!s.isMatched()) {
if (!s.isMatched())
// Unmatched nodes are never self-linked
p = s;
} else if ((n = s.next) == null) { // trailing node is pinned
else if ((n = s.next) == null) // trailing node is pinned
break;
} else if (s == n) { // stale
else if (s == n) // stale
// No need to also check for p == s, since that implies s == n
p = head;
} else {
else
p.casNext(s, n);
}
}
}
/**
* Main implementation of remove(Object)
@ -1012,9 +979,8 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
return true;
}
}
else if (item == null) {
else if (item == null)
break;
}
pred = p;
if ((p = p.next) == pred) { // stale
pred = null;
@ -1052,7 +1018,6 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
*
* @throws NullPointerException if the specified element is null
*/
@Override
public void put(E e) {
xfer(e, true, ASYNC, 0);
}
@ -1063,10 +1028,10 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* return {@code false}.
*
* @return {@code true} (as specified by
* {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer})
* {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
* BlockingQueue.offer})
* @throws NullPointerException if the specified element is null
*/
@Override
public boolean offer(E e, long timeout, TimeUnit unit) {
xfer(e, true, ASYNC, 0);
return true;
@ -1076,11 +1041,9 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* Inserts the specified element at the tail of this queue.
* As the queue is unbounded, this method will never return {@code false}.
*
* @return {@code true} (as specified by
* {@link BlockingQueue#offer(Object) BlockingQueue.offer})
* @return {@code true} (as specified by {@link Queue#offer})
* @throws NullPointerException if the specified element is null
*/
@Override
public boolean offer(E e) {
xfer(e, true, ASYNC, 0);
return true;
@ -1094,7 +1057,6 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* @return {@code true} (as specified by {@link Collection#add})
* @throws NullPointerException if the specified element is null
*/
@Override
public boolean add(E e) {
xfer(e, true, ASYNC, 0);
return true;
@ -1148,35 +1110,28 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
*/
public boolean tryTransfer(E e, long timeout, TimeUnit unit)
throws InterruptedException {
if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) {
if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
return true;
}
if (!Thread.interrupted()) {
if (!Thread.interrupted())
return false;
}
throw new InterruptedException();
}
@Override
public E take() throws InterruptedException {
E e = xfer(null, false, SYNC, 0);
if (e != null) {
if (e != null)
return e;
}
Thread.interrupted();
throw new InterruptedException();
}
@Override
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
E e = xfer(null, false, TIMED, unit.toNanos(timeout));
if (e != null || !Thread.interrupted()) {
if (e != null || !Thread.interrupted())
return e;
}
throw new InterruptedException();
}
@Override
public E poll() {
return xfer(null, false, NOW, 0);
}
@ -1185,17 +1140,13 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* @throws NullPointerException {@inheritDoc}
* @throws IllegalArgumentException {@inheritDoc}
*/
@Override
public int drainTo(Collection<? super E> c) {
if (c == null) {
if (c == null)
throw new NullPointerException();
}
if (c == this) {
if (c == this)
throw new IllegalArgumentException();
}
int n = 0;
E e;
while ( (e = poll()) != null) {
for (E e; (e = poll()) != null;) {
c.add(e);
++n;
}
@ -1206,17 +1157,13 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* @throws NullPointerException {@inheritDoc}
* @throws IllegalArgumentException {@inheritDoc}
*/
@Override
public int drainTo(Collection<? super E> c, int maxElements) {
if (c == null) {
if (c == null)
throw new NullPointerException();
}
if (c == this) {
if (c == this)
throw new IllegalArgumentException();
}
int n = 0;
E e;
while (n < maxElements && (e = poll()) != null) {
for (E e; n < maxElements && (e = poll()) != null;) {
c.add(e);
++n;
}
@ -1224,24 +1171,22 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
}
/**
* Returns an iterator over the elements in this queue in proper
* sequence, from head to tail.
* Returns an iterator over the elements in this queue in proper sequence.
* The elements will be returned in order from first (head) to last (tail).
*
* <p>The returned iterator is a "weakly consistent" iterator that
* will never throw
* {@link ConcurrentModificationException ConcurrentModificationException},
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed
* to) reflect any modifications subsequent to construction.
* will never throw {@link java.util.ConcurrentModificationException
* ConcurrentModificationException}, and guarantees to traverse
* elements as they existed upon construction of the iterator, and
* may (but is not guaranteed to) reflect any modifications
* subsequent to construction.
*
* @return an iterator over the elements in this queue in proper sequence
*/
@Override
public Iterator<E> iterator() {
return new Itr();
}
@Override
public E peek() {
return firstDataItem();
}
@ -1251,13 +1196,11 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
*
* @return {@code true} if this queue contains no elements
*/
@Override
public boolean isEmpty() {
for (Node p = head; p != null; p = succ(p)) {
if (!p.isMatched()) {
if (!p.isMatched())
return !p.isData;
}
}
return true;
}
@ -1277,7 +1220,6 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
*
* @return the number of elements in this queue
*/
@Override
public int size() {
return countOfMode(true);
}
@ -1297,19 +1239,40 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* @param o element to be removed from this queue, if present
* @return {@code true} if this queue changed as a result of the call
*/
@Override
public boolean remove(Object o) {
return findAndRemove(o);
}
/**
* Returns {@code true} if this queue contains the specified element.
* More formally, returns {@code true} if and only if this queue contains
* at least one element {@code e} such that {@code o.equals(e)}.
*
* @param o object to be checked for containment in this queue
* @return {@code true} if this queue contains the specified element
*/
public boolean contains(Object o) {
if (o == null) return false;
for (Node p = head; p != null; p = succ(p)) {
Object item = p.item;
if (p.isData) {
if (item != null && item != p && o.equals(item))
return true;
}
else if (item == null)
break;
}
return false;
}
/**
* Always returns {@code Integer.MAX_VALUE} because a
* {@code LinkedTransferQueue} is not capacity constrained.
*
* @return {@code Integer.MAX_VALUE} (as specified by
* {@link BlockingQueue#remainingCapacity()})
* {@link java.util.concurrent.BlockingQueue#remainingCapacity()
* BlockingQueue.remainingCapacity})
*/
@Override
public int remainingCapacity() {
return Integer.MAX_VALUE;
}
@ -1324,9 +1287,8 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
for (E e : this) {
for (E e : this)
s.writeObject(e);
}
// Use trailing null as sentinel
s.writeObject(null);
}
@ -1341,23 +1303,62 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
for (;;) {
@SuppressWarnings("unchecked") E item = (E) s.readObject();
if (item == null) {
@SuppressWarnings("unchecked")
E item = (E) s.readObject();
if (item == null)
break;
} else {
else
offer(item);
}
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long headOffset;
private static final long tailOffset;
private static final long sweepVotesOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = LinkedTransferQueue.class;
headOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("head"));
tailOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("tail"));
sweepVotesOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("sweepVotes"));
} catch (Exception e) {
throw new Error(e);
}
}
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<LinkedTransferQueue, Node> headUpdater =
AtomicFieldUpdaterUtil.newRefUpdater(LinkedTransferQueue.class, Node.class, "head");
@SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<LinkedTransferQueue, Node> tailUpdater =
AtomicFieldUpdaterUtil.newRefUpdater(LinkedTransferQueue.class, Node.class, "tail");
@SuppressWarnings("rawtypes")
private static final AtomicIntegerFieldUpdater<LinkedTransferQueue> sweepVotesUpdater =
AtomicFieldUpdaterUtil.newIntUpdater(LinkedTransferQueue.class, "sweepVotes");
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
(new java.security
.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
java.lang.reflect.Field f = sun.misc
.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
}
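A short demonstration of the behaviour documented in the updated javadoc above: iteration is weakly consistent (it never throws ConcurrentModificationException) and size() has to traverse the nodes. The behaviour is the same whichever implementation QueueFactory returns.

    import java.util.Iterator;
    import java.util.concurrent.BlockingQueue;

    import org.jboss.netty.util.internal.QueueFactory;

    public class WeaklyConsistentIterationExample {
        public static void main(String[] args) {
            BlockingQueue<Integer> queue = QueueFactory.createQueue(Integer.class);
            for (int i = 0; i < 3; i ++) {
                queue.offer(i);
            }
            Iterator<Integer> it = queue.iterator();
            queue.offer(3); // modification after the iterator was created
            while (it.hasNext()) {
                // Never throws ConcurrentModificationException; the element added
                // above may or may not be observed.
                System.out.println(it.next());
            }
            System.out.println("size() traverses the queue: " + queue.size());
        }
    }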

View File

@ -0,0 +1,69 @@
/*
* Copyright 2011 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.util.internal;
import java.util.Collection;
import java.util.concurrent.BlockingQueue;
import org.jboss.netty.util.UnsafeDetectUtil;
/**
* This factory should be used to create the "optimal" {@link BlockingQueue} instance for the running JVM.
*
*
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://www.murkycloud.com/">Norman Maurer</a>
*
*/
public class QueueFactory {
private static final boolean useUnsafe = UnsafeDetectUtil.isUnsafeFound(QueueFactory.class.getClassLoader());
private QueueFactory() {
// only use static methods!
}
/**
* Create a new unbounded {@link BlockingQueue}
*
* @param itemClass the {@link Class} type which will be used as {@link BlockingQueue} items
* @return queue the {@link BlockingQueue} implementation
*/
public static final <T> BlockingQueue<T> createQueue(Class<T> itemClass) {
if (useUnsafe) {
return new LinkedTransferQueue<T>();
} else {
return new LegacyLinkedTransferQueue<T>();
}
}
/**
* Create a new unbounded {@link BlockingQueue}
*
* @param collection the collection which should get copied to the newly created {@link BlockingQueue}
* @param itemClass the {@link Class} type which will be used as {@link BlockingQueue} items
* @return queue the {@link BlockingQueue} implementation
*/
public static final <T> BlockingQueue<T> createQueue(Collection<? extends T> collection, Class<T> itemClass) {
if (useUnsafe) {
return new LinkedTransferQueue<T>(collection);
} else {
return new LegacyLinkedTransferQueue<T>(collection);
}
}
}
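Both factory methods defined above in use, as a quick sketch; the element values are arbitrary.

    import java.util.Arrays;
    import java.util.concurrent.BlockingQueue;

    import org.jboss.netty.util.internal.QueueFactory;

    public class QueueFactoryExample {
        public static void main(String[] args) {
            // Empty unbounded queue; the implementation depends on Unsafe availability.
            BlockingQueue<String> queue = QueueFactory.createQueue(String.class);
            queue.offer("hello");

            // Queue pre-populated from an existing collection.
            BlockingQueue<String> copied =
                    QueueFactory.createQueue(Arrays.asList("a", "b"), String.class);

            System.out.println(queue.poll() + " / " + copied.size());
        }
    }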

View File

@ -266,6 +266,16 @@ public class IpFilterRuleTest extends TestCase
return 0;
}
@Override
public Object getAttachment() {
return null;
}
@Override
public void setAttachment(Object attachment) {
}
}, h, addr),
addr);
}