Added JavaDoc for channel.socket.nio and oio

Trustin Lee 2008-09-03 07:41:27 +00:00
parent 9e3973415e
commit 7e491614cb
7 changed files with 320 additions and 3 deletions

View File

@@ -24,6 +24,12 @@ package org.jboss.netty.channel.socket.nio;
/**
* The default {@link ReceiveBufferSizePredictor} implementation.
* <p>
* It doubles the expected number of readable bytes if the previous read
* filled the allocated buffer. It halves the expected number of readable
* bytes if the read operation fails to fill a quarter of the allocated
* buffer twice in a row. Otherwise, it keeps returning the previous
* prediction.
*
* @author The Netty Project (netty-dev@lists.jboss.org)
* @author Trustin Lee (tlee@redhat.com)
@@ -42,10 +48,22 @@ public class DefaultReceiveBufferSizePredictor implements
private int nextReceiveBufferSize = 1024;
private boolean shouldHalveNow;
/**
* Creates a new predictor with the default parameters: the expected
* buffer size starts at {@code 1024}, never drops below {@code 256},
* and never rises above {@code 1048576}.
*/
public DefaultReceiveBufferSizePredictor() {
this(DEFAULT_MINIMUM, DEFAULT_INITIAL, DEFAULT_MAXIMUM);
}
/**
* Creates a new predictor with the specified parameters.
*
* @param minimum the inclusive lower bound of the expected buffer size
* @param initial the initial buffer size when no feedback has been received
* @param maximum the inclusive upper bound of the expected buffer size
*/
public DefaultReceiveBufferSizePredictor(int minimum, int initial, int maximum) {
if (minimum <= 0) {
throw new IllegalArgumentException("minimum: " + minimum);
@@ -73,7 +91,7 @@ public class DefaultReceiveBufferSizePredictor implements
} else {
shouldHalveNow = true;
}
} else if (previousReceiveBufferSize == nextReceiveBufferSize) {
} else if (previousReceiveBufferSize >= nextReceiveBufferSize) {
nextReceiveBufferSize = Math.min(maximum, nextReceiveBufferSize << 1);
shouldHalveNow = false;
}
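The doubling and halving policy described in the JavaDoc above can be condensed into a small standalone model. The class below is a hypothetical sketch, not the Netty implementation; it hard-codes the default bounds (256 and 1048576) that the real class takes as constructor arguments:

// A minimal sketch of the adaptive policy described in the JavaDoc above;
// the real DefaultReceiveBufferSizePredictor takes the bounds as
// constructor arguments instead of hard-coding them.
public final class AdaptivePredictorSketch {

    private int next = 1024;        // the initial prediction
    private boolean shouldHalveNow; // set after one under-filled read

    public int nextReceiveBufferSize() {
        return next;
    }

    public void previousReceiveBufferSize(int previous) {
        if (previous < next >>> 2) {
            // The read filled less than a quarter of the buffer.
            if (shouldHalveNow) {
                // ... twice in a row: halve, but stay above the minimum.
                next = Math.max(256, next >>> 1);
                shouldHalveNow = false;
            } else {
                shouldHalveNow = true;
            }
        } else if (previous >= next) {
            // The read filled the whole buffer: double, capped at the maximum.
            next = Math.min(1048576, next << 1);
            shouldHalveNow = false;
        }
        // Otherwise keep returning the previous prediction.
    }
}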

View File

@@ -22,14 +22,69 @@
*/
package org.jboss.netty.channel.socket.nio;
import java.nio.channels.Selector;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
import org.jboss.netty.channel.socket.SocketChannel;
/**
* A {@link ClientSocketChannelFactory} which creates a client-side NIO-based
* {@link SocketChannel}. It utilizes the non-blocking I/O mode which was
* introduced with NIO to serve a large number of concurrent connections
* efficiently.
*
* <h3>How threads work</h3>
* <p>
* There are two types of threads in a {@link NioClientSocketChannelFactory}:
* the boss thread and the worker threads.
*
* <h4>Boss thread</h4>
* <p>
* One {@link NioClientSocketChannelFactory} has one boss thread. It makes
* a connection attempt on request. Once a connection attempt succeeds,
* the boss thread passes the connected {@link Channel} to one of the worker
* threads that the {@link NioClientSocketChannelFactory} manages.
*
* <h4>Worker threads</h4>
* <p>
* One {@link NioClientSocketChannelFactory} can have one or more worker
* threads. A worker thread performs non-blocking read and write for one or
* more {@link Channel}s on request.
*
* <h3>Life cycle of threads and graceful shutdown</h3>
* <p>
* All threads are acquired from the {@link Executor}s which were specified
* when a {@link NioClientSocketChannelFactory} is created. A boss thread is
* acquired from the {@code bossExecutor}, and worker threads are acquired from
* the {@code workerExecutor}. Therefore, you should make sure the specified
* {@link Executor}s are able to lend a sufficient number of threads.
* Your best bet is to specify {@linkplain Executors#newCachedThreadPool() a cached thread pool}.
* <p>
* Both boss and worker threads are acquired lazily, and then released when
* there's nothing left to process. All the related resources such as
* {@link Selector} are also released when the boss and worker threads are
* released. Therefore, to shut down a service gracefully, you should do the
* following:
*
* <ol>
* <li>close all channels created by the factory,</li>
* <li>call {@link ExecutorService#shutdown()} for all executors which were
* specified to create the factory, and</li>
* <li>call {@link ExecutorService#awaitTermination(long, TimeUnit)}
* until it returns {@code true}.</li>
* </ol>
*
* Please make sure not to shut down the executor until all channels are
* closed. Otherwise, you will end up with a {@link RejectedExecutionException}
* and the related resources might not be released properly.
*
* @author The Netty Project (netty-dev@lists.jboss.org)
* @author Trustin Lee (tlee@redhat.com)
@@ -41,11 +96,32 @@ public class NioClientSocketChannelFactory implements ClientSocketChannelFactory
private final ChannelSink sink;
/**
* Creates a new instance. Calling this constructor is the same as calling
* {@link #NioClientSocketChannelFactory(Executor, Executor, int)} with
* the number of available processors in the machine, as returned by
* {@link Runtime#availableProcessors()}.
*
* @param bossExecutor
* the {@link Executor} which will execute the boss thread
* @param workerExecutor
* the {@link Executor} which will execute the I/O worker threads
*/
public NioClientSocketChannelFactory(
Executor bossExecutor, Executor workerExecutor) {
this(bossExecutor, workerExecutor, Runtime.getRuntime().availableProcessors());
}
/**
* Creates a new instance.
*
* @param bossExecutor
* the {@link Executor} which will execute the boss thread
* @param workerExecutor
* the {@link Executor} which will execute the I/O worker threads
* @param workerCount
* the number of I/O worker threads to start
*/
public NioClientSocketChannelFactory(
Executor bossExecutor, Executor workerExecutor,
int workerCount) {
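The shutdown sequence from the class documentation, sketched end to end. The connect-and-close part is elided because it depends on the application; everything else uses only the APIs named in the JavaDoc:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;

public class GracefulClientShutdownExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService bossExecutor = Executors.newCachedThreadPool();
        ExecutorService workerExecutor = Executors.newCachedThreadPool();
        ClientSocketChannelFactory factory =
                new NioClientSocketChannelFactory(bossExecutor, workerExecutor);

        // ... connect with the factory, exchange data, and close
        //     every channel it created ...

        // 1. All channels are closed at this point.
        // 2. Shut down both executors.
        bossExecutor.shutdown();
        workerExecutor.shutdown();
        // 3. Wait until the boss and worker threads (and their Selectors)
        //    are released.
        while (!bossExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
            // keep waiting
        }
        while (!workerExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
            // keep waiting
        }
    }
}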

View File

@@ -22,14 +22,72 @@
*/
package org.jboss.netty.channel.socket.nio;
import java.nio.channels.Selector;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.socket.ServerSocketChannel;
import org.jboss.netty.channel.socket.ServerSocketChannelFactory;
/**
* A {@link ServerSocketChannelFactory} which creates a server-side NIO-based
* {@link ServerSocketChannel}. It utilizes the non-blocking I/O mode which
* was introduced with NIO to serve a large number of concurrent connections
* efficiently.
*
* <h3>How threads work</h3>
* <p>
* There are two types of threads in a {@link NioServerSocketChannelFactory}:
* the boss threads and the worker threads.
*
* <h4>Boss threads</h4>
* <p>
* Each bound {@link ServerSocketChannel} has its own boss thread.
* For example, if you opened two server ports such as 80 and 443, you will
* have two boss threads. A boss thread accepts incoming connections until
* the port is unbound. Once a connection is accepted successfully, the boss
* thread passes the accepted {@link Channel} to one of the worker
* threads that the {@link NioServerSocketChannelFactory} manages.
*
* <h4>Worker threads</h4>
* <p>
* One {@link NioServerSocketChannelFactory} can have one or more worker
* threads. A worker thread performs non-blocking read and write for one or
* more {@link Channel}s on request.
*
* <h3>Life cycle of threads and graceful shutdown</h3>
* <p>
* All threads are acquired from the {@link Executor}s which were specified
* when a {@link NioServerSocketChannelFactory} is created. Boss threads are
* acquired from the {@code bossExecutor}, and worker threads are acquired from
* the {@code workerExecutor}. Therefore, you should make sure the specified
* {@link Executor}s are able to lend a sufficient number of threads.
* Your best bet is to specify {@linkplain Executors#newCachedThreadPool() a cached thread pool}.
* <p>
* Both boss and worker threads are acquired lazily, and then released when
* there's nothing left to process. All the related resources such as
* {@link Selector} are also released when the boss and worker threads are
* released. Therefore, to shut down a service gracefully, you should do the
* following:
*
* <ol>
* <li>unbind all channels created by the factory,</li>
* <li>close all child channels accepted by the unbound channels,</li>
* <li>call {@link ExecutorService#shutdown()} for all executors which were
* specified to create the factory, and</li>
* <li>call {@link ExecutorService#awaitTermination(long, TimeUnit)}
* until it returns {@code true}.</li>
* </ol>
*
* Please make sure not to shut down the executor until all channels are
* closed. Otherwise, you will end up with a {@link RejectedExecutionException}
* and the related resources might not be released properly.
*
* @author The Netty Project (netty-dev@lists.jboss.org)
* @author Trustin Lee (tlee@redhat.com)
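A hedged sketch of the server-side sequence above. The factory does not track accepted channels for you, so the acceptedChannels collection below is a hypothetical structure the application is assumed to maintain itself:

import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

import org.jboss.netty.channel.Channel;

public class GracefulServerShutdownExample {
    static void shutdown(
            Channel serverChannel, Collection<Channel> acceptedChannels,
            ExecutorService bossExecutor, ExecutorService workerExecutor)
            throws InterruptedException {
        // 1. Unbind the server channel so the boss thread stops accepting.
        serverChannel.unbind().awaitUninterruptibly();
        // 2. Close every child channel the application accepted.
        for (Channel child : acceptedChannels) {
            child.close().awaitUninterruptibly();
        }
        // 3. Shut down the executors that were given to the factory.
        bossExecutor.shutdown();
        workerExecutor.shutdown();
        // 4. Wait until all boss and worker threads are released.
        while (!bossExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
            // keep waiting
        }
        while (!workerExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
            // keep waiting
        }
    }
}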

View File

@@ -25,9 +25,11 @@ package org.jboss.netty.channel.socket.nio;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import org.jboss.netty.channel.socket.SocketChannel;
import org.jboss.netty.channel.socket.SocketChannelConfig;
/**
* A {@link SocketChannelConfig} for a NIO TCP/IP {@link SocketChannel}.
*
* @author The Netty Project (netty-dev@lists.jboss.org)
* @author Trustin Lee (tlee@redhat.com)
@@ -38,19 +40,59 @@ import org.jboss.netty.channel.socket.SocketChannelConfig;
*/
public interface NioSocketChannelConfig extends SocketChannelConfig {
/**
* Returns the maximum loop count for a write operation until
* {@link WritableByteChannel#write(ByteBuffer)} returns a non-zero value.
* It is similar to what a spin lock is used for in concurrency programming.
* It improves memory utilization and write throughput depending on
* the platform that the JVM runs on. The default value is {@code 16}.
*/
int getWriteSpinCount();
/**
* The maximum loop count for a write operation until
* Sets the maximum loop count for a write operation until
* {@link WritableByteChannel#write(ByteBuffer)} returns a non-zero value.
* It is similar to what a spin lock is used for in concurrency programming.
* It improves memory utilization and write throughput significantly.
* It improves memory utilization and write throughput depending on
* the platform that the JVM runs on. The default value is {@code 16}.
*
* @throws IllegalArgumentException
* if the specified value is less than or equal to {@code 0}
*/
void setWriteSpinCount(int writeSpinCount);
/**
* Returns the {@link ReceiveBufferSizePredictor} which predicts the
* number of readable bytes in the socket receive buffer. The default
* predictor is {@link DefaultReceiveBufferSizePredictor}.
*/
ReceiveBufferSizePredictor getReceiveBufferSizePredictor();
/**
* Sets the {@link ReceiveBufferSizePredictor} which predicts the
* number of readable bytes in the socket receive buffer. The default
* predictor is {@link DefaultReceiveBufferSizePredictor}.
*/
void setReceiveBufferSizePredictor(ReceiveBufferSizePredictor predictor);
/**
* Returns {@code true} if and only if an I/O thread should make an effort
* to balance the ratio of read and write operations. Ensuring read-write
* fairness is sometimes necessary in a high-speed network, because a certain
* channel can spend too much time flushing a large number of write requests,
* not giving other channels enough time to perform I/O. The default value
* is {@code false}.
*/
boolean isReadWriteFair();
/**
* Sets whether an I/O thread should make an effort to balance the ratio of
* read and write operations. Ensuring read-write fairness is sometimes
* necessary in a high-speed network, because a certain channel can spend
* too much time flushing a large number of write requests, not giving
* other channels enough time to perform I/O. The default value is
* {@code false}.
*/
void setReadWriteFair(boolean fair);
}
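Putting the three options together: assuming channel is a SocketChannel created by an NIO factory, its config can be cast to NioSocketChannelConfig and tuned. The values below are illustrative, not recommendations:

import org.jboss.netty.channel.socket.SocketChannel;
import org.jboss.netty.channel.socket.nio.DefaultReceiveBufferSizePredictor;
import org.jboss.netty.channel.socket.nio.NioSocketChannelConfig;

public class NioConfigExample {
    static void tune(SocketChannel channel) {
        // The cast is only safe for channels created by an NIO factory.
        NioSocketChannelConfig config =
                (NioSocketChannelConfig) channel.getConfig();
        config.setWriteSpinCount(32);   // retry write() up to 32 times
        config.setReceiveBufferSizePredictor(
                new DefaultReceiveBufferSizePredictor(256, 2048, 65536));
        config.setReadWriteFair(true);  // balance reads against writes
    }
}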

View File

@@ -22,7 +22,19 @@
*/
package org.jboss.netty.channel.socket.nio;
import org.jboss.netty.buffer.ChannelBuffer;
/**
* Predicts the number of readable bytes in the socket receive buffer.
* <p>
* It calculates the close-to-optimal capacity of the {@link ChannelBuffer}
* for the next read operation depending on the actual number of read bytes
* in the previous read operation. The more accurate the prediction is, the
* more effective the memory utilization will be.
* <p>
* Once a read operation is performed and the actual number of read bytes is
* known, an I/O thread should call {@link #previousReceiveBufferSize(int)} to
* update the predictor so it can predict more accurately.
*
* @author The Netty Project (netty-dev@lists.jboss.org)
* @author Trustin Lee (tlee@redhat.com)
@@ -31,6 +43,22 @@ package org.jboss.netty.channel.socket.nio;
*
*/
public interface ReceiveBufferSizePredictor {
/**
* Predicts the capacity of the {@link ChannelBuffer} for the next
* read operation depending on the actual number of read bytes in the
* previous read operation.
*
* @return the expected number of readable bytes this time
*/
int nextReceiveBufferSize();
/**
* Updates this predictor by telling the actual number of read bytes
* in the previous read operation.
*
* @param previousReceiveBufferSize
* the actual number of read bytes in the previous read operation
*/
void previousReceiveBufferSize(int previousReceiveBufferSize);
}
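A sketch of how an I/O thread is expected to drive a predictor: ask for the next size, read, then feed the actual byte count back. For brevity this uses a plain NIO ByteBuffer rather than a ChannelBuffer:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

import org.jboss.netty.channel.socket.nio.ReceiveBufferSizePredictor;

public class PredictorLoopSketch {
    static void readOnce(ReadableByteChannel ch,
                         ReceiveBufferSizePredictor predictor) throws IOException {
        // Allocate exactly as much as the predictor expects to be readable.
        ByteBuffer buf = ByteBuffer.allocate(predictor.nextReceiveBufferSize());
        int readBytes = ch.read(buf);
        if (readBytes > 0) {
            // Feed the actual count back so the next prediction improves.
            predictor.previousReceiveBufferSize(readBytes);
            // ... hand the data off to the pipeline ...
        }
    }
}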

View File

@@ -23,12 +23,54 @@
package org.jboss.netty.channel.socket.oio;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
import org.jboss.netty.channel.socket.SocketChannel;
/**
* A {@link ClientSocketChannelFactory} which creates a client-side blocking
* I/O based {@link SocketChannel}. It utilizes the good old blocking I/O API
* which is known to yield better throughput and latency when there is a
* relatively small number of connections to serve.
*
* <h3>How threads work</h3>
* <p>
* There is only one type of thread in an {@link OioClientSocketChannelFactory}:
* worker threads.
*
* <h4>Worker threads</h4>
* <p>
* Each connected {@link Channel} has a dedicated worker thread, just like a
* traditional blocking I/O thread model.
*
* <h3>Life cycle of threads and graceful shutdown</h3>
* <p>
* Worker threads are acquired from the {@link Executor} which was specified
* when an {@link OioClientSocketChannelFactory} is created (i.e. {@code workerExecutor}).
* Therefore, you should make sure the specified {@link Executor} is able to
* lend a sufficient number of threads.
* <p>
* Worker threads are acquired lazily, and then released when there's nothing
* left to process. All the related resources are also released when the
* worker threads are released. Therefore, to shut down a service gracefully,
* you should do the following:
*
* <ol>
* <li>close all channels created by the factory,</li>
* <li>call {@link ExecutorService#shutdown()} for the {@code workerExecutor}
* which was specified to create the factory, and</li>
* <li>call {@link ExecutorService#awaitTermination(long, TimeUnit)}
* until it returns {@code true}.</li>
* </ol>
*
* Please make sure not to shut down the executor until all channels are
* closed. Otherwise, you will end up with a {@link RejectedExecutionException}
* and the related resources might not be released properly.
*
* @author The Netty Project (netty-dev@lists.jboss.org)
* @author Trustin Lee (tlee@redhat.com)
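A minimal usage sketch. Since every connection gets its own dedicated thread, a cached thread pool is the natural choice for the workerExecutor mentioned above:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
import org.jboss.netty.channel.socket.oio.OioClientSocketChannelFactory;

public class OioClientExample {
    public static void main(String[] args) throws InterruptedException {
        // One thread per connection, so a cached pool is the natural choice.
        ExecutorService workerExecutor = Executors.newCachedThreadPool();
        ClientSocketChannelFactory factory =
                new OioClientSocketChannelFactory(workerExecutor);

        // ... connect with the factory, exchange data, and close
        //     every channel it created ...

        workerExecutor.shutdown();
        while (!workerExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
            // wait until all per-connection worker threads are released
        }
    }
}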

View File

@@ -23,13 +23,66 @@
package org.jboss.netty.channel.socket.oio;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.socket.ServerSocketChannel;
import org.jboss.netty.channel.socket.ServerSocketChannelFactory;
/**
* A {@link ServerSocketChannelFactory} which creates a server-side blocking
* I/O based {@link ServerSocketChannel}. It utilizes the good old blocking
* I/O API which is known to yield better throughput and latency when there
* is a relatively small number of connections to serve.
*
* <h3>How threads work</h3>
* <p>
* There are two types of threads in an {@link OioServerSocketChannelFactory}:
* the boss threads and the worker threads.
*
* <h4>Boss threads</h4>
* <p>
* Each bound {@link ServerSocketChannel} has its own boss thread.
* For example, if you opened two server ports such as 80 and 443, you will
* have two boss threads. A boss thread accepts incoming connections until
* the port is unbound. Once a connection is accepted successfully, the boss
* thread passes the accepted {@link Channel} to one of the worker
* threads that the {@link OioServerSocketChannelFactory} manages.
*
* <h4>Worker threads</h4>
* <p>
* Each connected {@link Channel} has a dedicated worker thread, just like a
* traditional blocking I/O thread model.
*
* <h3>Life cycle of threads and graceful shutdown</h3>
* <p>
* All threads are acquired from the {@link Executor}s which were specified
* when an {@link OioServerSocketChannelFactory} is created. Boss threads are
* acquired from the {@code bossExecutor}, and worker threads are acquired from
* the {@code workerExecutor}. Therefore, you should make sure the specified
* {@link Executor}s are able to lend a sufficient number of threads.
* <p>
* Both boss and worker threads are acquired lazily, and then released when
* there's nothing left to process. All the related resources are also
* released when the boss and worker threads are released. Therefore, to shut
* down a service gracefully, you should do the following:
*
* <ol>
* <li>unbind all channels created by the factory,</li>
* <li>close all child channels accepted by the unbound channels,</li>
* <li>call {@link ExecutorService#shutdown()} for all executors which were
* specified to create the factory, and</li>
* <li>call {@link ExecutorService#awaitTermination(long, TimeUnit)}
* until it returns {@code true}.</li>
* </ol>
*
* Please make sure not to shut down the executor until all channels are
* closed. Otherwise, you will end up with a {@link RejectedExecutionException}
* and the related resources might not be released properly.
*
* @author The Netty Project (netty-dev@lists.jboss.org)
* @author Trustin Lee (tlee@redhat.com)
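A minimal construction sketch, assuming the two-executor constructor implied by the bossExecutor and workerExecutor references above:

import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

import org.jboss.netty.channel.socket.ServerSocketChannelFactory;
import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;

public class OioServerExample {
    public static void main(String[] args) {
        Executor bossExecutor = Executors.newCachedThreadPool();
        Executor workerExecutor = Executors.newCachedThreadPool();

        // One boss thread per bound port, plus one dedicated worker thread
        // per accepted connection, all lent by the executors above.
        ServerSocketChannelFactory factory =
                new OioServerSocketChannelFactory(bossExecutor, workerExecutor);

        // ... bind one or more ports with the factory and serve connections ...
    }
}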