diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/DefaultReceiveBufferSizePredictor.java b/src/main/java/org/jboss/netty/channel/socket/nio/DefaultReceiveBufferSizePredictor.java index fc1399e5cb..51e65cf282 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/DefaultReceiveBufferSizePredictor.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/DefaultReceiveBufferSizePredictor.java @@ -24,6 +24,12 @@ package org.jboss.netty.channel.socket.nio; /** * The default {@link ReceiveBufferSizePredictor} implementation. + *
+ * It doubles the expected number of readable bytes if the previous read + * filled the allocated buffer. It halves the expected number of readable + * bytes if the read operation was not able to fill a quarter of the allocated + * buffer two times consecutively. Otherwise, it keeps returning the previous + * prediction. * * @author The Netty Project (netty-dev@lists.jboss.org) * @author Trustin Lee (tlee@redhat.com) @@ -42,10 +48,22 @@ public class DefaultReceiveBufferSizePredictor implements private int nextReceiveBufferSize = 1024; private boolean shouldHalveNow; + /** + * Creates a new predictor with the default parameters. With the default + * parameters, the expected buffer size starts from {@code 1024}, doesn't + * go down below {@code 256}, and doesn't go up above {@code 1048576}. + */ public DefaultReceiveBufferSizePredictor() { this(DEFAULT_MINIMUM, DEFAULT_INITIAL, DEFAULT_MAXIMUM); } + /** + * Creates a new predictor with the specified parameters. + * + * @param minimum the inclusive lower bound of the expected buffer size + * @param initial the initial buffer size when no feed back was received + * @param maximum the inclusive upper bound of the expected buffer size + */ public DefaultReceiveBufferSizePredictor(int minimum, int initial, int maximum) { if (minimum <= 0) { throw new IllegalArgumentException("minimum: " + minimum); @@ -73,7 +91,7 @@ public class DefaultReceiveBufferSizePredictor implements } else { shouldHalveNow = true; } - } else if (previousReceiveBufferSize == nextReceiveBufferSize) { + } else if (previousReceiveBufferSize >= nextReceiveBufferSize) { nextReceiveBufferSize = Math.min(maximum, nextReceiveBufferSize << 1); shouldHalveNow = false; } diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketChannelFactory.java b/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketChannelFactory.java index 5b5c876b19..2cab3a7e1a 100644 --- 
a/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketChannelFactory.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketChannelFactory.java @@ -22,14 +22,69 @@ */ package org.jboss.netty.channel.socket.nio; +import java.nio.channels.Selector; import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelSink; import org.jboss.netty.channel.socket.ClientSocketChannelFactory; import org.jboss.netty.channel.socket.SocketChannel; /** + * A {@link ClientSocketChannelFactory} which creates a client-side NIO-based + * {@link SocketChannel}. It utilizes the non-blocking I/O mode which was + * introduced with NIO to serve a large number of concurrent connections + * efficiently. + * + *
+ * There are two types of threads in a {@link NioClientSocketChannelFactory}; + * one is boss thread and the other is worker thread. + * + *
+ * One {@link NioClientSocketChannelFactory} has one boss thread. It makes + * a connection attempt on request. Once a connection attempt succeeds, + * the boss thread passes the connected {@link Channel} to one of the worker + * threads that the {@link NioClientSocketChannelFactory} manages. + * + *
+ * One {@link NioClientSocketChannelFactory} can have one or more worker + * threads. A worker thread performs non-blocking read and write for one or + * more {@link Channel}s on request. + * + *
+ * All threads are acquired from the {@link Executor}s which were specified + * when a {@link NioClientSocketChannelFactory} is created. A boss thread is + * acquired from the {@code bossExecutor}, and worker threads are acquired from + * the {@code workerExecutor}. Therefore, you should make sure the specified + * {@link Executor}s are able to lend a sufficient number of threads. + * It is the best bet to specify {@linkplain Executors#newCachedThreadPool() a cached thread pool}. + *
+ * Both boss and worker threads are acquired lazily, and then released when + * there's nothing left to process. All the related resources such as + * {@link Selector} are also released when the boss and worker threads are + * released. Therefore, to shut down a service gracefully, you should do the + * following: + * + *
+ * There are two types of threads in a {@link NioServerSocketChannelFactory}; + * one is boss thread and the other is worker thread. + * + *
+ * Each bound {@link ServerSocketChannel} has its own boss thread. + * For example, if you opened two server ports such as 80 and 443, you will + * have two boss threads. A boss thread accepts incoming connections until + * the port is unbound. Once a connection is accepted successfully, the boss + * thread passes the accepted {@link Channel} to one of the worker + * threads that the {@link NioServerSocketChannelFactory} manages. + * + *
+ * One {@link NioServerSocketChannelFactory} can have one or more worker + * threads. A worker thread performs non-blocking read and write for one or + * more {@link Channel}s on request. + * + *
+ * All threads are acquired from the {@link Executor}s which were specified + * when a {@link NioServerSocketChannelFactory} is created. Boss threads are + * acquired from the {@code bossExecutor}, and worker threads are acquired from + * the {@code workerExecutor}. Therefore, you should make sure the specified + * {@link Executor}s are able to lend a sufficient number of threads. + * It is the best bet to specify {@linkplain Executors#newCachedThreadPool() a cached thread pool}. + *
+ * Both boss and worker threads are acquired lazily, and then released when + * there's nothing left to process. All the related resources such as + * {@link Selector} are also released when the boss and worker threads are + * released. Therefore, to shut down a service gracefully, you should do the + * following: + * + *
+ * It calculates the close-to-optimal capacity of the {@link ChannelBuffer} + * for the next read operation depending on the actual number of read bytes + * in the previous read operation. The more accurate the prediction is, the + * more effective the memory utilization will be. + *
+ * Once a read operation is performed and the actual number of read bytes is + * known, an I/O thread should call {@link #previousReceiveBufferSize(int)} to + * update the predictor so it can predict more accurately. * * @author The Netty Project (netty-dev@lists.jboss.org) * @author Trustin Lee (tlee@redhat.com) @@ -31,6 +43,22 @@ package org.jboss.netty.channel.socket.nio; * */ public interface ReceiveBufferSizePredictor { + + /** + * Predicts the capacity of the {@link ChannelBuffer} for the next + * read operation depending on the actual number of read bytes in the + * previous read operation. + * + * @return the expected number of readable bytes this time + */ int nextReceiveBufferSize(); + + /** + * Updates this predictor by telling the actual number of read bytes + * in the previous read operation. + * + * @param previousReceiveBufferSize + * the actual number of read bytes in the previous read operation + */ void previousReceiveBufferSize(int previousReceiveBufferSize); } diff --git a/src/main/java/org/jboss/netty/channel/socket/oio/OioClientSocketChannelFactory.java b/src/main/java/org/jboss/netty/channel/socket/oio/OioClientSocketChannelFactory.java index 11ed1eb5c3..36b76b9944 100644 --- a/src/main/java/org/jboss/netty/channel/socket/oio/OioClientSocketChannelFactory.java +++ b/src/main/java/org/jboss/netty/channel/socket/oio/OioClientSocketChannelFactory.java @@ -23,12 +23,54 @@ package org.jboss.netty.channel.socket.oio; import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.socket.ClientSocketChannelFactory; import org.jboss.netty.channel.socket.SocketChannel; /** + * A {@link ClientSocketChannelFactory} which creates a client-side blocking + * I/O based {@link SocketChannel}. 
It utilizes the good old blocking I/O API + * which is known to yield better throughput and latency when there is a + * relatively small number of connections to serve. + * + *
+ * There is only one type of thread in {@link OioClientSocketChannelFactory}; + * worker threads. + * + *
+ * Each connected {@link Channel} has a dedicated worker thread, just like a + * traditional blocking I/O thread model. + * + *
+ * Worker threads are acquired from the {@link Executor} which was specified + * when a {@link OioClientSocketChannelFactory} is created (i.e. {@code workerExecutor}.) + * Therefore, you should make sure the specified {@link Executor} is able to + * lend a sufficient number of threads. + *
+ * Worker threads are acquired lazily, and then released when there's nothing + * left to process. All the related resources are also released when the + * worker threads are released. Therefore, to shut down a service gracefully, + * you should do the following: + * + *
+ * There are two types of threads in a {@link OioServerSocketChannelFactory}; + * one is boss thread and the other is worker thread. + * + *
+ * Each bound {@link ServerSocketChannel} has its own boss thread. + * For example, if you opened two server ports such as 80 and 443, you will + * have two boss threads. A boss thread accepts incoming connections until + * the port is unbound. Once a connection is accepted successfully, the boss + * thread passes the accepted {@link Channel} to one of the worker + * threads that the {@link OioServerSocketChannelFactory} manages. + * + *
+ * Each connected {@link Channel} has a dedicated worker thread, just like a + * traditional blocking I/O thread model. + * + *
+ * All threads are acquired from the {@link Executor}s which were specified + * when a {@link OioServerSocketChannelFactory} is created. Boss threads are + * acquired from the {@code bossExecutor}, and worker threads are acquired from + * the {@code workerExecutor}. Therefore, you should make sure the specified + * {@link Executor}s are able to lend a sufficient number of threads. + *
+ * Both boss and worker threads are acquired lazily, and then released when + * there's nothing left to process. All the related resources are also + * released when the boss and worker threads are released. Therefore, to shut + * down a service gracefully, you should do the following: + * + *