From 7e491614cb6e0c8420840f9ac14493e3e4c80de5 Mon Sep 17 00:00:00 2001 From: Trustin Lee Date: Wed, 3 Sep 2008 07:41:27 +0000 Subject: [PATCH] Added JavaDoc for channel.socket.nio and oio --- .../DefaultReceiveBufferSizePredictor.java | 20 ++++- .../nio/NioClientSocketChannelFactory.java | 76 +++++++++++++++++++ .../nio/NioServerSocketChannelFactory.java | 58 ++++++++++++++ .../socket/nio/NioSocketChannelConfig.java | 46 ++++++++++- .../nio/ReceiveBufferSizePredictor.java | 28 +++++++ .../oio/OioClientSocketChannelFactory.java | 42 ++++++++++ .../oio/OioServerSocketChannelFactory.java | 53 +++++++++++++ 7 files changed, 320 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/DefaultReceiveBufferSizePredictor.java b/src/main/java/org/jboss/netty/channel/socket/nio/DefaultReceiveBufferSizePredictor.java index fc1399e5cb..51e65cf282 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/DefaultReceiveBufferSizePredictor.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/DefaultReceiveBufferSizePredictor.java @@ -24,6 +24,12 @@ package org.jboss.netty.channel.socket.nio; /** * The default {@link ReceiveBufferSizePredictor} implementation. + *
<p>
+ * It doubles the expected number of readable bytes if the previous read + * filled the allocated buffer. It halves the expected number of readable + * bytes if the read operation was not able to fill a quarter of the allocated + * buffer two times consecutively. Otherwise, it keeps returning the previous + * prediction. * * @author The Netty Project (netty-dev@lists.jboss.org) * @author Trustin Lee (tlee@redhat.com) @@ -42,10 +48,22 @@ public class DefaultReceiveBufferSizePredictor implements private int nextReceiveBufferSize = 1024; private boolean shouldHalveNow; + /** + * Creates a new predictor with the default parameters. With the default + * parameters, the expected buffer size starts from {@code 1024}, doesn't + * go down below {@code 256}, and doesn't go up above {@code 1048576}. + */ public DefaultReceiveBufferSizePredictor() { this(DEFAULT_MINIMUM, DEFAULT_INITIAL, DEFAULT_MAXIMUM); } + /** + * Creates a new predictor with the specified parameters. + * + * @param minimum the inclusive lower bound of the expected buffer size + * @param initial the initial buffer size when no feed back was received + * @param maximum the inclusive upper bound of the expected buffer size + */ public DefaultReceiveBufferSizePredictor(int minimum, int initial, int maximum) { if (minimum <= 0) { throw new IllegalArgumentException("minimum: " + minimum); @@ -73,7 +91,7 @@ public class DefaultReceiveBufferSizePredictor implements } else { shouldHalveNow = true; } - } else if (previousReceiveBufferSize == nextReceiveBufferSize) { + } else if (previousReceiveBufferSize >= nextReceiveBufferSize) { nextReceiveBufferSize = Math.min(maximum, nextReceiveBufferSize << 1); shouldHalveNow = false; } diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketChannelFactory.java b/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketChannelFactory.java index 5b5c876b19..2cab3a7e1a 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketChannelFactory.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/NioClientSocketChannelFactory.java @@ -22,14 +22,69 @@ */ package org.jboss.netty.channel.socket.nio; +import java.nio.channels.Selector; import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelSink; import org.jboss.netty.channel.socket.ClientSocketChannelFactory; import org.jboss.netty.channel.socket.SocketChannel; /** + * A {@link ClientSocketChannelFactory} which creates a client-side NIO-based + * {@link SocketChannel}. It utilizes the non-blocking I/O mode which was + * introduced with NIO to serve many number of concurrent connections + * efficiently. + * + *

<h3>How threads work</h3>
+ * <p>
+ * There are two types of threads in a {@link NioClientSocketChannelFactory}; + * one is the boss thread and the other is the worker thread. + * + *

<h4>Boss thread</h4>
+ * <p>
+ * One {@link NioClientSocketChannelFactory} has one boss thread. It makes + * a connection attempt on request. Once a connection attempt succeeds, + * the boss thread passes the connected {@link Channel} to one of the worker + * threads that the {@link NioClientSocketChannelFactory} manages. + * + *

<h4>Worker threads</h4>
+ * <p>
+ * One {@link NioClientSocketChannelFactory} can have one or more worker + * threads. A worker thread performs non-blocking read and write for one or + * more {@link Channel}s on request. + * + *

<h3>Life cycle of threads and graceful shutdown</h3>
+ * <p>
+ * All threads are acquired from the {@link Executor}s which were specified + * when a {@link NioClientSocketChannelFactory} is created. A boss thread is + * acquired from the {@code bossExecutor}, and worker threads are acquired from + * the {@code workerExecutor}. Therefore, you should make sure the specified + * {@link Executor}s are able to lend the sufficient number of threads. + * It is the best bet to specify {@linkplain Executors#newCachedThreadPool() a cached thread pool}. + *
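For reference, a minimal construction sketch along the lines recommended above (not part of the patch; the class and variable names are illustrative, and the worker count of 4 is an arbitrary example). It uses the two constructors documented in this file:

import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;

public class NioClientFactoryExample {
    public static void main(String[] args) {
        // Cached thread pools grow on demand, so the factory never starves for threads.
        Executor bossExecutor = Executors.newCachedThreadPool();
        Executor workerExecutor = Executors.newCachedThreadPool();

        // Default worker count: one worker per available processor.
        ClientSocketChannelFactory factory =
                new NioClientSocketChannelFactory(bossExecutor, workerExecutor);

        // Or pin the worker count explicitly (4 is only an example value).
        ClientSocketChannelFactory smallFactory =
                new NioClientSocketChannelFactory(bossExecutor, workerExecutor, 4);
    }
}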
<p>
+ * Both boss and worker threads are acquired lazily, and then released when + * there's nothing left to process. All the related resources such as + * {@link Selector} are also released when the boss and worker threads are + * released. Therefore, to shut down a service gracefully, you should do the + * following: + * + *

<ol>
+ * <li>close all channels created by the factory,</li>
+ * <li>call {@link ExecutorService#shutdown()} for all executors which were
+ *     specified to create the factory, and</li>
+ * <li>call {@link ExecutorService#awaitTermination(long, TimeUnit)}
+ *     until it returns {@code true}.</li>
+ * </ol>
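To make the three steps above concrete, here is a hedged shutdown sketch (not part of the patch). It assumes the application keeps its own collection of the channels it created through the factory, and that bossExecutor and workerExecutor are the ExecutorServices passed to the factory's constructor:

import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.jboss.netty.channel.Channel;

public final class NioClientShutdownExample {
    public static void shutdownGracefully(Collection<Channel> channels,
                                          ExecutorService bossExecutor,
                                          ExecutorService workerExecutor)
            throws InterruptedException {
        // 1. Close every channel created by the factory and wait for the close to complete.
        for (Channel channel : channels) {
            channel.close().awaitUninterruptibly();
        }
        // 2. Shut down the executors which were specified to create the factory.
        bossExecutor.shutdown();
        workerExecutor.shutdown();
        // 3. Wait until both executors have actually terminated.
        while (!bossExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
            // keep waiting; 5 seconds is an arbitrary polling interval
        }
        while (!workerExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
            // keep waiting
        }
    }
}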
+ * + * Please make sure not to shut down the executor until all channels are + * closed. Otherwise, you will end up with a {@link RejectedExecutionException} + * and the related resources might not be released properly. * * @author The Netty Project (netty-dev@lists.jboss.org) * @author Trustin Lee (tlee@redhat.com) @@ -41,11 +96,32 @@ public class NioClientSocketChannelFactory implements ClientSocketChannelFactory private final ChannelSink sink; + /** + * Creates a new instance. Calling this constructor is same with calling + * {@link #NioClientSocketChannelFactory(Executor, Executor, int)} with + * the number of available processors in the machine. The number of + * available processors is calculated by {@link Runtime#availableProcessors()}. + * + * @param bossExecutor + * the {@link Executor} which will execute the boss thread + * @param workerExecutor + * the {@link Executor} which will execute the I/O worker threads + */ public NioClientSocketChannelFactory( Executor bossExecutor, Executor workerExecutor) { this(bossExecutor, workerExecutor, Runtime.getRuntime().availableProcessors()); } + /** + * Creates a new instance. + * + * @param bossExecutor + * the {@link Executor} which will execute the boss thread + * @param workerExecutor + * the {@link Executor} which will execute the I/O worker threads + * @param workerCount + * the number of I/O worker threads to start + */ public NioClientSocketChannelFactory( Executor bossExecutor, Executor workerExecutor, int workerCount) { diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/NioServerSocketChannelFactory.java b/src/main/java/org/jboss/netty/channel/socket/nio/NioServerSocketChannelFactory.java index 45056a3ef2..adafbb32c2 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/NioServerSocketChannelFactory.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/NioServerSocketChannelFactory.java @@ -22,14 +22,72 @@ */ package org.jboss.netty.channel.socket.nio; +import java.nio.channels.Selector; import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelSink; import org.jboss.netty.channel.socket.ServerSocketChannel; import org.jboss.netty.channel.socket.ServerSocketChannelFactory; /** + * A {@link ServerSocketChannelFactory} which creates a server-side NIO-based + * {@link ServerSocketChannel}. It utilizes the non-blocking I/O mode which + * was introduced with NIO to serve many number of concurrent connections + * efficiently. + * + *

<h3>How threads work</h3>
+ * <p>
+ * There are two types of threads in a {@link NioServerSocketChannelFactory}; + * one is the boss thread and the other is the worker thread. + * + *

<h4>Boss threads</h4>
+ * <p>
+ * Each bound {@link ServerSocketChannel} has its own boss thread. + * For example, if you opened two server ports such as 80 and 443, you will + * have two boss threads. A boss thread accepts incoming connections until + * the port is unbound. Once a connection is accepted successfully, the boss + * thread passes the accepted {@link Channel} to one of the worker + * threads that the {@link NioServerSocketChannelFactory} manages. + * + *
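As an illustration of the two-ports/two-boss-threads point, the sketch below binds ports 80 and 443 through org.jboss.netty.bootstrap.ServerBootstrap. ServerBootstrap is not part of this patch, and its use here, together with the factory constructor arguments, is an assumption; pipeline/handler setup is omitted:

import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;

public class TwoPortsExample {
    public static void main(String[] args) {
        // One factory, hence one shared pool of worker threads.
        NioServerSocketChannelFactory factory = new NioServerSocketChannelFactory(
                Executors.newCachedThreadPool(), Executors.newCachedThreadPool());

        // Each bound port gets its own ServerSocketChannel and therefore its own boss thread.
        new ServerBootstrap(factory).bind(new InetSocketAddress(80));   // boss thread #1
        new ServerBootstrap(factory).bind(new InetSocketAddress(443));  // boss thread #2
    }
}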

<h4>Worker threads</h4>
+ * <p>
+ * One {@link NioServerSocketChannelFactory} can have one or more worker + * threads. A worker thread performs non-blocking read and write for one or + * more {@link Channel}s on request. + * + *

<h3>Life cycle of threads and graceful shutdown</h3>
+ * <p>
+ * All threads are acquired from the {@link Executor}s which were specified + * when a {@link NioServerSocketChannelFactory} is created. Boss threads are + * acquired from the {@code bossExecutor}, and worker threads are acquired from + * the {@code workerExecutor}. Therefore, you should make sure the specified + * {@link Executor}s are able to lend the sufficient number of threads. + * It is the best bet to specify {@linkplain Executors#newCachedThreadPool() a cached thread pool}. + *
<p>
+ * Both boss and worker threads are acquired lazily, and then released when + * there's nothing left to process. All the related resources such as + * {@link Selector} are also released when the boss and worker threads are + * released. Therefore, to shut down a service gracefully, you should do the + * following: + * + *

<ol>
+ * <li>unbind all channels created by the factory,</li>
+ * <li>close all child channels accepted by the unbound channels,</li>
+ * <li>call {@link ExecutorService#shutdown()} for all executors which were
+ *     specified to create the factory, and</li>
+ * <li>call {@link ExecutorService#awaitTermination(long, TimeUnit)}
+ *     until it returns {@code true}.</li>
+ * </ol>
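Expressed as code, only the first two steps differ from the client-side shutdown sketch shown earlier; shutting down and awaiting the executors is identical. The two collections below are assumed to be maintained by the application itself:

// 1. Unbind every server channel created by the factory.
for (Channel serverChannel : boundChannels) {          // boundChannels: tracked by the application
    serverChannel.unbind().awaitUninterruptibly();
}
// 2. Close every child channel accepted by the (now unbound) server channels.
for (Channel child : acceptedChannels) {               // acceptedChannels: tracked by the application
    child.close().awaitUninterruptibly();
}
// 3./4. Then call shutdown() and awaitTermination() on bossExecutor and
//       workerExecutor exactly as in the client-side example.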
+ * + * Please make sure not to shut down the executor until all channels are + * closed. Otherwise, you will end up with a {@link RejectedExecutionException} + * and the related resources might not be released properly. * * @author The Netty Project (netty-dev@lists.jboss.org) * @author Trustin Lee (tlee@redhat.com) diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/NioSocketChannelConfig.java b/src/main/java/org/jboss/netty/channel/socket/nio/NioSocketChannelConfig.java index 62bcaa06d2..65caf78725 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/NioSocketChannelConfig.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/NioSocketChannelConfig.java @@ -25,9 +25,11 @@ package org.jboss.netty.channel.socket.nio; import java.nio.ByteBuffer; import java.nio.channels.WritableByteChannel; +import org.jboss.netty.channel.socket.SocketChannel; import org.jboss.netty.channel.socket.SocketChannelConfig; /** + * A {@link SocketChannelConfig} for a NIO TCP/IP {@link SocketChannel}. * * @author The Netty Project (netty-dev@lists.jboss.org) * @author Trustin Lee (tlee@redhat.com) @@ -38,19 +40,59 @@ import org.jboss.netty.channel.socket.SocketChannelConfig; */ public interface NioSocketChannelConfig extends SocketChannelConfig { + /** + * Returns the maximum loop count for a write operation until + * {@link WritableByteChannel#write(ByteBuffer)} returns a non-zero value. + * It is similar to what a spin lock is for in concurrency programming. + * It improves memory utilization and write throughput depending on + * the platform that JVM runs on. The default value is {@code 16}. + */ int getWriteSpinCount(); /** - * The maximum loop count for a write operation until + * Sets the maximum loop count for a write operation until * {@link WritableByteChannel#write(ByteBuffer)} returns a non-zero value. * It is similar to what a spin lock is for in concurrency programming. - * It improves memory utilization and write throughput significantly. + * It improves memory utilization and write throughput depending on + * the platform that JVM runs on. The default value is {@code 16}. + * + * @throws IllegalArgumentException + * if the specified value is {@code 0} or less than {@code 0} */ void setWriteSpinCount(int writeSpinCount); + /** + * Returns the {@link ReceiveBufferSizePredictor} which predicts the + * number of readable bytes in the socket receive buffer. The default + * predictor is {@link DefaultReceiveBufferSizePredictor}. + * . + */ ReceiveBufferSizePredictor getReceiveBufferSizePredictor(); + + /** + * Sets the {@link ReceiveBufferSizePredictor} which predicts the + * number of readable bytes in the socket receive buffer. The default + * predictor is {@link DefaultReceiveBufferSizePredictor}. + */ void setReceiveBufferSizePredictor(ReceiveBufferSizePredictor predictor); + /** + * Returns {@code true} if and only if an I/O thread should do its effort + * to balance the ratio of read operations and write operations. Assuring + * the read-write fairness is sometimes necessary in a high speed network + * because a certain channel can spend too much time on flushing the + * large number of write requests not giving enough time for other channels + * to perform I/O. The default value is {@code false}. + */ boolean isReadWriteFair(); + + /** + * Sets if an I/O thread should do its effort to balance the ratio of read + * operations and write operations. 
Assuring the read-write fairness is + * sometimes necessary in a high speed network because a certain channel + * can spend too much time on flushing the large number of write requests + * not giving enough time for other channels to perform I/O. The default + * value is {@code false}. + */ void setReadWriteFair(boolean fair); } diff --git a/src/main/java/org/jboss/netty/channel/socket/nio/ReceiveBufferSizePredictor.java b/src/main/java/org/jboss/netty/channel/socket/nio/ReceiveBufferSizePredictor.java index 3c2dc0a063..f9f02930eb 100644 --- a/src/main/java/org/jboss/netty/channel/socket/nio/ReceiveBufferSizePredictor.java +++ b/src/main/java/org/jboss/netty/channel/socket/nio/ReceiveBufferSizePredictor.java @@ -22,7 +22,19 @@ */ package org.jboss.netty.channel.socket.nio; +import org.jboss.netty.buffer.ChannelBuffer; + /** + * Predicts the number of readable bytes in the socket receive buffer. + *
<p>
+ * It calculates the close-to-optimal capacity of the {@link ChannelBuffer} + * for the next read operation depending on the actual number of read bytes + * in the previous read operation. The more accurate the prediction is, the + * more effective the memory utilization will be. + *
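To make the intended call pattern concrete, here is an editorial sketch (not part of the patch) of a simplified I/O loop body. The helper readIntoBuffer(...) is hypothetical, and ChannelBuffers.buffer(int) from org.jboss.netty.buffer is assumed to be the buffer allocation method:

ReceiveBufferSizePredictor predictor = new DefaultReceiveBufferSizePredictor();

// Inside the (simplified) read loop of an I/O thread:
int expected = predictor.nextReceiveBufferSize();         // predicted number of readable bytes
ChannelBuffer buffer = ChannelBuffers.buffer(expected);   // allocate a buffer of that capacity
int actuallyRead = readIntoBuffer(buffer);                // hypothetical helper performing the read
if (actuallyRead > 0) {
    // Feed the real figure back so that the next prediction is more accurate.
    predictor.previousReceiveBufferSize(actuallyRead);
}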
<p>
+ * Once a read operation is performed and the actual number of read bytes is + * known, an I/O thread should call {@link #previousReceiveBufferSize(int)} to + * update the predictor so it can predict more accurately. * * @author The Netty Project (netty-dev@lists.jboss.org) * @author Trustin Lee (tlee@redhat.com) @@ -31,6 +43,22 @@ package org.jboss.netty.channel.socket.nio; * */ public interface ReceiveBufferSizePredictor { + + /** + * Predicts the capacity of the {@link ChannelBuffer} for the next + * read operation depending on the actual number of read bytes in the + * previous read operation. + * + * @return the expected number of readable bytes this time + */ int nextReceiveBufferSize(); + + /** + * Updates this predictor by telling the actual number of read bytes + * in the previous read operation. + * + * @param previousReceiveBufferSize + * the actual number of read bytes in the previous read operation + */ void previousReceiveBufferSize(int previousReceiveBufferSize); } diff --git a/src/main/java/org/jboss/netty/channel/socket/oio/OioClientSocketChannelFactory.java b/src/main/java/org/jboss/netty/channel/socket/oio/OioClientSocketChannelFactory.java index 11ed1eb5c3..36b76b9944 100644 --- a/src/main/java/org/jboss/netty/channel/socket/oio/OioClientSocketChannelFactory.java +++ b/src/main/java/org/jboss/netty/channel/socket/oio/OioClientSocketChannelFactory.java @@ -23,12 +23,54 @@ package org.jboss.netty.channel.socket.oio; import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.socket.ClientSocketChannelFactory; import org.jboss.netty.channel.socket.SocketChannel; /** + * A {@link ClientSocketChannelFactory} which creates a client-side blocking + * I/O based {@link SocketChannel}. It utilizes the good old blocking I/O API + * which is known to yield better throughput and latency when there are + * relatively small number of connections to serve. + * + *

<h3>How threads work</h3>
+ * <p>
+ * There is only one type of thread in an {@link OioClientSocketChannelFactory}: + * worker threads. + * + *

<h4>Worker threads</h4>
+ * <p>
+ * Each connected {@link Channel} has a dedicated worker thread, just like a + * traditional blocking I/O thread model. + * + *

<h3>Life cycle of threads and graceful shutdown</h3>
+ * <p>
+ * Worker threads are acquired from the {@link Executor} which was specified + * when an {@link OioClientSocketChannelFactory} is created (i.e. {@code workerExecutor}). + * Therefore, you should make sure the specified {@link Executor} is able to + * lend the sufficient number of threads. + *
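For reference, a construction sketch under the assumption that the factory exposes a single-Executor constructor taking the workerExecutor mentioned above (the constructor itself is outside this hunk, and the class name is illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
import org.jboss.netty.channel.socket.oio.OioClientSocketChannelFactory;

public class OioClientFactoryExample {
    public static void main(String[] args) {
        // A cached thread pool can grow to one thread per open connection,
        // which matches the one-worker-thread-per-channel model described above.
        ExecutorService workerExecutor = Executors.newCachedThreadPool();
        ClientSocketChannelFactory factory =
                new OioClientSocketChannelFactory(workerExecutor);
    }
}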
<p>
+ * Worker threads are acquired lazily, and then released when there's nothing + * left to process. All the related resources are also released when the + * worker threads are released. Therefore, to shut down a service gracefully, + * you should do the following: + * + *

<ol>
+ * <li>close all channels created by the factory,</li>
+ * <li>call {@link ExecutorService#shutdown()} for the {@code workerExecutor}
+ *     which was specified to create the factory, and</li>
+ * <li>call {@link ExecutorService#awaitTermination(long, TimeUnit)}
+ *     until it returns {@code true}.</li>
+ * </ol>
+ * + * Please make sure not to shut down the executor until all channels are + * closed. Otherwise, you will end up with a {@link RejectedExecutionException} + * and the related resources might not be released properly. * * @author The Netty Project (netty-dev@lists.jboss.org) * @author Trustin Lee (tlee@redhat.com) diff --git a/src/main/java/org/jboss/netty/channel/socket/oio/OioServerSocketChannelFactory.java b/src/main/java/org/jboss/netty/channel/socket/oio/OioServerSocketChannelFactory.java index 802d38806e..2ec324a48e 100644 --- a/src/main/java/org/jboss/netty/channel/socket/oio/OioServerSocketChannelFactory.java +++ b/src/main/java/org/jboss/netty/channel/socket/oio/OioServerSocketChannelFactory.java @@ -23,13 +23,66 @@ package org.jboss.netty.channel.socket.oio; import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelSink; import org.jboss.netty.channel.socket.ServerSocketChannel; import org.jboss.netty.channel.socket.ServerSocketChannelFactory; /** + * A {@link ServerSocketChannelFactory} which creates a server-side blocking + * I/O based {@link ServerSocketChannel}. It utilizes the good old blocking + * I/O API which is known to yield better throughput and latency when there + * are relatively small number of connections to serve. + * + *

<h3>How threads work</h3>
+ * <p>
+ * There are two types of threads in an {@link OioServerSocketChannelFactory}; + * one is the boss thread and the other is the worker thread. + * + *

<h4>Boss threads</h4>
+ * <p>
+ * Each bound {@link ServerSocketChannel} has its own boss thread. + * For example, if you opened two server ports such as 80 and 443, you will + * have two boss threads. A boss thread accepts incoming connections until + * the port is unbound. Once a connection is accepted successfully, the boss + * thread passes the accepted {@link Channel} to one of the worker + * threads that the {@link OioServerSocketChannelFactory} manages. + * + *

<h4>Worker threads</h4>
+ * <p>
+ * Each connected {@link Channel} has a dedicated worker thread, just like a + * traditional blocking I/O thread model. + * + *

<h3>Life cycle of threads and graceful shutdown</h3>
+ * <p>
+ * All threads are acquired from the {@link Executor}s which were specified + * when a {@link OioServerSocketChannelFactory} is created. Boss threads are + * acquired from the {@code bossExecutor}, and worker threads are acquired from + * the {@code workerExecutor}. Therefore, you should make sure the specified + * {@link Executor}s are able to lend the sufficient number of threads. + *
<p>
+ * Both boss and worker threads are acquired lazily, and then released when + * there's nothing left to process. All the related resources are also + * released when the boss and worker threads are released. Therefore, to shut + * down a service gracefully, you should do the following: + * + *

<ol>
+ * <li>unbind all channels created by the factory,</li>
+ * <li>close all child channels accepted by the unbound channels,</li>
+ * <li>call {@link ExecutorService#shutdown()} for all executors which were
+ *     specified to create the factory, and</li>
+ * <li>call {@link ExecutorService#awaitTermination(long, TimeUnit)}
+ *     until it returns {@code true}.</li>
+ * </ol>
+ * + * Please make sure not to shut down the executor until all channels are + * closed. Otherwise, you will end up with a {@link RejectedExecutionException} + * and the related resources might not be released properly. * * @author The Netty Project (netty-dev@lists.jboss.org) * @author Trustin Lee (tlee@redhat.com)