added sctp package dir with initial classes copied from nio tcp

Jestan Nirojan committed on 2011-04-23 13:58:15 +05:30
parent a722f64991
commit 1b6dff9975
16 changed files with 3615 additions and 0 deletions

View File

@ -0,0 +1,193 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import java.net.Socket;
import java.util.Map;
import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory;
import org.jboss.netty.channel.ChannelException;
import org.jboss.netty.channel.ReceiveBufferSizePredictor;
import org.jboss.netty.channel.ReceiveBufferSizePredictorFactory;
import org.jboss.netty.channel.socket.DefaultSocketChannelConfig;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.ConversionUtil;
/**
* The default {@link NioSocketChannelConfig} implementation.
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
class DefaultNioSocketChannelConfig extends DefaultSocketChannelConfig
implements NioSocketChannelConfig {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(DefaultNioSocketChannelConfig.class);
private static final ReceiveBufferSizePredictorFactory DEFAULT_PREDICTOR_FACTORY =
new AdaptiveReceiveBufferSizePredictorFactory();
private volatile int writeBufferHighWaterMark = 64 * 1024;
private volatile int writeBufferLowWaterMark = 32 * 1024;
private volatile ReceiveBufferSizePredictor predictor;
private volatile ReceiveBufferSizePredictorFactory predictorFactory = DEFAULT_PREDICTOR_FACTORY;
private volatile int writeSpinCount = 16;
DefaultNioSocketChannelConfig(Socket socket) {
super(socket);
}
@Override
public void setOptions(Map<String, Object> options) {
super.setOptions(options);
if (getWriteBufferHighWaterMark() < getWriteBufferLowWaterMark()) {
// Recover the integrity of the configuration with a sensible value.
setWriteBufferLowWaterMark0(getWriteBufferHighWaterMark() >>> 1);
// Notify the user about misconfiguration.
logger.warn(
"writeBufferLowWaterMark cannot be greater than " +
"writeBufferHighWaterMark; setting to the half of the " +
"writeBufferHighWaterMark.");
}
}
@Override
public boolean setOption(String key, Object value) {
if (super.setOption(key, value)) {
return true;
}
if (key.equals("writeBufferHighWaterMark")) {
setWriteBufferHighWaterMark0(ConversionUtil.toInt(value));
} else if (key.equals("writeBufferLowWaterMark")) {
setWriteBufferLowWaterMark0(ConversionUtil.toInt(value));
} else if (key.equals("writeSpinCount")) {
setWriteSpinCount(ConversionUtil.toInt(value));
} else if (key.equals("receiveBufferSizePredictorFactory")) {
setReceiveBufferSizePredictorFactory((ReceiveBufferSizePredictorFactory) value);
} else if (key.equals("receiveBufferSizePredictor")) {
setReceiveBufferSizePredictor((ReceiveBufferSizePredictor) value);
} else {
return false;
}
return true;
}
@Override
public int getWriteBufferHighWaterMark() {
return writeBufferHighWaterMark;
}
@Override
public void setWriteBufferHighWaterMark(int writeBufferHighWaterMark) {
if (writeBufferHighWaterMark < getWriteBufferLowWaterMark()) {
throw new IllegalArgumentException(
"writeBufferHighWaterMark cannot be less than " +
"writeBufferLowWaterMark (" + getWriteBufferLowWaterMark() + "): " +
writeBufferHighWaterMark);
}
setWriteBufferHighWaterMark0(writeBufferHighWaterMark);
}
private void setWriteBufferHighWaterMark0(int writeBufferHighWaterMark) {
if (writeBufferHighWaterMark < 0) {
throw new IllegalArgumentException(
"writeBufferHighWaterMark: " + writeBufferHighWaterMark);
}
this.writeBufferHighWaterMark = writeBufferHighWaterMark;
}
@Override
public int getWriteBufferLowWaterMark() {
return writeBufferLowWaterMark;
}
@Override
public void setWriteBufferLowWaterMark(int writeBufferLowWaterMark) {
if (writeBufferLowWaterMark > getWriteBufferHighWaterMark()) {
throw new IllegalArgumentException(
"writeBufferLowWaterMark cannot be greater than " +
"writeBufferHighWaterMark (" + getWriteBufferHighWaterMark() + "): " +
writeBufferLowWaterMark);
}
setWriteBufferLowWaterMark0(writeBufferLowWaterMark);
}
private void setWriteBufferLowWaterMark0(int writeBufferLowWaterMark) {
if (writeBufferLowWaterMark < 0) {
throw new IllegalArgumentException(
"writeBufferLowWaterMark: " + writeBufferLowWaterMark);
}
this.writeBufferLowWaterMark = writeBufferLowWaterMark;
}
@Override
public int getWriteSpinCount() {
return writeSpinCount;
}
@Override
public void setWriteSpinCount(int writeSpinCount) {
if (writeSpinCount <= 0) {
throw new IllegalArgumentException(
"writeSpinCount must be a positive integer.");
}
this.writeSpinCount = writeSpinCount;
}
@Override
public ReceiveBufferSizePredictor getReceiveBufferSizePredictor() {
ReceiveBufferSizePredictor predictor = this.predictor;
if (predictor == null) {
try {
this.predictor = predictor = getReceiveBufferSizePredictorFactory().getPredictor();
} catch (Exception e) {
throw new ChannelException(
"Failed to create a new " +
ReceiveBufferSizePredictor.class.getSimpleName() + '.',
e);
}
}
return predictor;
}
@Override
public void setReceiveBufferSizePredictor(
ReceiveBufferSizePredictor predictor) {
if (predictor == null) {
throw new NullPointerException("predictor");
}
this.predictor = predictor;
}
@Override
public ReceiveBufferSizePredictorFactory getReceiveBufferSizePredictorFactory() {
return predictorFactory;
}
@Override
public void setReceiveBufferSizePredictorFactory(ReceiveBufferSizePredictorFactory predictorFactory) {
if (predictorFactory == null) {
throw new NullPointerException("predictorFactory");
}
this.predictorFactory = predictorFactory;
}
}
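
The string-keyed option API above is how Netty 3.x applies Bootstrap-style option maps. A minimal sketch of driving it through ChannelConfig.setOptions(), not part of this commit; the config parameter stands in for a live channel's getConfig():

import java.util.HashMap;
import java.util.Map;
import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory;
import org.jboss.netty.channel.ChannelConfig;

final class ConfigOptionsSketch {
    // 'config' would come from a live channel, e.g. channel.getConfig();
    // it is a hypothetical parameter here.
    static void apply(ChannelConfig config) {
        Map<String, Object> options = new HashMap<String, Object>();
        options.put("writeBufferHighWaterMark", 128 * 1024); // must stay >= the low mark
        options.put("writeBufferLowWaterMark", 32 * 1024);
        options.put("writeSpinCount", 16);                   // must be positive
        options.put("receiveBufferSizePredictorFactory",
                new AdaptiveReceiveBufferSizePredictorFactory());
        config.setOptions(options); // dispatches key by key to setOption() above
    }
}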

View File

@ -0,0 +1,53 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import static org.jboss.netty.channel.Channels.*;
import java.nio.channels.SocketChannel;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
/**
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
final class NioAcceptedSocketChannel extends NioSocketChannel {
final Thread bossThread;
NioAcceptedSocketChannel(
ChannelFactory factory, ChannelPipeline pipeline,
Channel parent, ChannelSink sink,
SocketChannel socket, NioWorker worker, Thread bossThread) {
super(parent, factory, pipeline, sink, socket, worker);
this.bossThread = bossThread;
setConnected();
fireChannelOpen(this);
fireChannelBound(this, getLocalAddress());
fireChannelConnected(this, getRemoteAddress());
}
}

View File

@ -0,0 +1,86 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import static org.jboss.netty.channel.Channels.*;
import java.io.IOException;
import java.nio.channels.SocketChannel;
import org.jboss.netty.channel.ChannelException;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
/**
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
final class NioClientSocketChannel extends NioSocketChannel {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(NioClientSocketChannel.class);
private static SocketChannel newSocket() {
SocketChannel socket;
try {
socket = SocketChannel.open();
} catch (IOException e) {
throw new ChannelException("Failed to open a socket.", e);
}
boolean success = false;
try {
socket.configureBlocking(false);
success = true;
} catch (IOException e) {
throw new ChannelException("Failed to enter non-blocking mode.", e);
} finally {
if (!success) {
try {
socket.close();
} catch (IOException e) {
logger.warn(
"Failed to close a partially initialized socket.",
e);
}
}
}
return socket;
}
volatile ChannelFuture connectFuture;
volatile boolean boundManually;
// Does not need to be volatile as it's accessed by only one thread.
long connectDeadlineNanos;
NioClientSocketChannel(
ChannelFactory factory, ChannelPipeline pipeline,
ChannelSink sink, NioWorker worker) {
super(null, factory, pipeline, sink, newSocket(), worker);
fireChannelOpen(this);
}
}

View File

@ -0,0 +1,142 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import java.util.concurrent.Executor;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
import org.jboss.netty.channel.socket.SocketChannel;
import org.jboss.netty.util.internal.ExecutorUtil;
/**
* A {@link org.jboss.netty.channel.socket.ClientSocketChannelFactory} which creates a client-side NIO-based
* {@link org.jboss.netty.channel.socket.SocketChannel}. It utilizes the non-blocking I/O mode which was
* introduced with NIO to serve a large number of concurrent connections
* efficiently.
*
* <h3>How threads work</h3>
* <p>
* There are two types of threads in a {@link org.jboss.netty.channel.socket.sctp.NioClientSocketChannelFactory};
* one is the boss thread and the other is the worker thread.
*
* <h4>Boss thread</h4>
* <p>
* One {@link org.jboss.netty.channel.socket.sctp.NioClientSocketChannelFactory} has one boss thread. It makes
* a connection attempt on request. Once a connection attempt succeeds,
* the boss thread passes the connected {@link org.jboss.netty.channel.Channel} to one of the worker
* threads that the {@link org.jboss.netty.channel.socket.sctp.NioClientSocketChannelFactory} manages.
*
* <h4>Worker threads</h4>
* <p>
* One {@link org.jboss.netty.channel.socket.sctp.NioClientSocketChannelFactory} can have one or more worker
* threads. A worker thread performs non-blocking read and write for one or
* more {@link org.jboss.netty.channel.Channel}s in a non-blocking mode.
*
* <h3>Life cycle of threads and graceful shutdown</h3>
* <p>
* All threads are acquired from the {@link java.util.concurrent.Executor}s which were specified
* when a {@link org.jboss.netty.channel.socket.sctp.NioClientSocketChannelFactory} was created. A boss thread is
* acquired from the {@code bossExecutor}, and worker threads are acquired from
* the {@code workerExecutor}. Therefore, you should make sure the specified
* {@link java.util.concurrent.Executor}s are able to lend a sufficient number of threads.
* Your best bet is to specify {@linkplain java.util.concurrent.Executors#newCachedThreadPool() a cached thread pool}.
* <p>
* Both boss and worker threads are acquired lazily, and then released when
* there's nothing left to process. All the related resources such as
* {@link java.nio.channels.Selector} are also released when the boss and worker threads are
* released. Therefore, to shut down a service gracefully, you should do the
* following:
*
* <ol>
* <li>close all channels created by the factory usually using
* {@link org.jboss.netty.channel.group.ChannelGroup#close()}, and</li>
* <li>call {@link #releaseExternalResources()}.</li>
* </ol>
*
* Please make sure not to shut down the executor until all channels are
* closed. Otherwise, you will end up with a {@link java.util.concurrent.RejectedExecutionException}
* and the related resources might not be released properly.
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
* @apiviz.landmark
*/
public class NioClientSocketChannelFactory implements ClientSocketChannelFactory {
private final Executor bossExecutor;
private final Executor workerExecutor;
private final NioClientSocketPipelineSink sink;
/**
* Creates a new instance. Calling this constructor is the same as calling
* {@link #NioClientSocketChannelFactory(java.util.concurrent.Executor, java.util.concurrent.Executor, int)} with 2 *
* the number of available processors in the machine. The number of
* available processors is obtained by {@link Runtime#availableProcessors()}.
*
* @param bossExecutor
* the {@link java.util.concurrent.Executor} which will execute the boss thread
* @param workerExecutor
* the {@link java.util.concurrent.Executor} which will execute the I/O worker threads
*/
public NioClientSocketChannelFactory(
Executor bossExecutor, Executor workerExecutor) {
this(bossExecutor, workerExecutor, SelectorUtil.DEFAULT_IO_THREADS);
}
/**
* Creates a new instance.
*
* @param bossExecutor
* the {@link java.util.concurrent.Executor} which will execute the boss thread
* @param workerExecutor
* the {@link java.util.concurrent.Executor} which will execute the I/O worker threads
* @param workerCount
* the maximum number of I/O worker threads
*/
public NioClientSocketChannelFactory(
Executor bossExecutor, Executor workerExecutor,
int workerCount) {
if (bossExecutor == null) {
throw new NullPointerException("bossExecutor");
}
if (workerExecutor == null) {
throw new NullPointerException("workerExecutor");
}
if (workerCount <= 0) {
throw new IllegalArgumentException(
"workerCount (" + workerCount + ") " +
"must be a positive integer.");
}
this.bossExecutor = bossExecutor;
this.workerExecutor = workerExecutor;
sink = new NioClientSocketPipelineSink(bossExecutor, workerExecutor, workerCount);
}
@Override
public SocketChannel newChannel(ChannelPipeline pipeline) {
return new NioClientSocketChannel(this, pipeline, sink, sink.nextWorker());
}
@Override
public void releaseExternalResources() {
ExecutorUtil.terminate(bossExecutor, workerExecutor);
}
}
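
As a usage illustration for the javadoc above, a minimal client sketch wiring this factory into a Netty 3.x ClientBootstrap (not part of this commit; host, port, and handlers are placeholders):

import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.sctp.NioClientSocketChannelFactory;

public class ClientFactorySketch {
    public static void main(String[] args) {
        // Boss and worker threads are lent lazily by these executors.
        ClientBootstrap bootstrap = new ClientBootstrap(
                new NioClientSocketChannelFactory(
                        Executors.newCachedThreadPool(),
                        Executors.newCachedThreadPool()));
        bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
            @Override
            public ChannelPipeline getPipeline() {
                return Channels.pipeline(/* add handlers here */);
            }
        });
        ChannelFuture f = bootstrap.connect(new InetSocketAddress("localhost", 2905));
        f.awaitUninterruptibly().getChannel().getCloseFuture().awaitUninterruptibly();
        // Shut down as the javadoc prescribes: channels first, then the factory.
        bootstrap.releaseExternalResources();
    }
}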

View File

@ -0,0 +1,423 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import static org.jboss.netty.channel.Channels.*;
import java.io.IOException;
import java.net.ConnectException;
import java.net.SocketAddress;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.util.Iterator;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.jboss.netty.channel.AbstractChannelSink;
import org.jboss.netty.channel.ChannelEvent;
import org.jboss.netty.channel.ChannelException;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelState;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.DeadLockProofWorker;
import org.jboss.netty.util.internal.LinkedTransferQueue;
/**
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
class NioClientSocketPipelineSink extends AbstractChannelSink {
static final InternalLogger logger =
InternalLoggerFactory.getInstance(NioClientSocketPipelineSink.class);
final Executor bossExecutor;
private final Boss boss = new Boss();
private final NioWorker[] workers;
private final AtomicInteger workerIndex = new AtomicInteger();
NioClientSocketPipelineSink(
Executor bossExecutor, Executor workerExecutor, int workerCount) {
this.bossExecutor = bossExecutor;
workers = new NioWorker[workerCount];
for (int i = 0; i < workers.length; i ++) {
workers[i] = new NioWorker(workerExecutor);
}
}
@Override
public void eventSunk(
ChannelPipeline pipeline, ChannelEvent e) throws Exception {
if (e instanceof ChannelStateEvent) {
ChannelStateEvent event = (ChannelStateEvent) e;
NioClientSocketChannel channel =
(NioClientSocketChannel) event.getChannel();
ChannelFuture future = event.getFuture();
ChannelState state = event.getState();
Object value = event.getValue();
switch (state) {
case OPEN:
if (Boolean.FALSE.equals(value)) {
channel.worker.close(channel, future);
}
break;
case BOUND:
if (value != null) {
bind(channel, future, (SocketAddress) value);
} else {
channel.worker.close(channel, future);
}
break;
case CONNECTED:
if (value != null) {
connect(channel, future, (SocketAddress) value);
} else {
channel.worker.close(channel, future);
}
break;
case INTEREST_OPS:
channel.worker.setInterestOps(channel, future, ((Integer) value).intValue());
break;
}
} else if (e instanceof MessageEvent) {
MessageEvent event = (MessageEvent) e;
NioSocketChannel channel = (NioSocketChannel) event.getChannel();
boolean offered = channel.writeBuffer.offer(event);
assert offered;
channel.worker.writeFromUserCode(channel);
}
}
private void bind(
NioClientSocketChannel channel, ChannelFuture future,
SocketAddress localAddress) {
try {
channel.socket.socket().bind(localAddress);
channel.boundManually = true;
channel.setBound();
future.setSuccess();
fireChannelBound(channel, channel.getLocalAddress());
} catch (Throwable t) {
future.setFailure(t);
fireExceptionCaught(channel, t);
}
}
private void connect(
final NioClientSocketChannel channel, final ChannelFuture cf,
SocketAddress remoteAddress) {
try {
if (channel.socket.connect(remoteAddress)) {
channel.worker.register(channel, cf);
} else {
channel.getCloseFuture().addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture f)
throws Exception {
if (!cf.isDone()) {
cf.setFailure(new ClosedChannelException());
}
}
});
cf.addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
channel.connectFuture = cf;
boss.register(channel);
}
} catch (Throwable t) {
cf.setFailure(t);
fireExceptionCaught(channel, t);
channel.worker.close(channel, succeededFuture(channel));
}
}
NioWorker nextWorker() {
return workers[Math.abs(
workerIndex.getAndIncrement() % workers.length)];
}
private final class Boss implements Runnable {
volatile Selector selector;
private boolean started;
private final AtomicBoolean wakenUp = new AtomicBoolean();
private final Object startStopLock = new Object();
private final Queue<Runnable> registerTaskQueue = new LinkedTransferQueue<Runnable>();
Boss() {
super();
}
void register(NioClientSocketChannel channel) {
Runnable registerTask = new RegisterTask(this, channel);
Selector selector;
synchronized (startStopLock) {
if (!started) {
// Open a selector if this worker didn't start yet.
try {
this.selector = selector = Selector.open();
} catch (Throwable t) {
throw new ChannelException(
"Failed to create a selector.", t);
}
// Start the worker thread with the new Selector.
boolean success = false;
try {
DeadLockProofWorker.start(bossExecutor, this);
success = true;
} finally {
if (!success) {
// Release the Selector if the execution fails.
try {
selector.close();
} catch (Throwable t) {
logger.warn("Failed to close a selector.", t);
}
this.selector = selector = null;
// The method will return to the caller at this point.
}
}
} else {
// Use the existing selector if this worker has been started.
selector = this.selector;
}
assert selector != null && selector.isOpen();
started = true;
boolean offered = registerTaskQueue.offer(registerTask);
assert offered;
}
if (wakenUp.compareAndSet(false, true)) {
selector.wakeup();
}
}
@Override
public void run() {
boolean shutdown = false;
Selector selector = this.selector;
long lastConnectTimeoutCheckTimeNanos = System.nanoTime();
for (;;) {
wakenUp.set(false);
try {
int selectedKeyCount = selector.select(500);
// 'wakenUp.compareAndSet(false, true)' is always evaluated
// before calling 'selector.wakeup()' to reduce the wake-up
// overhead. (Selector.wakeup() is an expensive operation.)
//
// However, there is a race condition in this approach.
// The race condition is triggered when 'wakenUp' is set to
// true too early.
//
// 'wakenUp' is set to true too early if:
// 1) the selector is woken up between 'wakenUp.set(false)' and
//    'selector.select(...)'. (BAD)
// 2) the selector is woken up between 'selector.select(...)' and
//    'if (wakenUp.get()) { ... }'. (OK)
//
// In the first case, 'wakenUp' is set to true and the
// following 'selector.select(...)' will wake up immediately.
// Until 'wakenUp' is set to false again in the next round,
// 'wakenUp.compareAndSet(false, true)' will fail, and therefore
// any attempt to wake up the Selector will fail, too, causing
// the following 'selector.select(...)' call to block
// unnecessarily.
//
// To fix this problem, we wake up the selector again if wakenUp
// is true immediately after selector.select(...).
// It is inefficient in that it wakes up the selector for both
// the first case (BAD - wake-up required) and the second case
// (OK - no wake-up required).
if (wakenUp.get()) {
selector.wakeup();
}
processRegisterTaskQueue();
if (selectedKeyCount > 0) {
processSelectedKeys(selector.selectedKeys());
}
// Handle connection timeouts approximately every 0.5 seconds.
long currentTimeNanos = System.nanoTime();
if (currentTimeNanos - lastConnectTimeoutCheckTimeNanos >= 500 * 1000000L) {
lastConnectTimeoutCheckTimeNanos = currentTimeNanos;
processConnectTimeout(selector.keys(), currentTimeNanos);
}
// Exit the loop when there's nothing to handle.
// The shutdown flag is used to delay the shutdown of this
// loop to avoid excessive Selector creation when
// connection attempts are made in a one-by-one manner
// instead of concurrent manner.
if (selector.keys().isEmpty()) {
if (shutdown ||
bossExecutor instanceof ExecutorService && ((ExecutorService) bossExecutor).isShutdown()) {
synchronized (startStopLock) {
if (registerTaskQueue.isEmpty() && selector.keys().isEmpty()) {
started = false;
try {
selector.close();
} catch (IOException e) {
logger.warn(
"Failed to close a selector.", e);
} finally {
this.selector = null;
}
break;
} else {
shutdown = false;
}
}
} else {
// Give one more second.
shutdown = true;
}
} else {
shutdown = false;
}
} catch (Throwable t) {
logger.warn(
"Unexpected exception in the selector loop.", t);
// Prevent possible consecutive immediate failures.
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// Ignore.
}
}
}
}
private void processRegisterTaskQueue() {
for (;;) {
final Runnable task = registerTaskQueue.poll();
if (task == null) {
break;
}
task.run();
}
}
private void processSelectedKeys(Set<SelectionKey> selectedKeys) {
for (Iterator<SelectionKey> i = selectedKeys.iterator(); i.hasNext();) {
SelectionKey k = i.next();
i.remove();
if (!k.isValid()) {
close(k);
continue;
}
if (k.isConnectable()) {
connect(k);
}
}
}
private void processConnectTimeout(Set<SelectionKey> keys, long currentTimeNanos) {
ConnectException cause = null;
for (SelectionKey k: keys) {
if (!k.isValid()) {
continue;
}
NioClientSocketChannel ch = (NioClientSocketChannel) k.attachment();
if (ch.connectDeadlineNanos > 0 &&
currentTimeNanos >= ch.connectDeadlineNanos) {
if (cause == null) {
cause = new ConnectException("connection timed out");
}
ch.connectFuture.setFailure(cause);
fireExceptionCaught(ch, cause);
ch.worker.close(ch, succeededFuture(ch));
}
}
}
private void connect(SelectionKey k) {
NioClientSocketChannel ch = (NioClientSocketChannel) k.attachment();
try {
if (ch.socket.finishConnect()) {
k.cancel();
ch.worker.register(ch, ch.connectFuture);
}
} catch (Throwable t) {
ch.connectFuture.setFailure(t);
fireExceptionCaught(ch, t);
k.cancel(); // Some JDK implementations run into an infinite loop without this.
ch.worker.close(ch, succeededFuture(ch));
}
}
private void close(SelectionKey k) {
NioClientSocketChannel ch = (NioClientSocketChannel) k.attachment();
ch.worker.close(ch, succeededFuture(ch));
}
}
private static final class RegisterTask implements Runnable {
private final Boss boss;
private final NioClientSocketChannel channel;
RegisterTask(Boss boss, NioClientSocketChannel channel) {
this.boss = boss;
this.channel = channel;
}
@Override
public void run() {
try {
channel.socket.register(
boss.selector, SelectionKey.OP_CONNECT, channel);
} catch (ClosedChannelException e) {
channel.worker.close(channel, succeededFuture(channel));
}
int connectTimeout = channel.getConfig().getConnectTimeoutMillis();
if (connectTimeout > 0) {
channel.connectDeadlineNanos = System.nanoTime() + connectTimeout * 1000000L;
}
}
}
}
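
The long comment in the Boss run() loop above documents a wake-up race and its mitigation; here is a distilled, Netty-free sketch of that protocol (not part of this commit):

import java.io.IOException;
import java.nio.channels.Selector;
import java.util.concurrent.atomic.AtomicBoolean;

final class WakeupProtocolSketch {
    private final Selector selector;
    private final AtomicBoolean wakenUp = new AtomicBoolean();

    WakeupProtocolSketch(Selector selector) {
        this.selector = selector;
    }

    // Other threads call this instead of selector.wakeup() directly;
    // the CAS ensures at most one (expensive) wakeup() per select round.
    void requestWakeup() {
        if (wakenUp.compareAndSet(false, true)) {
            selector.wakeup();
        }
    }

    void selectOnce() throws IOException {
        wakenUp.set(false);
        selector.select(500);
        // If requestWakeup() ran between set(false) and select(), its
        // wakeup() may have been consumed too early; waking up again
        // prevents the next select() from blocking indefinitely.
        if (wakenUp.get()) {
            selector.wakeup();
        }
    }
}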

View File

@ -0,0 +1,435 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.spi.SelectorProvider;
import java.util.Set;
import java.util.Map.Entry;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.SystemPropertyUtil;
/**
* Provides information which is specific to a NIO service provider
* implementation.
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
class NioProviderMetadata {
static final InternalLogger logger =
InternalLoggerFactory.getInstance(NioProviderMetadata.class);
private static final String CONSTRAINT_LEVEL_PROPERTY =
"org.jboss.netty.channel.socket.nio.constraintLevel";
/**
* 0 - no need to wake up to get / set interestOps (most cases)
* 1 - no need to wake up to get interestOps, but need to wake up to set.
* 2 - need to wake up to get / set interestOps (old providers)
*/
static final int CONSTRAINT_LEVEL;
static {
int constraintLevel = -1;
// Use the system property if possible.
constraintLevel = SystemPropertyUtil.get(CONSTRAINT_LEVEL_PROPERTY, -1);
if (constraintLevel < 0 || constraintLevel > 2) {
constraintLevel = -1;
}
if (constraintLevel >= 0) {
logger.debug(
"Setting the NIO constraint level to: " + constraintLevel);
}
if (constraintLevel < 0) {
constraintLevel = detectConstraintLevelFromSystemProperties();
if (constraintLevel < 0) {
constraintLevel = 2;
logger.debug(
"Couldn't determine the NIO constraint level from " +
"the system properties; using the safest level (2)");
} else if (constraintLevel != 0) {
logger.info(
"Using the autodetected NIO constraint level: " +
constraintLevel +
" (Use better NIO provider for better performance)");
} else {
logger.debug(
"Using the autodetected NIO constraint level: " +
constraintLevel);
}
}
CONSTRAINT_LEVEL = constraintLevel;
if (CONSTRAINT_LEVEL < 0 || CONSTRAINT_LEVEL > 2) {
throw new Error(
"Unexpected NIO constraint level: " +
CONSTRAINT_LEVEL + ", please report this error.");
}
}
private static int detectConstraintLevelFromSystemProperties() {
String version = SystemPropertyUtil.get("java.specification.version");
String vminfo = SystemPropertyUtil.get("java.vm.info", "");
String os = SystemPropertyUtil.get("os.name");
String vendor = SystemPropertyUtil.get("java.vm.vendor");
String provider;
try {
provider = SelectorProvider.provider().getClass().getName();
} catch (Exception e) {
// Perhaps security exception.
provider = null;
}
if (version == null || os == null || vendor == null || provider == null) {
return -1;
}
os = os.toLowerCase();
vendor = vendor.toLowerCase();
// System.out.println(version);
// System.out.println(vminfo);
// System.out.println(os);
// System.out.println(vendor);
// System.out.println(provider);
// Sun JVM
if (vendor.indexOf("sun") >= 0) {
// Linux
if (os.indexOf("linux") >= 0) {
if (provider.equals("sun.nio.ch.EPollSelectorProvider") ||
provider.equals("sun.nio.ch.PollSelectorProvider")) {
return 0;
}
// Windows
} else if (os.indexOf("windows") >= 0) {
if (provider.equals("sun.nio.ch.WindowsSelectorProvider")) {
return 0;
}
// Solaris
} else if (os.indexOf("sun") >= 0 || os.indexOf("solaris") >= 0) {
if (provider.equals("sun.nio.ch.DevPollSelectorProvider")) {
return 0;
}
}
// Apple JVM
} else if (vendor.indexOf("apple") >= 0) {
// Mac OS
if (os.indexOf("mac") >= 0 && os.indexOf("os") >= 0) {
if (provider.equals("sun.nio.ch.KQueueSelectorProvider")) {
return 0;
}
}
// IBM
} else if (vendor.indexOf("ibm") >= 0) {
// Linux or AIX
if (os.indexOf("linux") >= 0 || os.indexOf("aix") >= 0) {
if (version.equals("1.5") || version.matches("^1\\.5\\D.*$")) {
if (provider.equals("sun.nio.ch.PollSelectorProvider")) {
return 1;
}
} else if (version.equals("1.6") || version.matches("^1\\.6\\D.*$")) {
// IBM JDK 1.6 has a different constraint level for each
// version. The exact version can be determined only by its
// build date.
Pattern datePattern = Pattern.compile(
"(?:^|[^0-9])(" +
"[2-9][0-9]{3}" + // year
"(?:0[1-9]|1[0-2])" + // month
"(?:0[1-9]|[12][0-9]|3[01])" + // day of month
")(?:$|[^0-9])");
Matcher dateMatcher = datePattern.matcher(vminfo);
if (dateMatcher.find()) {
long dateValue = Long.parseLong(dateMatcher.group(1));
if (dateValue < 20081105L) {
// SR0, 1, and 2
return 2;
} else {
// SR3 and later
if (provider.equals("sun.nio.ch.EPollSelectorProvider")) {
return 0;
} else if (provider.equals("sun.nio.ch.PollSelectorProvider")) {
return 1;
}
}
}
}
}
// BEA
} else if (vendor.indexOf("bea") >= 0 || vendor.indexOf("oracle") >= 0) {
// Linux
if (os.indexOf("linux") >= 0) {
if (provider.equals("sun.nio.ch.EPollSelectorProvider") ||
provider.equals("sun.nio.ch.PollSelectorProvider")) {
return 0;
}
// Windows
} else if (os.indexOf("windows") >= 0) {
if (provider.equals("sun.nio.ch.WindowsSelectorProvider")) {
return 0;
}
}
// Apache Software Foundation
} else if (vendor.indexOf("apache") >= 0) {
if (provider.equals("org.apache.harmony.nio.internal.SelectorProviderImpl")) {
return 1;
}
}
// Others (untested)
return -1;
}
private static final class ConstraintLevelAutodetector {
ConstraintLevelAutodetector() {
super();
}
int autodetect() {
final int constraintLevel;
ExecutorService executor = Executors.newCachedThreadPool();
boolean success;
long startTime;
int interestOps;
ServerSocketChannel ch = null;
SelectorLoop loop = null;
try {
// Open a channel.
ch = ServerSocketChannel.open();
// Configure the channel
try {
ch.socket().bind(new InetSocketAddress(0));
ch.configureBlocking(false);
} catch (Throwable e) {
logger.warn("Failed to configure a temporary socket.", e);
return -1;
}
// Prepare the selector loop.
try {
loop = new SelectorLoop();
} catch (Throwable e) {
logger.warn("Failed to open a temporary selector.", e);
return -1;
}
// Register the channel
try {
ch.register(loop.selector, 0);
} catch (Throwable e) {
logger.warn("Failed to register a temporary selector.", e);
return -1;
}
SelectionKey key = ch.keyFor(loop.selector);
// Start the selector loop.
executor.execute(loop);
// Level 0
success = true;
for (int i = 0; i < 10; i ++) {
// Increase the probability of calling interestOps
// while select() is running.
do {
while (!loop.selecting) {
Thread.yield();
}
// Wait a little bit more.
try {
Thread.sleep(50);
} catch (InterruptedException e) {
// Ignore
}
} while (!loop.selecting);
startTime = System.nanoTime();
key.interestOps(key.interestOps() | SelectionKey.OP_ACCEPT);
key.interestOps(key.interestOps() & ~SelectionKey.OP_ACCEPT);
if (System.nanoTime() - startTime >= 500000000L) {
success = false;
break;
}
}
if (success) {
constraintLevel = 0;
} else {
// Level 1
success = true;
for (int i = 0; i < 10; i ++) {
// Increase the probability of calling interestOps
// while select() is running.
do {
while (!loop.selecting) {
Thread.yield();
}
// Wait a little bit more.
try {
Thread.sleep(50);
} catch (InterruptedException e) {
// Ignore
}
} while (!loop.selecting);
startTime = System.nanoTime();
interestOps = key.interestOps();
synchronized (loop) {
loop.selector.wakeup();
key.interestOps(interestOps | SelectionKey.OP_ACCEPT);
key.interestOps(interestOps & ~SelectionKey.OP_ACCEPT);
}
if (System.nanoTime() - startTime >= 500000000L) {
success = false;
break;
}
}
if (success) {
constraintLevel = 1;
} else {
constraintLevel = 2;
}
}
} catch (Throwable e) {
return -1;
} finally {
if (ch != null) {
try {
ch.close();
} catch (Throwable e) {
logger.warn("Failed to close a temporary socket.", e);
}
}
if (loop != null) {
loop.done = true;
try {
executor.shutdownNow();
} catch (NullPointerException ex) {
// Some JDK throws NPE here, but shouldn't.
}
try {
for (;;) {
loop.selector.wakeup();
try {
if (executor.awaitTermination(1, TimeUnit.SECONDS)) {
break;
}
} catch (InterruptedException e) {
// Ignore
}
}
} catch (Throwable e) {
// Perhaps security exception.
}
try {
loop.selector.close();
} catch (Throwable e) {
logger.warn("Failed to close a temporary selector.", e);
}
}
}
return constraintLevel;
}
}
private static final class SelectorLoop implements Runnable {
final Selector selector;
volatile boolean done;
volatile boolean selecting; // Just an approximation
SelectorLoop() throws IOException {
selector = Selector.open();
}
@Override
public void run() {
while (!done) {
synchronized (this) {
// Guard
}
try {
selecting = true;
try {
selector.select(1000);
} finally {
selecting = false;
}
Set<SelectionKey> keys = selector.selectedKeys();
for (SelectionKey k: keys) {
k.interestOps(0);
}
keys.clear();
} catch (IOException e) {
logger.warn("Failed to wait for a temporary selector.", e);
}
}
}
}
public static void main(String[] args) throws Exception {
for (Entry<Object, Object> e: System.getProperties().entrySet()) {
System.out.println(e.getKey() + ": " + e.getValue());
}
System.out.println();
System.out.println("Hard-coded Constraint Level: " + CONSTRAINT_LEVEL);
System.out.println(
"Auto-detected Constraint Level: " +
new ConstraintLevelAutodetector().autodetect());
}
private NioProviderMetadata() {
// Unused
}
}
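
Because CONSTRAINT_LEVEL is computed in a static initializer, the property it reads must be set before this class loads. A small sketch, not part of this commit; the level value 0 is illustrative:

// Either on the command line:
//
//   java -Dorg.jboss.netty.channel.socket.nio.constraintLevel=0 ...
//
// or programmatically, provided it runs before NioProviderMetadata is
// first touched:
public class PinConstraintLevel {
    public static void main(String[] args) {
        System.setProperty(
                "org.jboss.netty.channel.socket.nio.constraintLevel", "0");
        // Channel code loaded after this point sees level 0 and skips
        // autodetection; values outside 0..2 are ignored and autodetected.
    }
}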

View File

@ -0,0 +1,112 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import static org.jboss.netty.channel.Channels.*;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.jboss.netty.channel.AbstractServerChannel;
import org.jboss.netty.channel.ChannelException;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.socket.DefaultServerSocketChannelConfig;
import org.jboss.netty.channel.socket.ServerSocketChannelConfig;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
/**
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
class NioServerSocketChannel extends AbstractServerChannel
implements org.jboss.netty.channel.socket.ServerSocketChannel {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(NioServerSocketChannel.class);
final ServerSocketChannel socket;
final Lock shutdownLock = new ReentrantLock();
volatile Selector selector;
private final ServerSocketChannelConfig config;
NioServerSocketChannel(
ChannelFactory factory,
ChannelPipeline pipeline,
ChannelSink sink) {
super(factory, pipeline, sink);
try {
socket = ServerSocketChannel.open();
} catch (IOException e) {
throw new ChannelException(
"Failed to open a server socket.", e);
}
try {
socket.configureBlocking(false);
} catch (IOException e) {
try {
socket.close();
} catch (IOException e2) {
logger.warn(
"Failed to close a partially initialized socket.", e2);
}
throw new ChannelException("Failed to enter non-blocking mode.", e);
}
config = new DefaultServerSocketChannelConfig(socket.socket());
fireChannelOpen(this);
}
@Override
public ServerSocketChannelConfig getConfig() {
return config;
}
@Override
public InetSocketAddress getLocalAddress() {
return (InetSocketAddress) socket.socket().getLocalSocketAddress();
}
@Override
public InetSocketAddress getRemoteAddress() {
return null;
}
@Override
public boolean isBound() {
return isOpen() && socket.socket().isBound();
}
@Override
protected boolean setClosed() {
return super.setClosed();
}
}

View File

@ -0,0 +1,145 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import java.util.concurrent.Executor;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.socket.ServerSocketChannel;
import org.jboss.netty.channel.socket.ServerSocketChannelFactory;
import org.jboss.netty.util.internal.ExecutorUtil;
/**
* A {@link org.jboss.netty.channel.socket.ServerSocketChannelFactory} which creates a server-side NIO-based
* {@link org.jboss.netty.channel.socket.ServerSocketChannel}. It utilizes the non-blocking I/O mode which
* was introduced with NIO to serve a large number of concurrent connections
* efficiently.
*
* <h3>How threads work</h3>
* <p>
* There are two types of threads in a {@link org.jboss.netty.channel.socket.sctp.NioServerSocketChannelFactory};
* one is the boss thread and the other is the worker thread.
*
* <h4>Boss threads</h4>
* <p>
* Each bound {@link org.jboss.netty.channel.socket.ServerSocketChannel} has its own boss thread.
* For example, if you opened two server ports such as 80 and 443, you will
* have two boss threads. A boss thread accepts incoming connections until
* the port is unbound. Once a connection is accepted successfully, the boss
* thread passes the accepted {@link org.jboss.netty.channel.Channel} to one of the worker
* threads that the {@link org.jboss.netty.channel.socket.sctp.NioServerSocketChannelFactory} manages.
*
* <h4>Worker threads</h4>
* <p>
* One {@link org.jboss.netty.channel.socket.sctp.NioServerSocketChannelFactory} can have one or more worker
* threads. A worker thread performs non-blocking read and write for one or
* more {@link org.jboss.netty.channel.Channel}s in a non-blocking mode.
*
* <h3>Life cycle of threads and graceful shutdown</h3>
* <p>
* All threads are acquired from the {@link java.util.concurrent.Executor}s which were specified
* when a {@link org.jboss.netty.channel.socket.sctp.NioServerSocketChannelFactory} was created. Boss threads are
* acquired from the {@code bossExecutor}, and worker threads are acquired from
* the {@code workerExecutor}. Therefore, you should make sure the specified
* {@link java.util.concurrent.Executor}s are able to lend a sufficient number of threads.
* Your best bet is to specify {@linkplain java.util.concurrent.Executors#newCachedThreadPool() a cached thread pool}.
* <p>
* Both boss and worker threads are acquired lazily, and then released when
* there's nothing left to process. All the related resources such as
* {@link java.nio.channels.Selector} are also released when the boss and worker threads are
* released. Therefore, to shut down a service gracefully, you should do the
* following:
*
* <ol>
* <li>unbind all channels created by the factory,
* <li>close all child channels accepted by the unbound channels, and
* (these two steps are usually done using {@link org.jboss.netty.channel.group.ChannelGroup#close()})</li>
* <li>call {@link #releaseExternalResources()}.</li>
* </ol>
*
* Please make sure not to shut down the executor until all channels are
* closed. Otherwise, you will end up with a {@link java.util.concurrent.RejectedExecutionException}
* and the related resources might not be released properly.
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
* @apiviz.landmark
*/
public class NioServerSocketChannelFactory implements ServerSocketChannelFactory {
final Executor bossExecutor;
private final Executor workerExecutor;
private final ChannelSink sink;
/**
* Creates a new instance. Calling this constructor is the same as calling
* {@link #NioServerSocketChannelFactory(java.util.concurrent.Executor, java.util.concurrent.Executor, int)} with 2 *
* the number of available processors in the machine. The number of
* available processors is obtained by {@link Runtime#availableProcessors()}.
*
* @param bossExecutor
* the {@link java.util.concurrent.Executor} which will execute the boss threads
* @param workerExecutor
* the {@link java.util.concurrent.Executor} which will execute the I/O worker threads
*/
public NioServerSocketChannelFactory(
Executor bossExecutor, Executor workerExecutor) {
this(bossExecutor, workerExecutor, SelectorUtil.DEFAULT_IO_THREADS);
}
/**
* Creates a new instance.
*
* @param bossExecutor
* the {@link java.util.concurrent.Executor} which will execute the boss threads
* @param workerExecutor
* the {@link java.util.concurrent.Executor} which will execute the I/O worker threads
* @param workerCount
* the maximum number of I/O worker threads
*/
public NioServerSocketChannelFactory(
Executor bossExecutor, Executor workerExecutor,
int workerCount) {
if (bossExecutor == null) {
throw new NullPointerException("bossExecutor");
}
if (workerExecutor == null) {
throw new NullPointerException("workerExecutor");
}
if (workerCount <= 0) {
throw new IllegalArgumentException(
"workerCount (" + workerCount + ") " +
"must be a positive integer.");
}
this.bossExecutor = bossExecutor;
this.workerExecutor = workerExecutor;
sink = new NioServerSocketPipelineSink(workerExecutor, workerCount);
}
@Override
public ServerSocketChannel newChannel(ChannelPipeline pipeline) {
return new NioServerSocketChannel(this, pipeline, sink);
}
@Override
public void releaseExternalResources() {
ExecutorUtil.terminate(bossExecutor, workerExecutor);
}
}
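
And the server-side counterpart of the client sketch earlier: a minimal Netty 3.x ServerBootstrap wiring for this factory (not part of this commit; port and handlers are placeholders):

import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.sctp.NioServerSocketChannelFactory;

public class ServerFactorySketch {
    public static void main(String[] args) {
        ServerBootstrap bootstrap = new ServerBootstrap(
                new NioServerSocketChannelFactory(
                        Executors.newCachedThreadPool(),   // boss threads
                        Executors.newCachedThreadPool())); // worker threads
        bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
            @Override
            public ChannelPipeline getPipeline() {
                return Channels.pipeline(/* add handlers here */);
            }
        });
        Channel serverChannel = bootstrap.bind(new InetSocketAddress(2905));
        // On shutdown: unbind/close channels (a ChannelGroup helps), then:
        serverChannel.close().awaitUninterruptibly();
        bootstrap.releaseExternalResources();
    }
}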

View File

@ -0,0 +1,300 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import static org.jboss.netty.channel.Channels.*;
import java.io.IOException;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ClosedSelectorException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import org.jboss.netty.channel.AbstractChannelSink;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelEvent;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelState;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.DeadLockProofWorker;
/**
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
class NioServerSocketPipelineSink extends AbstractChannelSink {
static final InternalLogger logger =
InternalLoggerFactory.getInstance(NioServerSocketPipelineSink.class);
private final NioWorker[] workers;
private final AtomicInteger workerIndex = new AtomicInteger();
NioServerSocketPipelineSink(Executor workerExecutor, int workerCount) {
workers = new NioWorker[workerCount];
for (int i = 0; i < workers.length; i ++) {
workers[i] = new NioWorker(workerExecutor);
}
}
@Override
public void eventSunk(
ChannelPipeline pipeline, ChannelEvent e) throws Exception {
Channel channel = e.getChannel();
if (channel instanceof NioServerSocketChannel) {
handleServerSocket(e);
} else if (channel instanceof NioSocketChannel) {
handleAcceptedSocket(e);
}
}
private void handleServerSocket(ChannelEvent e) {
if (!(e instanceof ChannelStateEvent)) {
return;
}
ChannelStateEvent event = (ChannelStateEvent) e;
NioServerSocketChannel channel =
(NioServerSocketChannel) event.getChannel();
ChannelFuture future = event.getFuture();
ChannelState state = event.getState();
Object value = event.getValue();
switch (state) {
case OPEN:
if (Boolean.FALSE.equals(value)) {
close(channel, future);
}
break;
case BOUND:
if (value != null) {
bind(channel, future, (SocketAddress) value);
} else {
close(channel, future);
}
break;
}
}
private void handleAcceptedSocket(ChannelEvent e) {
if (e instanceof ChannelStateEvent) {
ChannelStateEvent event = (ChannelStateEvent) e;
NioSocketChannel channel = (NioSocketChannel) event.getChannel();
ChannelFuture future = event.getFuture();
ChannelState state = event.getState();
Object value = event.getValue();
switch (state) {
case OPEN:
if (Boolean.FALSE.equals(value)) {
channel.worker.close(channel, future);
}
break;
case BOUND:
case CONNECTED:
if (value == null) {
channel.worker.close(channel, future);
}
break;
case INTEREST_OPS:
channel.worker.setInterestOps(channel, future, ((Integer) value).intValue());
break;
}
} else if (e instanceof MessageEvent) {
MessageEvent event = (MessageEvent) e;
NioSocketChannel channel = (NioSocketChannel) event.getChannel();
boolean offered = channel.writeBuffer.offer(event);
assert offered;
channel.worker.writeFromUserCode(channel);
}
}
private void bind(
NioServerSocketChannel channel, ChannelFuture future,
SocketAddress localAddress) {
boolean bound = false;
boolean bossStarted = false;
try {
channel.socket.socket().bind(localAddress, channel.getConfig().getBacklog());
bound = true;
future.setSuccess();
fireChannelBound(channel, channel.getLocalAddress());
Executor bossExecutor =
((NioServerSocketChannelFactory) channel.getFactory()).bossExecutor;
DeadLockProofWorker.start(bossExecutor, new Boss(channel));
bossStarted = true;
} catch (Throwable t) {
future.setFailure(t);
fireExceptionCaught(channel, t);
} finally {
if (!bossStarted && bound) {
close(channel, future);
}
}
}
private void close(NioServerSocketChannel channel, ChannelFuture future) {
boolean bound = channel.isBound();
try {
if (channel.socket.isOpen()) {
channel.socket.close();
Selector selector = channel.selector;
if (selector != null) {
selector.wakeup();
}
}
// Make sure the boss thread is not running so that the future
// is notified after a new connection cannot be accepted anymore.
// See NETTY-256 for more information.
channel.shutdownLock.lock();
try {
if (channel.setClosed()) {
future.setSuccess();
if (bound) {
fireChannelUnbound(channel);
}
fireChannelClosed(channel);
} else {
future.setSuccess();
}
} finally {
channel.shutdownLock.unlock();
}
} catch (Throwable t) {
future.setFailure(t);
fireExceptionCaught(channel, t);
}
}
NioWorker nextWorker() {
return workers[Math.abs(
workerIndex.getAndIncrement() % workers.length)];
}
private final class Boss implements Runnable {
private final Selector selector;
private final NioServerSocketChannel channel;
Boss(NioServerSocketChannel channel) throws IOException {
this.channel = channel;
selector = Selector.open();
boolean registered = false;
try {
channel.socket.register(selector, SelectionKey.OP_ACCEPT);
registered = true;
} finally {
if (!registered) {
closeSelector();
}
}
channel.selector = selector;
}
@Override
public void run() {
final Thread currentThread = Thread.currentThread();
channel.shutdownLock.lock();
try {
for (;;) {
try {
if (selector.select(1000) > 0) {
selector.selectedKeys().clear();
}
SocketChannel acceptedSocket = channel.socket.accept();
if (acceptedSocket != null) {
registerAcceptedChannel(acceptedSocket, currentThread);
}
} catch (SocketTimeoutException e) {
// Thrown every second to get ClosedChannelException
// raised.
} catch (CancelledKeyException e) {
// Raised by accept() when the server socket was closed.
} catch (ClosedSelectorException e) {
// Raised by accept() when the server socket was closed.
} catch (ClosedChannelException e) {
// Closed as requested.
break;
} catch (Throwable e) {
logger.warn(
"Failed to accept a connection.", e);
try {
Thread.sleep(1000);
} catch (InterruptedException e1) {
// Ignore
}
}
}
} finally {
channel.shutdownLock.unlock();
closeSelector();
}
}
private void registerAcceptedChannel(SocketChannel acceptedSocket, Thread currentThread) {
try {
ChannelPipeline pipeline =
channel.getConfig().getPipelineFactory().getPipeline();
NioWorker worker = nextWorker();
worker.register(new NioAcceptedSocketChannel(
channel.getFactory(), pipeline, channel,
NioServerSocketPipelineSink.this, acceptedSocket,
worker, currentThread), null);
} catch (Exception e) {
logger.warn(
"Failed to initialize an accepted socket.", e);
try {
acceptedSocket.close();
} catch (IOException e2) {
logger.warn(
"Failed to close a partially accepted socket.",
e2);
}
}
}
private void closeSelector() {
channel.selector = null;
try {
selector.close();
} catch (Exception e) {
logger.warn("Failed to close a selector.", e);
}
}
}
}
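
The comment in close() above (see NETTY-256) hinges on the boss holding shutdownLock for its entire accept loop; a minimal, Netty-free sketch of that ordering guarantee (not part of this commit; blocking accept is assumed for brevity, where the real boss uses a selector):

import java.io.IOException;
import java.nio.channels.ServerSocketChannel;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

final class ShutdownOrderingSketch {
    private final Lock shutdownLock = new ReentrantLock();
    private final ServerSocketChannel serverSocket;

    ShutdownOrderingSketch(ServerSocketChannel serverSocket) {
        this.serverSocket = serverSocket;
    }

    void bossLoop() {
        shutdownLock.lock(); // held for the entire accept loop
        try {
            for (;;) {
                try {
                    serverSocket.accept(); // register the accepted channel...
                } catch (IOException e) {
                    break; // socket closed: exit the loop, releasing the lock
                }
            }
        } finally {
            shutdownLock.unlock();
        }
    }

    void close() throws IOException {
        serverSocket.close(); // forces accept() above to fail, ending the boss
        shutdownLock.lock();  // blocks until bossLoop() has released the lock
        try {
            // Only now notify the close future: no connection can be
            // accepted after the notification, as required by NETTY-256.
        } finally {
            shutdownLock.unlock();
        }
    }
}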

View File

@ -0,0 +1,286 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import static org.jboss.netty.channel.Channels.*;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.SocketChannel;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.AbstractChannel;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelSink;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.socket.sctp.SocketSendBufferPool.SendBuffer;
import org.jboss.netty.util.internal.LinkedTransferQueue;
import org.jboss.netty.util.internal.ThreadLocalBoolean;
/**
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
class NioSocketChannel extends AbstractChannel
implements org.jboss.netty.channel.socket.SocketChannel {
private static final int ST_OPEN = 0;
private static final int ST_BOUND = 1;
private static final int ST_CONNECTED = 2;
private static final int ST_CLOSED = -1;
volatile int state = ST_OPEN;
final SocketChannel socket;
final NioWorker worker;
private final NioSocketChannelConfig config;
private volatile InetSocketAddress localAddress;
private volatile InetSocketAddress remoteAddress;
final Object interestOpsLock = new Object();
final Object writeLock = new Object();
final Runnable writeTask = new WriteTask();
final AtomicBoolean writeTaskInTaskQueue = new AtomicBoolean();
final Queue<MessageEvent> writeBuffer = new WriteRequestQueue();
final AtomicInteger writeBufferSize = new AtomicInteger();
final AtomicInteger highWaterMarkCounter = new AtomicInteger();
boolean inWriteNowLoop;
boolean writeSuspended;
MessageEvent currentWriteEvent;
SendBuffer currentWriteBuffer;
public NioSocketChannel(
Channel parent, ChannelFactory factory,
ChannelPipeline pipeline, ChannelSink sink,
SocketChannel socket, NioWorker worker) {
super(parent, factory, pipeline, sink);
this.socket = socket;
this.worker = worker;
config = new DefaultNioSocketChannelConfig(socket.socket());
// TODO Move the state variable to AbstractChannel so that we don't need
// to add many listeners.
getCloseFuture().addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
state = ST_CLOSED;
}
});
}
@Override
public NioSocketChannelConfig getConfig() {
return config;
}
@Override
public InetSocketAddress getLocalAddress() {
InetSocketAddress localAddress = this.localAddress;
if (localAddress == null) {
try {
this.localAddress = localAddress =
(InetSocketAddress) socket.socket().getLocalSocketAddress();
} catch (Throwable t) {
// Sometimes fails on a closed socket in Windows.
return null;
}
}
return localAddress;
}
@Override
public InetSocketAddress getRemoteAddress() {
InetSocketAddress remoteAddress = this.remoteAddress;
if (remoteAddress == null) {
try {
this.remoteAddress = remoteAddress =
(InetSocketAddress) socket.socket().getRemoteSocketAddress();
} catch (Throwable t) {
// Sometimes fails on a closed socket in Windows.
return null;
}
}
return remoteAddress;
}
@Override
public boolean isOpen() {
return state >= ST_OPEN;
}
@Override
public boolean isBound() {
return state >= ST_BOUND;
}
@Override
public boolean isConnected() {
return state == ST_CONNECTED;
}
final void setBound() {
assert state == ST_OPEN : "Invalid state: " + state;
state = ST_BOUND;
}
final void setConnected() {
if (state != ST_CLOSED) {
state = ST_CONNECTED;
}
}
@Override
protected boolean setClosed() {
return super.setClosed();
}
@Override
public int getInterestOps() {
if (!isOpen()) {
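// A closed channel is never writable; keeping OP_WRITE set here makes isWritable() return false.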
return Channel.OP_WRITE;
}
int interestOps = getRawInterestOps();
int writeBufferSize = this.writeBufferSize.get();
if (writeBufferSize != 0) {
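// Hysteresis: once the high water mark has been hit (counter > 0), the
// channel stays unwritable until the pending bytes drop below the low
// water mark; otherwise only the high water mark is compared.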
if (highWaterMarkCounter.get() > 0) {
int lowWaterMark = getConfig().getWriteBufferLowWaterMark();
if (writeBufferSize >= lowWaterMark) {
interestOps |= Channel.OP_WRITE;
} else {
interestOps &= ~Channel.OP_WRITE;
}
} else {
int highWaterMark = getConfig().getWriteBufferHighWaterMark();
if (writeBufferSize >= highWaterMark) {
interestOps |= Channel.OP_WRITE;
} else {
interestOps &= ~Channel.OP_WRITE;
}
}
} else {
interestOps &= ~Channel.OP_WRITE;
}
return interestOps;
}
int getRawInterestOps() {
return super.getInterestOps();
}
void setRawInterestOpsNow(int interestOps) {
super.setInterestOpsNow(interestOps);
}
@Override
public ChannelFuture write(Object message, SocketAddress remoteAddress) {
if (remoteAddress == null || remoteAddress.equals(getRemoteAddress())) {
return super.write(message, null);
} else {
return getUnsupportedOperationFuture();
}
}
private final class WriteRequestQueue extends LinkedTransferQueue<MessageEvent> {
private static final long serialVersionUID = -246694024103520626L;
private final ThreadLocalBoolean notifying = new ThreadLocalBoolean();
WriteRequestQueue() {
super();
}
@Override
public boolean offer(MessageEvent e) {
boolean success = super.offer(e);
assert success;
int messageSize = getMessageSize(e);
int newWriteBufferSize = writeBufferSize.addAndGet(messageSize);
int highWaterMark = getConfig().getWriteBufferHighWaterMark();
if (newWriteBufferSize >= highWaterMark) {
if (newWriteBufferSize - messageSize < highWaterMark) {
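// This offer just pushed the buffer across the high water mark - notify once.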
highWaterMarkCounter.incrementAndGet();
if (!notifying.get()) {
notifying.set(Boolean.TRUE);
fireChannelInterestChanged(NioSocketChannel.this);
notifying.set(Boolean.FALSE);
}
}
}
return true;
}
@Override
public MessageEvent poll() {
MessageEvent e = super.poll();
if (e != null) {
int messageSize = getMessageSize(e);
int newWriteBufferSize = writeBufferSize.addAndGet(-messageSize);
int lowWaterMark = getConfig().getWriteBufferLowWaterMark();
if (newWriteBufferSize == 0 || newWriteBufferSize < lowWaterMark) {
if (newWriteBufferSize + messageSize >= lowWaterMark) {
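// This poll just dropped the buffer below the low water mark - the channel becomes writable again.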
highWaterMarkCounter.decrementAndGet();
if (isConnected() && !notifying.get()) {
notifying.set(Boolean.TRUE);
fireChannelInterestChanged(NioSocketChannel.this);
notifying.set(Boolean.FALSE);
}
}
}
}
return e;
}
private int getMessageSize(MessageEvent e) {
Object m = e.getMessage();
if (m instanceof ChannelBuffer) {
return ((ChannelBuffer) m).readableBytes();
}
return 0;
}
}
private final class WriteTask implements Runnable {
WriteTask() {
super();
}
@Override
public void run() {
writeTaskInTaskQueue.set(false);
worker.writeFromTaskLoop(NioSocketChannel.this);
}
}
}


@@ -0,0 +1,141 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import org.jboss.netty.channel.ReceiveBufferSizePredictor;
import org.jboss.netty.channel.ReceiveBufferSizePredictorFactory;
import org.jboss.netty.channel.socket.SocketChannelConfig;
/**
* A {@link org.jboss.netty.channel.socket.SocketChannelConfig} for a NIO TCP/IP {@link org.jboss.netty.channel.socket.SocketChannel}.
*
* <h3>Available options</h3>
*
* In addition to the options provided by {@link org.jboss.netty.channel.ChannelConfig} and
* {@link org.jboss.netty.channel.socket.SocketChannelConfig}, {@link org.jboss.netty.channel.socket.sctp.NioSocketChannelConfig} allows the
* following options in the option map:
*
* <table border="1" cellspacing="0" cellpadding="6">
* <tr>
* <th>Name</th><th>Associated setter method</th>
* </tr><tr>
* <td>{@code "writeBufferHighWaterMark"}</td><td>{@link #setWriteBufferHighWaterMark(int)}</td>
* </tr><tr>
* <td>{@code "writeBufferLowWaterMark"}</td><td>{@link #setWriteBufferLowWaterMark(int)}</td>
* </tr><tr>
* <td>{@code "writeSpinCount"}</td><td>{@link #setWriteSpinCount(int)}</td>
* </tr><tr>
* <td>{@code "receiveBufferSizePredictor"}</td><td>{@link #setReceiveBufferSizePredictor(org.jboss.netty.channel.ReceiveBufferSizePredictor)}</td>
* </tr><tr>
* <td>{@code "receiveBufferSizePredictorFactory"}</td><td>{@link #setReceiveBufferSizePredictorFactory(org.jboss.netty.channel.ReceiveBufferSizePredictorFactory)}</td>
* </tr>
* </table>
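*
* For illustration, a hypothetical snippet (channel setup elided) might tune
* these options as follows:
* <pre>
* NioSocketChannelConfig config = channel.getConfig();
* config.setWriteBufferHighWaterMark(128 * 1024);
* config.setWriteBufferLowWaterMark(64 * 1024);
* config.setWriteSpinCount(16);
* </pre>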
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*/
public interface NioSocketChannelConfig extends SocketChannelConfig {
/**
* Returns the high water mark of the write buffer. If the number of bytes
* queued in the write buffer exceeds this value, {@link org.jboss.netty.channel.Channel#isWritable()}
* will start to return {@code false}.
*/
int getWriteBufferHighWaterMark();
/**
* Sets the high water mark of the write buffer. If the number of bytes
* queued in the write buffer exceeds this value, {@link org.jboss.netty.channel.Channel#isWritable()}
* will start to return {@code false}.
*/
void setWriteBufferHighWaterMark(int writeBufferHighWaterMark);
/**
* Returns the low water mark of the write buffer. Once the number of bytes
* queued in the write buffer has exceeded the
* {@linkplain #setWriteBufferHighWaterMark(int) high water mark} and then
* dropped below this value, {@link org.jboss.netty.channel.Channel#isWritable()} will return
* {@code true} again.
*/
int getWriteBufferLowWaterMark();
/**
* Sets the low water mark of the write buffer. Once the number of bytes
* queued in the write buffer has exceeded the
* {@linkplain #setWriteBufferHighWaterMark(int) high water mark} and then
* dropped below this value, {@link org.jboss.netty.channel.Channel#isWritable()} will return
* {@code true} again.
*/
void setWriteBufferLowWaterMark(int writeBufferLowWaterMark);
/**
* Returns the maximum loop count for a write operation until
* {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)} returns a non-zero value.
* It is similar to what a spin lock is used for in concurrency programming.
* It improves memory utilization and write throughput depending on
* the platform the JVM runs on. The default value is {@code 16}.
*/
int getWriteSpinCount();
/**
* Sets the maximum loop count for a write operation until
* {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)} returns a non-zero value.
* It is similar to what a spin lock is used for in concurrency programming.
* It improves memory utilization and write throughput depending on
* the platform the JVM runs on. The default value is {@code 16}.
*
* @throws IllegalArgumentException
* if the specified value is {@code 0} or negative
*/
void setWriteSpinCount(int writeSpinCount);
/**
* Returns the {@link org.jboss.netty.channel.ReceiveBufferSizePredictor} which predicts the
* number of readable bytes in the socket receive buffer. The default
* predictor is <tt>{@link org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictor}(64, 1024, 65536)</tt>.
*/
ReceiveBufferSizePredictor getReceiveBufferSizePredictor();
/**
* Sets the {@link org.jboss.netty.channel.ReceiveBufferSizePredictor} which predicts the
* number of readable bytes in the socket receive buffer. The default
* predictor is <tt>{@link org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictor}(64, 1024, 65536)</tt>.
*/
void setReceiveBufferSizePredictor(ReceiveBufferSizePredictor predictor);
/**
* Returns the {@link org.jboss.netty.channel.ReceiveBufferSizePredictorFactory} which creates a new
* {@link org.jboss.netty.channel.ReceiveBufferSizePredictor} when a new channel is created and
* no {@link org.jboss.netty.channel.ReceiveBufferSizePredictor} was set. If no predictor was set
* for the channel, {@link #setReceiveBufferSizePredictor(org.jboss.netty.channel.ReceiveBufferSizePredictor)}
* will be called with the new predictor. The default factory is
* <tt>{@link org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory}(64, 1024, 65536)</tt>.
*/
ReceiveBufferSizePredictorFactory getReceiveBufferSizePredictorFactory();
/**
* Sets the {@link org.jboss.netty.channel.ReceiveBufferSizePredictorFactory} which creates a new
* {@link org.jboss.netty.channel.ReceiveBufferSizePredictor} when a new channel is created and
* no {@link org.jboss.netty.channel.ReceiveBufferSizePredictor} was set. If no predictor was set
* for the channel, {@link #setReceiveBufferSizePredictor(org.jboss.netty.channel.ReceiveBufferSizePredictor)}
* will be called with the new predictor. The default factory is
* <tt>{@link org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory}(64, 1024, 65536)</tt>.
*/
void setReceiveBufferSizePredictorFactory(
ReceiveBufferSizePredictorFactory predictorFactory);
}


@@ -0,0 +1,781 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import static org.jboss.netty.channel.Channels.*;
import java.io.IOException;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousCloseException;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.NotYetConnectedException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;
import java.util.Iterator;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBufferFactory;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelException;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.ReceiveBufferSizePredictor;
import org.jboss.netty.channel.socket.sctp.SocketSendBufferPool.SendBuffer;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.internal.DeadLockProofWorker;
import org.jboss.netty.util.internal.LinkedTransferQueue;
/**
*
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @version $Rev$, $Date$
*
*/
class NioWorker implements Runnable {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(NioWorker.class);
private static final int CONSTRAINT_LEVEL = NioProviderMetadata.CONSTRAINT_LEVEL;
static final int CLEANUP_INTERVAL = 256; // XXX Hard-coded value, but won't need customization.
private final Executor executor;
private boolean started;
private volatile Thread thread;
volatile Selector selector;
private final AtomicBoolean wakenUp = new AtomicBoolean();
private final ReadWriteLock selectorGuard = new ReentrantReadWriteLock();
private final Object startStopLock = new Object();
private final Queue<Runnable> registerTaskQueue = new LinkedTransferQueue<Runnable>();
private final Queue<Runnable> writeTaskQueue = new LinkedTransferQueue<Runnable>();
private volatile int cancelledKeys; // should use AtomicInteger but we just need approximation
private final SocketReceiveBufferPool recvBufferPool = new SocketReceiveBufferPool();
private final SocketSendBufferPool sendBufferPool = new SocketSendBufferPool();
NioWorker(Executor executor) {
this.executor = executor;
}
void register(NioSocketChannel channel, ChannelFuture future) {
boolean server = !(channel instanceof NioClientSocketChannel);
Runnable registerTask = new RegisterTask(channel, future, server);
Selector selector;
synchronized (startStopLock) {
if (!started) {
// Open a selector if this worker didn't start yet.
try {
this.selector = selector = Selector.open();
} catch (Throwable t) {
throw new ChannelException(
"Failed to create a selector.", t);
}
// Start the worker thread with the new Selector.
boolean success = false;
try {
DeadLockProofWorker.start(executor, this);
success = true;
} finally {
if (!success) {
// Release the Selector if the execution fails.
try {
selector.close();
} catch (Throwable t) {
logger.warn("Failed to close a selector.", t);
}
this.selector = selector = null;
// The method will return to the caller at this point.
}
}
} else {
// Use the existing selector if this worker has been started.
selector = this.selector;
}
assert selector != null && selector.isOpen();
started = true;
boolean offered = registerTaskQueue.offer(registerTask);
assert offered;
}
if (wakenUp.compareAndSet(false, true)) {
selector.wakeup();
}
}
@Override
public void run() {
thread = Thread.currentThread();
boolean shutdown = false;
Selector selector = this.selector;
for (;;) {
wakenUp.set(false);
if (CONSTRAINT_LEVEL != 0) {
selectorGuard.writeLock().lock();
// This empty synchronization block prevents the selector
// from acquiring its lock.
selectorGuard.writeLock().unlock();
}
try {
SelectorUtil.select(selector);
// 'wakenUp.compareAndSet(false, true)' is always evaluated
// before calling 'selector.wakeup()' to reduce the wake-up
// overhead. (Selector.wakeup() is an expensive operation.)
//
// However, there is a race condition in this approach.
// The race condition is triggered when 'wakenUp' is set to
// true too early.
//
// 'wakenUp' is set to true too early if:
// 1) Selector is woken up between 'wakenUp.set(false)' and
// 'selector.select(...)'. (BAD)
// 2) Selector is woken up between 'selector.select(...)' and
// 'if (wakenUp.get()) { ... }'. (OK)
//
// In the first case, 'wakenUp' is set to true and the
// following 'selector.select(...)' will wake up immediately.
// Until 'wakenUp' is set to false again in the next round,
// 'wakenUp.compareAndSet(false, true)' will fail, and therefore
// any attempt to wake up the Selector will fail, too, causing
// the following 'selector.select(...)' call to block
// unnecessarily.
//
// To fix this problem, we wake up the selector again if wakenUp
// is true immediately after selector.select(...).
// It is inefficient in that it wakes up the selector for both
// the first case (BAD - wake-up required) and the second case
// (OK - no wake-up required).
if (wakenUp.get()) {
selector.wakeup();
}
cancelledKeys = 0;
processRegisterTaskQueue();
processWriteTaskQueue();
processSelectedKeys(selector.selectedKeys());
// Exit the loop when there's nothing to handle.
// The shutdown flag is used to delay the shutdown of this
// loop to avoid excessive Selector creation when
// connections are registered in a one-by-one manner instead of
// concurrent manner.
if (selector.keys().isEmpty()) {
if (shutdown ||
executor instanceof ExecutorService && ((ExecutorService) executor).isShutdown()) {
synchronized (startStopLock) {
if (registerTaskQueue.isEmpty() && selector.keys().isEmpty()) {
started = false;
try {
selector.close();
} catch (IOException e) {
logger.warn(
"Failed to close a selector.", e);
} finally {
this.selector = null;
}
break;
} else {
shutdown = false;
}
}
} else {
// Give one more second.
shutdown = true;
}
} else {
shutdown = false;
}
} catch (Throwable t) {
logger.warn(
"Unexpected exception in the selector loop.", t);
// Prevent possible consecutive immediate failures that lead to
// excessive CPU consumption.
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// Ignore.
}
}
}
}
private void processRegisterTaskQueue() throws IOException {
for (;;) {
final Runnable task = registerTaskQueue.poll();
if (task == null) {
break;
}
task.run();
cleanUpCancelledKeys();
}
}
private void processWriteTaskQueue() throws IOException {
for (;;) {
final Runnable task = writeTaskQueue.poll();
if (task == null) {
break;
}
task.run();
cleanUpCancelledKeys();
}
}
private void processSelectedKeys(Set<SelectionKey> selectedKeys) throws IOException {
for (Iterator<SelectionKey> i = selectedKeys.iterator(); i.hasNext();) {
SelectionKey k = i.next();
i.remove();
try {
int readyOps = k.readyOps();
if ((readyOps & SelectionKey.OP_READ) != 0 || readyOps == 0) {
if (!read(k)) {
// Connection already closed - no need to handle write.
continue;
}
}
if ((readyOps & SelectionKey.OP_WRITE) != 0) {
writeFromSelectorLoop(k);
}
} catch (CancelledKeyException e) {
close(k);
}
if (cleanUpCancelledKeys()) {
break; // break the loop to avoid ConcurrentModificationException
}
}
}
private boolean cleanUpCancelledKeys() throws IOException {
if (cancelledKeys >= CLEANUP_INTERVAL) {
cancelledKeys = 0;
selector.selectNow();
return true;
}
return false;
}
private boolean read(SelectionKey k) {
final SocketChannel ch = (SocketChannel) k.channel();
final NioSocketChannel channel = (NioSocketChannel) k.attachment();
final ReceiveBufferSizePredictor predictor =
channel.getConfig().getReceiveBufferSizePredictor();
final int predictedRecvBufSize = predictor.nextReceiveBufferSize();
int ret = 0;
int readBytes = 0;
boolean failure = true;
ByteBuffer bb = recvBufferPool.acquire(predictedRecvBufSize);
try {
while ((ret = ch.read(bb)) > 0) {
readBytes += ret;
if (!bb.hasRemaining()) {
break;
}
}
failure = false;
} catch (ClosedChannelException e) {
// Can happen, and does not need user attention.
} catch (Throwable t) {
fireExceptionCaught(channel, t);
}
if (readBytes > 0) {
bb.flip();
final ChannelBufferFactory bufferFactory =
channel.getConfig().getBufferFactory();
final ChannelBuffer buffer = bufferFactory.getBuffer(readBytes);
buffer.setBytes(0, bb);
buffer.writerIndex(readBytes);
recvBufferPool.release(bb);
// Update the predictor.
predictor.previousReceiveBufferSize(readBytes);
// Fire the event.
fireMessageReceived(channel, buffer);
} else {
recvBufferPool.release(bb);
}
if (ret < 0 || failure) {
k.cancel(); // Some JDK implementations run into an infinite loop without this.
close(channel, succeededFuture(channel));
return false;
}
return true;
}
private void close(SelectionKey k) {
NioSocketChannel ch = (NioSocketChannel) k.attachment();
close(ch, succeededFuture(ch));
}
void writeFromUserCode(final NioSocketChannel channel) {
if (!channel.isConnected()) {
cleanUpWriteBuffer(channel);
return;
}
if (scheduleWriteIfNecessary(channel)) {
return;
}
// From here, we are sure Thread.currentThread() == workerThread.
if (channel.writeSuspended) {
return;
}
if (channel.inWriteNowLoop) {
return;
}
write0(channel);
}
void writeFromTaskLoop(final NioSocketChannel ch) {
if (!ch.writeSuspended) {
write0(ch);
}
}
void writeFromSelectorLoop(final SelectionKey k) {
NioSocketChannel ch = (NioSocketChannel) k.attachment();
ch.writeSuspended = false;
write0(ch);
}
private boolean scheduleWriteIfNecessary(final NioSocketChannel channel) {
final Thread currentThread = Thread.currentThread();
final Thread workerThread = thread;
if (currentThread != workerThread) {
if (channel.writeTaskInTaskQueue.compareAndSet(false, true)) {
boolean offered = writeTaskQueue.offer(channel.writeTask);
assert offered;
}
if (!(channel instanceof NioAcceptedSocketChannel) ||
((NioAcceptedSocketChannel) channel).bossThread != currentThread) {
final Selector workerSelector = selector;
if (workerSelector != null) {
if (wakenUp.compareAndSet(false, true)) {
workerSelector.wakeup();
}
}
} else {
// A write request can be made from an acceptor thread (boss)
// when a user attempted to write something in:
//
// * channelOpen()
// * channelBound()
// * channelConnected().
//
// In this case, there's no need to wake up the selector because
// the channel is not even registered yet at this moment.
}
return true;
}
return false;
}
private void write0(NioSocketChannel channel) {
boolean open = true;
boolean addOpWrite = false;
boolean removeOpWrite = false;
long writtenBytes = 0;
final SocketSendBufferPool sendBufferPool = this.sendBufferPool;
final SocketChannel ch = channel.socket;
final Queue<MessageEvent> writeBuffer = channel.writeBuffer;
final int writeSpinCount = channel.getConfig().getWriteSpinCount();
synchronized (channel.writeLock) {
channel.inWriteNowLoop = true;
for (;;) {
MessageEvent evt = channel.currentWriteEvent;
SendBuffer buf;
if (evt == null) {
if ((channel.currentWriteEvent = evt = writeBuffer.poll()) == null) {
removeOpWrite = true;
channel.writeSuspended = false;
break;
}
channel.currentWriteBuffer = buf = sendBufferPool.acquire(evt.getMessage());
} else {
buf = channel.currentWriteBuffer;
}
ChannelFuture future = evt.getFuture();
try {
long localWrittenBytes = 0;
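// Spin up to writeSpinCount times until the kernel accepts at least one
// byte; see NioSocketChannelConfig#getWriteSpinCount().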
for (int i = writeSpinCount; i > 0; i --) {
localWrittenBytes = buf.transferTo(ch);
if (localWrittenBytes != 0) {
writtenBytes += localWrittenBytes;
break;
}
if (buf.finished()) {
break;
}
}
if (buf.finished()) {
// Successful write - proceed to the next message.
buf.release();
channel.currentWriteEvent = null;
channel.currentWriteBuffer = null;
evt = null;
buf = null;
future.setSuccess();
} else {
// Not written fully - perhaps the kernel buffer is full.
addOpWrite = true;
channel.writeSuspended = true;
if (localWrittenBytes > 0) {
// Notify progress listeners if necessary.
future.setProgress(
localWrittenBytes,
buf.writtenBytes(), buf.totalBytes());
}
break;
}
} catch (AsynchronousCloseException e) {
// Doesn't need user attention - ignore.
} catch (Throwable t) {
buf.release();
channel.currentWriteEvent = null;
channel.currentWriteBuffer = null;
buf = null;
evt = null;
future.setFailure(t);
fireExceptionCaught(channel, t);
if (t instanceof IOException) {
open = false;
close(channel, succeededFuture(channel));
}
}
}
channel.inWriteNowLoop = false;
}
fireWriteComplete(channel, writtenBytes);
if (open) {
if (addOpWrite) {
setOpWrite(channel);
} else if (removeOpWrite) {
clearOpWrite(channel);
}
}
}
private void setOpWrite(NioSocketChannel channel) {
Selector selector = this.selector;
SelectionKey key = channel.socket.keyFor(selector);
if (key == null) {
return;
}
if (!key.isValid()) {
close(key);
return;
}
// interestOps can change at any time and at any thread.
// Acquire a lock to avoid possible race condition.
synchronized (channel.interestOpsLock) {
int interestOps = channel.getRawInterestOps();
if ((interestOps & SelectionKey.OP_WRITE) == 0) {
interestOps |= SelectionKey.OP_WRITE;
key.interestOps(interestOps);
channel.setRawInterestOpsNow(interestOps);
}
}
}
private void clearOpWrite(NioSocketChannel channel) {
Selector selector = this.selector;
SelectionKey key = channel.socket.keyFor(selector);
if (key == null) {
return;
}
if (!key.isValid()) {
close(key);
return;
}
// interestOps can change at any time and at any thread.
// Acquire a lock to avoid possible race condition.
synchronized (channel.interestOpsLock) {
int interestOps = channel.getRawInterestOps();
if ((interestOps & SelectionKey.OP_WRITE) != 0) {
interestOps &= ~SelectionKey.OP_WRITE;
key.interestOps(interestOps);
channel.setRawInterestOpsNow(interestOps);
}
}
}
void close(NioSocketChannel channel, ChannelFuture future) {
boolean connected = channel.isConnected();
boolean bound = channel.isBound();
try {
channel.socket.close();
cancelledKeys ++;
if (channel.setClosed()) {
future.setSuccess();
if (connected) {
fireChannelDisconnected(channel);
}
if (bound) {
fireChannelUnbound(channel);
}
cleanUpWriteBuffer(channel);
fireChannelClosed(channel);
} else {
future.setSuccess();
}
} catch (Throwable t) {
future.setFailure(t);
fireExceptionCaught(channel, t);
}
}
private void cleanUpWriteBuffer(NioSocketChannel channel) {
Exception cause = null;
boolean fireExceptionCaught = false;
// Clean up the stale messages in the write buffer.
synchronized (channel.writeLock) {
MessageEvent evt = channel.currentWriteEvent;
if (evt != null) {
// Create the exception only once to avoid the excessive overhead
// caused by fillStackTrace.
if (channel.isOpen()) {
cause = new NotYetConnectedException();
} else {
cause = new ClosedChannelException();
}
ChannelFuture future = evt.getFuture();
channel.currentWriteBuffer.release();
channel.currentWriteBuffer = null;
channel.currentWriteEvent = null;
evt = null;
future.setFailure(cause);
fireExceptionCaught = true;
}
Queue<MessageEvent> writeBuffer = channel.writeBuffer;
if (!writeBuffer.isEmpty()) {
// Create the exception only once to avoid the excessive overhead
// caused by fillStackTrace.
if (cause == null) {
if (channel.isOpen()) {
cause = new NotYetConnectedException();
} else {
cause = new ClosedChannelException();
}
}
for (;;) {
evt = writeBuffer.poll();
if (evt == null) {
break;
}
evt.getFuture().setFailure(cause);
fireExceptionCaught = true;
}
}
}
if (fireExceptionCaught) {
fireExceptionCaught(channel, cause);
}
}
void setInterestOps(
NioSocketChannel channel, ChannelFuture future, int interestOps) {
boolean changed = false;
try {
// interestOps can change at any time and at any thread.
// Acquire a lock to avoid possible race condition.
synchronized (channel.interestOpsLock) {
Selector selector = this.selector;
SelectionKey key = channel.socket.keyFor(selector);
if (key == null || selector == null) {
// Not registered to the worker yet.
// Set the rawInterestOps immediately; RegisterTask will pick it up.
channel.setRawInterestOpsNow(interestOps);
return;
}
// Override OP_WRITE flag - a user cannot change this flag.
interestOps &= ~Channel.OP_WRITE;
interestOps |= channel.getRawInterestOps() & Channel.OP_WRITE;
switch (CONSTRAINT_LEVEL) {
case 0:
if (channel.getRawInterestOps() != interestOps) {
key.interestOps(interestOps);
if (Thread.currentThread() != thread &&
wakenUp.compareAndSet(false, true)) {
selector.wakeup();
}
changed = true;
}
break;
case 1:
case 2:
if (channel.getRawInterestOps() != interestOps) {
if (Thread.currentThread() == thread) {
key.interestOps(interestOps);
changed = true;
} else {
selectorGuard.readLock().lock();
try {
if (wakenUp.compareAndSet(false, true)) {
selector.wakeup();
}
key.interestOps(interestOps);
changed = true;
} finally {
selectorGuard.readLock().unlock();
}
}
}
break;
default:
throw new Error();
}
if (changed) {
channel.setRawInterestOpsNow(interestOps);
}
}
future.setSuccess();
if (changed) {
fireChannelInterestChanged(channel);
}
} catch (CancelledKeyException e) {
// setInterestOps() was called on a closed channel.
ClosedChannelException cce = new ClosedChannelException();
future.setFailure(cce);
fireExceptionCaught(channel, cce);
} catch (Throwable t) {
future.setFailure(t);
fireExceptionCaught(channel, t);
}
}
private final class RegisterTask implements Runnable {
private final NioSocketChannel channel;
private final ChannelFuture future;
private final boolean server;
RegisterTask(
NioSocketChannel channel, ChannelFuture future, boolean server) {
this.channel = channel;
this.future = future;
this.server = server;
}
@Override
public void run() {
SocketAddress localAddress = channel.getLocalAddress();
SocketAddress remoteAddress = channel.getRemoteAddress();
if (localAddress == null || remoteAddress == null) {
if (future != null) {
future.setFailure(new ClosedChannelException());
}
close(channel, succeededFuture(channel));
return;
}
try {
if (server) {
channel.socket.configureBlocking(false);
}
synchronized (channel.interestOpsLock) {
channel.socket.register(
selector, channel.getRawInterestOps(), channel);
}
if (future != null) {
channel.setConnected();
future.setSuccess();
}
} catch (IOException e) {
if (future != null) {
future.setFailure(e);
}
close(channel, succeededFuture(channel));
if (!(e instanceof ClosedChannelException)) {
throw new ChannelException(
"Failed to register a socket to the selector.", e);
}
}
if (!server) {
if (!((NioClientSocketChannel) channel).boundManually) {
fireChannelBound(channel, localAddress);
}
fireChannelConnected(channel, remoteAddress);
}
}
}
}


@@ -0,0 +1,46 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import java.io.IOException;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.Selector;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
/**
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
* @version $Rev$, $Date$
*/
final class SelectorUtil {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(SelectorUtil.class);
static final int DEFAULT_IO_THREADS = Runtime.getRuntime().availableProcessors() * 2;
static void select(Selector selector) throws IOException {
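// Cap the blocking select at 500 ms so the worker loop regains control
// periodically even if a wakeup is missed.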
try {
selector.select(500);
} catch (CancelledKeyException e) {
// Harmless exception - log anyway
logger.debug(
CancelledKeyException.class.getSimpleName() +
" raised by a Selector - JDK bug?", e);
}
}
}


@@ -0,0 +1,102 @@
/*
* Copyright 2010 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import java.lang.ref.SoftReference;
import java.nio.ByteBuffer;
/**
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
* @version $Rev$, $Date$
*/
final class SocketReceiveBufferPool {
private static final int POOL_SIZE = 8;
@SuppressWarnings("unchecked")
private final SoftReference<ByteBuffer>[] pool = new SoftReference[POOL_SIZE];
SocketReceiveBufferPool() {
super();
}
final ByteBuffer acquire(int size) {
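// Reuse the first pooled buffer with sufficient capacity; otherwise
// allocate a fresh direct buffer.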
final SoftReference<ByteBuffer>[] pool = this.pool;
for (int i = 0; i < POOL_SIZE; i ++) {
SoftReference<ByteBuffer> ref = pool[i];
if (ref == null) {
continue;
}
ByteBuffer buf = ref.get();
if (buf == null) {
pool[i] = null;
continue;
}
if (buf.capacity() < size) {
continue;
}
pool[i] = null;
buf.clear();
return buf;
}
ByteBuffer buf = ByteBuffer.allocateDirect(normalizeCapacity(size));
buf.clear();
return buf;
}
final void release(ByteBuffer buffer) {
final SoftReference<ByteBuffer>[] pool = this.pool;
for (int i = 0; i < POOL_SIZE; i ++) {
SoftReference<ByteBuffer> ref = pool[i];
if (ref == null || ref.get() == null) {
pool[i] = new SoftReference<ByteBuffer>(buffer);
return;
}
}
// pool is full - replace one
final int capacity = buffer.capacity();
for (int i = 0; i < POOL_SIZE; i ++) {
SoftReference<ByteBuffer> ref = pool[i];
ByteBuffer pooled = ref.get();
if (pooled == null) {
pool[i] = null;
continue;
}
if (pooled.capacity() < capacity) {
pool[i] = new SoftReference<ByteBuffer>(buffer);
return;
}
}
}
private static final int normalizeCapacity(int capacity) {
// Normalize to multiple of 1024
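// e.g. normalizeCapacity(3000) == 3072 and normalizeCapacity(1024) == 1024.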
int q = capacity >>> 10;
int r = capacity & 1023;
if (r != 0) {
q ++;
}
return q << 10;
}
}


@@ -0,0 +1,349 @@
/*
* Copyright 2010 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.socket.sctp;
import java.io.IOException;
import java.lang.ref.SoftReference;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;
import java.nio.channels.WritableByteChannel;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.FileRegion;
/**
* @author <a href="http://www.jboss.org/netty/">The Netty Project</a>
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
* @version $Rev: 2174 $, $Date: 2010-02-19 09:57:23 +0900 (Fri, 19 Feb 2010) $
*/
final class SocketSendBufferPool {
private static final SendBuffer EMPTY_BUFFER = new EmptySendBuffer();
private static final int DEFAULT_PREALLOCATION_SIZE = 65536;
private static final int ALIGN_SHIFT = 4;
private static final int ALIGN_MASK = 15;
PreallocationRef poolHead = null;
Preallocation current = new Preallocation(DEFAULT_PREALLOCATION_SIZE);
SocketSendBufferPool() {
super();
}
final SendBuffer acquire(Object message) {
if (message instanceof ChannelBuffer) {
return acquire((ChannelBuffer) message);
} else if (message instanceof FileRegion) {
return acquire((FileRegion) message);
}
throw new IllegalArgumentException(
"unsupported message type: " + message.getClass());
}
private final SendBuffer acquire(FileRegion src) {
if (src.getCount() == 0) {
return EMPTY_BUFFER;
}
return new FileSendBuffer(src);
}
private final SendBuffer acquire(ChannelBuffer src) {
final int size = src.readableBytes();
if (size == 0) {
return EMPTY_BUFFER;
}
if (src.isDirect()) {
return new UnpooledSendBuffer(src.toByteBuffer());
}
if (src.readableBytes() > DEFAULT_PREALLOCATION_SIZE) {
return new UnpooledSendBuffer(src.toByteBuffer());
}
Preallocation current = this.current;
ByteBuffer buffer = current.buffer;
int remaining = buffer.remaining();
PooledSendBuffer dst;
if (size < remaining) {
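// The message fits into the current preallocation: hand out an aligned
// slice and advance the shared buffer.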
int nextPos = buffer.position() + size;
ByteBuffer slice = buffer.duplicate();
buffer.position(align(nextPos));
slice.limit(nextPos);
current.refCnt ++;
dst = new PooledSendBuffer(current, slice);
} else if (size > remaining) {
this.current = current = getPreallocation();
buffer = current.buffer;
ByteBuffer slice = buffer.duplicate();
buffer.position(align(size));
slice.limit(size);
current.refCnt ++;
dst = new PooledSendBuffer(current, slice);
} else { // size == remaining
current.refCnt ++;
this.current = getPreallocation0();
dst = new PooledSendBuffer(current, current.buffer);
}
ByteBuffer dstbuf = dst.buffer;
dstbuf.mark();
src.getBytes(src.readerIndex(), dstbuf);
dstbuf.reset();
return dst;
}
private final Preallocation getPreallocation() {
Preallocation current = this.current;
if (current.refCnt == 0) {
current.buffer.clear();
return current;
}
return getPreallocation0();
}
private final Preallocation getPreallocation0() {
PreallocationRef ref = poolHead;
if (ref != null) {
do {
Preallocation p = ref.get();
ref = ref.next;
if (p != null) {
poolHead = ref;
return p;
}
} while (ref != null);
poolHead = ref;
}
return new Preallocation(DEFAULT_PREALLOCATION_SIZE);
}
private static final int align(int pos) {
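// Round pos up to the next multiple of 16, e.g. align(17) == 32 and align(32) == 32.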
int q = pos >>> ALIGN_SHIFT;
int r = pos & ALIGN_MASK;
if (r != 0) {
q ++;
}
return q << ALIGN_SHIFT;
}
private final class Preallocation {
final ByteBuffer buffer;
int refCnt;
Preallocation(int capacity) {
buffer = ByteBuffer.allocateDirect(capacity);
}
}
private final class PreallocationRef extends SoftReference<Preallocation> {
final PreallocationRef next;
PreallocationRef(Preallocation preallocation, PreallocationRef next) {
super(preallocation);
this.next = next;
}
}
interface SendBuffer {
boolean finished();
long writtenBytes();
long totalBytes();
long transferTo(WritableByteChannel ch) throws IOException;
long transferTo(DatagramChannel ch, SocketAddress raddr) throws IOException;
void release();
}
class UnpooledSendBuffer implements SendBuffer {
final ByteBuffer buffer;
final int initialPos;
UnpooledSendBuffer(ByteBuffer buffer) {
this.buffer = buffer;
initialPos = buffer.position();
}
@Override
public final boolean finished() {
return !buffer.hasRemaining();
}
@Override
public final long writtenBytes() {
return buffer.position() - initialPos;
}
@Override
public final long totalBytes() {
return buffer.limit() - initialPos;
}
@Override
public final long transferTo(WritableByteChannel ch) throws IOException {
return ch.write(buffer);
}
@Override
public final long transferTo(DatagramChannel ch, SocketAddress raddr) throws IOException {
return ch.send(buffer, raddr);
}
@Override
public void release() {
// Unpooled.
}
}
final class PooledSendBuffer implements SendBuffer {
private final Preallocation parent;
final ByteBuffer buffer;
final int initialPos;
PooledSendBuffer(Preallocation parent, ByteBuffer buffer) {
this.parent = parent;
this.buffer = buffer;
initialPos = buffer.position();
}
@Override
public boolean finished() {
return !buffer.hasRemaining();
}
@Override
public long writtenBytes() {
return buffer.position() - initialPos;
}
@Override
public long totalBytes() {
return buffer.limit() - initialPos;
}
@Override
public long transferTo(WritableByteChannel ch) throws IOException {
return ch.write(buffer);
}
@Override
public long transferTo(DatagramChannel ch, SocketAddress raddr) throws IOException {
return ch.send(buffer, raddr);
}
@Override
public void release() {
final Preallocation parent = this.parent;
if (-- parent.refCnt == 0) {
parent.buffer.clear();
if (parent != current) {
poolHead = new PreallocationRef(parent, poolHead);
}
}
}
}
final class FileSendBuffer implements SendBuffer {
private final FileRegion file;
private long writtenBytes;
FileSendBuffer(FileRegion file) {
this.file = file;
}
@Override
public boolean finished() {
return writtenBytes >= file.getCount();
}
@Override
public long writtenBytes() {
return writtenBytes;
}
@Override
public long totalBytes() {
return file.getCount();
}
@Override
public long transferTo(WritableByteChannel ch) throws IOException {
long localWrittenBytes = file.transferTo(ch, writtenBytes);
writtenBytes += localWrittenBytes;
return localWrittenBytes;
}
@Override
public long transferTo(DatagramChannel ch, SocketAddress raddr)
throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void release() {
// Unpooled.
}
}
static final class EmptySendBuffer implements SendBuffer {
EmptySendBuffer() {
super();
}
@Override
public final boolean finished() {
return true;
}
@Override
public final long writtenBytes() {
return 0;
}
@Override
public final long totalBytes() {
return 0;
}
@Override
public final long transferTo(WritableByteChannel ch) throws IOException {
return 0;
}
@Override
public final long transferTo(DatagramChannel ch, SocketAddress raddr) throws IOException {
return 0;
}
@Override
public void release() {
// Unpooled.
}
}
}


@@ -0,0 +1,21 @@
/*
* Copyright 2009 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* <a href="http://en.wikipedia.org/wiki/New_I/O">NIO</a>-based socket channel
* API implementation - recommended for a large number of connections (&gt;= 1000).
*/
package org.jboss.netty.channel.socket.sctp;