2015-01-14 16:38:46 +01:00
|
|
|
/*
|
|
|
|
* Copyright 2015 The Netty Project
|
|
|
|
*
|
|
|
|
* The Netty Project licenses this file to you under the Apache License,
|
|
|
|
* version 2.0 (the "License"); you may not use this file except in compliance
|
|
|
|
* with the License. You may obtain a copy of the License at:
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
|
|
|
*/
|
|
|
|
package io.netty.channel.epoll;
|
|
|
|
|
|
|
|
import io.netty.buffer.ByteBuf;
|
|
|
|
import io.netty.buffer.ByteBufAllocator;
|
|
|
|
import io.netty.channel.Channel;
|
|
|
|
import io.netty.channel.ChannelConfig;
|
|
|
|
import io.netty.channel.ChannelFuture;
|
|
|
|
import io.netty.channel.ChannelFutureListener;
|
2016-07-15 19:44:32 +02:00
|
|
|
import io.netty.channel.ChannelMetadata;
|
2015-01-14 16:38:46 +01:00
|
|
|
import io.netty.channel.ChannelOutboundBuffer;
|
|
|
|
import io.netty.channel.ChannelPipeline;
|
|
|
|
import io.netty.channel.ChannelPromise;
|
|
|
|
import io.netty.channel.DefaultFileRegion;
|
2015-04-14 06:54:20 +02:00
|
|
|
import io.netty.channel.EventLoop;
|
2016-09-11 09:27:58 +02:00
|
|
|
import io.netty.channel.FileRegion;
|
2015-01-14 16:38:46 +01:00
|
|
|
import io.netty.channel.RecvByteBufAllocator;
|
2017-12-08 01:00:52 +01:00
|
|
|
import io.netty.channel.internal.ChannelUtils;
|
2016-02-18 17:41:55 +01:00
|
|
|
import io.netty.channel.socket.DuplexChannel;
|
2015-02-12 20:44:36 +01:00
|
|
|
import io.netty.channel.unix.FileDescriptor;
|
2017-01-19 17:31:34 +01:00
|
|
|
import io.netty.channel.unix.IovArray;
|
|
|
|
import io.netty.channel.unix.SocketWritableByteChannel;
|
2017-05-21 13:24:14 +02:00
|
|
|
import io.netty.channel.unix.UnixChannelUtil;
|
2015-01-14 16:38:46 +01:00
|
|
|
import io.netty.util.internal.PlatformDependent;
|
|
|
|
import io.netty.util.internal.StringUtil;
|
2017-08-25 04:46:46 +02:00
|
|
|
import io.netty.util.internal.UnstableApi;
|
2015-04-14 06:54:20 +02:00
|
|
|
import io.netty.util.internal.logging.InternalLogger;
|
|
|
|
import io.netty.util.internal.logging.InternalLoggerFactory;
|
2015-01-14 16:38:46 +01:00
|
|
|
|
|
|
|
import java.io.IOException;
|
|
|
|
import java.net.SocketAddress;
|
|
|
|
import java.nio.ByteBuffer;
|
2015-04-19 21:07:19 +02:00
|
|
|
import java.nio.channels.ClosedChannelException;
|
2016-09-11 09:27:58 +02:00
|
|
|
import java.nio.channels.WritableByteChannel;
|
2015-04-14 06:54:20 +02:00
|
|
|
import java.util.Queue;
|
2016-02-18 17:41:55 +01:00
|
|
|
import java.util.concurrent.Executor;
|
2015-01-14 16:38:46 +01:00
|
|
|
|
2017-12-08 01:00:52 +01:00
|
|
|
import static io.netty.channel.internal.ChannelUtils.MAX_BYTES_PER_GATHERING_WRITE_ATTEMPTED_LOW_THRESHOLD;
|
|
|
|
import static io.netty.channel.internal.ChannelUtils.WRITE_STATUS_SNDBUF_FULL;
|
2015-10-07 04:00:59 +02:00
|
|
|
import static io.netty.channel.unix.FileDescriptor.pipe;
|
|
|
|
import static io.netty.util.internal.ObjectUtil.checkNotNull;
|
2019-01-31 09:06:59 +01:00
|
|
|
import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
|
2015-10-07 04:00:59 +02:00
|
|
|
|
2016-02-18 17:41:55 +01:00
|
|
|
public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel implements DuplexChannel {
|
2016-07-15 19:44:32 +02:00
|
|
|
    // Stream channels are not message-oriented (hasDisconnect=false) and default to 16 max messages per read.
    private static final ChannelMetadata METADATA = new ChannelMetadata(false, 16);
    // Suffix appended to the error message of filterOutboundMessage(...) listing the supported message types.
    private static final String EXPECTED_TYPES =
            " (expected: " + StringUtil.simpleClassName(ByteBuf.class) + ", " +
            StringUtil.simpleClassName(DefaultFileRegion.class) + ')';
    private static final InternalLogger logger = InternalLoggerFactory.getInstance(AbstractEpollStreamChannel.class);

    // Scheduled by doWrite(...) when the write quantum is used up but the socket is still writable,
    // so flushing continues on the event loop without waiting for an EPOLLOUT wakeup.
    private final Runnable flushTask = new Runnable() {
        @Override
        public void run() {
            // Calling flush0 directly to ensure we not try to flush messages that were added via write(...) in the
            // meantime.
            ((AbstractEpollUnsafe) unsafe()).flush0();
        }
    };

    // Lazy init these if we need to splice(...)
    private volatile Queue<SpliceInTask> spliceQueue;
    // Pipe pair used as the intermediate buffer for splice operations; lazily created.
    private FileDescriptor pipeIn;
    private FileDescriptor pipeOut;

    // Lazily-created adapter so FileRegion.transferTo(...) can write to this socket; see writeFileRegion(...).
    private WritableByteChannel byteChannel;
|
|
|
|
|
2015-01-14 16:38:46 +01:00
|
|
|
    /**
     * Creates a channel from a raw file descriptor with the given parent.
     */
    protected AbstractEpollStreamChannel(Channel parent, int fd) {
        this(parent, new LinuxSocket(fd));
    }

    /**
     * Creates a parentless channel from a raw file descriptor.
     */
    protected AbstractEpollStreamChannel(int fd) {
        this(new LinuxSocket(fd));
    }

    // Active state is derived from SO_ERROR: a zero error value is treated as "connected".
    AbstractEpollStreamChannel(LinuxSocket fd) {
        this(fd, isSoErrorZero(fd));
    }

    // Used for accepted/connected sockets: always active.
    AbstractEpollStreamChannel(Channel parent, LinuxSocket fd) {
        super(parent, fd, true);
        // Add EPOLLRDHUP so we are notified once the remote peer close the connection.
        flags |= Native.EPOLLRDHUP;
    }

    // Used when the remote address is already known (e.g. for accepted sockets).
    AbstractEpollStreamChannel(Channel parent, LinuxSocket fd, SocketAddress remote) {
        super(parent, fd, remote);
        // Add EPOLLRDHUP so we are notified once the remote peer close the connection.
        flags |= Native.EPOLLRDHUP;
    }

    /**
     * Creates a parentless channel with an explicit initial active state.
     */
    protected AbstractEpollStreamChannel(LinuxSocket fd, boolean active) {
        super(null, fd, active);
        // Add EPOLLRDHUP so we are notified once the remote peer close the connection.
        flags |= Native.EPOLLRDHUP;
    }
|
|
|
|
|
2015-01-14 16:38:46 +01:00
|
|
|
    // Stream channels use the stream-specific unsafe implementation for read/write handling.
    @Override
    protected AbstractEpollUnsafe newUnsafe() {
        return new EpollStreamUnsafe();
    }

    @Override
    public ChannelMetadata metadata() {
        return METADATA;
    }
|
|
|
|
|
2015-04-14 06:54:20 +02:00
|
|
|
    /**
     * Splice from this {@link AbstractEpollStreamChannel} to another {@link AbstractEpollStreamChannel}.
     * The {@code len} is the number of bytes to splice. If using {@link Integer#MAX_VALUE} it will
     * splice until the {@link ChannelFuture} was canceled or it was failed.
     *
     * Please note:
     * <ul>
     *   <li>both channels need to be registered to the same {@link EventLoop}, otherwise an
     *   {@link IllegalArgumentException} is thrown. </li>
     *   <li>{@link EpollChannelConfig#getEpollMode()} must be {@link EpollMode#LEVEL_TRIGGERED} for this and the
     *   target {@link AbstractEpollStreamChannel}</li>
     * </ul>
     *
     * @param ch  the target channel to splice into
     * @param len the number of bytes to splice
     * @return a future that is notified once splicing completes or fails
     */
    public final ChannelFuture spliceTo(final AbstractEpollStreamChannel ch, final int len) {
        // Delegates to the full overload with a freshly-created promise.
        return spliceTo(ch, len, newPromise());
    }
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Splice from this {@link AbstractEpollStreamChannel} to another {@link AbstractEpollStreamChannel}.
|
|
|
|
* The {@code len} is the number of bytes to splice. If using {@link Integer#MAX_VALUE} it will
|
|
|
|
* splice until the {@link ChannelFuture} was canceled or it was failed.
|
|
|
|
*
|
|
|
|
* Please note:
|
|
|
|
* <ul>
|
|
|
|
* <li>both channels need to be registered to the same {@link EventLoop}, otherwise an
|
|
|
|
* {@link IllegalArgumentException} is thrown. </li>
|
|
|
|
* <li>{@link EpollChannelConfig#getEpollMode()} must be {@link EpollMode#LEVEL_TRIGGERED} for this and the
|
|
|
|
* target {@link AbstractEpollStreamChannel}</li>
|
|
|
|
* </ul>
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
public final ChannelFuture spliceTo(final AbstractEpollStreamChannel ch, final int len,
|
|
|
|
final ChannelPromise promise) {
|
|
|
|
if (ch.eventLoop() != eventLoop()) {
|
|
|
|
throw new IllegalArgumentException("EventLoops are not the same.");
|
|
|
|
}
|
2019-01-31 09:06:59 +01:00
|
|
|
checkPositiveOrZero(len, "len");
|
2015-04-14 06:54:20 +02:00
|
|
|
if (ch.config().getEpollMode() != EpollMode.LEVEL_TRIGGERED
|
|
|
|
|| config().getEpollMode() != EpollMode.LEVEL_TRIGGERED) {
|
|
|
|
throw new IllegalStateException("spliceTo() supported only when using " + EpollMode.LEVEL_TRIGGERED);
|
|
|
|
}
|
|
|
|
checkNotNull(promise, "promise");
|
|
|
|
if (!isOpen()) {
|
2019-05-17 22:23:02 +02:00
|
|
|
promise.tryFailure(new ClosedChannelException());
|
2015-04-14 06:54:20 +02:00
|
|
|
} else {
|
2015-11-20 07:52:26 +01:00
|
|
|
addToSpliceQueue(new SpliceInChannelTask(ch, len, promise));
|
2015-04-14 06:54:20 +02:00
|
|
|
failSpliceIfClosed(promise);
|
|
|
|
}
|
|
|
|
return promise;
|
|
|
|
}
|
|
|
|
|
|
|
|
    /**
     * Splice from this {@link AbstractEpollStreamChannel} to another {@link FileDescriptor}.
     * The {@code offset} is the offset for the {@link FileDescriptor} and {@code len} is the
     * number of bytes to splice. If using {@link Integer#MAX_VALUE} it will splice until the
     * {@link ChannelFuture} was canceled or it was failed.
     *
     * Please note:
     * <ul>
     *   <li>{@link EpollChannelConfig#getEpollMode()} must be {@link EpollMode#LEVEL_TRIGGERED} for this
     *   {@link AbstractEpollStreamChannel}</li>
     *   <li>the {@link FileDescriptor} will not be closed after the {@link ChannelFuture} is notified</li>
     *   <li>this channel must be registered to an event loop or {@link IllegalStateException} will be thrown.</li>
     * </ul>
     *
     * @param ch     the target file descriptor to splice into
     * @param offset the offset within the target file descriptor
     * @param len    the number of bytes to splice
     * @return a future that is notified once splicing completes or fails
     */
    public final ChannelFuture spliceTo(final FileDescriptor ch, final int offset, final int len) {
        // Delegates to the full overload with a freshly-created promise.
        return spliceTo(ch, offset, len, newPromise());
    }
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Splice from this {@link AbstractEpollStreamChannel} to another {@link FileDescriptor}.
|
|
|
|
* The {@code offset} is the offset for the {@link FileDescriptor} and {@code len} is the
|
|
|
|
* number of bytes to splice. If using {@link Integer#MAX_VALUE} it will splice until the
|
|
|
|
* {@link ChannelFuture} was canceled or it was failed.
|
|
|
|
*
|
|
|
|
* Please note:
|
|
|
|
* <ul>
|
|
|
|
* <li>{@link EpollChannelConfig#getEpollMode()} must be {@link EpollMode#LEVEL_TRIGGERED} for this
|
|
|
|
* {@link AbstractEpollStreamChannel}</li>
|
|
|
|
* <li>the {@link FileDescriptor} will not be closed after the {@link ChannelPromise} is notified</li>
|
2015-11-20 07:52:26 +01:00
|
|
|
* <li>this channel must be registered to an event loop or {@link IllegalStateException} will be thrown.</li>
|
2015-04-14 06:54:20 +02:00
|
|
|
* </ul>
|
|
|
|
*/
|
|
|
|
public final ChannelFuture spliceTo(final FileDescriptor ch, final int offset, final int len,
|
|
|
|
final ChannelPromise promise) {
|
2019-01-31 09:06:59 +01:00
|
|
|
checkPositiveOrZero(len, "len");
|
2019-07-16 13:22:30 +02:00
|
|
|
checkPositiveOrZero(offset, "offset");
|
2015-04-14 06:54:20 +02:00
|
|
|
if (config().getEpollMode() != EpollMode.LEVEL_TRIGGERED) {
|
|
|
|
throw new IllegalStateException("spliceTo() supported only when using " + EpollMode.LEVEL_TRIGGERED);
|
|
|
|
}
|
|
|
|
checkNotNull(promise, "promise");
|
|
|
|
if (!isOpen()) {
|
2019-05-17 22:23:02 +02:00
|
|
|
promise.tryFailure(new ClosedChannelException());
|
2015-04-14 06:54:20 +02:00
|
|
|
} else {
|
2015-11-20 07:52:26 +01:00
|
|
|
addToSpliceQueue(new SpliceFdTask(ch, offset, len, promise));
|
2015-04-14 06:54:20 +02:00
|
|
|
failSpliceIfClosed(promise);
|
|
|
|
}
|
|
|
|
return promise;
|
|
|
|
}
|
|
|
|
|
|
|
|
private void failSpliceIfClosed(ChannelPromise promise) {
|
|
|
|
if (!isOpen()) {
|
|
|
|
// Seems like the Channel was closed in the meantime try to fail the promise to prevent any
|
|
|
|
// cases where a future may not be notified otherwise.
|
2019-05-17 22:23:02 +02:00
|
|
|
if (promise.tryFailure(new ClosedChannelException())) {
|
2016-03-27 14:25:39 +02:00
|
|
|
eventLoop().execute(new Runnable() {
|
2015-04-14 06:54:20 +02:00
|
|
|
@Override
|
|
|
|
public void run() {
|
|
|
|
// Call this via the EventLoop as it is a MPSC queue.
|
|
|
|
clearSpliceQueue();
|
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-14 16:38:46 +01:00
|
|
|
/**
|
|
|
|
* Write bytes form the given {@link ByteBuf} to the underlying {@link java.nio.channels.Channel}.
|
2017-12-08 01:00:52 +01:00
|
|
|
* @param in the collection which contains objects to write.
|
|
|
|
* @param buf the {@link ByteBuf} from which the bytes should be written
|
|
|
|
* @return The value that should be decremented from the write quantum which starts at
|
|
|
|
* {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
|
|
|
|
* <ul>
|
|
|
|
* <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
|
|
|
|
* is encountered</li>
|
|
|
|
* <li>1 - if a single call to write data was made to the OS</li>
|
|
|
|
* <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
|
|
|
|
* no data was accepted</li>
|
|
|
|
* </ul>
|
2015-01-14 16:38:46 +01:00
|
|
|
*/
|
2017-12-08 01:00:52 +01:00
|
|
|
private int writeBytes(ChannelOutboundBuffer in, ByteBuf buf) throws Exception {
|
2015-01-14 16:38:46 +01:00
|
|
|
int readableBytes = buf.readableBytes();
|
|
|
|
if (readableBytes == 0) {
|
|
|
|
in.remove();
|
2017-12-08 01:00:52 +01:00
|
|
|
return 0;
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (buf.hasMemoryAddress() || buf.nioBufferCount() == 1) {
|
2017-12-08 01:00:52 +01:00
|
|
|
return doWriteBytes(in, buf);
|
2015-01-14 16:38:46 +01:00
|
|
|
} else {
|
|
|
|
ByteBuffer[] nioBuffers = buf.nioBuffers();
|
2017-12-08 01:00:52 +01:00
|
|
|
return writeBytesMultiple(in, nioBuffers, nioBuffers.length, readableBytes,
|
|
|
|
config().getMaxBytesPerGatheringWrite());
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-08 01:00:52 +01:00
|
|
|
private void adjustMaxBytesPerGatheringWrite(long attempted, long written, long oldMaxBytesPerGatheringWrite) {
|
|
|
|
// By default we track the SO_SNDBUF when ever it is explicitly set. However some OSes may dynamically change
|
|
|
|
// SO_SNDBUF (and other characteristics that determine how much data can be written at once) so we should try
|
|
|
|
// make a best effort to adjust as OS behavior changes.
|
|
|
|
if (attempted == written) {
|
|
|
|
if (attempted << 1 > oldMaxBytesPerGatheringWrite) {
|
|
|
|
config().setMaxBytesPerGatheringWrite(attempted << 1);
|
|
|
|
}
|
|
|
|
} else if (attempted > MAX_BYTES_PER_GATHERING_WRITE_ATTEMPTED_LOW_THRESHOLD && written < attempted >>> 1) {
|
|
|
|
config().setMaxBytesPerGatheringWrite(attempted >>> 1);
|
|
|
|
}
|
|
|
|
}
|
2015-01-14 16:38:46 +01:00
|
|
|
|
2017-12-08 01:00:52 +01:00
|
|
|
/**
|
|
|
|
* Write multiple bytes via {@link IovArray}.
|
|
|
|
* @param in the collection which contains objects to write.
|
|
|
|
* @param array The array which contains the content to write.
|
|
|
|
* @return The value that should be decremented from the write quantum which starts at
|
|
|
|
* {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
|
|
|
|
* <ul>
|
|
|
|
* <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
|
|
|
|
* is encountered</li>
|
|
|
|
* <li>1 - if a single call to write data was made to the OS</li>
|
|
|
|
* <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
|
|
|
|
* no data was accepted</li>
|
|
|
|
* </ul>
|
|
|
|
* @throws IOException If an I/O exception occurs during write.
|
|
|
|
*/
|
|
|
|
private int writeBytesMultiple(ChannelOutboundBuffer in, IovArray array) throws IOException {
|
|
|
|
final long expectedWrittenBytes = array.size();
|
2015-01-14 16:38:46 +01:00
|
|
|
assert expectedWrittenBytes != 0;
|
2017-12-08 01:00:52 +01:00
|
|
|
final int cnt = array.count();
|
2015-01-14 16:38:46 +01:00
|
|
|
assert cnt != 0;
|
|
|
|
|
2017-12-08 01:00:52 +01:00
|
|
|
final long localWrittenBytes = socket.writevAddresses(array.memoryAddress(0), cnt);
|
|
|
|
if (localWrittenBytes > 0) {
|
|
|
|
adjustMaxBytesPerGatheringWrite(expectedWrittenBytes, localWrittenBytes, array.maxBytes());
|
|
|
|
in.removeBytes(localWrittenBytes);
|
|
|
|
return 1;
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|
2017-12-08 01:00:52 +01:00
|
|
|
return WRITE_STATUS_SNDBUF_FULL;
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|
|
|
|
|
2017-12-08 01:00:52 +01:00
|
|
|
/**
|
|
|
|
* Write multiple bytes via {@link ByteBuffer} array.
|
|
|
|
* @param in the collection which contains objects to write.
|
|
|
|
* @param nioBuffers The buffers to write.
|
|
|
|
* @param nioBufferCnt The number of buffers to write.
|
|
|
|
* @param expectedWrittenBytes The number of bytes we expect to write.
|
|
|
|
* @param maxBytesPerGatheringWrite The maximum number of bytes we should attempt to write.
|
|
|
|
* @return The value that should be decremented from the write quantum which starts at
|
|
|
|
* {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
|
|
|
|
* <ul>
|
|
|
|
* <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
|
|
|
|
* is encountered</li>
|
|
|
|
* <li>1 - if a single call to write data was made to the OS</li>
|
|
|
|
* <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
|
|
|
|
* no data was accepted</li>
|
|
|
|
* </ul>
|
|
|
|
* @throws IOException If an I/O exception occurs during write.
|
|
|
|
*/
|
|
|
|
private int writeBytesMultiple(
|
|
|
|
ChannelOutboundBuffer in, ByteBuffer[] nioBuffers, int nioBufferCnt, long expectedWrittenBytes,
|
|
|
|
long maxBytesPerGatheringWrite) throws IOException {
|
2015-01-14 16:38:46 +01:00
|
|
|
assert expectedWrittenBytes != 0;
|
2017-12-08 01:00:52 +01:00
|
|
|
if (expectedWrittenBytes > maxBytesPerGatheringWrite) {
|
|
|
|
expectedWrittenBytes = maxBytesPerGatheringWrite;
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|
|
|
|
|
2017-12-08 01:00:52 +01:00
|
|
|
final long localWrittenBytes = socket.writev(nioBuffers, 0, nioBufferCnt, expectedWrittenBytes);
|
|
|
|
if (localWrittenBytes > 0) {
|
|
|
|
adjustMaxBytesPerGatheringWrite(expectedWrittenBytes, localWrittenBytes, maxBytesPerGatheringWrite);
|
|
|
|
in.removeBytes(localWrittenBytes);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return WRITE_STATUS_SNDBUF_FULL;
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
    /**
     * Write a {@link DefaultFileRegion}
     * @param in the collection which contains objects to write.
     * @param region the {@link DefaultFileRegion} from which the bytes should be written
     * @return The value that should be decremented from the write quantum which starts at
     * {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
     * <ul>
     *     <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
     *     is encountered</li>
     *     <li>1 - if a single call to write data was made to the OS</li>
     *     <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
     *     no data was accepted</li>
     * </ul>
     */
    private int writeDefaultFileRegion(ChannelOutboundBuffer in, DefaultFileRegion region) throws Exception {
        // Resume from where the previous (possibly partial) write left off.
        final long offset = region.transferred();
        final long regionCount = region.count();
        if (offset >= regionCount) {
            // Everything was already transferred; just drop the entry.
            in.remove();
            return 0;
        }

        // Zero-copy transfer of the remaining bytes via sendfile(2).
        final long flushedAmount = socket.sendFile(region, region.position(), offset, regionCount - offset);
        if (flushedAmount > 0) {
            in.progress(flushedAmount);
            if (region.transferred() >= regionCount) {
                in.remove();
            }
            return 1;
        } else if (flushedAmount == 0) {
            // Zero bytes transferred although the region was not exhausted — likely the file
            // shrank underneath us; let validateFileRegion(...) raise a descriptive error.
            validateFileRegion(region, offset);
        }
        return WRITE_STATUS_SNDBUF_FULL;
    }
|
|
|
|
|
2017-12-08 01:00:52 +01:00
|
|
|
    /**
     * Write a {@link FileRegion}
     * @param in the collection which contains objects to write.
     * @param region the {@link FileRegion} from which the bytes should be written
     * @return The value that should be decremented from the write quantum which starts at
     * {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
     * <ul>
     *     <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
     *     is encountered</li>
     *     <li>1 - if a single call to write data was made to the OS</li>
     *     <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
     *     no data was accepted</li>
     * </ul>
     */
    private int writeFileRegion(ChannelOutboundBuffer in, FileRegion region) throws Exception {
        if (region.transferred() >= region.count()) {
            // Everything was already transferred; just drop the entry.
            in.remove();
            return 0;
        }

        // Generic (non-DefaultFileRegion) regions transfer through a WritableByteChannel
        // adapter over this socket; created lazily on first use.
        if (byteChannel == null) {
            byteChannel = new EpollSocketWritableByteChannel();
        }
        final long flushedAmount = region.transferTo(byteChannel, region.transferred());
        if (flushedAmount > 0) {
            in.progress(flushedAmount);
            if (region.transferred() >= region.count()) {
                in.remove();
            }
            return 1;
        }
        return WRITE_STATUS_SNDBUF_FULL;
    }
|
|
|
|
|
2015-01-14 16:38:46 +01:00
|
|
|
    /**
     * Flushes the outbound buffer, spending at most the configured write-spin quantum, and
     * manages the EPOLLOUT interest flag depending on whether the socket could accept the data.
     */
    @Override
    protected void doWrite(ChannelOutboundBuffer in) throws Exception {
        int writeSpinCount = config().getWriteSpinCount();
        do {
            final int msgCount = in.size();
            // Do gathering write if the outbound buffer entries start with more than one ByteBuf.
            if (msgCount > 1 && in.current() instanceof ByteBuf) {
                writeSpinCount -= doWriteMultiple(in);
            } else if (msgCount == 0) {
                // Wrote all messages.
                clearFlag(Native.EPOLLOUT);
                // Return here so we not set the EPOLLOUT flag.
                return;
            } else {  // msgCount == 1
                writeSpinCount -= doWriteSingle(in);
            }

            // We do not break the loop here even if the outbound buffer was flushed completely,
            // because a user might have triggered another write and flush when we notify his or her
            // listeners.
        } while (writeSpinCount > 0);
        // writeSpinCount == 0 means the quantum was fully used; a negative value means a write
        // returned WRITE_STATUS_SNDBUF_FULL, i.e. the kernel send buffer is full.

        if (writeSpinCount == 0) {
            // It is possible that we have set EPOLLOUT, woken up by EPOLL because the socket is writable, and then use
            // our write quantum. In this case we no longer want to set the EPOLLOUT flag because the socket is still
            // writable (as far as we know). We will find out next time we attempt to write if the socket is writable
            // and set the EPOLLOUT if necessary.
            clearFlag(Native.EPOLLOUT);

            // We used our writeSpin quantum, and should try to write again later.
            eventLoop().execute(flushTask);
        } else {
            // Underlying descriptor can not accept all data currently, so set the EPOLLOUT flag to be woken up
            // when it can accept more data.
            setFlag(Native.EPOLLOUT);
        }
    }
|
|
|
|
|
2017-12-08 01:00:52 +01:00
|
|
|
/**
|
|
|
|
* Attempt to write a single object.
|
|
|
|
* @param in the collection which contains objects to write.
|
|
|
|
* @return The value that should be decremented from the write quantum which starts at
|
|
|
|
* {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
|
|
|
|
* <ul>
|
|
|
|
* <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
|
|
|
|
* is encountered</li>
|
|
|
|
* <li>1 - if a single call to write data was made to the OS</li>
|
|
|
|
* <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
|
|
|
|
* no data was accepted</li>
|
|
|
|
* </ul>
|
|
|
|
* @throws Exception If an I/O error occurs.
|
|
|
|
*/
|
|
|
|
protected int doWriteSingle(ChannelOutboundBuffer in) throws Exception {
|
2015-01-14 16:38:46 +01:00
|
|
|
// The outbound buffer contains only one message or it contains a file region.
|
|
|
|
Object msg = in.current();
|
|
|
|
if (msg instanceof ByteBuf) {
|
2017-12-08 01:00:52 +01:00
|
|
|
return writeBytes(in, (ByteBuf) msg);
|
2015-01-14 16:38:46 +01:00
|
|
|
} else if (msg instanceof DefaultFileRegion) {
|
2017-12-08 01:00:52 +01:00
|
|
|
return writeDefaultFileRegion(in, (DefaultFileRegion) msg);
|
2016-09-11 09:27:58 +02:00
|
|
|
} else if (msg instanceof FileRegion) {
|
2017-12-08 01:00:52 +01:00
|
|
|
return writeFileRegion(in, (FileRegion) msg);
|
2015-04-14 06:54:20 +02:00
|
|
|
} else if (msg instanceof SpliceOutTask) {
|
|
|
|
if (!((SpliceOutTask) msg).spliceOut()) {
|
2017-12-08 01:00:52 +01:00
|
|
|
return WRITE_STATUS_SNDBUF_FULL;
|
2015-04-14 06:54:20 +02:00
|
|
|
}
|
|
|
|
in.remove();
|
2017-12-08 01:00:52 +01:00
|
|
|
return 1;
|
2015-01-14 16:38:46 +01:00
|
|
|
} else {
|
|
|
|
// Should never reach here.
|
|
|
|
throw new Error();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-08 01:00:52 +01:00
|
|
|
/**
|
|
|
|
* Attempt to write multiple {@link ByteBuf} objects.
|
|
|
|
* @param in the collection which contains objects to write.
|
|
|
|
* @return The value that should be decremented from the write quantum which starts at
|
|
|
|
* {@link ChannelConfig#getWriteSpinCount()}. The typical use cases are as follows:
|
|
|
|
* <ul>
|
|
|
|
* <li>0 - if no write was attempted. This is appropriate if an empty {@link ByteBuf} (or other empty content)
|
|
|
|
* is encountered</li>
|
|
|
|
* <li>1 - if a single call to write data was made to the OS</li>
|
|
|
|
* <li>{@link ChannelUtils#WRITE_STATUS_SNDBUF_FULL} - if an attempt to write data was made to the OS, but
|
|
|
|
* no data was accepted</li>
|
|
|
|
* </ul>
|
|
|
|
* @throws Exception If an I/O error occurs.
|
|
|
|
*/
|
|
|
|
private int doWriteMultiple(ChannelOutboundBuffer in) throws Exception {
|
|
|
|
final long maxBytesPerGatheringWrite = config().getMaxBytesPerGatheringWrite();
|
2018-08-29 19:36:33 +02:00
|
|
|
IovArray array = ((EpollEventLoop) eventLoop()).cleanIovArray();
|
|
|
|
array.maxBytes(maxBytesPerGatheringWrite);
|
|
|
|
in.forEachFlushedMessage(array);
|
|
|
|
|
|
|
|
if (array.count() >= 1) {
|
|
|
|
// TODO: Handle the case where cnt == 1 specially.
|
|
|
|
return writeBytesMultiple(in, array);
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|
2017-12-08 01:00:52 +01:00
|
|
|
// cnt == 0, which means the outbound buffer contained empty buffers only.
|
|
|
|
in.removeBytes(0);
|
|
|
|
return 0;
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
protected Object filterOutboundMessage(Object msg) {
|
|
|
|
if (msg instanceof ByteBuf) {
|
|
|
|
ByteBuf buf = (ByteBuf) msg;
|
2017-05-21 13:24:14 +02:00
|
|
|
return UnixChannelUtil.isBufferCopyNeededForWrite(buf)? newDirectBuffer(buf): buf;
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|
|
|
|
|
2016-09-11 09:27:58 +02:00
|
|
|
if (msg instanceof FileRegion || msg instanceof SpliceOutTask) {
|
2015-01-14 16:38:46 +01:00
|
|
|
return msg;
|
|
|
|
}
|
|
|
|
|
|
|
|
throw new UnsupportedOperationException(
|
|
|
|
"unsupported message type: " + StringUtil.simpleClassName(msg) + EXPECTED_TYPES);
|
|
|
|
}
|
|
|
|
|
2017-08-25 04:46:46 +02:00
|
|
|
    // Half-closes the write side of the socket (shutdown(fd, SHUT_WR)); the read side stays open.
    @UnstableApi
    @Override
    protected final void doShutdownOutput() throws Exception {
        socket.shutdown(false, true);
    }
|
|
|
|
|
2016-04-28 19:56:49 +02:00
|
|
|
    /**
     * Half-closes the read side of the socket and completes {@code promise} with the outcome.
     * Must only be invoked from a context where the shutdown is safe to perform (see callers).
     */
    private void shutdownInput0(final ChannelPromise promise) {
        try {
            // shutdown(fd, SHUT_RD): the write side stays open.
            socket.shutdown(true, false);
            promise.setSuccess();
        } catch (Throwable cause) {
            promise.setFailure(cause);
        }
    }
|
|
|
|
|
|
|
|
    // The three shutdown-state queries below simply reflect the underlying socket's state.
    @Override
    public boolean isOutputShutdown() {
        return socket.isOutputShutdown();
    }

    @Override
    public boolean isInputShutdown() {
        return socket.isInputShutdown();
    }

    @Override
    public boolean isShutdown() {
        return socket.isShutdown();
    }
|
|
|
|
|
|
|
|
    // Delegates to the promise-taking overload with a freshly-created promise.
    @Override
    public ChannelFuture shutdownOutput() {
        return shutdownOutput(newPromise());
    }
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture shutdownOutput(final ChannelPromise promise) {
|
2017-09-17 02:20:01 +02:00
|
|
|
EventLoop loop = eventLoop();
|
|
|
|
if (loop.inEventLoop()) {
|
|
|
|
((AbstractUnsafe) unsafe()).shutdownOutput(promise);
|
|
|
|
} else {
|
|
|
|
loop.execute(new Runnable() {
|
2016-02-18 17:41:55 +01:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2017-09-17 02:20:01 +02:00
|
|
|
((AbstractUnsafe) unsafe()).shutdownOutput(promise);
|
2016-02-18 17:41:55 +01:00
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
2017-09-17 02:20:01 +02:00
|
|
|
|
2016-02-18 17:41:55 +01:00
|
|
|
return promise;
|
|
|
|
}
|
|
|
|
|
2016-04-28 19:56:49 +02:00
|
|
|
@Override
|
|
|
|
public ChannelFuture shutdownInput() {
|
|
|
|
return shutdownInput(newPromise());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture shutdownInput(final ChannelPromise promise) {
|
|
|
|
Executor closeExecutor = ((EpollStreamUnsafe) unsafe()).prepareToClose();
|
|
|
|
if (closeExecutor != null) {
|
2016-03-27 14:25:39 +02:00
|
|
|
closeExecutor.execute(new Runnable() {
|
2016-04-28 19:56:49 +02:00
|
|
|
@Override
|
|
|
|
public void run() {
|
|
|
|
shutdownInput0(promise);
|
|
|
|
}
|
|
|
|
});
|
|
|
|
} else {
|
|
|
|
EventLoop loop = eventLoop();
|
|
|
|
if (loop.inEventLoop()) {
|
|
|
|
shutdownInput0(promise);
|
|
|
|
} else {
|
2016-03-27 14:25:39 +02:00
|
|
|
loop.execute(new Runnable() {
|
2016-04-28 19:56:49 +02:00
|
|
|
@Override
|
|
|
|
public void run() {
|
|
|
|
shutdownInput0(promise);
|
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return promise;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
public ChannelFuture shutdown() {
    // Convenience overload: delegates to shutdown(ChannelPromise) with a fresh promise.
    return shutdown(newPromise());
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public ChannelFuture shutdown(final ChannelPromise promise) {
|
2017-09-17 02:20:01 +02:00
|
|
|
ChannelFuture shutdownOutputFuture = shutdownOutput();
|
|
|
|
if (shutdownOutputFuture.isDone()) {
|
|
|
|
shutdownOutputDone(shutdownOutputFuture, promise);
|
|
|
|
} else {
|
|
|
|
shutdownOutputFuture.addListener(new ChannelFutureListener() {
|
2016-04-28 19:56:49 +02:00
|
|
|
@Override
|
2017-09-17 02:20:01 +02:00
|
|
|
public void operationComplete(final ChannelFuture shutdownOutputFuture) throws Exception {
|
|
|
|
shutdownOutputDone(shutdownOutputFuture, promise);
|
2016-04-28 19:56:49 +02:00
|
|
|
}
|
|
|
|
});
|
2017-09-17 02:20:01 +02:00
|
|
|
}
|
|
|
|
return promise;
|
|
|
|
}
|
|
|
|
|
|
|
|
private void shutdownOutputDone(final ChannelFuture shutdownOutputFuture, final ChannelPromise promise) {
|
|
|
|
ChannelFuture shutdownInputFuture = shutdownInput();
|
|
|
|
if (shutdownInputFuture.isDone()) {
|
|
|
|
shutdownDone(shutdownOutputFuture, shutdownInputFuture, promise);
|
2016-04-28 19:56:49 +02:00
|
|
|
} else {
|
2017-09-17 02:20:01 +02:00
|
|
|
shutdownInputFuture.addListener(new ChannelFutureListener() {
|
|
|
|
@Override
|
|
|
|
public void operationComplete(ChannelFuture shutdownInputFuture) throws Exception {
|
|
|
|
shutdownDone(shutdownOutputFuture, shutdownInputFuture, promise);
|
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private static void shutdownDone(ChannelFuture shutdownOutputFuture,
|
|
|
|
ChannelFuture shutdownInputFuture,
|
|
|
|
ChannelPromise promise) {
|
|
|
|
Throwable shutdownOutputCause = shutdownOutputFuture.cause();
|
|
|
|
Throwable shutdownInputCause = shutdownInputFuture.cause();
|
|
|
|
if (shutdownOutputCause != null) {
|
|
|
|
if (shutdownInputCause != null) {
|
|
|
|
logger.debug("Exception suppressed because a previous exception occurred.",
|
|
|
|
shutdownInputCause);
|
2016-04-28 19:56:49 +02:00
|
|
|
}
|
2017-09-17 02:20:01 +02:00
|
|
|
promise.setFailure(shutdownOutputCause);
|
|
|
|
} else if (shutdownInputCause != null) {
|
|
|
|
promise.setFailure(shutdownInputCause);
|
|
|
|
} else {
|
|
|
|
promise.setSuccess();
|
2016-04-28 19:56:49 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-14 06:54:20 +02:00
|
|
|
@Override
protected void doClose() throws Exception {
    try {
        // Calling super.doClose() first so spliceTo(...) will fail on next call.
        super.doClose();
    } finally {
        // Always release splice resources, even if super.doClose() threw:
        // close both pipe fds and fail every still-queued splice task.
        safeClosePipe(pipeIn);
        safeClosePipe(pipeOut);
        clearSpliceQueue();
    }
}
|
|
|
|
|
|
|
|
private void clearSpliceQueue() {
|
2019-07-12 18:06:26 +02:00
|
|
|
Queue<SpliceInTask> sQueue = spliceQueue;
|
|
|
|
if (sQueue == null) {
|
2015-11-20 07:52:26 +01:00
|
|
|
return;
|
|
|
|
}
|
2019-05-17 22:23:02 +02:00
|
|
|
ClosedChannelException exception = null;
|
|
|
|
|
2015-04-14 06:54:20 +02:00
|
|
|
for (;;) {
|
2019-07-12 18:06:26 +02:00
|
|
|
SpliceInTask task = sQueue.poll();
|
2015-04-14 06:54:20 +02:00
|
|
|
if (task == null) {
|
|
|
|
break;
|
|
|
|
}
|
2019-05-17 22:23:02 +02:00
|
|
|
if (exception == null) {
|
|
|
|
exception = new ClosedChannelException();
|
|
|
|
}
|
|
|
|
task.promise.tryFailure(exception);
|
2015-04-14 06:54:20 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-22 14:42:11 +01:00
|
|
|
private static void safeClosePipe(FileDescriptor fd) {
|
2015-10-07 04:00:59 +02:00
|
|
|
if (fd != null) {
|
2015-04-14 06:54:20 +02:00
|
|
|
try {
|
2015-10-07 04:00:59 +02:00
|
|
|
fd.close();
|
2015-04-14 06:54:20 +02:00
|
|
|
} catch (IOException e) {
|
|
|
|
if (logger.isWarnEnabled()) {
|
|
|
|
logger.warn("Error while closing a pipe", e);
|
|
|
|
}
|
|
|
|
}
|
2015-04-19 21:07:19 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-15 14:38:14 +01:00
|
|
|
class EpollStreamUnsafe extends AbstractEpollUnsafe {
    // Overridden here just to be able to access this method from AbstractEpollStreamChannel
    @Override
    protected Executor prepareToClose() {
        return super.prepareToClose();
    }

    /**
     * Cleans up after a failed read: forwards any readable data that was already
     * received, releases an unreadable buffer, completes the read cycle, fires the
     * exception, and shuts down the input side on close/IO errors.
     */
    private void handleReadException(ChannelPipeline pipeline, ByteBuf byteBuf, Throwable cause, boolean close,
            EpollRecvByteAllocatorHandle allocHandle) {
        if (byteBuf != null) {
            if (byteBuf.isReadable()) {
                // Data was read before the failure — deliver it rather than drop it.
                readPending = false;
                pipeline.fireChannelRead(byteBuf);
            } else {
                // Nothing usable was read; just release the buffer.
                byteBuf.release();
            }
        }
        allocHandle.readComplete();
        pipeline.fireChannelReadComplete();
        pipeline.fireExceptionCaught(cause);
        // An IOException on a stream socket means the connection is unusable for reads.
        if (close || cause instanceof IOException) {
            shutdownInput(false);
        }
    }

    @Override
    EpollRecvByteAllocatorHandle newEpollHandle(RecvByteBufAllocator.ExtendedHandle handle) {
        // Streaming variant of the epoll recv-allocator handle for byte-stream channels.
        return new EpollRecvByteAllocatorStreamingHandle(handle);
    }

    /**
     * Read loop invoked when epoll signals the fd is readable. Pending splice tasks
     * are served before regular reads; the loop continues until the allocator handle
     * says to stop, EOF is reached, or the channel should break out early.
     */
    @Override
    void epollInReady() {
        final ChannelConfig config = config();
        if (shouldBreakEpollInReady(config)) {
            clearEpollIn0();
            return;
        }
        final EpollRecvByteAllocatorHandle allocHandle = recvBufAllocHandle();
        allocHandle.edgeTriggered(isFlagSet(Native.EPOLLET));

        final ChannelPipeline pipeline = pipeline();
        final ByteBufAllocator allocator = config.getAllocator();
        allocHandle.reset(config);
        epollInBefore();

        ByteBuf byteBuf = null;
        boolean close = false;
        try {
            // Cache the splice queue reference locally; it is created lazily elsewhere.
            Queue<SpliceInTask> sQueue = null;
            do {
                if (sQueue != null || (sQueue = spliceQueue) != null) {
                    SpliceInTask spliceTask = sQueue.peek();
                    if (spliceTask != null) {
                        if (spliceTask.spliceIn(allocHandle)) {
                            // We need to check if it is still active as if not we removed all SpliceTasks in
                            // doClose(...)
                            if (isActive()) {
                                sQueue.remove();
                            }
                            continue;
                        } else {
                            // Splice task not finished yet — do not fall through to a regular read.
                            break;
                        }
                    }
                }

                // we use a direct buffer here as the native implementations only be able
                // to handle direct buffers.
                byteBuf = allocHandle.allocate(allocator);
                allocHandle.lastBytesRead(doReadBytes(byteBuf));
                if (allocHandle.lastBytesRead() <= 0) {
                    // nothing was read, release the buffer.
                    byteBuf.release();
                    byteBuf = null;
                    // A negative read count signals EOF on a stream socket.
                    close = allocHandle.lastBytesRead() < 0;
                    if (close) {
                        // There is nothing left to read as we received an EOF.
                        readPending = false;
                    }
                    break;
                }
                allocHandle.incMessagesRead(1);
                readPending = false;
                pipeline.fireChannelRead(byteBuf);
                byteBuf = null;

                if (shouldBreakEpollInReady(config)) {
                    // We need to do this for two reasons:
                    //
                    // - If the input was shutdown in between (which may be the case when the user did it in the
                    //   fireChannelRead(...) method we should not try to read again to not produce any
                    //   miss-leading exceptions.
                    //
                    // - If the user closes the channel we need to ensure we not try to read from it again as
                    //   the filedescriptor may be re-used already by the OS if the system is handling a lot of
                    //   concurrent connections and so needs a lot of filedescriptors. If not do this we risk
                    //   reading data from a filedescriptor that belongs to another socket then the socket that
                    //   was "wrapped" by this Channel implementation.
                    break;
                }
            } while (allocHandle.continueReading());

            allocHandle.readComplete();
            pipeline.fireChannelReadComplete();

            if (close) {
                // EOF received — half-close the read side.
                shutdownInput(false);
            }
        } catch (Throwable t) {
            handleReadException(pipeline, byteBuf, t, close, allocHandle);
        } finally {
            epollInFinally(config);
        }
    }
}
|
2015-04-14 06:54:20 +02:00
|
|
|
|
2015-11-20 07:52:26 +01:00
|
|
|
/**
 * Enqueues a splice task, lazily creating the queue on first use via
 * double-checked locking.
 */
private void addToSpliceQueue(final SpliceInTask task) {
    // Racy first read outside the lock; re-checked under synchronization before
    // creating the queue. NOTE(review): this pattern is only safe if spliceQueue
    // is declared volatile — confirm at the field's declaration site.
    Queue<SpliceInTask> sQueue = spliceQueue;
    if (sQueue == null) {
        synchronized (this) {
            sQueue = spliceQueue;
            if (sQueue == null) {
                // Multi-producer / single-consumer: tasks can be added from any thread,
                // but are drained on the event loop.
                spliceQueue = sQueue = PlatformDependent.newMpscQueue();
            }
        }
    }
    sQueue.add(task);
}
|
|
|
|
|
2016-03-27 14:25:39 +02:00
|
|
|
protected abstract class SpliceInTask {
|
2015-04-14 06:54:20 +02:00
|
|
|
final ChannelPromise promise;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
protected SpliceInTask(int len, ChannelPromise promise) {
|
|
|
|
this.promise = promise;
|
|
|
|
this.len = len;
|
|
|
|
}
|
|
|
|
|
2016-03-22 14:42:11 +01:00
|
|
|
abstract boolean spliceIn(RecvByteBufAllocator.Handle handle);
|
2015-04-14 06:54:20 +02:00
|
|
|
|
2015-10-07 04:00:59 +02:00
|
|
|
protected final int spliceIn(FileDescriptor pipeOut, RecvByteBufAllocator.Handle handle) throws IOException {
|
2015-04-14 06:54:20 +02:00
|
|
|
// calculate the maximum amount of data we are allowed to splice
|
|
|
|
int length = Math.min(handle.guess(), len);
|
|
|
|
int splicedIn = 0;
|
|
|
|
for (;;) {
|
|
|
|
// Splicing until there is nothing left to splice.
|
2017-01-19 17:31:34 +01:00
|
|
|
int localSplicedIn = Native.splice(socket.intValue(), -1, pipeOut.intValue(), -1, length);
|
2015-04-14 06:54:20 +02:00
|
|
|
if (localSplicedIn == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
splicedIn += localSplicedIn;
|
|
|
|
length -= localSplicedIn;
|
|
|
|
}
|
|
|
|
|
|
|
|
return splicedIn;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Let it directly implement channelFutureListener as well to reduce object creation.
/**
 * Splice task that moves data from this channel into another
 * {@link AbstractEpollStreamChannel} through a pipe owned by the target channel.
 */
private final class SpliceInChannelTask extends SpliceInTask implements ChannelFutureListener {
    private final AbstractEpollStreamChannel ch;

    SpliceInChannelTask(AbstractEpollStreamChannel ch, int len, ChannelPromise promise) {
        super(len, promise);
        this.ch = ch;
    }

    // Listener for intermediate outbound-splice writes: propagate a failure to the
    // overall splice promise; success just means "more splicing to do".
    @Override
    public void operationComplete(ChannelFuture future) throws Exception {
        if (!future.isSuccess()) {
            promise.setFailure(future.cause());
        }
    }

    @Override
    public boolean spliceIn(RecvByteBufAllocator.Handle handle) {
        assert ch.eventLoop().inEventLoop();
        if (len == 0) {
            // Nothing (left) to splice — this task is complete.
            promise.setSuccess();
            return true;
        }
        try {
            // We create the pipe on the target channel as this will allow us to just handle pending writes
            // later in a correct fashion without get into any ordering issues when spliceTo(...) is called
            // on multiple Channels pointing to one target Channel.
            FileDescriptor pipeOut = ch.pipeOut;
            if (pipeOut == null) {
                // Create a new pipe as non was created before.
                FileDescriptor[] pipe = pipe();
                ch.pipeIn = pipe[0];
                pipeOut = ch.pipeOut = pipe[1];
            }

            int splicedIn = spliceIn(pipeOut, handle);
            if (splicedIn > 0) {
                // Integer.MAX_VALUE is a special value which will result in splice forever.
                if (len != Integer.MAX_VALUE) {
                    len -= splicedIn;
                }

                // Depending on if we are done with splicing inbound data we set the right promise for the
                // outbound splicing.
                final ChannelPromise splicePromise;
                if (len == 0) {
                    splicePromise = promise;
                } else {
                    splicePromise = ch.newPromise().addListener(this);
                }

                boolean autoRead = config().isAutoRead();

                // Just call unsafe().write(...) and flush() as we not want to traverse the whole pipeline for this
                // case.
                ch.unsafe().write(new SpliceOutTask(ch, splicedIn, autoRead), splicePromise);
                ch.unsafe().flush();
                if (autoRead && !splicePromise.isDone()) {
                    // Write was not done which means the target channel was not writable. In this case we need to
                    // disable reading until we are done with splicing to the target channel because:
                    //
                    // - The user may want to to trigger another splice operation once the splicing was complete.
                    config().setAutoRead(false);
                }
            }

            return len == 0;
        } catch (Throwable cause) {
            promise.setFailure(cause);
            return true;
        }
    }
}
|
|
|
|
|
|
|
|
/**
 * Outbound half of a channel-to-channel splice: drains bytes from the target
 * channel's pipe into its socket. Written through the target channel's unsafe
 * so it is flushed like a normal pending write.
 */
private final class SpliceOutTask {
    private final AbstractEpollStreamChannel ch;
    // Whether autoRead was enabled on the source when splicing started; if so it
    // must be re-enabled once this task finishes (or fails).
    private final boolean autoRead;
    // Bytes remaining to move out of the pipe.
    private int len;

    SpliceOutTask(AbstractEpollStreamChannel ch, int len, boolean autoRead) {
        this.ch = ch;
        this.len = len;
        this.autoRead = autoRead;
    }

    /** Returns {@code true} once all {@code len} bytes have been spliced out. */
    public boolean spliceOut() throws Exception {
        assert ch.eventLoop().inEventLoop();
        try {
            int splicedOut = Native.splice(ch.pipeIn.intValue(), -1, ch.socket.intValue(), -1, len);
            len -= splicedOut;
            if (len == 0) {
                if (autoRead) {
                    // AutoRead was used and we spliced everything so start reading again
                    config().setAutoRead(true);
                }
                return true;
            }
            return false;
        } catch (IOException e) {
            // Restore autoRead on failure too, so the source channel does not stay stalled.
            if (autoRead) {
                // AutoRead was used and we spliced everything so start reading again
                config().setAutoRead(true);
            }
            throw e;
        }
    }
}
|
|
|
|
|
|
|
|
private final class SpliceFdTask extends SpliceInTask {
|
|
|
|
private final FileDescriptor fd;
|
|
|
|
private final ChannelPromise promise;
|
2019-07-16 13:22:30 +02:00
|
|
|
private int offset;
|
2015-04-14 06:54:20 +02:00
|
|
|
|
|
|
|
SpliceFdTask(FileDescriptor fd, int offset, int len, ChannelPromise promise) {
|
|
|
|
super(len, promise);
|
|
|
|
this.fd = fd;
|
|
|
|
this.promise = promise;
|
|
|
|
this.offset = offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2016-03-22 14:42:11 +01:00
|
|
|
public boolean spliceIn(RecvByteBufAllocator.Handle handle) {
|
2015-04-14 06:54:20 +02:00
|
|
|
assert eventLoop().inEventLoop();
|
|
|
|
if (len == 0) {
|
|
|
|
promise.setSuccess();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
try {
|
2015-10-07 04:00:59 +02:00
|
|
|
FileDescriptor[] pipe = pipe();
|
|
|
|
FileDescriptor pipeIn = pipe[0];
|
|
|
|
FileDescriptor pipeOut = pipe[1];
|
|
|
|
try {
|
|
|
|
int splicedIn = spliceIn(pipeOut, handle);
|
|
|
|
if (splicedIn > 0) {
|
|
|
|
// Integer.MAX_VALUE is a special value which will result in splice forever.
|
|
|
|
if (len != Integer.MAX_VALUE) {
|
|
|
|
len -= splicedIn;
|
|
|
|
}
|
|
|
|
do {
|
|
|
|
int splicedOut = Native.splice(pipeIn.intValue(), -1, fd.intValue(), offset, splicedIn);
|
2019-07-16 13:22:30 +02:00
|
|
|
offset += splicedOut;
|
2015-10-07 04:00:59 +02:00
|
|
|
splicedIn -= splicedOut;
|
|
|
|
} while (splicedIn > 0);
|
|
|
|
if (len == 0) {
|
|
|
|
promise.setSuccess();
|
|
|
|
return true;
|
|
|
|
}
|
2015-04-14 06:54:20 +02:00
|
|
|
}
|
2015-10-07 04:00:59 +02:00
|
|
|
return false;
|
|
|
|
} finally {
|
|
|
|
safeClosePipe(pipeIn);
|
|
|
|
safeClosePipe(pipeOut);
|
2015-04-14 06:54:20 +02:00
|
|
|
}
|
|
|
|
} catch (Throwable cause) {
|
|
|
|
promise.setFailure(cause);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-09-11 09:27:58 +02:00
|
|
|
|
2017-01-19 17:31:34 +01:00
|
|
|
/**
 * {@link SocketWritableByteChannel} view over this channel's socket, used for
 * region/byte-channel writes; allocations are delegated to the channel's allocator.
 */
private final class EpollSocketWritableByteChannel extends SocketWritableByteChannel {
    EpollSocketWritableByteChannel() {
        super(socket);
    }

    @Override
    protected ByteBufAllocator alloc() {
        // Use the enclosing channel's allocator rather than a default one.
        return AbstractEpollStreamChannel.this.alloc();
    }
}
|
2015-01-14 16:38:46 +01:00
|
|
|
}
|