Use shaded dependency on JCTools instead of copy and paste
Motivation:

JCTools supports both unsafe and non-unsafe versions of its queues and runs on JDK 6, which allows us to shade the library into netty-common and keep it "zero dependency".

Modifications:

- Remove the copied-and-pasted JCTools code and shade the library instead (dependencies that are shaded are removed from the <dependencies> section of the generated POM).
- Remove all usage of OneTimeTask and delete the class altogether.

Result:

Less code to maintain, easier JCTools updates, and less GC pressure, since the queue implementation no longer creates as much garbage.
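As a point of reference, a minimal sketch (not part of the commit) of how internal callers obtain the MPSC queue; the factory method is unchanged, only its backing implementation moves from the forked classes under io.netty.util.internal to the shaded JCTools classes:

import io.netty.util.internal.PlatformDependent;

import java.util.Queue;

public final class MpscQueueExample {
    public static void main(String[] args) {
        // Same call site shape as before the change (see HashedWheelTimer below);
        // after this commit the returned queue is backed by shaded JCTools code.
        Queue<Runnable> queue = PlatformDependent.newMpscQueue();

        // Any number of producer threads may offer ...
        queue.offer(new Runnable() {
            @Override
            public void run() {
                System.out.println("ran");
            }
        });

        // ... but only a single consumer thread may poll.
        Runnable task = queue.poll();
        if (task != null) {
            task.run();
        }
    }
}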
parent 398efb1f71
commit c3abb9146e

.gitignore (vendored)
@@ -30,3 +30,4 @@
 # JVM crash logs
 hs_err_pid*.log
 
+dependency-reduced-pom.xml
@@ -34,7 +34,6 @@ import io.netty.handler.codec.http.HttpResponse;
 import io.netty.handler.codec.http.HttpResponseDecoder;
 import io.netty.util.ReferenceCountUtil;
 import io.netty.util.internal.EmptyArrays;
-import io.netty.util.internal.OneTimeTask;
 import io.netty.util.internal.StringUtil;
 
 import java.net.URI;

@@ -273,7 +272,7 @@ public abstract class WebSocketClientHandshaker {
             // Delay the removal of the decoder so the user can setup the pipeline if needed to handle
             // WebSocketFrame messages.
             // See https://github.com/netty/netty/issues/4533
-            channel.eventLoop().execute(new OneTimeTask() {
+            channel.eventLoop().execute(new Runnable() {
                 @Override
                 public void run() {
                     p.remove(codec);

@@ -290,7 +289,7 @@ public abstract class WebSocketClientHandshaker {
             // Delay the removal of the decoder so the user can setup the pipeline if needed to handle
             // WebSocketFrame messages.
             // See https://github.com/netty/netty/issues/4533
-            channel.eventLoop().execute(new OneTimeTask() {
+            channel.eventLoop().execute(new Runnable() {
                 @Override
                 public void run() {
                     p.remove(context.handler());
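For background (not part of the diff): OneTimeTask was a Runnable that also extended MpscLinkedQueueNode so it could be linked straight into Netty's forked MPSC queue without allocating a separate node. The shaded JCTools queues manage their own nodes, so a plain anonymous Runnable, as used in the hunks above and below, is sufficient. Roughly, the removed class looked like this (reconstructed from memory, details may differ):

public abstract class OneTimeTask extends MpscLinkedQueueNode<Runnable> implements Runnable {

    @Override
    public Runnable value() {
        return this;
    }
}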
@@ -29,7 +29,6 @@ import io.netty.channel.RecvByteBufAllocator;
 import io.netty.util.ReferenceCountUtil;
 import io.netty.util.concurrent.EventExecutor;
 import io.netty.util.internal.EmptyArrays;
-import io.netty.util.internal.OneTimeTask;
 
 import java.net.SocketAddress;
 import java.nio.channels.ClosedChannelException;

@@ -201,7 +200,7 @@ abstract class AbstractHttp2StreamChannel extends AbstractChannel {
             in.remove();
         }
 
-        preferredExecutor.execute(new OneTimeTask() {
+        preferredExecutor.execute(new Runnable() {
             @Override
             public void run() {
                 for (Object msg : msgsCopy) {

@@ -248,7 +247,7 @@ abstract class AbstractHttp2StreamChannel extends AbstractChannel {
         if (eventLoop().inEventLoop()) {
             fireChildRead0(msg);
         } else {
-            eventLoop().execute(new OneTimeTask() {
+            eventLoop().execute(new Runnable() {
                 @Override
                 public void run() {
                     fireChildRead0(msg);
@@ -25,7 +25,6 @@ import io.netty.handler.codec.ByteToMessageDecoder;
 import io.netty.handler.codec.http2.Http2Exception.CompositeStreamException;
 import io.netty.handler.codec.http2.Http2Exception.StreamException;
 import io.netty.util.concurrent.ScheduledFuture;
-import io.netty.util.internal.OneTimeTask;
 import io.netty.util.internal.UnstableApi;
 import io.netty.util.internal.logging.InternalLogger;
 import io.netty.util.internal.logging.InternalLoggerFactory;

@@ -766,7 +765,7 @@ public class Http2ConnectionHandler extends ByteToMessageDecoder implements Http
                          long timeout, TimeUnit unit) {
             this.ctx = ctx;
             this.promise = promise;
-            timeoutTask = ctx.executor().schedule(new OneTimeTask() {
+            timeoutTask = ctx.executor().schedule(new Runnable() {
                 @Override
                 public void run() {
                     ctx.close(promise);
@@ -31,7 +31,6 @@ import io.netty.handler.codec.UnsupportedMessageTypeException;
 import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeEvent;
 import io.netty.util.ReferenceCountUtil;
 import io.netty.util.concurrent.EventExecutor;
-import io.netty.util.internal.OneTimeTask;
 import io.netty.util.internal.UnstableApi;
 
 import java.util.ArrayList;

@@ -212,7 +211,7 @@ public final class Http2MultiplexCodec extends ChannelDuplexHandler {
             writeFromStreamChannel0(msg, flush, promise);
         } else {
             try {
-                executor.execute(new OneTimeTask() {
+                executor.execute(new Runnable() {
                     @Override
                     public void run() {
                         writeFromStreamChannel0(msg, flush, promise);

@@ -344,7 +343,7 @@ public final class Http2MultiplexCodec extends ChannelDuplexHandler {
         if (eventLoop.inEventLoop()) {
             onStreamClosed0(streamInfo);
         } else {
-            eventLoop.execute(new OneTimeTask() {
+            eventLoop.execute(new Runnable() {
                 @Override
                 public void run() {
                     onStreamClosed0(streamInfo);

@@ -381,7 +380,7 @@ public final class Http2MultiplexCodec extends ChannelDuplexHandler {
         if (executor.inEventLoop()) {
             exceptionCaught(ctx, t);
         } else {
-            executor.execute(new OneTimeTask() {
+            executor.execute(new Runnable() {
                 @Override
                 public void run() {
                     exceptionCaught(ctx, t);

@@ -518,7 +517,7 @@ public final class Http2MultiplexCodec extends ChannelDuplexHandler {
         if (executor.inEventLoop()) {
             bytesConsumed0(bytes);
         } else {
-            executor.execute(new OneTimeTask() {
+            executor.execute(new Runnable() {
                 @Override
                 public void run() {
                     bytesConsumed0(bytes);
@@ -26,7 +26,6 @@ import io.netty.channel.ChannelPromise;
 import io.netty.channel.ChannelPromiseNotifier;
 import io.netty.util.concurrent.EventExecutor;
 import io.netty.util.internal.EmptyArrays;
-import io.netty.util.internal.OneTimeTask;
 
 import java.util.concurrent.TimeUnit;
 

@@ -253,7 +252,7 @@ public class JZlibEncoder extends ZlibEncoder {
             return finishEncode(ctx, promise);
         } else {
             final ChannelPromise p = ctx.newPromise();
-            executor.execute(new OneTimeTask() {
+            executor.execute(new Runnable() {
                 @Override
                 public void run() {
                     ChannelFuture f = finishEncode(ctx(), p);

@@ -352,7 +351,7 @@ public class JZlibEncoder extends ZlibEncoder {
 
         if (!f.isDone()) {
             // Ensure the channel is closed even if the write operation completes in time.
-            ctx.executor().schedule(new OneTimeTask() {
+            ctx.executor().schedule(new Runnable() {
                 @Override
                 public void run() {
                     ctx.close(promise);
@@ -22,7 +22,6 @@ import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.ChannelPromise;
 import io.netty.channel.ChannelPromiseNotifier;
 import io.netty.util.concurrent.EventExecutor;
-import io.netty.util.internal.OneTimeTask;
 
 import java.util.concurrent.TimeUnit;
 import java.util.zip.CRC32;

@@ -164,7 +163,7 @@ public class JdkZlibEncoder extends ZlibEncoder {
             return finishEncode(ctx, promise);
         } else {
             final ChannelPromise p = ctx.newPromise();
-            executor.execute(new OneTimeTask() {
+            executor.execute(new Runnable() {
                 @Override
                 public void run() {
                     ChannelFuture f = finishEncode(ctx(), p);

@@ -260,7 +259,7 @@ public class JdkZlibEncoder extends ZlibEncoder {
 
         if (!f.isDone()) {
             // Ensure the channel is closed even if the write operation completes in time.
-            ctx.executor().schedule(new OneTimeTask() {
+            ctx.executor().schedule(new Runnable() {
                 @Override
                 public void run() {
                     ctx.close(promise);
@@ -44,6 +44,12 @@
       <scope>compile</scope> <!-- override the 'test' scope defined at parent pom.xml -->
       <optional>true</optional>
     </dependency>
+    <dependency>
+      <groupId>org.jctools</groupId>
+      <artifactId>jctools-core</artifactId>
+      <!-- Mark as optional as otherwise the bundle plugin will add strict import statements and so fail in OSGI containers-->
+      <optional>true</optional>
+    </dependency>
 
     <!-- Logging frameworks - completely optional -->
     <dependency>

@@ -75,6 +81,31 @@
 
   <build>
     <plugins>
+      <plugin>
+        <artifactId>maven-shade-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <artifactSet>
+                <includes>
+                  <include>org.jctools</include>
+                </includes>
+              </artifactSet>
+              <relocations>
+                <relocation>
+                  <pattern>org.jctools.</pattern>
+                  <shadedPattern>io.netty.util.internal.shaded.org.jctools.</shadedPattern>
+                </relocation>
+              </relocations>
+              <minimizeJar>true</minimizeJar>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
      <!-- Add generated collection sources. -->
      <plugin>
        <groupId>org.codehaus.mojo</groupId>

@@ -151,6 +182,7 @@
             </goals>
             <configuration>
               <instructions>
+                <!-- Exclude org.jctools.* as we shade it -->
                 <!-- NativeLibraryLoader can be used to manually load native libraries from other bundles that this bundle does not depend on,
                      hence use DynamicImport-Package instruction to ensure the loading is successful -->
                 <DynamicImport-Package>*</DynamicImport-Package>
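A brief note on what the relocation above does (illustrative, not part of the commit): the shade plugin copies the jctools-core classes into the netty-common jar and rewrites their package from org.jctools to io.netty.util.internal.shaded.org.jctools; minimizeJar drops classes Netty does not reference, and the dependency-reduced POM omits jctools-core from <dependencies>. A rough runtime check, assuming the shaded jar is the only artifact on the classpath (the relocated class name is inferred from the shadedPattern above):

public final class ShadingCheck {
    public static void main(String[] args) {
        // Relocated copy bundled inside netty-common (name assumed from the shadedPattern).
        report("io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueue");
        // The original coordinates are no longer required at runtime.
        report("org.jctools.queues.MpscArrayQueue");
    }

    private static void report(String name) {
        try {
            Class.forName(name);
            System.out.println(name + " -> present");
        } catch (ClassNotFoundException e) {
            System.out.println(name + " -> absent");
        }
    }
}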
@@ -15,7 +15,6 @@
  */
 package io.netty.util;
 
-import io.netty.util.internal.MpscLinkedQueueNode;
 import io.netty.util.internal.PlatformDependent;
 import io.netty.util.internal.StringUtil;
 import io.netty.util.internal.logging.InternalLogger;

@@ -106,7 +105,7 @@ public class HashedWheelTimer implements Timer {
     private final int mask;
     private final CountDownLatch startTimeInitialized = new CountDownLatch(1);
     private final Queue<HashedWheelTimeout> timeouts = PlatformDependent.newMpscQueue();
-    private final Queue<Runnable> cancelledTimeouts = PlatformDependent.newMpscQueue();
+    private final Queue<HashedWheelTimeout> cancelledTimeouts = PlatformDependent.newMpscQueue();
 
     private volatile long startTime;
 

@@ -412,13 +411,13 @@
 
     private void processCancelledTasks() {
         for (;;) {
-            Runnable task = cancelledTimeouts.poll();
-            if (task == null) {
+            HashedWheelTimeout timeout = cancelledTimeouts.poll();
+            if (timeout == null) {
                 // all processed
                 break;
             }
             try {
-                task.run();
+                timeout.remove();
             } catch (Throwable t) {
                 if (logger.isWarnEnabled()) {
                     logger.warn("An exception was thrown while process a cancellation task", t);

@@ -472,8 +471,7 @@
         }
     }
 
-    private static final class HashedWheelTimeout extends MpscLinkedQueueNode<Timeout>
-            implements Timeout {
+    private static final class HashedWheelTimeout implements Timeout {
 
         private static final int ST_INIT = 0;
         private static final int ST_CANCELLED = 1;

@@ -530,25 +528,20 @@
             if (!compareAndSetState(ST_INIT, ST_CANCELLED)) {
                 return false;
             }
-            // If a task should be canceled we create a new Runnable for this to another queue which will
-            // be processed on each tick. So this means that we will have a GC latency of max. 1 tick duration
-            // which is good enough. This way we can make again use of our MpscLinkedQueue and so minimize the
-            // locking / overhead as much as possible.
-            //
-            // It is important that we not just add the HashedWheelTimeout itself again as it extends
-            // MpscLinkedQueueNode and so may still be used as tombstone.
-            timer.cancelledTimeouts.add(new Runnable() {
-                @Override
-                public void run() {
-                    HashedWheelBucket bucket = HashedWheelTimeout.this.bucket;
-                    if (bucket != null) {
-                        bucket.remove(HashedWheelTimeout.this);
-                    }
-                }
-            });
+            // If a task should be canceled we put this to another queue which will be processed on each tick.
+            // So this means that we will have a GC latency of max. 1 tick duration which is good enough. This way
+            // we can make again use of our MpscLinkedQueue and so minimize the locking / overhead as much as possible.
+            timer.cancelledTimeouts.add(this);
             return true;
         }
 
+        void remove() {
+            HashedWheelBucket bucket = this.bucket;
+            if (bucket != null) {
+                bucket.remove(this);
+            }
+        }
+
         public boolean compareAndSetState(int expected, int state) {
             return STATE_UPDATER.compareAndSet(this, expected, state);
         }

@@ -567,11 +560,6 @@
             return state() == ST_EXPIRED;
         }
 
-        @Override
-        public HashedWheelTimeout value() {
-            return this;
-        }
-
         public void expire() {
             if (!compareAndSetState(ST_INIT, ST_EXPIRED)) {
                 return;
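To illustrate the behaviour the HashedWheelTimer hunks above change (a usage sketch, not part of the commit): cancelling a timeout now enqueues the HashedWheelTimeout itself on cancelledTimeouts instead of allocating a fresh Runnable, and the worker thread calls remove() on it during the next tick:

import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;

import java.util.concurrent.TimeUnit;

public final class TimerCancelExample {
    public static void main(String[] args) {
        HashedWheelTimer timer = new HashedWheelTimer();
        Timeout timeout = timer.newTimeout(new TimerTask() {
            @Override
            public void run(Timeout t) {
                System.out.println("fired");
            }
        }, 10, TimeUnit.SECONDS);

        // cancel() only flips the state and enqueues the timeout; the actual
        // bucket removal happens on the worker thread during the next tick.
        timeout.cancel();

        timer.stop();
    }
}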
@@ -17,7 +17,6 @@
 package io.netty.util;
 
 import io.netty.util.concurrent.DefaultThreadFactory;
-import io.netty.util.internal.MpscLinkedQueueNode;
 import io.netty.util.internal.PlatformDependent;
 import io.netty.util.internal.StringUtil;
 import io.netty.util.internal.SystemPropertyUtil;

@@ -213,7 +212,7 @@ public final class ThreadDeathWatcher {
         }
     }
 
-    private static final class Entry extends MpscLinkedQueueNode<Entry> {
+    private static final class Entry {
         final Thread thread;
         final Runnable task;
         final boolean isWatch;

@@ -224,11 +223,6 @@
             this.isWatch = isWatch;
         }
 
-        @Override
-        public Entry value() {
-            return this;
-        }
-
         @Override
         public int hashCode() {
             return thread.hashCode() ^ task.hashCode();
@@ -16,7 +16,6 @@
 package io.netty.util.concurrent;
 
 import io.netty.util.internal.ObjectUtil;
-import io.netty.util.internal.OneTimeTask;
 
 import java.util.PriorityQueue;
 import java.util.Queue;

@@ -195,7 +194,7 @@ public abstract class AbstractScheduledEventExecutor extends AbstractEventExecut
         if (inEventLoop()) {
             scheduledTaskQueue().add(task);
         } else {
-            execute(new OneTimeTask() {
+            execute(new Runnable() {
                 @Override
                 public void run() {
                     scheduledTaskQueue().add(task);

@@ -210,7 +209,7 @@
         if (inEventLoop()) {
             scheduledTaskQueue().remove(task);
         } else {
-            execute(new OneTimeTask() {
+            execute(new Runnable() {
                 @Override
                 public void run() {
                     removeScheduled(task);
@@ -18,7 +18,6 @@ package io.netty.util.concurrent;
 import io.netty.util.Signal;
 import io.netty.util.internal.EmptyArrays;
 import io.netty.util.internal.InternalThreadLocalMap;
-import io.netty.util.internal.OneTimeTask;
 import io.netty.util.internal.PlatformDependent;
 import io.netty.util.internal.StringUtil;
 import io.netty.util.internal.logging.InternalLogger;

@@ -36,12 +35,14 @@ public class DefaultPromise<V> extends AbstractFuture<V> implements Promise<V> {
     private static final InternalLogger rejectedExecutionLogger =
             InternalLoggerFactory.getInstance(DefaultPromise.class.getName() + ".rejectedExecution");
     private static final int MAX_LISTENER_STACK_DEPTH = 8;
+    @SuppressWarnings("rawtypes")
     private static final AtomicReferenceFieldUpdater<DefaultPromise, Object> RESULT_UPDATER;
     private static final Signal SUCCESS = Signal.valueOf(DefaultPromise.class, "SUCCESS");
     private static final Signal UNCANCELLABLE = Signal.valueOf(DefaultPromise.class, "UNCANCELLABLE");
     private static final CauseHolder CANCELLATION_CAUSE_HOLDER = new CauseHolder(new CancellationException());
 
     static {
+        @SuppressWarnings("rawtypes")
         AtomicReferenceFieldUpdater<DefaultPromise, Object> updater =
                 PlatformDependent.newAtomicReferenceFieldUpdater(DefaultPromise.class, "result");
         RESULT_UPDATER = updater == null ? AtomicReferenceFieldUpdater.newUpdater(DefaultPromise.class,

@@ -302,6 +303,7 @@
         }
     }
 
+    @SuppressWarnings("unchecked")
     @Override
     public V getNow() {
         Object result = this.result;

@@ -436,7 +438,7 @@
             }
         }
 
-        safeExecute(executor, new OneTimeTask() {
+        safeExecute(executor, new Runnable() {
             @Override
             public void run() {
                 notifyListenersNow();

@@ -466,7 +468,7 @@
             }
         }
 
-        safeExecute(executor, new OneTimeTask() {
+        safeExecute(executor, new Runnable() {
             @Override
             public void run() {
                 notifyListener0(future, listener);

@@ -664,7 +666,7 @@
         if (listeners instanceof GenericProgressiveFutureListener[]) {
             final GenericProgressiveFutureListener<?>[] array =
                     (GenericProgressiveFutureListener<?>[]) listeners;
-            safeExecute(executor, new OneTimeTask() {
+            safeExecute(executor, new Runnable() {
                 @Override
                 public void run() {
                     notifyProgressiveListeners0(self, array, progress, total);

@@ -673,7 +675,7 @@
         } else {
             final GenericProgressiveFutureListener<ProgressiveFuture<V>> l =
                     (GenericProgressiveFutureListener<ProgressiveFuture<V>>) listeners;
-            safeExecute(executor, new OneTimeTask() {
+            safeExecute(executor, new Runnable() {
                 @Override
                 public void run() {
                     notifyProgressiveListener0(self, l, progress, total);
@@ -230,7 +230,7 @@ public abstract class SingleThreadEventExecutor extends AbstractScheduledEventEx
         }
     }
 
-    private void fetchFromScheduledTaskQueue() {
+    private boolean fetchFromScheduledTaskQueue() {
         if (hasScheduledTasks()) {
             long nanoTime = AbstractScheduledEventExecutor.nanoTime();
             for (;;) {

@@ -238,9 +238,14 @@
                 if (scheduledTask == null) {
                     break;
                 }
-                taskQueue.add(scheduledTask);
+                if (!taskQueue.offer(scheduledTask)) {
+                    // No space left in the task queue add it back to the scheduledTaskQueue so we pick it up again.
+                    scheduledTaskQueue().add((ScheduledFutureTask<?>) scheduledTask);
+                    return false;
+                }
             }
         }
+        return true;
     }
 
     /**

@@ -280,7 +285,12 @@
         if (isShutdown()) {
             reject();
         }
-        taskQueue.add(task);
+        try {
+            taskQueue.add(task);
+        } catch (IllegalStateException e) {
+            // Just use add and catch the exception as this should happen only very rarely.
+            throw new RejectedExecutionException("Internal task queue is full", e);
+        }
     }
 
     /**

@@ -299,25 +309,30 @@
      * @return {@code true} if and only if at least one task was run
      */
     protected boolean runAllTasks() {
-        fetchFromScheduledTaskQueue();
-        Runnable task = pollTask();
-        if (task == null) {
-            return false;
-        }
-
-        for (;;) {
-            try {
-                task.run();
-            } catch (Throwable t) {
-                logger.warn("A task raised an exception.", t);
-            }
-
-            task = pollTask();
+        boolean fetchedAll;
+        do {
+            fetchedAll = fetchFromScheduledTaskQueue();
+            Runnable task = pollTask();
             if (task == null) {
-                lastExecutionTime = ScheduledFutureTask.nanoTime();
-                return true;
+                return false;
             }
-        }
+
+            for (;;) {
+                try {
+                    task.run();
+                } catch (Throwable t) {
+                    logger.warn("A task raised an exception.", t);
+                }
+
+                task = pollTask();
+                if (task == null) {
+                    break;
+                }
+            }
+        } while (!fetchedAll); // keep on processing until we fetched all scheduled tasks.
+
+        lastExecutionTime = ScheduledFutureTask.nanoTime();
+        return true;
     }
 
     /**
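The SingleThreadEventExecutor changes above exist because the task queue may now be a bounded JCTools queue: offer() simply returns false when it is full instead of growing without limit. fetchFromScheduledTaskQueue therefore reports whether everything was transferred so runAllTasks can retry, and addTask translates a full queue into a RejectedExecutionException. A small illustration using the unshaded jctools-core artifact directly (inside netty-common the same class lives under the shaded package):

import org.jctools.queues.MpscArrayQueue;

public final class BoundedOfferExample {
    public static void main(String[] args) {
        // Capacity-bounded multi-producer/single-consumer queue.
        MpscArrayQueue<String> queue = new MpscArrayQueue<String>(2);

        System.out.println(queue.offer("a")); // true
        System.out.println(queue.offer("b")); // true
        System.out.println(queue.offer("c")); // false: queue is full, offer() does not throw
    }
}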
@ -1,110 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
import java.util.AbstractQueue;
|
||||
import java.util.Iterator;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
/**
|
||||
* Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
|
||||
*/
|
||||
abstract class BaseLinkedAtomicQueue<E> extends AbstractQueue<E> {
|
||||
private final AtomicReference<LinkedQueueAtomicNode<E>> producerNode;
|
||||
private final AtomicReference<LinkedQueueAtomicNode<E>> consumerNode;
|
||||
public BaseLinkedAtomicQueue() {
|
||||
producerNode = new AtomicReference<LinkedQueueAtomicNode<E>>();
|
||||
consumerNode = new AtomicReference<LinkedQueueAtomicNode<E>>();
|
||||
}
|
||||
protected final LinkedQueueAtomicNode<E> lvProducerNode() {
|
||||
return producerNode.get();
|
||||
}
|
||||
protected final LinkedQueueAtomicNode<E> lpProducerNode() {
|
||||
return producerNode.get();
|
||||
}
|
||||
protected final void spProducerNode(LinkedQueueAtomicNode<E> node) {
|
||||
producerNode.lazySet(node);
|
||||
}
|
||||
protected final LinkedQueueAtomicNode<E> xchgProducerNode(LinkedQueueAtomicNode<E> node) {
|
||||
return producerNode.getAndSet(node);
|
||||
}
|
||||
protected final LinkedQueueAtomicNode<E> lvConsumerNode() {
|
||||
return consumerNode.get();
|
||||
}
|
||||
|
||||
protected final LinkedQueueAtomicNode<E> lpConsumerNode() {
|
||||
return consumerNode.get();
|
||||
}
|
||||
protected final void spConsumerNode(LinkedQueueAtomicNode<E> node) {
|
||||
consumerNode.lazySet(node);
|
||||
}
|
||||
@Override
|
||||
public final Iterator<E> iterator() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc} <br>
|
||||
* <p>
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
* This is an O(n) operation as we run through all the nodes and count them.<br>
|
||||
*
|
||||
* @see java.util.Queue#size()
|
||||
*/
|
||||
@Override
|
||||
public final int size() {
|
||||
LinkedQueueAtomicNode<E> chaserNode = lvConsumerNode();
|
||||
final LinkedQueueAtomicNode<E> producerNode = lvProducerNode();
|
||||
int size = 0;
|
||||
// must chase the nodes all the way to the producer node, but there's no need to chase a moving target.
|
||||
while (chaserNode != producerNode && size < Integer.MAX_VALUE) {
|
||||
LinkedQueueAtomicNode<E> next;
|
||||
while ((next = chaserNode.lvNext()) == null) {
|
||||
continue;
|
||||
}
|
||||
chaserNode = next;
|
||||
size++;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc} <br>
|
||||
* <p>
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
* Queue is empty when producerNode is the same as consumerNode. An alternative implementation would be to observe
|
||||
* the producerNode.value is null, which also means an empty queue because only the consumerNode.value is allowed to
|
||||
* be null.
|
||||
*
|
||||
* @see MessagePassingQueue#isEmpty()
|
||||
*/
|
||||
@Override
|
||||
public final boolean isEmpty() {
|
||||
return lvConsumerNode() == lvProducerNode();
|
||||
}
|
||||
}
|
@ -1,158 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
import java.util.AbstractQueue;
|
||||
import java.util.Iterator;
|
||||
|
||||
/**
|
||||
* Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
|
||||
*
|
||||
* A base data structure for concurrent linked queues.
|
||||
*
|
||||
* @param <E>
|
||||
*/
|
||||
abstract class BaseLinkedQueue<E> extends BaseLinkedQueueConsumerNodeRef<E> {
|
||||
long p01, p02, p03, p04, p05, p06, p07;
|
||||
long p10, p11, p12, p13, p14, p15, p16, p17;
|
||||
|
||||
@Override
|
||||
public final Iterator<E> iterator() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc} <br>
|
||||
* <p>
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
* This is an O(n) operation as we run through all the nodes and count them.<br>
|
||||
*
|
||||
* @see java.util.Queue#size()
|
||||
*/
|
||||
@Override
|
||||
public final int size() {
|
||||
// Read consumer first, this is important because if the producer is node is 'older' than the consumer the
|
||||
// consumer may overtake it (consume past it). This will lead to an infinite loop below.
|
||||
LinkedQueueNode<E> chaserNode = lvConsumerNode();
|
||||
final LinkedQueueNode<E> producerNode = lvProducerNode();
|
||||
int size = 0;
|
||||
// must chase the nodes all the way to the producer node, but there's no need to chase a moving target.
|
||||
while (chaserNode != producerNode && size < Integer.MAX_VALUE) {
|
||||
LinkedQueueNode<E> next;
|
||||
while ((next = chaserNode.lvNext()) == null) {
|
||||
continue;
|
||||
}
|
||||
chaserNode = next;
|
||||
size++;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc} <br>
|
||||
* <p>
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
* Queue is empty when producerNode is the same as consumerNode. An alternative implementation would be to observe
|
||||
* the producerNode.value is null, which also means an empty queue because only the consumerNode.value is allowed to
|
||||
* be null.
|
||||
*
|
||||
* @see MessagePassingQueue#isEmpty()
|
||||
*/
|
||||
@Override
|
||||
public final boolean isEmpty() {
|
||||
return lvConsumerNode() == lvProducerNode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int capacity() {
|
||||
return UNBOUNDED_CAPACITY;
|
||||
}
|
||||
}
|
||||
|
||||
abstract class BaseLinkedQueuePad0<E> extends AbstractQueue<E> implements MessagePassingQueue<E> {
|
||||
long p00, p01, p02, p03, p04, p05, p06, p07;
|
||||
long p10, p11, p12, p13, p14, p15, p16;
|
||||
}
|
||||
|
||||
abstract class BaseLinkedQueueProducerNodeRef<E> extends BaseLinkedQueuePad0<E> {
|
||||
protected static final long P_NODE_OFFSET;
|
||||
|
||||
static {
|
||||
try {
|
||||
P_NODE_OFFSET = PlatformDependent0.objectFieldOffset(
|
||||
BaseLinkedQueueProducerNodeRef.class.getDeclaredField("producerNode"));
|
||||
} catch (NoSuchFieldException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
protected LinkedQueueNode<E> producerNode;
|
||||
protected final void spProducerNode(LinkedQueueNode<E> node) {
|
||||
producerNode = node;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
protected final LinkedQueueNode<E> lvProducerNode() {
|
||||
return (LinkedQueueNode<E>) PlatformDependent0.getObjectVolatile(this, P_NODE_OFFSET);
|
||||
}
|
||||
|
||||
protected final LinkedQueueNode<E> lpProducerNode() {
|
||||
return producerNode;
|
||||
}
|
||||
}
|
||||
|
||||
abstract class BaseLinkedQueuePad1<E> extends BaseLinkedQueueProducerNodeRef<E> {
|
||||
long p01, p02, p03, p04, p05, p06, p07;
|
||||
long p10, p11, p12, p13, p14, p15, p16, p17;
|
||||
}
|
||||
|
||||
abstract class BaseLinkedQueueConsumerNodeRef<E> extends BaseLinkedQueuePad1<E> {
|
||||
protected static final long C_NODE_OFFSET;
|
||||
|
||||
static {
|
||||
try {
|
||||
C_NODE_OFFSET = PlatformDependent0.objectFieldOffset(
|
||||
BaseLinkedQueueConsumerNodeRef.class.getDeclaredField("consumerNode"));
|
||||
} catch (NoSuchFieldException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
protected LinkedQueueNode<E> consumerNode;
|
||||
protected final void spConsumerNode(LinkedQueueNode<E> node) {
|
||||
consumerNode = node;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
protected final LinkedQueueNode<E> lvConsumerNode() {
|
||||
return (LinkedQueueNode<E>) PlatformDependent0.getObjectVolatile(this, C_NODE_OFFSET);
|
||||
}
|
||||
|
||||
protected final LinkedQueueNode<E> lpConsumerNode() {
|
||||
return consumerNode;
|
||||
}
|
||||
}
|
@ -1,207 +0,0 @@
|
||||
/*
|
||||
* Copyright 2015 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
|
||||
import java.util.AbstractQueue;
|
||||
import java.util.Iterator;
|
||||
|
||||
/**
|
||||
* Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
|
||||
*
|
||||
* A concurrent access enabling class used by circular array based queues this class exposes an offset computation
|
||||
* method along with differently memory fenced load/store methods into the underlying array. The class is pre-padded and
|
||||
* the array is padded on either side to help with False sharing prvention. It is expected theat subclasses handle post
|
||||
* padding.
|
||||
* <p>
|
||||
* Offset calculation is separate from access to enable the reuse of a give compute offset.
|
||||
* <p>
|
||||
* Load/Store methods using a <i>buffer</i> parameter are provided to allow the prevention of final field reload after a
|
||||
* LoadLoad barrier.
|
||||
* <p>
|
||||
*
|
||||
* @param <E>
|
||||
*/
|
||||
abstract class ConcurrentCircularArrayQueue<E> extends ConcurrentCircularArrayQueueL0Pad<E> {
|
||||
protected static final int REF_BUFFER_PAD;
|
||||
private static final long REF_ARRAY_BASE;
|
||||
private static final int REF_ELEMENT_SHIFT;
|
||||
static {
|
||||
final int scale = PlatformDependent0.UNSAFE.arrayIndexScale(Object[].class);
|
||||
if (4 == scale) {
|
||||
REF_ELEMENT_SHIFT = 2;
|
||||
} else if (8 == scale) {
|
||||
REF_ELEMENT_SHIFT = 3;
|
||||
} else {
|
||||
throw new IllegalStateException("Unknown pointer size");
|
||||
}
|
||||
// 2 cache lines pad
|
||||
// TODO: replace 64 with the value we can detect
|
||||
REF_BUFFER_PAD = (64 * 2) / scale;
|
||||
// Including the buffer pad in the array base offset
|
||||
REF_ARRAY_BASE = PlatformDependent0.UNSAFE.arrayBaseOffset(Object[].class) + (REF_BUFFER_PAD * scale);
|
||||
}
|
||||
protected final long mask;
|
||||
// @Stable :(
|
||||
protected final E[] buffer;
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public ConcurrentCircularArrayQueue(int capacity) {
|
||||
int actualCapacity = roundToPowerOfTwo(capacity);
|
||||
mask = actualCapacity - 1;
|
||||
// pad data on either end with some empty slots.
|
||||
buffer = (E[]) new Object[actualCapacity + REF_BUFFER_PAD * 2];
|
||||
}
|
||||
|
||||
private static int roundToPowerOfTwo(final int value) {
|
||||
return 1 << (32 - Integer.numberOfLeadingZeros(value - 1));
|
||||
}
|
||||
/**
|
||||
* @param index desirable element index
|
||||
* @return the offset in bytes within the array for a given index.
|
||||
*/
|
||||
protected final long calcElementOffset(long index) {
|
||||
return calcElementOffset(index, mask);
|
||||
}
|
||||
/**
|
||||
* @param index desirable element index
|
||||
* @param mask
|
||||
* @return the offset in bytes within the array for a given index.
|
||||
*/
|
||||
protected static final long calcElementOffset(long index, long mask) {
|
||||
return REF_ARRAY_BASE + ((index & mask) << REF_ELEMENT_SHIFT);
|
||||
}
|
||||
/**
|
||||
* A plain store (no ordering/fences) of an element to a given offset
|
||||
*
|
||||
* @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
|
||||
* @param e a kitty
|
||||
*/
|
||||
protected final void spElement(long offset, E e) {
|
||||
spElement(buffer, offset, e);
|
||||
}
|
||||
|
||||
/**
|
||||
* A plain store (no ordering/fences) of an element to a given offset
|
||||
*
|
||||
* @param buffer this.buffer
|
||||
* @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
|
||||
* @param e an orderly kitty
|
||||
*/
|
||||
protected static final <E> void spElement(E[] buffer, long offset, E e) {
|
||||
PlatformDependent0.UNSAFE.putObject(buffer, offset, e);
|
||||
}
|
||||
|
||||
/**
|
||||
* An ordered store(store + StoreStore barrier) of an element to a given offset
|
||||
*
|
||||
* @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
|
||||
* @param e an orderly kitty
|
||||
*/
|
||||
protected final void soElement(long offset, E e) {
|
||||
soElement(buffer, offset, e);
|
||||
}
|
||||
|
||||
/**
|
||||
* An ordered store(store + StoreStore barrier) of an element to a given offset
|
||||
*
|
||||
* @param buffer this.buffer
|
||||
* @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
|
||||
* @param e an orderly kitty
|
||||
*/
|
||||
protected static final <E> void soElement(E[] buffer, long offset, E e) {
|
||||
PlatformDependent0.UNSAFE.putOrderedObject(buffer, offset, e);
|
||||
}
|
||||
|
||||
/**
|
||||
* A plain load (no ordering/fences) of an element from a given offset.
|
||||
*
|
||||
* @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
|
||||
* @return the element at the offset
|
||||
*/
|
||||
protected final E lpElement(long offset) {
|
||||
return lpElement(buffer, offset);
|
||||
}
|
||||
|
||||
/**
|
||||
* A plain load (no ordering/fences) of an element from a given offset.
|
||||
*
|
||||
* @param buffer this.buffer
|
||||
* @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
|
||||
* @return the element at the offset
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
protected static final <E> E lpElement(E[] buffer, long offset) {
|
||||
return (E) PlatformDependent0.UNSAFE.getObject(buffer, offset);
|
||||
}
|
||||
|
||||
/**
|
||||
* A volatile load (load + LoadLoad barrier) of an element from a given offset.
|
||||
*
|
||||
* @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
|
||||
* @return the element at the offset
|
||||
*/
|
||||
protected final E lvElement(long offset) {
|
||||
return lvElement(buffer, offset);
|
||||
}
|
||||
|
||||
/**
|
||||
* A volatile load (load + LoadLoad barrier) of an element from a given offset.
|
||||
*
|
||||
* @param buffer this.buffer
|
||||
* @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
|
||||
* @return the element at the offset
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
protected static final <E> E lvElement(E[] buffer, long offset) {
|
||||
return (E) PlatformDependent0.UNSAFE.getObjectVolatile(buffer, offset);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<E> iterator() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
while (poll() != null || !isEmpty()) {
|
||||
// looping
|
||||
}
|
||||
}
|
||||
|
||||
public int capacity() {
|
||||
return (int) (mask + 1);
|
||||
}
|
||||
}
|
||||
|
||||
abstract class ConcurrentCircularArrayQueueL0Pad<E> extends AbstractQueue<E> {
|
||||
long p00, p01, p02, p03, p04, p05, p06, p07;
|
||||
long p30, p31, p32, p33, p34, p35, p36, p37;
|
||||
}
|
||||
|
@ -1,71 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
/**
|
||||
* Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
|
||||
*/
|
||||
public final class LinkedQueueAtomicNode<E> extends AtomicReference<LinkedQueueAtomicNode<E>> {
|
||||
/** */
|
||||
private static final long serialVersionUID = 2404266111789071508L;
|
||||
private E value;
|
||||
LinkedQueueAtomicNode() {
|
||||
}
|
||||
LinkedQueueAtomicNode(E val) {
|
||||
spValue(val);
|
||||
}
|
||||
/**
|
||||
* Gets the current value and nulls out the reference to it from this node.
|
||||
*
|
||||
* @return value
|
||||
*/
|
||||
public E getAndNullValue() {
|
||||
E temp = lpValue();
|
||||
spValue(null);
|
||||
return temp;
|
||||
}
|
||||
|
||||
public E lpValue() {
|
||||
return value;
|
||||
}
|
||||
|
||||
public void spValue(E newValue) {
|
||||
value = newValue;
|
||||
}
|
||||
|
||||
public void soNext(LinkedQueueAtomicNode<E> n) {
|
||||
lazySet(n);
|
||||
}
|
||||
|
||||
public LinkedQueueAtomicNode<E> lvNext() {
|
||||
return get();
|
||||
}
|
||||
}
|
@ -1,80 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
/**
|
||||
* Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
|
||||
*/
|
||||
final class LinkedQueueNode<E> {
|
||||
private static final long NEXT_OFFSET;
|
||||
static {
|
||||
try {
|
||||
NEXT_OFFSET = PlatformDependent0.objectFieldOffset(LinkedQueueNode.class.getDeclaredField("next"));
|
||||
} catch (NoSuchFieldException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
private E value;
|
||||
private volatile LinkedQueueNode<E> next;
|
||||
|
||||
LinkedQueueNode() {
|
||||
this(null);
|
||||
}
|
||||
|
||||
LinkedQueueNode(E val) {
|
||||
spValue(val);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the current value and nulls out the reference to it from this node.
|
||||
*
|
||||
* @return value
|
||||
*/
|
||||
public E getAndNullValue() {
|
||||
E temp = lpValue();
|
||||
spValue(null);
|
||||
return temp;
|
||||
}
|
||||
|
||||
public E lpValue() {
|
||||
return value;
|
||||
}
|
||||
|
||||
public void spValue(E newValue) {
|
||||
value = newValue;
|
||||
}
|
||||
|
||||
public void soNext(LinkedQueueNode<E> n) {
|
||||
PlatformDependent0.putOrderedObject(this, NEXT_OFFSET, n);
|
||||
}
|
||||
|
||||
public LinkedQueueNode<E> lvNext() {
|
||||
return next;
|
||||
}
|
||||
}
|
@ -1,297 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
import java.util.Queue;
|
||||
|
||||
/**
|
||||
* Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
|
||||
*
|
||||
* This is a tagging interface for the queues in this library which implement a subset of the {@link Queue}
|
||||
* interface sufficient for concurrent message passing.<br>
|
||||
* Message passing queues provide happens before semantics to messages passed through, namely that writes made
|
||||
* by the producer before offering the message are visible to the consuming thread after the message has been
|
||||
* polled out of the queue.
|
||||
*
|
||||
* @param <T>
|
||||
* the event/message type
|
||||
*/
|
||||
public interface MessagePassingQueue<T> {
|
||||
int UNBOUNDED_CAPACITY = -1;
|
||||
|
||||
interface Supplier<T> {
|
||||
/**
|
||||
* This method will return the next value to be written to the queue. As such the queue
|
||||
* implementations are commited to insert the value once the call is made.
|
||||
* <p>
|
||||
* Users should be aware that underlying queue implementations may upfront claim parts of the queue
|
||||
* for batch operations and this will effect the view on the queue from the supplier method. In
|
||||
* particular size and any offer methods may take the view that the full batch has already happened.
|
||||
*
|
||||
* @return new element, NEVER null
|
||||
*/
|
||||
T get();
|
||||
}
|
||||
|
||||
interface Consumer<T> {
|
||||
/**
|
||||
* This method will process an element already removed from the queue. This method is expected to
|
||||
* never throw an exception.
|
||||
* <p>
|
||||
* Users should be aware that underlying queue implementations may upfront claim parts of the queue
|
||||
* for batch operations and this will effect the view on the queue from the accept method. In
|
||||
* particular size and any poll/peek methods may take the view that the full batch has already
|
||||
* happened.
|
||||
*
|
||||
* @param e not null
|
||||
*/
|
||||
void accept(T e);
|
||||
}
|
||||
|
||||
interface WaitStrategy {
|
||||
/**
|
||||
* This method can implement static or dynamic backoff. Dynamic backoff will rely on the counter for
|
||||
* estimating how long the caller has been idling. The expected usage is:
|
||||
*
|
||||
* <pre>
|
||||
* <code>
|
||||
* int ic = 0;
|
||||
* while(true) {
|
||||
* if(!isGodotArrived()) {
|
||||
* ic = w.idle(ic);
|
||||
* continue;
|
||||
* }
|
||||
* ic = 0;
|
||||
* // party with Godot until he goes again
|
||||
* }
|
||||
* </code>
|
||||
* </pre>
|
||||
*
|
||||
* @param idleCounter idle calls counter, managed by the idle method until reset
|
||||
* @return new counter value to be used on subsequent idle cycle
|
||||
*/
|
||||
int idle(int idleCounter);
|
||||
}
|
||||
|
||||
interface ExitCondition {
|
||||
|
||||
/**
|
||||
* This method should be implemented such that the flag read or determination cannot be hoisted out of
|
||||
* a loop which notmally means a volatile load, but with JDK9 VarHandles may mean getOpaque.
|
||||
*
|
||||
* @return true as long as we should keep running
|
||||
*/
|
||||
boolean keepRunning();
|
||||
}
|
||||
|
||||
/**
|
||||
* Called from a producer thread subject to the restrictions appropriate to the implementation and
|
||||
* according to the {@link Queue#offer(Object)} interface.
|
||||
*
|
||||
* @param e not null, will throw NPE if it is
|
||||
* @return true if element was inserted into the queue, false iff full
|
||||
*/
|
||||
boolean offer(T e);
|
||||
|
||||
/**
|
||||
* Called from the consumer thread subject to the restrictions appropriate to the implementation and
|
||||
* according to the {@link Queue#poll()} interface.
|
||||
*
|
||||
* @return a message from the queue if one is available, null iff empty
|
||||
*/
|
||||
T poll();
|
||||
|
||||
/**
|
||||
* Called from the consumer thread subject to the restrictions appropriate to the implementation and
|
||||
* according to the {@link Queue#peek()} interface.
|
||||
*
|
||||
* @return a message from the queue if one is available, null iff empty
|
||||
*/
|
||||
T peek();
|
||||
|
||||
/**
|
||||
* This method's accuracy is subject to concurrent modifications happening as the size is estimated and as
|
||||
* such is a best effort rather than absolute value. For some implementations this method may be O(n)
|
||||
* rather than O(1).
|
||||
*
|
||||
* @return number of messages in the queue, between 0 and {@link Integer#MAX_VALUE} but less or equals to
|
||||
* capacity (if bounded).
|
||||
*/
|
||||
int size();
|
||||
|
||||
/**
|
||||
* Removes all items from the queue. Called from the consumer thread subject to the restrictions
|
||||
* appropriate to the implementation and according to the {@link Queue#clear()} interface.
|
||||
*/
|
||||
void clear();
|
||||
|
||||
/**
|
||||
* This method's accuracy is subject to concurrent modifications happening as the observation is carried
|
||||
* out.
|
||||
*
|
||||
* @return true if empty, false otherwise
|
||||
*/
|
||||
boolean isEmpty();
|
||||
|
||||
/**
|
||||
* @return the capacity of this queue or UNBOUNDED_CAPACITY if not bounded
|
||||
*/
|
||||
int capacity();
|
||||
|
||||
/**
|
||||
* Called from a producer thread subject to the restrictions appropriate to the implementation. As opposed
|
||||
* to {@link Queue#offer(Object)} this method may return false without the queue being full.
|
||||
*
|
||||
* @param e not null, will throw NPE if it is
|
||||
* @return true if element was inserted into the queue, false if unable to offer
|
||||
*/
|
||||
boolean relaxedOffer(T e);
|
||||
|
||||
/**
|
||||
* Called from the consumer thread subject to the restrictions appropriate to the implementation. As
|
||||
* opposed to {@link Queue#poll()} this method may return null without the queue being empty.
|
||||
*
|
||||
* @return a message from the queue if one is available, null if unable to poll
|
||||
*/
|
||||
T relaxedPoll();
|
||||
|
||||
/**
|
||||
* Called from the consumer thread subject to the restrictions appropriate to the implementation. As
|
||||
* opposed to {@link Queue#peek()} this method may return null without the queue being empty.
|
||||
*
|
||||
* @return a message from the queue if one is available, null if unable to peek
|
||||
*/
|
||||
T relaxedPeek();
|
||||
|
||||
/**
|
||||
* Remove all available item from the queue and hand to consume. This should be semantically similar to:
|
||||
* <pre><code>
|
||||
* M m;
|
||||
* while((m = relaxedPoll()) != null){
|
||||
* c.accept(m);
|
||||
* }
|
||||
* </code></pre>
|
||||
* There's no strong commitment to the queue being empty at the end of a drain. Called from a
|
||||
* consumer thread subject to the restrictions appropriate to the implementation.
|
||||
*
|
||||
* @return the number of polled elements
|
||||
*/
|
||||
int drain(Consumer<T> c);
|
||||
|
||||
/**
|
||||
* Stuff the queue with elements from the supplier. Semantically similar to:
|
||||
* <pre><code>
|
||||
* while(relaxedOffer(s.get());
|
||||
* </code></pre>
|
||||
* There's no strong commitment to the queue being full at the end of a fill. Called from a
|
||||
* producer thread subject to the restrictions appropriate to the implementation.
|
||||
*
|
||||
* @return the number of offered elements
|
||||
*/
|
||||
int fill(Supplier<T> s);
|
||||
|
||||
/**
|
||||
* Remove up to <i>limit</i> elements from the queue and hand to consume. This should be semantically
|
||||
* similar to:
|
||||
*
|
||||
* <pre><code>
|
||||
* M m;
|
||||
* while((m = relaxedPoll()) != null){
|
||||
* c.accept(m);
|
||||
* }
|
||||
* </code></pre>
|
||||
*
|
||||
* There's no strong commitment to the queue being empty at the end of a drain. Called from a consumer
|
||||
* thread subject to the restrictions appropriate to the implementation.
|
||||
*
|
||||
* @return the number of polled elements
|
||||
*/
|
||||
int drain(Consumer<T> c, int limit);
|
||||
|
||||
/**
|
||||
* Stuff the queue with up to <i>limit</i> elements from the supplier. Semantically similar to:
|
||||
*
|
||||
* <pre>
|
||||
* <code>
|
||||
* for(int i=0; i < limit && relaxedOffer(s.get(); i++);
|
||||
* </code>
|
||||
* </pre>
|
||||
*
|
||||
* There's no strong commitment to the queue being full at the end of a fill. Called from a producer
|
||||
* thread subject to the restrictions appropriate to the implementation.
|
||||
*
|
||||
* @return the number of offered elements
|
||||
*/
|
||||
int fill(Supplier<T> s, int limit);
|
||||
|
||||
/**
|
||||
* Remove elements from the queue and hand to consume forever. Semantically similar to:
|
||||
*
|
||||
* <pre>
|
||||
* <code>
|
||||
* int idleCounter = 0;
|
||||
* while (exit.keepRunning()) {
|
||||
* E e = relaxedPoll();
|
||||
* if(e==null){
|
||||
* idleCounter = wait.idle(idleCounter);
|
||||
* continue;
|
||||
* }
|
||||
* idleCounter = 0;
|
||||
* c.accept(e);
|
||||
* }
|
||||
* </code>
|
||||
* </pre>
|
||||
*
|
||||
* Called from a consumer thread subject to the restrictions appropriate to the implementation.
|
||||
*
|
||||
*/
|
||||
void drain(Consumer<T> c, WaitStrategy wait, ExitCondition exit);
|
||||
|
||||
/**
|
||||
* Stuff the queue with elements from the supplier forever. Semantically similar to:
|
||||
*
|
||||
* <pre>
|
||||
* <code>
|
||||
* int idleCounter = 0;
|
||||
* while (exit.keepRunning()) {
|
||||
* E e = s.get();
|
||||
* while (!relaxedOffer(e)) {
|
||||
* idleCounter = wait.idle(idleCounter);
|
||||
* continue;
|
||||
* }
|
||||
* idleCounter = 0;
|
||||
* }
|
||||
* </code>
|
||||
* </pre>
|
||||
*
|
||||
* Called from a producer thread subject to the restrictions appropriate to the implementation.
|
||||
*
|
||||
*/
|
||||
void fill(Supplier<T> s, WaitStrategy wait, ExitCondition exit);
|
||||
}
|
@ -1,331 +0,0 @@
|
||||
/*
|
||||
* Copyright 2015 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
/**
|
||||
* Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
|
||||
*
|
||||
* A Multi-Producer-Single-Consumer queue based on a {@link ConcurrentCircularArrayQueue}. This implies that
|
||||
* any thread may call the offer method, but only a single thread may call poll/peek for correctness to
|
||||
 * be maintained. <br>
|
||||
* This implementation follows patterns documented on the package level for False Sharing protection.<br>
|
||||
* This implementation is using the <a href="http://sourceforge.net/projects/mc-fastflow/">Fast Flow</a>
|
||||
* method for polling from the queue (with minor change to correctly publish the index) and an extension of
|
||||
* the Leslie Lamport concurrent queue algorithm (originated by Martin Thompson) on the producer side.<br>
|
||||
*
|
||||
* @param <E>
|
||||
*/
|
||||
final class MpscArrayQueue<E> extends MpscArrayQueueConsumerField<E> {
|
||||
long p40, p41, p42, p43, p44, p45, p46;
|
||||
long p30, p31, p32, p33, p34, p35, p36, p37;
|
||||
|
||||
public MpscArrayQueue(final int capacity) {
|
||||
super(capacity);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc} <br>
|
||||
*
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
 * Lock-free offer using a single CAS. As the class name suggests, access is permitted to many threads
|
||||
* concurrently.
|
||||
*
|
||||
* @see java.util.Queue#offer(java.lang.Object)
|
||||
*/
|
||||
@Override
|
||||
public boolean offer(final E e) {
|
||||
if (null == e) {
|
||||
throw new NullPointerException("Null is not a valid element");
|
||||
}
|
||||
|
||||
// use a cached view on consumer index (potentially updated in loop)
|
||||
final long mask = this.mask;
|
||||
final long capacity = mask + 1;
|
||||
long consumerIndexCache = lvConsumerIndexCache(); // LoadLoad
|
||||
long currentProducerIndex;
|
||||
do {
|
||||
currentProducerIndex = lvProducerIndex(); // LoadLoad
|
||||
final long wrapPoint = currentProducerIndex - capacity;
|
||||
if (consumerIndexCache <= wrapPoint) {
|
||||
final long currHead = lvConsumerIndex(); // LoadLoad
|
||||
if (currHead <= wrapPoint) {
|
||||
return false; // FULL :(
|
||||
} else {
|
||||
// update shared cached value of the consumerIndex
|
||||
svConsumerIndexCache(currHead); // StoreLoad
|
||||
// update on stack copy, we might need this value again if we lose the CAS.
|
||||
consumerIndexCache = currHead;
|
||||
}
|
||||
}
|
||||
} while (!casProducerIndex(currentProducerIndex, currentProducerIndex + 1));
|
||||
/*
|
||||
* NOTE: the new producer index value is made visible BEFORE the element in the array. If we relied on
|
||||
* the index visibility to poll() we would need to handle the case where the element is not visible.
|
||||
*/
|
||||
|
||||
// Won CAS, move on to storing
|
||||
final long offset = calcElementOffset(currentProducerIndex, mask);
|
||||
soElement(offset, e); // StoreStore
|
||||
return true; // AWESOME :)
|
||||
}
|
||||
|
||||
/**
|
||||
 * A wait-free alternative to offer which fails on CAS failure.
|
||||
*
|
||||
* @param e new element, not null
|
||||
* @return 1 if next element cannot be filled, -1 if CAS failed, 0 if successful
|
||||
*/
|
||||
public int weakOffer(final E e) {
|
||||
if (null == e) {
|
||||
throw new NullPointerException("Null is not a valid element");
|
||||
}
|
||||
final long mask = this.mask;
|
||||
final long capacity = mask + 1;
|
||||
final long currentTail = lvProducerIndex(); // LoadLoad
|
||||
final long consumerIndexCache = lvConsumerIndexCache(); // LoadLoad
|
||||
final long wrapPoint = currentTail - capacity;
|
||||
if (consumerIndexCache <= wrapPoint) {
|
||||
long currHead = lvConsumerIndex(); // LoadLoad
|
||||
if (currHead <= wrapPoint) {
|
||||
return 1; // FULL :(
|
||||
} else {
|
||||
svConsumerIndexCache(currHead); // StoreLoad
|
||||
}
|
||||
}
|
||||
|
||||
// look Ma, no loop!
|
||||
if (!casProducerIndex(currentTail, currentTail + 1)) {
|
||||
return -1; // CAS FAIL :(
|
||||
}
|
||||
|
||||
// Won CAS, move on to storing
|
||||
final long offset = calcElementOffset(currentTail, mask);
|
||||
soElement(offset, e);
|
||||
return 0; // AWESOME :)
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p>
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
 * Lock-free poll using ordered loads/stores. As the class name suggests, access is limited to a single thread.
|
||||
*
|
||||
* @see java.util.Queue#poll()
|
||||
*/
|
||||
@Override
|
||||
public E poll() {
|
||||
final long consumerIndex = lvConsumerIndex(); // LoadLoad
|
||||
final long offset = calcElementOffset(consumerIndex);
|
||||
// Copy field to avoid re-reading after volatile load
|
||||
final E[] buffer = this.buffer;
|
||||
|
||||
// If we can't see the next available element we can't poll
|
||||
E e = lvElement(buffer, offset); // LoadLoad
|
||||
if (null == e) {
|
||||
/*
|
||||
* NOTE: Queue may not actually be empty in the case of a producer (P1) being interrupted after
|
||||
* winning the CAS on offer but before storing the element in the queue. Other producers may go on
|
||||
* to fill up the queue after this element.
|
||||
*/
|
||||
if (consumerIndex != lvProducerIndex()) {
|
||||
do {
|
||||
e = lvElement(buffer, offset);
|
||||
} while (e == null);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
spElement(buffer, offset, null);
|
||||
soConsumerIndex(consumerIndex + 1); // StoreStore
|
||||
return e;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p>
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
 * Lock-free peek using ordered loads. As the class name suggests, access is limited to a single thread.
|
||||
*
|
||||
 * @see java.util.Queue#peek()
|
||||
*/
|
||||
@Override
|
||||
public E peek() {
|
||||
// Copy field to avoid re-reading after volatile load
|
||||
final E[] buffer = this.buffer;
|
||||
|
||||
final long consumerIndex = lvConsumerIndex(); // LoadLoad
|
||||
final long offset = calcElementOffset(consumerIndex);
|
||||
E e = lvElement(buffer, offset);
|
||||
if (null == e) {
|
||||
/*
|
||||
* NOTE: Queue may not actually be empty in the case of a producer (P1) being interrupted after
|
||||
* winning the CAS on offer but before storing the element in the queue. Other producers may go on
|
||||
* to fill up the queue after this element.
|
||||
*/
|
||||
if (consumerIndex != lvProducerIndex()) {
|
||||
do {
|
||||
e = lvElement(buffer, offset);
|
||||
} while (e == null);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return e;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p>
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public int size() {
|
||||
/*
|
||||
* It is possible for a thread to be interrupted or reschedule between the read of the producer and
|
||||
* consumer indices, therefore protection is required to ensure size is within valid range. In the
|
||||
* event of concurrent polls/offers to this method the size is OVER estimated as we read consumer
|
||||
* index BEFORE the producer index.
|
||||
*/
|
||||
long after = lvConsumerIndex();
|
||||
while (true) {
|
||||
final long before = after;
|
||||
final long currentProducerIndex = lvProducerIndex();
|
||||
after = lvConsumerIndex();
|
||||
if (before == after) {
|
||||
return (int) (currentProducerIndex - after);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isEmpty() {
|
||||
// Order matters!
|
||||
// Loading consumer before producer allows for producer increments after consumer index is read.
|
||||
// This ensures the correctness of this method at least for the consumer thread. Other threads' POV is
// not really something we can fix here.
|
||||
return lvConsumerIndex() == lvProducerIndex();
|
||||
}
|
||||
}
|
||||
|
||||
abstract class MpscArrayQueueL1Pad<E> extends ConcurrentCircularArrayQueue<E> {
|
||||
long p10, p11, p12, p13, p14, p15, p16;
|
||||
long p30, p31, p32, p33, p34, p35, p36, p37;
|
||||
|
||||
public MpscArrayQueueL1Pad(int capacity) {
|
||||
super(capacity);
|
||||
}
|
||||
}
|
||||
|
||||
abstract class MpscArrayQueueTailField<E> extends MpscArrayQueueL1Pad<E> {
|
||||
private static final long P_INDEX_OFFSET;
|
||||
|
||||
static {
|
||||
try {
|
||||
P_INDEX_OFFSET = PlatformDependent0.UNSAFE.objectFieldOffset(MpscArrayQueueTailField.class
|
||||
.getDeclaredField("producerIndex"));
|
||||
} catch (NoSuchFieldException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
private volatile long producerIndex;
|
||||
|
||||
public MpscArrayQueueTailField(int capacity) {
|
||||
super(capacity);
|
||||
}
|
||||
|
||||
protected final long lvProducerIndex() {
|
||||
return producerIndex;
|
||||
}
|
||||
|
||||
protected final boolean casProducerIndex(long expect, long newValue) {
|
||||
return PlatformDependent0.UNSAFE.compareAndSwapLong(this, P_INDEX_OFFSET, expect, newValue);
|
||||
}
|
||||
}
|
||||
|
||||
abstract class MpscArrayQueueMidPad<E> extends MpscArrayQueueTailField<E> {
|
||||
long p20, p21, p22, p23, p24, p25, p26;
|
||||
long p30, p31, p32, p33, p34, p35, p36, p37;
|
||||
|
||||
public MpscArrayQueueMidPad(int capacity) {
|
||||
super(capacity);
|
||||
}
|
||||
}
|
||||
|
||||
abstract class MpscArrayQueueHeadCacheField<E> extends MpscArrayQueueMidPad<E> {
|
||||
private volatile long headCache;
|
||||
|
||||
public MpscArrayQueueHeadCacheField(int capacity) {
|
||||
super(capacity);
|
||||
}
|
||||
|
||||
protected final long lvConsumerIndexCache() {
|
||||
return headCache;
|
||||
}
|
||||
|
||||
protected final void svConsumerIndexCache(long v) {
|
||||
headCache = v;
|
||||
}
|
||||
}
|
||||
|
||||
abstract class MpscArrayQueueL2Pad<E> extends MpscArrayQueueHeadCacheField<E> {
|
||||
long p20, p21, p22, p23, p24, p25, p26;
|
||||
long p30, p31, p32, p33, p34, p35, p36, p37;
|
||||
|
||||
public MpscArrayQueueL2Pad(int capacity) {
|
||||
super(capacity);
|
||||
}
|
||||
}
|
||||
|
||||
abstract class MpscArrayQueueConsumerField<E> extends MpscArrayQueueL2Pad<E> {
|
||||
private static final long C_INDEX_OFFSET;
|
||||
static {
|
||||
try {
|
||||
C_INDEX_OFFSET = PlatformDependent0.UNSAFE.objectFieldOffset(MpscArrayQueueConsumerField.class
|
||||
.getDeclaredField("consumerIndex"));
|
||||
} catch (NoSuchFieldException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
private volatile long consumerIndex;
|
||||
|
||||
public MpscArrayQueueConsumerField(int capacity) {
|
||||
super(capacity);
|
||||
}
|
||||
|
||||
protected final long lvConsumerIndex() {
|
||||
return consumerIndex;
|
||||
}
|
||||
|
||||
protected void soConsumerIndex(long l) {
|
||||
PlatformDependent0.UNSAFE.putOrderedLong(this, C_INDEX_OFFSET, l);
|
||||
}
|
||||
}
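This bounded MPSC queue is exactly the shape Netty's event executors need for their task queues: any thread may enqueue work, while a single dedicated thread dequeues it. A condensed, hypothetical sketch of that pattern, built on Netty's internal PlatformDependent factory rather than the (now shaded) queue class directly; it is not the actual SingleThreadEventExecutor code.

import java.util.Queue;

import io.netty.util.internal.PlatformDependent;

final class TinyTaskRunner implements Runnable {

    // Many producer threads call execute(); only the runner thread ever polls.
    private final Queue<Runnable> tasks = PlatformDependent.newFixedMpscQueue(1024);
    private volatile boolean running = true;

    void start() {
        new Thread(this, "tiny-runner").start();
    }

    void stop() {
        running = false;
    }

    boolean execute(Runnable task) {
        // offer() returns false when the bounded queue is full; callers must handle rejection.
        return tasks.offer(task);
    }

    @Override
    public void run() {
        while (running) {
            Runnable task = tasks.poll(); // single consumer: only this thread may poll/peek
            if (task == null) {
                Thread.yield(); // a real event loop parks or selects instead of spinning
                continue;
            }
            task.run();
        }
    }
}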
|
||||
|
@ -1,378 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/**
|
||||
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.ObjectInputStream;
|
||||
import java.io.ObjectOutputStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.Queue;
|
||||
|
||||
/**
|
||||
* A lock-free concurrent single-consumer multi-producer {@link Queue}.
|
||||
* It allows multiple producer threads to perform the following operations simultaneously:
|
||||
* <ul>
|
||||
* <li>{@link #offer(Object)}, {@link #add(Object)}, {@link #addAll(Collection)}</li>
|
||||
* <li>{@link #isEmpty()}</li>
|
||||
* </ul>
|
||||
* .. while only one consumer thread is allowed to perform the following operations exclusively:
|
||||
* <ul>
|
||||
* <li>{@link #poll()} and {@link #remove()}</li>
|
||||
* <li>{@link #element()}, {@link #peek()}</li>
|
||||
* <li>{@link #remove(Object)}, {@link #removeAll(Collection)}, and {@link #retainAll(Collection)}</li>
|
||||
* <li>{@link #clear()}</li>
|
||||
* <li>{@link #iterator()}</li>
|
||||
* <li>{@link #toArray()} and {@link #toArray(Object[])}</li>
|
||||
* <li>{@link #contains(Object)} and {@link #containsAll(Collection)}</li>
|
||||
* <li>{@link #size()}</li>
|
||||
* </ul>
|
||||
*
|
||||
 * <strong>The behavior of this implementation is undefined if you perform the operations reserved for the consumer thread
|
||||
* from multiple threads.</strong>
|
||||
*
|
||||
* The initial implementation is based on:
|
||||
* <ul>
|
||||
* <li><a href="http://netty.io/s/mpsc-1024c">Non-intrusive MPSC node based queue</a> from 1024cores.net</li>
|
||||
* <li><a href="http://netty.io/s/mpsc-akka">AbstractNodeQueue</a> from Akka</li>
|
||||
* </ul>
|
||||
* and adopted padded head node changes from:
|
||||
* <ul>
|
||||
* <li><a href="http://netty.io/s/mpsc-rxjava">MpscPaddedQueue</a> from RxJava</li>
|
||||
* </ul>
|
||||
* data structure modified to avoid false sharing between head and tail Ref as per implementation of MpscLinkedQueue
|
||||
* on <a href="https://github.com/JCTools/JCTools">JCTools project</a>.
|
||||
*/
|
||||
final class MpscLinkedQueue<E> extends MpscLinkedQueueTailRef<E> implements Queue<E> {
|
||||
|
||||
private static final long serialVersionUID = -1878402552271506449L;
|
||||
|
||||
long p00, p01, p02, p03, p04, p05, p06, p07;
|
||||
long p30, p31, p32, p33, p34, p35, p36, p37;
|
||||
|
||||
// offer() occurs at the tail of the linked list.
|
||||
// poll() occurs at the head of the linked list.
|
||||
//
|
||||
// Resulting layout is:
|
||||
//
|
||||
// head --next--> 1st element --next--> 2nd element --next--> ... tail (last element)
|
||||
//
|
||||
// where the head is a dummy node whose value is null.
|
||||
//
|
||||
// offer() appends a new node next to the tail using AtomicReference.getAndSet()
|
||||
// poll() removes head from the linked list and promotes the 1st element to the head,
|
||||
// setting its value to null if possible.
|
||||
//
|
||||
// Also note that this class extends AtomicReference for the "tail" slot (which is the one that is appended to)
|
||||
// since Unsafe does not expose XCHG operation intrinsically.
|
||||
MpscLinkedQueue() {
|
||||
MpscLinkedQueueNode<E> tombstone = new DefaultNode<E>(null);
|
||||
setHeadRef(tombstone);
|
||||
setTailRef(tombstone);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the node right next to the head, which contains the first element of this queue.
|
||||
*/
|
||||
private MpscLinkedQueueNode<E> peekNode() {
|
||||
MpscLinkedQueueNode<E> head = headRef();
|
||||
MpscLinkedQueueNode<E> next = head.next();
|
||||
if (next == null && head != tailRef()) {
|
||||
// if tail != head this is not going to change until consumer makes progress
|
||||
// we can avoid reading the head and just spin on next until it shows up
|
||||
//
|
||||
// See https://github.com/akka/akka/pull/15596
|
||||
do {
|
||||
next = head.next();
|
||||
} while (next == null);
|
||||
}
|
||||
return next;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public boolean offer(E value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException("value");
|
||||
}
|
||||
|
||||
final MpscLinkedQueueNode<E> newTail;
|
||||
if (value instanceof MpscLinkedQueueNode) {
|
||||
newTail = (MpscLinkedQueueNode<E>) value;
|
||||
newTail.setNext(null);
|
||||
} else {
|
||||
newTail = new DefaultNode<E>(value);
|
||||
}
|
||||
|
||||
MpscLinkedQueueNode<E> oldTail = getAndSetTailRef(newTail);
|
||||
oldTail.setNext(newTail);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public E poll() {
|
||||
final MpscLinkedQueueNode<E> next = peekNode();
|
||||
if (next == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// next becomes a new head.
|
||||
MpscLinkedQueueNode<E> oldHead = headRef();
|
||||
// Similar to 'headRef.node = next', but slightly faster (storestore vs loadstore)
|
||||
// See: http://robsjava.blogspot.com/2013/06/a-faster-volatile.html
|
||||
// See: http://psy-lob-saw.blogspot.com/2012/12/atomiclazyset-is-performance-win-for.html
|
||||
lazySetHeadRef(next);
|
||||
|
||||
// Break the linkage between the old head and the new head.
|
||||
oldHead.unlink();
|
||||
|
||||
return next.clearMaybe();
|
||||
}
|
||||
|
||||
@Override
|
||||
public E peek() {
|
||||
final MpscLinkedQueueNode<E> next = peekNode();
|
||||
if (next == null) {
|
||||
return null;
|
||||
}
|
||||
return next.value();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size() {
|
||||
int count = 0;
|
||||
MpscLinkedQueueNode<E> n = peekNode();
|
||||
for (;;) {
|
||||
// If value == null it means that clearMaybe() was called on the MpscLinkedQueueNode.
|
||||
if (n == null || n.value() == null) {
|
||||
break;
|
||||
}
|
||||
MpscLinkedQueueNode<E> next = n.next();
|
||||
if (n == next) {
|
||||
break;
|
||||
}
|
||||
n = next;
|
||||
if (++ count == Integer.MAX_VALUE) {
|
||||
// Guard against overflow of integer.
|
||||
break;
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isEmpty() {
|
||||
return headRef() == tailRef();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean contains(Object o) {
|
||||
MpscLinkedQueueNode<E> n = peekNode();
|
||||
for (;;) {
|
||||
if (n == null) {
|
||||
break;
|
||||
}
|
||||
E value = n.value();
|
||||
// If value == null it means that clearMaybe() was called on the MpscLinkedQueueNode.
|
||||
if (value == null) {
|
||||
return false;
|
||||
}
|
||||
if (value == o) {
|
||||
return true;
|
||||
}
|
||||
MpscLinkedQueueNode<E> next = n.next();
|
||||
if (n == next) {
|
||||
break;
|
||||
}
|
||||
n = next;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<E> iterator() {
|
||||
return new ReadOnlyIterator<E>(toList().iterator());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean add(E e) {
|
||||
if (offer(e)) {
|
||||
return true;
|
||||
}
|
||||
throw new IllegalStateException("queue full");
|
||||
}
|
||||
|
||||
@Override
|
||||
public E remove() {
|
||||
E e = poll();
|
||||
if (e != null) {
|
||||
return e;
|
||||
}
|
||||
throw new NoSuchElementException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public E element() {
|
||||
E e = peek();
|
||||
if (e != null) {
|
||||
return e;
|
||||
}
|
||||
throw new NoSuchElementException();
|
||||
}
|
||||
|
||||
private List<E> toList(int initialCapacity) {
|
||||
return toList(new ArrayList<E>(initialCapacity));
|
||||
}
|
||||
|
||||
private List<E> toList() {
|
||||
return toList(new ArrayList<E>());
|
||||
}
|
||||
|
||||
private List<E> toList(List<E> elements) {
|
||||
MpscLinkedQueueNode<E> n = peekNode();
|
||||
for (;;) {
|
||||
if (n == null) {
|
||||
break;
|
||||
}
|
||||
E value = n.value();
|
||||
if (value == null) {
|
||||
break;
|
||||
}
|
||||
if (!elements.add(value)) {
|
||||
// Seems like there is no space left, break here.
|
||||
break;
|
||||
}
|
||||
MpscLinkedQueueNode<E> next = n.next();
|
||||
if (n == next) {
|
||||
break;
|
||||
}
|
||||
n = next;
|
||||
}
|
||||
return elements;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object[] toArray() {
|
||||
return toList().toArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public <T> T[] toArray(T[] a) {
|
||||
return toList(a.length).toArray(a);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean remove(Object o) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean containsAll(Collection<?> c) {
|
||||
for (Object e: c) {
|
||||
if (!contains(e)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean addAll(Collection<? extends E> c) {
|
||||
if (c == null) {
|
||||
throw new NullPointerException("c");
|
||||
}
|
||||
if (c == this) {
|
||||
throw new IllegalArgumentException("c == this");
|
||||
}
|
||||
|
||||
boolean modified = false;
|
||||
for (E e: c) {
|
||||
add(e);
|
||||
modified = true;
|
||||
}
|
||||
return modified;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean removeAll(Collection<?> c) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean retainAll(Collection<?> c) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
while (poll() != null) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
private void writeObject(ObjectOutputStream out) throws IOException {
|
||||
out.defaultWriteObject();
|
||||
for (E e: this) {
|
||||
out.writeObject(e);
|
||||
}
|
||||
out.writeObject(null);
|
||||
}
|
||||
|
||||
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
|
||||
in.defaultReadObject();
|
||||
|
||||
final MpscLinkedQueueNode<E> tombstone = new DefaultNode<E>(null);
|
||||
setHeadRef(tombstone);
|
||||
setTailRef(tombstone);
|
||||
|
||||
for (;;) {
|
||||
@SuppressWarnings("unchecked")
|
||||
E e = (E) in.readObject();
|
||||
if (e == null) {
|
||||
break;
|
||||
}
|
||||
add(e);
|
||||
}
|
||||
}
|
||||
|
||||
private static final class DefaultNode<T> extends MpscLinkedQueueNode<T> {
|
||||
|
||||
private T value;
|
||||
|
||||
DefaultNode(T value) {
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public T value() {
|
||||
return value;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected T clearMaybe() {
|
||||
T value = this.value;
|
||||
this.value = null;
|
||||
return value;
|
||||
}
|
||||
}
|
||||
}
|
@ -1,54 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package io.netty.util.internal;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
|
||||
|
||||
|
||||
abstract class MpscLinkedQueueHeadRef<E> extends MpscLinkedQueuePad0<E> implements Serializable {
|
||||
|
||||
private static final long serialVersionUID = 8467054865577874285L;
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
private static final AtomicReferenceFieldUpdater<MpscLinkedQueueHeadRef, MpscLinkedQueueNode> UPDATER;
|
||||
|
||||
static {
|
||||
@SuppressWarnings("rawtypes")
|
||||
AtomicReferenceFieldUpdater<MpscLinkedQueueHeadRef, MpscLinkedQueueNode> updater;
|
||||
updater = PlatformDependent.newAtomicReferenceFieldUpdater(MpscLinkedQueueHeadRef.class, "headRef");
|
||||
if (updater == null) {
|
||||
updater = AtomicReferenceFieldUpdater.newUpdater(
|
||||
MpscLinkedQueueHeadRef.class, MpscLinkedQueueNode.class, "headRef");
|
||||
}
|
||||
UPDATER = updater;
|
||||
}
|
||||
|
||||
private transient volatile MpscLinkedQueueNode<E> headRef;
|
||||
|
||||
protected final MpscLinkedQueueNode<E> headRef() {
|
||||
return headRef;
|
||||
}
|
||||
|
||||
protected final void setHeadRef(MpscLinkedQueueNode<E> headRef) {
|
||||
this.headRef = headRef;
|
||||
}
|
||||
|
||||
protected final void lazySetHeadRef(MpscLinkedQueueNode<E> headRef) {
|
||||
UPDATER.lazySet(this, headRef);
|
||||
}
|
||||
}
|
@ -1,65 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package io.netty.util.internal;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
|
||||
|
||||
public abstract class MpscLinkedQueueNode<T> {
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
private static final AtomicReferenceFieldUpdater<MpscLinkedQueueNode, MpscLinkedQueueNode> nextUpdater;
|
||||
|
||||
static {
|
||||
@SuppressWarnings("rawtypes")
|
||||
AtomicReferenceFieldUpdater<MpscLinkedQueueNode, MpscLinkedQueueNode> u;
|
||||
|
||||
u = PlatformDependent.newAtomicReferenceFieldUpdater(MpscLinkedQueueNode.class, "next");
|
||||
if (u == null) {
|
||||
u = AtomicReferenceFieldUpdater.newUpdater(MpscLinkedQueueNode.class, MpscLinkedQueueNode.class, "next");
|
||||
}
|
||||
nextUpdater = u;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private volatile MpscLinkedQueueNode<T> next;
|
||||
|
||||
final MpscLinkedQueueNode<T> next() {
|
||||
return next;
|
||||
}
|
||||
|
||||
final void setNext(final MpscLinkedQueueNode<T> newNext) {
|
||||
// Similar to 'next = newNext', but slightly faster (storestore vs loadstore)
|
||||
// See: http://robsjava.blogspot.com/2013/06/a-faster-volatile.html
|
||||
nextUpdater.lazySet(this, newNext);
|
||||
}
|
||||
|
||||
public abstract T value();
|
||||
|
||||
/**
|
||||
* Sets the element this node contains to {@code null} so that the node can be used as a tombstone.
|
||||
*/
|
||||
protected T clearMaybe() {
|
||||
return value();
|
||||
}
|
||||
|
||||
/**
|
||||
 * Unlink this node so that it can be GC'ed.
|
||||
*/
|
||||
void unlink() {
|
||||
setNext(null);
|
||||
}
|
||||
}
|
@ -1,22 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package io.netty.util.internal;
|
||||
|
||||
abstract class MpscLinkedQueuePad0<E> {
|
||||
long p00, p01, p02, p03, p04, p05, p06, p07;
|
||||
long p30, p31, p32, p33, p34, p35, p36, p37;
|
||||
}
|
@ -1,25 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package io.netty.util.internal;
|
||||
|
||||
abstract class MpscLinkedQueuePad1<E> extends MpscLinkedQueueHeadRef<E> {
|
||||
|
||||
private static final long serialVersionUID = 2886694927079691637L;
|
||||
|
||||
long p00, p01, p02, p03, p04, p05, p06, p07;
|
||||
long p30, p31, p32, p33, p34, p35, p36, p37;
|
||||
}
|
@ -1,54 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package io.netty.util.internal;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
|
||||
|
||||
abstract class MpscLinkedQueueTailRef<E> extends MpscLinkedQueuePad1<E> {
|
||||
|
||||
private static final long serialVersionUID = 8717072462993327429L;
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
private static final AtomicReferenceFieldUpdater<MpscLinkedQueueTailRef, MpscLinkedQueueNode> UPDATER;
|
||||
|
||||
static {
|
||||
@SuppressWarnings("rawtypes")
|
||||
AtomicReferenceFieldUpdater<MpscLinkedQueueTailRef, MpscLinkedQueueNode> updater;
|
||||
updater = PlatformDependent.newAtomicReferenceFieldUpdater(MpscLinkedQueueTailRef.class, "tailRef");
|
||||
if (updater == null) {
|
||||
updater = AtomicReferenceFieldUpdater.newUpdater(
|
||||
MpscLinkedQueueTailRef.class, MpscLinkedQueueNode.class, "tailRef");
|
||||
}
|
||||
UPDATER = updater;
|
||||
}
|
||||
|
||||
private transient volatile MpscLinkedQueueNode<E> tailRef;
|
||||
|
||||
protected final MpscLinkedQueueNode<E> tailRef() {
|
||||
return tailRef;
|
||||
}
|
||||
|
||||
protected final void setTailRef(MpscLinkedQueueNode<E> tailRef) {
|
||||
this.tailRef = tailRef;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
protected final MpscLinkedQueueNode<E> getAndSetTailRef(MpscLinkedQueueNode<E> tailRef) {
|
||||
// LOCK XCHG in JDK8, a CAS loop in JDK 7/6
|
||||
return (MpscLinkedQueueNode<E>) UPDATER.getAndSet(this, tailRef);
|
||||
}
|
||||
}
|
@ -1,32 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
import io.netty.util.concurrent.EventExecutor;
|
||||
|
||||
/**
|
||||
 * {@link Runnable} which represents a one-time task and may allow the {@link EventExecutor} to reduce the amount of
|
||||
 * garbage produced when queuing it for execution.
|
||||
*
|
||||
 * <strong>It is important that this is not reused. Once submitted, it must not be submitted again!</strong>
|
||||
*/
|
||||
public abstract class OneTimeTask extends MpscLinkedQueueNode<Runnable> implements Runnable {
|
||||
|
||||
@Override
|
||||
public Runnable value() {
|
||||
return this;
|
||||
}
|
||||
}
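With the class removed outright, every call site in this commit switches to a plain anonymous Runnable. A minimal runnable sketch of the replacement pattern, assuming netty-transport is on the classpath; the printed message is arbitrary.

import io.netty.channel.nio.NioEventLoopGroup;

public final class RunnableInsteadOfOneTimeTask {
    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        try {
            // Where a OneTimeTask subclass used to be allocated so the old MpscLinkedQueue could reuse
            // the task object as its own queue node, a plain Runnable is now enough: the JCTools-backed
            // task queue stores element references directly, without a per-task node.
            group.next().execute(new Runnable() {
                @Override
                public void run() {
                    System.out.println("ran on " + Thread.currentThread().getName());
                }
            });
        } finally {
            group.shutdownGracefully();
        }
    }
}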
|
@ -20,6 +20,12 @@ import io.netty.util.internal.chmv8.ConcurrentHashMapV8;
|
||||
import io.netty.util.internal.chmv8.LongAdderV8;
|
||||
import io.netty.util.internal.logging.InternalLogger;
|
||||
import io.netty.util.internal.logging.InternalLoggerFactory;
|
||||
import org.jctools.queues.MpscArrayQueue;
|
||||
import org.jctools.queues.MpscChunkedArrayQueue;
|
||||
import org.jctools.queues.SpscLinkedQueue;
|
||||
import org.jctools.queues.atomic.MpscAtomicArrayQueue;
|
||||
import org.jctools.queues.atomic.MpscLinkedAtomicQueue;
|
||||
import org.jctools.queues.atomic.SpscLinkedAtomicQueue;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
@ -41,7 +47,6 @@ import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentLinkedDeque;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.LinkedBlockingDeque;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
|
||||
@ -82,6 +87,7 @@ public final class PlatformDependent {
|
||||
private static final boolean DIRECT_BUFFER_PREFERRED =
|
||||
HAS_UNSAFE && !SystemPropertyUtil.getBoolean("io.netty.noPreferDirect", false);
|
||||
private static final long MAX_DIRECT_MEMORY = maxDirectMemory0();
|
||||
private static final int MAX_MPSC_CAPACITY = 1024 * 1024; // TODO: Maybe make this configurable ?
|
||||
|
||||
private static final long BYTE_ARRAY_BASE_OFFSET = PlatformDependent0.byteArrayBaseOffset();
|
||||
|
||||
@ -721,7 +727,8 @@ public final class PlatformDependent {
|
||||
* consumer (one thread!).
|
||||
*/
|
||||
public static <T> Queue<T> newMpscQueue() {
|
||||
return new MpscLinkedQueue<T>();
|
||||
return hasUnsafe() ? new MpscChunkedArrayQueue<T>(1024, MAX_MPSC_CAPACITY, true)
|
||||
: new MpscLinkedAtomicQueue<T>();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -729,10 +736,7 @@ public final class PlatformDependent {
|
||||
* consumer (one thread!).
|
||||
*/
|
||||
public static <T> Queue<T> newSpscQueue() {
|
||||
if (hasUnsafe()) {
|
||||
return new SpscLinkedQueue<T>();
|
||||
}
|
||||
return new SpscLinkedAtomicQueue<T>();
|
||||
return hasUnsafe() ? new SpscLinkedQueue<T>() : new SpscLinkedAtomicQueue<T>();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -740,11 +744,7 @@ public final class PlatformDependent {
|
||||
 * consumer (one thread!) with the given fixed {@code capacity}.
|
||||
*/
|
||||
public static <T> Queue<T> newFixedMpscQueue(int capacity) {
|
||||
if (hasUnsafe()) {
|
||||
return new MpscArrayQueue<T>(capacity);
|
||||
} else {
|
||||
return new LinkedBlockingQueue<T>(capacity);
|
||||
}
|
||||
return hasUnsafe() ? new MpscArrayQueue<T>(capacity) : new MpscAtomicArrayQueue<T>(capacity);
|
||||
}
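Taken together, each factory now picks the Unsafe-backed JCTools queue when available and the atomic-field fallback otherwise, instead of dropping back to LinkedBlockingQueue. A small sketch that just reports which implementation was selected; note that in the released netty-common artifact these classes are relocated under Netty's shaded package, so the printed names will differ from the plain org.jctools ones.

import java.util.Queue;

import io.netty.util.internal.PlatformDependent;

public final class QueueSelectionSketch {
    public static void main(String[] args) {
        Queue<Object> mpsc = PlatformDependent.newMpscQueue();
        Queue<Object> spsc = PlatformDependent.newSpscQueue();
        Queue<Object> fixed = PlatformDependent.newFixedMpscQueue(64);

        // With Unsafe available these resolve to the array/linked JCTools implementations
        // (MpscChunkedArrayQueue, SpscLinkedQueue, MpscArrayQueue); without it, the
        // org.jctools.queues.atomic fallbacks are used instead.
        System.out.println(mpsc.getClass().getName());
        System.out.println(spsc.getClass().getName());
        System.out.println(fixed.getClass().getName());
    }
}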
|
||||
|
||||
/**
|
||||
|
@ -1,42 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package io.netty.util.internal;
|
||||
|
||||
import io.netty.util.Recycler;
|
||||
|
||||
/**
|
||||
 * {@link MpscLinkedQueueNode} that will automatically call {@link Recycler.Handle#recycle(Object)} when the node is
|
||||
* unlinked.
|
||||
*/
|
||||
public abstract class RecyclableMpscLinkedQueueNode<T> extends MpscLinkedQueueNode<T> {
|
||||
@SuppressWarnings("rawtypes")
|
||||
private final Recycler.Handle handle;
|
||||
|
||||
protected RecyclableMpscLinkedQueueNode(Recycler.Handle<? extends RecyclableMpscLinkedQueueNode<T>> handle) {
|
||||
if (handle == null) {
|
||||
throw new NullPointerException("handle");
|
||||
}
|
||||
this.handle = handle;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
final void unlink() {
|
||||
super.unlink();
|
||||
handle.recycle(this);
|
||||
}
|
||||
}
|
@ -1,118 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
/**
|
||||
* Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
|
||||
*
|
||||
* This is a weakened version of the MPSC algorithm as presented <a
|
||||
* href="http://www.1024cores.net/home/lock-free-algorithms/queues/non-intrusive-mpsc-node-based-queue"> on 1024
|
||||
 * Cores</a> by D. Vyukov. The original has been adapted to Java and its quirks with regard to memory model and
|
||||
* layout:
|
||||
* <ol>
|
||||
* <li>As this is an SPSC we have no need for XCHG, an ordered store is enough.
|
||||
* </ol>
|
||||
* The queue is initialized with a stub node which is set to both the producer and consumer node references. From this
|
||||
* point follow the notes on offer/poll.
|
||||
*
|
||||
* @param <E>
|
||||
*/
|
||||
public final class SpscLinkedAtomicQueue<E> extends BaseLinkedAtomicQueue<E> {
|
||||
|
||||
public SpscLinkedAtomicQueue() {
|
||||
super();
|
||||
LinkedQueueAtomicNode<E> node = new LinkedQueueAtomicNode<E>();
|
||||
spProducerNode(node);
|
||||
spConsumerNode(node);
|
||||
node.soNext(null); // this ensures correct construction: StoreStore
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc} <br>
|
||||
*
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
* Offer is allowed from a SINGLE thread.<br>
|
||||
* Offer allocates a new node (holding the offered value) and:
|
||||
* <ol>
|
||||
* <li>Sets that node as the producerNode.next
|
||||
* <li>Sets the new node as the producerNode
|
||||
* </ol>
|
||||
 * From this it follows that producerNode.next is always null and for all other nodes node.next is not null.
|
||||
*
|
||||
* @see MessagePassingQueue#offer(Object)
|
||||
* @see java.util.Queue#offer(java.lang.Object)
|
||||
*/
|
||||
@Override
|
||||
public boolean offer(final E nextValue) {
|
||||
if (nextValue == null) {
|
||||
throw new IllegalArgumentException("null elements not allowed");
|
||||
}
|
||||
final LinkedQueueAtomicNode<E> nextNode = new LinkedQueueAtomicNode<E>(nextValue);
|
||||
lpProducerNode().soNext(nextNode);
|
||||
spProducerNode(nextNode);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc} <br>
|
||||
*
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
* Poll is allowed from a SINGLE thread.<br>
|
||||
* Poll reads the next node from the consumerNode and:
|
||||
* <ol>
|
||||
* <li>If it is null, the queue is empty.
|
||||
 * <li>If it is not null, set it as the consumer node and return its now evacuated value.
|
||||
* </ol>
|
||||
* This means the consumerNode.value is always null, which is also the starting point for the queue. Because null
|
||||
 * values are not allowed to be offered, this is the only node with its value set to null at any one time.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public E poll() {
|
||||
final LinkedQueueAtomicNode<E> nextNode = lpConsumerNode().lvNext();
|
||||
if (nextNode != null) {
|
||||
// we have to null out the value because we are going to hang on to the node
|
||||
final E nextValue = nextNode.getAndNullValue();
|
||||
spConsumerNode(nextNode);
|
||||
return nextValue;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public E peek() {
|
||||
final LinkedQueueAtomicNode<E> nextNode = lpConsumerNode().lvNext();
|
||||
if (nextNode != null) {
|
||||
return nextNode.lpValue();
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -1,215 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016 The Netty Project
|
||||
*
|
||||
* The Netty Project licenses this file to you under the Apache License,
|
||||
* version 2.0 (the "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at:
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package io.netty.util.internal;
|
||||
|
||||
/**
|
||||
* Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
|
||||
*
|
||||
* This is a weakened version of the MPSC algorithm as presented
|
||||
* <a href="http://www.1024cores.net/home/lock-free-algorithms/queues/non-intrusive-mpsc-node-based-queue"> on
|
||||
 * 1024 Cores</a> by D. Vyukov. The original has been adapted to Java and its quirks with regard to memory
|
||||
* model and layout:
|
||||
* <ol>
|
||||
* <li>Use inheritance to ensure no false sharing occurs between producer/consumer node reference fields.
|
||||
* <li>As this is an SPSC we have no need for XCHG, an ordered store is enough.
|
||||
* </ol>
|
||||
* The queue is initialized with a stub node which is set to both the producer and consumer node references.
|
||||
* From this point follow the notes on offer/poll.
|
||||
*
|
||||
* @param <E>
|
||||
*/
|
||||
public class SpscLinkedQueue<E> extends BaseLinkedQueue<E> {
|
||||
|
||||
public SpscLinkedQueue() {
|
||||
spProducerNode(new LinkedQueueNode<E>());
|
||||
spConsumerNode(producerNode);
|
||||
consumerNode.soNext(null); // this ensures correct construction: StoreStore
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc} <br>
|
||||
*
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
* Offer is allowed from a SINGLE thread.<br>
|
||||
* Offer allocates a new node (holding the offered value) and:
|
||||
* <ol>
|
||||
* <li>Sets that node as the producerNode.next
|
||||
* <li>Sets the new node as the producerNode
|
||||
* </ol>
|
||||
 * From this it follows that producerNode.next is always null and for all other nodes node.next is not null.
|
||||
*
|
||||
* @see MessagePassingQueue#offer(Object)
|
||||
* @see java.util.Queue#offer(java.lang.Object)
|
||||
*/
|
||||
@Override
|
||||
public boolean offer(final E nextValue) {
|
||||
if (nextValue == null) {
|
||||
throw new IllegalArgumentException("null elements not allowed");
|
||||
}
|
||||
final LinkedQueueNode<E> nextNode = new LinkedQueueNode<E>(nextValue);
|
||||
producerNode.soNext(nextNode);
|
||||
producerNode = nextNode;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc} <br>
|
||||
*
|
||||
* IMPLEMENTATION NOTES:<br>
|
||||
* Poll is allowed from a SINGLE thread.<br>
|
||||
* Poll reads the next node from the consumerNode and:
|
||||
* <ol>
|
||||
* <li>If it is null, the queue is empty.
|
||||
 * <li>If it is not null, set it as the consumer node and return its now evacuated value.
|
||||
* </ol>
|
||||
* This means the consumerNode.value is always null, which is also the starting point for the queue.
|
||||
 * Because null values are not allowed to be offered, this is the only node with its value set to null at
|
||||
* any one time.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public E poll() {
|
||||
final LinkedQueueNode<E> nextNode = consumerNode.lvNext();
|
||||
if (nextNode != null) {
|
||||
// we have to null out the value because we are going to hang on to the node
|
||||
final E nextValue = nextNode.getAndNullValue();
|
||||
consumerNode = nextNode;
|
||||
return nextValue;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public E peek() {
|
||||
final LinkedQueueNode<E> nextNode = consumerNode.lvNext();
|
||||
if (nextNode != null) {
|
||||
return nextNode.lpValue();
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean relaxedOffer(E e) {
|
||||
return offer(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
public E relaxedPoll() {
|
||||
return poll();
|
||||
}
|
||||
|
||||
@Override
|
||||
public E relaxedPeek() {
|
||||
return peek();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int drain(Consumer<E> c) {
|
||||
long result = 0; // use long to force safepoint into loop below
|
||||
int drained;
|
||||
do {
|
||||
drained = drain(c, 4096);
|
||||
result += drained;
|
||||
} while (drained == 4096 && result <= Integer.MAX_VALUE - 4096);
|
||||
return (int) result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int fill(Supplier<E> s) {
|
||||
long result = 0; // result is a long because we want to have a safepoint check at regular intervals
|
||||
do {
|
||||
fill(s, 4096);
|
||||
result += 4096;
|
||||
} while (result <= Integer.MAX_VALUE - 4096);
|
||||
return (int) result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int drain(Consumer<E> c, int limit) {
|
||||
LinkedQueueNode<E> chaserNode = this.consumerNode;
|
||||
for (int i = 0; i < limit; i++) {
|
||||
chaserNode = chaserNode.lvNext();
|
||||
if (chaserNode == null) {
|
||||
return i;
|
||||
}
|
||||
// we have to null out the value because we are going to hang on to the node
|
||||
final E nextValue = chaserNode.getAndNullValue();
|
||||
this.consumerNode = chaserNode;
|
||||
c.accept(nextValue);
|
||||
}
|
||||
return limit;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int fill(Supplier<E> s, int limit) {
|
||||
LinkedQueueNode<E> chaserNode = producerNode;
|
||||
for (int i = 0; i < limit; i++) {
|
||||
final LinkedQueueNode<E> nextNode = new LinkedQueueNode<E>(s.get());
|
||||
chaserNode.soNext(nextNode);
|
||||
chaserNode = nextNode;
|
||||
this.producerNode = chaserNode;
|
||||
}
|
||||
return limit;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void drain(Consumer<E> c, WaitStrategy wait, ExitCondition exit) {
|
||||
LinkedQueueNode<E> chaserNode = this.consumerNode;
|
||||
int idleCounter = 0;
|
||||
while (exit.keepRunning()) {
|
||||
for (int i = 0; i < 4096; i++) {
|
||||
final LinkedQueueNode<E> next = chaserNode.lvNext();
|
||||
if (next == null) {
|
||||
idleCounter = wait.idle(idleCounter);
|
||||
continue;
|
||||
}
|
||||
chaserNode = next;
|
||||
idleCounter = 0;
|
||||
// we have to null out the value because we are going to hang on to the node
|
||||
final E nextValue = chaserNode.getAndNullValue();
|
||||
this.consumerNode = chaserNode;
|
||||
c.accept(nextValue);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void fill(Supplier<E> s, WaitStrategy wait, ExitCondition exit) {
|
||||
LinkedQueueNode<E> chaserNode = producerNode;
|
||||
while (exit.keepRunning()) {
|
||||
for (int i = 0; i < 4096; i++) {
|
||||
final LinkedQueueNode<E> nextNode = new LinkedQueueNode<E>(s.get());
|
||||
chaserNode.soNext(nextNode);
|
||||
chaserNode = nextNode;
|
||||
this.producerNode = chaserNode;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -28,7 +28,6 @@ import io.netty.util.concurrent.DefaultPromise;
|
||||
import io.netty.util.concurrent.EventExecutor;
|
||||
import io.netty.util.concurrent.Future;
|
||||
import io.netty.util.concurrent.ScheduledFuture;
|
||||
import io.netty.util.internal.OneTimeTask;
|
||||
import io.netty.util.internal.logging.InternalLogger;
|
||||
import io.netty.util.internal.logging.InternalLoggerFactory;
|
||||
|
||||
@ -195,7 +194,7 @@ public abstract class ProxyHandler extends ChannelDuplexHandler {
|
||||
private void sendInitialMessage(final ChannelHandlerContext ctx) throws Exception {
|
||||
final long connectTimeoutMillis = this.connectTimeoutMillis;
|
||||
if (connectTimeoutMillis > 0) {
|
||||
connectTimeoutFuture = ctx.executor().schedule(new OneTimeTask() {
|
||||
connectTimeoutFuture = ctx.executor().schedule(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
if (!connectPromise.isDone()) {
|
||||
|
@ -41,7 +41,6 @@ import io.netty.util.concurrent.FutureListener;
|
||||
import io.netty.util.concurrent.ImmediateExecutor;
|
||||
import io.netty.util.concurrent.Promise;
|
||||
import io.netty.util.internal.EmptyArrays;
|
||||
import io.netty.util.internal.OneTimeTask;
|
||||
import io.netty.util.internal.PlatformDependent;
|
||||
import io.netty.util.internal.logging.InternalLogger;
|
||||
import io.netty.util.internal.logging.InternalLoggerFactory;
|
||||
@ -389,7 +388,7 @@ public class SslHandler extends ByteToMessageDecoder implements ChannelOutboundH
|
||||
*/
|
||||
public ChannelFuture close(final ChannelPromise future) {
|
||||
final ChannelHandlerContext ctx = this.ctx;
|
||||
ctx.executor().execute(new OneTimeTask() {
|
||||
ctx.executor().execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
outboundClosed = true;
|
||||
@ -1139,7 +1138,7 @@ public class SslHandler extends ByteToMessageDecoder implements ChannelOutboundH
|
||||
}
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
delegatedTaskExecutor.execute(new OneTimeTask() {
|
||||
delegatedTaskExecutor.execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
@ -1304,7 +1303,7 @@ public class SslHandler extends ByteToMessageDecoder implements ChannelOutboundH
|
||||
|
||||
EventExecutor executor = ctx.executor();
|
||||
if (!executor.inEventLoop()) {
|
||||
executor.execute(new OneTimeTask() {
|
||||
executor.execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
handshake(promise);
|
||||
@ -1371,7 +1370,7 @@ public class SslHandler extends ByteToMessageDecoder implements ChannelOutboundH
|
||||
return;
|
||||
}
|
||||
|
||||
final ScheduledFuture<?> timeoutFuture = ctx.executor().schedule(new OneTimeTask() {
|
||||
final ScheduledFuture<?> timeoutFuture = ctx.executor().schedule(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
if (p.isDone()) {
|
||||
@ -1413,7 +1412,7 @@ public class SslHandler extends ByteToMessageDecoder implements ChannelOutboundH
|
||||
final ScheduledFuture<?> timeoutFuture;
|
||||
if (closeNotifyTimeoutMillis > 0) {
|
||||
// Force-close the connection if close_notify is not fully sent in time.
|
||||
timeoutFuture = ctx.executor().schedule(new OneTimeTask() {
|
||||
timeoutFuture = ctx.executor().schedule(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
logger.warn("{} Last write attempt timed out; force-closing the connection.", ctx.channel());
|
||||
|
@ -27,7 +27,6 @@ import io.netty.channel.ChannelPipeline;
|
||||
import io.netty.channel.ChannelProgressivePromise;
|
||||
import io.netty.channel.ChannelPromise;
|
||||
import io.netty.util.ReferenceCountUtil;
|
||||
import io.netty.util.internal.OneTimeTask;
|
||||
import io.netty.util.internal.logging.InternalLogger;
|
||||
import io.netty.util.internal.logging.InternalLoggerFactory;
|
||||
|
||||
@ -112,7 +111,7 @@ public class ChunkedWriteHandler extends ChannelDuplexHandler {
|
||||
}
|
||||
} else {
|
||||
// let the transfer resume on the next event loop round
|
||||
ctx.executor().execute(new OneTimeTask() {
|
||||
ctx.executor().execute(new Runnable() {
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
@ -24,7 +24,6 @@ import io.netty.channel.ChannelHandlerContext;
|
||||
import io.netty.channel.ChannelInitializer;
|
||||
import io.netty.channel.ChannelOutboundHandlerAdapter;
|
||||
import io.netty.channel.ChannelPromise;
|
||||
import io.netty.util.internal.OneTimeTask;
|
||||
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
@ -183,7 +182,7 @@ public class WriteTimeoutHandler extends ChannelOutboundHandlerAdapter {
|
||||
}
|
||||
}
|
||||
|
||||
private final class WriteTimeoutTask extends OneTimeTask implements ChannelFutureListener {
|
||||
private final class WriteTimeoutTask implements Runnable, ChannelFutureListener {
|
||||
|
||||
private final ChannelHandlerContext ctx;
|
||||
private final ChannelPromise promise;
|
||||
|
@ -18,7 +18,6 @@ package io.netty.handler.traffic;
|
||||
import io.netty.buffer.ByteBuf;
|
||||
import io.netty.channel.ChannelHandlerContext;
|
||||
import io.netty.channel.ChannelPromise;
|
||||
import io.netty.util.internal.OneTimeTask;
|
||||
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
@ -193,7 +192,7 @@ public class ChannelTrafficShapingHandler extends AbstractTrafficShapingHandler
|
||||
checkWriteSuspend(ctx, delay, queueSize);
|
||||
}
|
||||
final long futureNow = newToSend.relativeTimeAction;
|
||||
ctx.executor().schedule(new OneTimeTask() {
|
||||
ctx.executor().schedule(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
sendAllValid(ctx, futureNow);
|
||||
|
@ -21,7 +21,6 @@ import io.netty.channel.Channel;
|
||||
import io.netty.channel.ChannelHandlerContext;
|
||||
import io.netty.channel.ChannelPromise;
|
||||
import io.netty.util.concurrent.EventExecutor;
|
||||
import io.netty.util.internal.OneTimeTask;
|
||||
import io.netty.util.internal.PlatformDependent;
|
||||
|
||||
import java.util.ArrayDeque;
|
||||
@ -361,7 +360,7 @@ public class GlobalTrafficShapingHandler extends AbstractTrafficShapingHandler {
|
||||
}
|
||||
final long futureNow = newToSend.relativeTimeAction;
|
||||
final PerChannel forSchedule = perChannel;
|
||||
ctx.executor().schedule(new OneTimeTask() {
|
||||
ctx.executor().schedule(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
sendAllValid(ctx, forSchedule, futureNow);
|
||||
|
12
pom.xml
@ -358,6 +358,13 @@
|
||||
<version>1.3</version>
|
||||
</dependency>
|
||||
|
||||
<!-- Java concurrency tools for the JVM -->
|
||||
<dependency>
|
||||
<groupId>org.jctools</groupId>
|
||||
<artifactId>jctools-core</artifactId>
|
||||
<version>1.2</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.rxtx</groupId>
|
||||
<artifactId>rxtx</artifactId>
|
||||
@ -1148,6 +1155,11 @@
|
||||
<artifactId>exec-maven-plugin</artifactId>
|
||||
<version>1.0.0.Final</version>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-shade-plugin</artifactId>
|
||||
<version>2.4.3</version>
|
||||
</plugin>
|
||||
|
||||
<!-- Workaround for the 'M2E plugin execution not covered' problem.
|
||||
See: http://wiki.eclipse.org/M2E_plugin_execution_not_covered -->
|
||||
|
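Only the parent POM changes are shown here: jctools-core 1.2 is added and the maven-shade-plugin version is pinned so that netty-common can shade and relocate the JCTools classes, keeping the published artifact "zero dependency"; the per-module shade configuration itself is not part of this hunk. For callers nothing changes, since MPSC queues are still obtained through the existing io.netty.util.internal.PlatformDependent factory rather than by referencing JCTools types directly. A small usage sketch, assuming the newMpscQueue() factory method Netty already exposes:

```java
import java.util.Queue;

import io.netty.util.internal.PlatformDependent;

public final class MpscQueueUsageSketch {

    private MpscQueueUsageSketch() { }

    public static void main(String[] args) {
        // The factory hides whether the unsafe or non-unsafe JCTools queue is used,
        // and after shading the JCTools types never appear on Netty's public surface.
        Queue<Runnable> taskQueue = PlatformDependent.newMpscQueue();

        // Multiple producer threads may offer() concurrently ...
        taskQueue.offer(new Runnable() {
            @Override
            public void run() {
                System.out.println("task ran");
            }
        });

        // ... but only a single consumer (typically the event loop) should poll().
        Runnable task;
        while ((task = taskQueue.poll()) != null) {
            task.run();
        }
    }
}
```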
@ -16,7 +16,6 @@
package io.netty.resolver.dns;

import io.netty.channel.EventLoop;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.UnstableApi;

@ -198,7 +197,7 @@ public class DefaultDnsCache implements DnsCache {
final DnsCacheEntry e,
int ttl,
EventLoop loop) {
e.scheduleExpiration(loop, new OneTimeTask() {
e.scheduleExpiration(loop, new Runnable() {
@Override
public void run() {
synchronized (entries) {

@ -29,7 +29,6 @@ import io.netty.handler.codec.dns.DnsResponse;
import io.netty.handler.codec.dns.DnsSection;
import io.netty.util.concurrent.Promise;
import io.netty.util.concurrent.ScheduledFuture;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -147,7 +146,7 @@ final class DnsQueryContext {
// Schedule a query timeout task if necessary.
final long queryTimeoutMillis = parent.queryTimeoutMillis();
if (queryTimeoutMillis > 0) {
timeoutFuture = parent.ch.eventLoop().schedule(new OneTimeTask() {
timeoutFuture = parent.ch.eventLoop().schedule(new Runnable() {
@Override
public void run() {
if (promise.isDone()) {
@ -31,7 +31,6 @@ import io.netty.channel.socket.ChannelInputShutdownEvent;
import io.netty.channel.unix.Socket;
import io.netty.channel.unix.UnixChannel;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.internal.OneTimeTask;

import java.io.IOException;
import java.net.InetSocketAddress;

@ -161,7 +160,7 @@ abstract class AbstractEpollChannel extends AbstractChannel implements UnixChann
unsafe.clearEpollIn0();
} else {
// schedule a task to clear the EPOLLIN as it is not safe to modify it directly
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
if (!unsafe.readPending && !config().isAutoRead()) {

@ -33,8 +33,6 @@ import io.netty.channel.socket.DuplexChannel;
import io.netty.channel.unix.FileDescriptor;
import io.netty.channel.unix.Socket;
import io.netty.util.internal.EmptyArrays;
import io.netty.util.internal.MpscLinkedQueueNode;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.logging.InternalLogger;

@ -238,7 +236,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
// Seems like the Channel was closed in the meantime try to fail the promise to prevent any
// cases where a future may not be notified otherwise.
if (promise.tryFailure(CLOSED_CHANNEL_EXCEPTION)) {
eventLoop().execute(new OneTimeTask() {
eventLoop().execute(new Runnable() {
@Override
public void run() {
// Call this via the EventLoop as it is a MPSC queue.

@ -581,7 +579,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
public ChannelFuture shutdownOutput(final ChannelPromise promise) {
Executor closeExecutor = ((EpollStreamUnsafe) unsafe()).prepareToClose();
if (closeExecutor != null) {
closeExecutor.execute(new OneTimeTask() {
closeExecutor.execute(new Runnable() {
@Override
public void run() {
shutdownOutput0(promise);

@ -592,7 +590,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
if (loop.inEventLoop()) {
shutdownOutput0(promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
shutdownOutput0(promise);

@ -612,7 +610,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
public ChannelFuture shutdownInput(final ChannelPromise promise) {
Executor closeExecutor = ((EpollStreamUnsafe) unsafe()).prepareToClose();
if (closeExecutor != null) {
closeExecutor.execute(new OneTimeTask() {
closeExecutor.execute(new Runnable() {
@Override
public void run() {
shutdownInput0(promise);

@ -623,7 +621,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
if (loop.inEventLoop()) {
shutdownInput0(promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
shutdownInput0(promise);

@ -643,7 +641,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
public ChannelFuture shutdown(final ChannelPromise promise) {
Executor closeExecutor = ((EpollStreamUnsafe) unsafe()).prepareToClose();
if (closeExecutor != null) {
closeExecutor.execute(new OneTimeTask() {
closeExecutor.execute(new Runnable() {
@Override
public void run() {
shutdown0(promise);

@ -654,7 +652,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
if (loop.inEventLoop()) {
shutdown0(promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
shutdown0(promise);

@ -785,7 +783,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
// Schedule connect timeout.
int connectTimeoutMillis = config().getConnectTimeoutMillis();
if (connectTimeoutMillis > 0) {
connectTimeoutFuture = eventLoop().schedule(new OneTimeTask() {
connectTimeoutFuture = eventLoop().schedule(new Runnable() {
@Override
public void run() {
ChannelPromise connectPromise = AbstractEpollStreamChannel.this.connectPromise;

@ -992,7 +990,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
if (eventLoop.inEventLoop()) {
addToSpliceQueue0(task);
} else {
eventLoop.execute(new OneTimeTask() {
eventLoop.execute(new Runnable() {
@Override
public void run() {
addToSpliceQueue0(task);

@ -1008,7 +1006,7 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
spliceQueue.add(task);
}

protected abstract class SpliceInTask extends MpscLinkedQueueNode<SpliceInTask> {
protected abstract class SpliceInTask {
final ChannelPromise promise;
int len;

@ -1017,11 +1015,6 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
this.len = len;
}

@Override
public SpliceInTask value() {
return this;
}

abstract boolean spliceIn(RecvByteBufAllocator.Handle handle);

protected final int spliceIn(FileDescriptor pipeOut, RecvByteBufAllocator.Handle handle) throws IOException {

@ -1162,11 +1155,6 @@ public abstract class AbstractEpollStreamChannel extends AbstractEpollChannel im
this.offset = offset;
}

@Override
public SpliceFdTask value() {
return this;
}

@Override
public boolean spliceIn(RecvByteBufAllocator.Handle handle) {
assert eventLoop().inEventLoop();
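Besides the Runnable switch, this file drops the MpscLinkedQueueNode supertype from SpliceInTask and SpliceFdTask, so their value() overrides disappear: the tasks are now plain objects offered to the splice queue rather than intrusive linked-list nodes. A rough sketch of that shape, with a hypothetical Task type and enqueue/drain methods standing in for the real splice types (the queue construction via PlatformDependent is an assumption for illustration):

```java
import java.util.Queue;

import io.netty.util.internal.PlatformDependent;

final class SpliceQueueSketch {

    // Hypothetical stand-in for SpliceInTask: no node supertype, no value() method.
    abstract static class Task {
        abstract boolean process();
    }

    // Assumption: an MPSC queue from the shaded JCTools library holds the tasks directly.
    private final Queue<Task> spliceQueue = PlatformDependent.newMpscQueue();

    void enqueue(Task task) {
        // The task object itself is offered; no intrusive linked-list node is involved.
        spliceQueue.offer(task);
    }

    void drainOnEventLoop() {
        for (;;) {
            Task task = spliceQueue.poll();
            if (task == null) {
                break;
            }
            task.process();
        }
    }
}
```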
@ -21,7 +21,6 @@ import gnu.io.SerialPort;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelPromise;
import io.netty.channel.oio.OioByteStreamChannel;
import io.netty.util.internal.OneTimeTask;

import java.net.SocketAddress;
import java.util.concurrent.TimeUnit;

@ -162,7 +161,7 @@ public class RxtxChannel extends OioByteStreamChannel {

int waitTime = config().getOption(WAIT_TIME);
if (waitTime > 0) {
eventLoop().schedule(new OneTimeTask() {
eventLoop().schedule(new Runnable() {
@Override
public void run() {
try {

@ -34,7 +34,6 @@ import io.netty.channel.sctp.SctpChannelConfig;
import io.netty.channel.sctp.SctpMessage;
import io.netty.channel.sctp.SctpNotificationHandler;
import io.netty.channel.sctp.SctpServerChannel;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.logging.InternalLogger;

@ -356,7 +355,7 @@ public class NioSctpChannel extends AbstractNioMessageChannel implements io.nett
promise.setFailure(t);
}
} else {
eventLoop().execute(new OneTimeTask() {
eventLoop().execute(new Runnable() {
@Override
public void run() {
bindAddress(localAddress, promise);

@ -381,7 +380,7 @@ public class NioSctpChannel extends AbstractNioMessageChannel implements io.nett
promise.setFailure(t);
}
} else {
eventLoop().execute(new OneTimeTask() {
eventLoop().execute(new Runnable() {
@Override
public void run() {
unbindAddress(localAddress, promise);

@ -25,7 +25,6 @@ import io.netty.channel.ChannelPromise;
import io.netty.channel.nio.AbstractNioMessageChannel;
import io.netty.channel.sctp.DefaultSctpServerChannelConfig;
import io.netty.channel.sctp.SctpServerChannelConfig;
import io.netty.util.internal.OneTimeTask;

import java.io.IOException;
import java.net.InetAddress;

@ -160,7 +159,7 @@ public class NioSctpServerChannel extends AbstractNioMessageChannel
promise.setFailure(t);
}
} else {
eventLoop().execute(new OneTimeTask() {
eventLoop().execute(new Runnable() {
@Override
public void run() {
bindAddress(localAddress, promise);

@ -185,7 +184,7 @@ public class NioSctpServerChannel extends AbstractNioMessageChannel
promise.setFailure(t);
}
} else {
eventLoop().execute(new OneTimeTask() {
eventLoop().execute(new Runnable() {
@Override
public void run() {
unbindAddress(localAddress, promise);

@ -34,7 +34,6 @@ import io.netty.channel.sctp.SctpChannelConfig;
import io.netty.channel.sctp.SctpMessage;
import io.netty.channel.sctp.SctpNotificationHandler;
import io.netty.channel.sctp.SctpServerChannel;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.logging.InternalLogger;

@ -422,7 +421,7 @@ public class OioSctpChannel extends AbstractOioMessageChannel
promise.setFailure(t);
}
} else {
eventLoop().execute(new OneTimeTask() {
eventLoop().execute(new Runnable() {
@Override
public void run() {
bindAddress(localAddress, promise);

@ -447,7 +446,7 @@ public class OioSctpChannel extends AbstractOioMessageChannel
promise.setFailure(t);
}
} else {
eventLoop().execute(new OneTimeTask() {
eventLoop().execute(new Runnable() {
@Override
public void run() {
unbindAddress(localAddress, promise);

@ -25,7 +25,6 @@ import io.netty.channel.ChannelPromise;
import io.netty.channel.oio.AbstractOioMessageChannel;
import io.netty.channel.sctp.DefaultSctpServerChannelConfig;
import io.netty.channel.sctp.SctpServerChannelConfig;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -235,7 +234,7 @@ public class OioSctpServerChannel extends AbstractOioMessageChannel
promise.setFailure(t);
}
} else {
eventLoop().execute(new OneTimeTask() {
eventLoop().execute(new Runnable() {
@Override
public void run() {
bindAddress(localAddress, promise);

@ -260,7 +259,7 @@ public class OioSctpServerChannel extends AbstractOioMessageChannel
promise.setFailure(t);
}
} else {
eventLoop().execute(new OneTimeTask() {
eventLoop().execute(new Runnable() {
@Override
public void run() {
unbindAddress(localAddress, promise);
@ -29,7 +29,6 @@ import io.netty.channel.ReflectiveChannelFactory;
import io.netty.util.AttributeKey;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.StringUtil;

import java.net.InetAddress;

@ -353,7 +352,7 @@ public abstract class AbstractBootstrap<B extends AbstractBootstrap<B, C>, C ext

// This method is invoked before channelRegistered() is triggered. Give user handlers a chance to set up
// the pipeline in its channelRegistered() implementation.
channel.eventLoop().execute(new OneTimeTask() {
channel.eventLoop().execute(new Runnable() {
@Override
public void run() {
if (regFuture.isSuccess()) {

@ -30,7 +30,6 @@ import io.netty.resolver.AddressResolverGroup;
import io.netty.util.AttributeKey;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -246,7 +245,7 @@ public class Bootstrap extends AbstractBootstrap<Bootstrap, Channel> {
// This method is invoked before channelRegistered() is triggered. Give user handlers a chance to set up
// the pipeline in its channelRegistered() implementation.
final Channel channel = connectPromise.channel();
channel.eventLoop().execute(new OneTimeTask() {
channel.eventLoop().execute(new Runnable() {
@Override
public void run() {
if (localAddress == null) {

@ -28,7 +28,6 @@ import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.ServerChannel;
import io.netty.util.AttributeKey;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -267,7 +266,7 @@ public class ServerBootstrap extends AbstractBootstrap<ServerBootstrap, ServerCh
// stop accept new connections for 1 second to allow the channel to recover
// See https://github.com/netty/netty/issues/1328
config.setAutoRead(false);
ctx.channel().eventLoop().schedule(new OneTimeTask() {
ctx.channel().eventLoop().schedule(new Runnable() {
@Override
public void run() {
config.setAutoRead(true);
@ -19,7 +19,6 @@ import io.netty.buffer.ByteBufAllocator;
import io.netty.util.DefaultAttributeMap;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.internal.EmptyArrays;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -465,7 +464,7 @@ public abstract class AbstractChannel extends DefaultAttributeMap implements Cha
register0(promise);
} else {
try {
eventLoop.execute(new OneTimeTask() {
eventLoop.execute(new Runnable() {
@Override
public void run() {
register0(promise);

@ -548,7 +547,7 @@ public abstract class AbstractChannel extends DefaultAttributeMap implements Cha
}

if (!wasActive && isActive()) {
invokeLater(new OneTimeTask() {
invokeLater(new Runnable() {
@Override
public void run() {
pipeline.fireChannelActive();

@ -577,7 +576,7 @@ public abstract class AbstractChannel extends DefaultAttributeMap implements Cha
}

if (wasActive && !isActive()) {
invokeLater(new OneTimeTask() {
invokeLater(new Runnable() {
@Override
public void run() {
pipeline.fireChannelInactive();

@ -626,7 +625,7 @@ public abstract class AbstractChannel extends DefaultAttributeMap implements Cha
this.outboundBuffer = null; // Disallow adding any messages and flushes to outboundBuffer.
Executor closeExecutor = prepareToClose();
if (closeExecutor != null) {
closeExecutor.execute(new OneTimeTask() {
closeExecutor.execute(new Runnable() {
@Override
public void run() {
try {

@ -634,7 +633,7 @@ public abstract class AbstractChannel extends DefaultAttributeMap implements Cha
doClose0(promise);
} finally {
// Call invokeLater so closeAndDeregister is executed in the EventLoop again!
invokeLater(new OneTimeTask() {
invokeLater(new Runnable() {
@Override
public void run() {
// Fail all the queued messages

@ -656,7 +655,7 @@ public abstract class AbstractChannel extends DefaultAttributeMap implements Cha
outboundBuffer.close(CLOSED_CHANNEL_EXCEPTION);
}
if (inFlush0) {
invokeLater(new OneTimeTask() {
invokeLater(new Runnable() {
@Override
public void run() {
fireChannelInactiveAndDeregister(wasActive);

@ -720,7 +719,7 @@ public abstract class AbstractChannel extends DefaultAttributeMap implements Cha
//
// See:
// https://github.com/netty/netty/issues/4435
invokeLater(new OneTimeTask() {
invokeLater(new Runnable() {
@Override
public void run() {
try {

@ -756,7 +755,7 @@ public abstract class AbstractChannel extends DefaultAttributeMap implements Cha
try {
doBeginRead();
} catch (final Exception e) {
invokeLater(new OneTimeTask() {
invokeLater(new Runnable() {
@Override
public void run() {
pipeline.fireExceptionCaught(e);
@ -24,8 +24,6 @@ import io.netty.util.Recycler;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.internal.ObjectUtil;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.RecyclableMpscLinkedQueueNode;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.SystemPropertyUtil;
import io.netty.util.internal.logging.InternalLogger;

@ -126,7 +124,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeChannelRegistered();
} else {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeChannelRegistered();

@ -158,7 +156,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeChannelUnregistered();
} else {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeChannelUnregistered();

@ -191,7 +189,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeChannelActive();
} else {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeChannelActive();

@ -223,7 +221,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeChannelInactive();
} else {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeChannelInactive();

@ -257,7 +255,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
next.invokeExceptionCaught(cause);
} else {
try {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeExceptionCaught(cause);

@ -300,7 +298,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeUserEventTriggered(event);
} else {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeUserEventTriggered(event);

@ -333,7 +331,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeChannelRead(m);
} else {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
next.invokeChannelRead(m);

@ -471,7 +469,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeBind(localAddress, promise);
} else {
safeExecute(executor, new OneTimeTask() {
safeExecute(executor, new Runnable() {
@Override
public void run() {
next.invokeBind(localAddress, promise);

@ -515,7 +513,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeConnect(remoteAddress, localAddress, promise);
} else {
safeExecute(executor, new OneTimeTask() {
safeExecute(executor, new Runnable() {
@Override
public void run() {
next.invokeConnect(remoteAddress, localAddress, promise);

@ -555,7 +553,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
next.invokeDisconnect(promise);
}
} else {
safeExecute(executor, new OneTimeTask() {
safeExecute(executor, new Runnable() {
@Override
public void run() {
if (!channel().metadata().hasDisconnect()) {

@ -593,7 +591,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeClose(promise);
} else {
safeExecute(executor, new OneTimeTask() {
safeExecute(executor, new Runnable() {
@Override
public void run() {
next.invokeClose(promise);

@ -628,7 +626,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
if (executor.inEventLoop()) {
next.invokeDeregister(promise);
} else {
safeExecute(executor, new OneTimeTask() {
safeExecute(executor, new Runnable() {
@Override
public void run() {
next.invokeDeregister(promise);

@ -999,7 +997,7 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
return StringUtil.simpleClassName(ChannelHandlerContext.class) + '(' + name + ", " + channel() + ')';
}

abstract static class AbstractWriteTask extends RecyclableMpscLinkedQueueNode<Runnable> implements Runnable {
abstract static class AbstractWriteTask implements Runnable {

private static final boolean ESTIMATE_TASK_SIZE_ON_SUBMIT =
SystemPropertyUtil.getBoolean("io.netty.transport.estimateSizeOnSubmit", true);

@ -1008,13 +1006,15 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
private static final int WRITE_TASK_OVERHEAD =
SystemPropertyUtil.getInt("io.netty.transport.writeTaskSizeOverhead", 48);

private final Recycler.Handle<AbstractWriteTask> handle;
private AbstractChannelHandlerContext ctx;
private Object msg;
private ChannelPromise promise;
private int size;

@SuppressWarnings("unchecked")
private AbstractWriteTask(Recycler.Handle<? extends AbstractWriteTask> handle) {
super(handle);
this.handle = (Recycler.Handle<AbstractWriteTask>) handle;
}

protected static void init(AbstractWriteTask task, AbstractChannelHandlerContext ctx,

@ -1052,14 +1052,10 @@ abstract class AbstractChannelHandlerContext extends DefaultAttributeMap
ctx = null;
msg = null;
promise = null;
handle.recycle(this);
}
}

@Override
public Runnable value() {
return this;
}

protected void write(AbstractChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
ctx.invokeWrite(msg, promise);
}
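AbstractWriteTask keeps its object pooling but loses the RecyclableMpscLinkedQueueNode supertype: it now implements Runnable directly, stores its own Recycler.Handle, and calls handle.recycle(this) itself, which is why the value() override and the super(handle) call are deleted. A stripped-down sketch of the resulting shape (field names follow the diff; the surrounding Recycler wiring and the real write logic are omitted):

```java
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.util.Recycler;

// Sketch of the slimmed-down write task: a pooled Runnable with its own Recycler.Handle,
// no queue-node supertype and therefore no value() method.
abstract class WriteTaskSketch implements Runnable {

    private final Recycler.Handle<WriteTaskSketch> handle;
    private ChannelHandlerContext ctx;
    private Object msg;
    private ChannelPromise promise;

    @SuppressWarnings("unchecked")
    WriteTaskSketch(Recycler.Handle<? extends WriteTaskSketch> handle) {
        this.handle = (Recycler.Handle<WriteTaskSketch>) handle;
    }

    void init(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
        this.ctx = ctx;
        this.msg = msg;
        this.promise = promise;
    }

    @Override
    public void run() {
        try {
            ctx.write(msg, promise); // the real task delegates to the context's internal invokeWrite
        } finally {
            // Clear references and return the instance to the pool, as before,
            // just without the RecyclableMpscLinkedQueueNode machinery.
            ctx = null;
            msg = null;
            promise = null;
            handle.recycle(this);
        }
    }
}
```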
@ -24,7 +24,6 @@ import io.netty.util.Recycler.Handle;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.concurrent.FastThreadLocal;
import io.netty.util.internal.InternalThreadLocalMap;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -633,7 +632,7 @@ public final class ChannelOutboundBuffer {

void close(final ClosedChannelException cause) {
if (inFail) {
channel.eventLoop().execute(new OneTimeTask() {
channel.eventLoop().execute(new Runnable() {
@Override
public void run() {
close(cause);

@ -19,7 +19,6 @@ import io.netty.buffer.ByteBufAllocator;
import io.netty.util.Attribute;
import io.netty.util.AttributeKey;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -584,7 +583,7 @@ public class CombinedChannelDuplexHandler<I extends ChannelInboundHandler, O ext
if (executor.inEventLoop()) {
remove0();
} else {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
remove0();

@ -22,7 +22,6 @@ import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.EventExecutorGroup;
import io.netty.util.concurrent.FastThreadLocal;
import io.netty.util.internal.ObjectUtil;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -161,7 +160,7 @@ public class DefaultChannelPipeline implements ChannelPipeline {

EventExecutor executor = newCtx.executor();
if (!executor.inEventLoop()) {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
callHandlerAdded0(newCtx);

@ -207,7 +206,7 @@ public class DefaultChannelPipeline implements ChannelPipeline {

EventExecutor executor = newCtx.executor();
if (!executor.inEventLoop()) {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
callHandlerAdded0(newCtx);

@ -257,7 +256,7 @@ public class DefaultChannelPipeline implements ChannelPipeline {

EventExecutor executor = newCtx.executor();
if (!executor.inEventLoop()) {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
callHandlerAdded0(newCtx);

@ -314,7 +313,7 @@ public class DefaultChannelPipeline implements ChannelPipeline {
}
EventExecutor executor = newCtx.executor();
if (!executor.inEventLoop()) {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
callHandlerAdded0(newCtx);

@ -445,7 +444,7 @@ public class DefaultChannelPipeline implements ChannelPipeline {

EventExecutor executor = ctx.executor();
if (!executor.inEventLoop()) {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
callHandlerRemoved0(ctx);

@ -530,7 +529,7 @@ public class DefaultChannelPipeline implements ChannelPipeline {
}
EventExecutor executor = ctx.executor();
if (!executor.inEventLoop()) {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
// Invoke newHandler.handlerAdded() first (i.e. before oldHandler.handlerRemoved() is invoked)

@ -832,7 +831,7 @@ public class DefaultChannelPipeline implements ChannelPipeline {
final EventExecutor executor = ctx.executor();
if (!inEventLoop && !executor.inEventLoop(currentThread)) {
final AbstractChannelHandlerContext finalCtx = ctx;
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
destroyUp(finalCtx, true);

@ -862,7 +861,7 @@ public class DefaultChannelPipeline implements ChannelPipeline {
callHandlerRemoved0(ctx);
} else {
final AbstractChannelHandlerContext finalCtx = ctx;
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
destroyDown(Thread.currentThread(), finalCtx, true);

@ -1344,7 +1343,7 @@ public class DefaultChannelPipeline implements ChannelPipeline {
}
}

private abstract static class PendingHandlerCallback extends OneTimeTask {
private abstract static class PendingHandlerCallback implements Runnable {
final AbstractChannelHandlerContext ctx;
PendingHandlerCallback next;
@ -18,7 +18,6 @@ package io.netty.channel.local;
import io.netty.channel.AbstractChannel;
import io.netty.channel.Channel;
import io.netty.channel.ChannelConfig;
import io.netty.channel.ChannelException;
import io.netty.channel.ChannelMetadata;
import io.netty.channel.ChannelOutboundBuffer;
import io.netty.channel.ChannelPipeline;

@ -31,7 +30,6 @@ import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.SingleThreadEventExecutor;
import io.netty.util.internal.EmptyArrays;
import io.netty.util.internal.InternalThreadLocalMap;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.PlatformDependent;

import java.net.ConnectException;

@ -193,7 +191,7 @@ public class LocalChannel extends AbstractChannel {
// This ensures that if both channels are on the same event loop, the peer's channelActive
// event is triggered *after* this channel's channelRegistered event, so that this channel's
// pipeline is fully initialized by ChannelInitializer before any channelRead events.
peer.eventLoop().execute(new OneTimeTask() {
peer.eventLoop().execute(new Runnable() {
@Override
public void run() {
registerInProgress = false;

@ -262,7 +260,7 @@ public class LocalChannel extends AbstractChannel {
// This value may change, and so we should save it before executing the Runnable.
final boolean peerWriteInProgress = peer.writeInProgress;
try {
peer.eventLoop().execute(new OneTimeTask() {
peer.eventLoop().execute(new Runnable() {
@Override
public void run() {
doPeerClose(peer, peerWriteInProgress);

@ -388,7 +386,7 @@ public class LocalChannel extends AbstractChannel {
private void runFinishPeerReadTask(final LocalChannel peer) {
// If the peer is writing, we must wait until after reads are completed for that peer before we can read. So
// we keep track of the task, and coordinate later that our read can't happen until the peer is done.
final Runnable finishPeerReadTask = new OneTimeTask() {
final Runnable finishPeerReadTask = new Runnable() {
@Override
public void run() {
finishPeerRead0(peer);

@ -30,7 +30,6 @@ import io.netty.channel.EventLoop;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.ReferenceCounted;
import io.netty.util.internal.EmptyArrays;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -150,7 +149,7 @@ public abstract class AbstractNioChannel extends AbstractChannel {
if (eventLoop.inEventLoop()) {
setReadPending0(readPending);
} else {
eventLoop.execute(new OneTimeTask() {
eventLoop.execute(new Runnable() {
@Override
public void run() {
setReadPending0(readPending);

@ -262,7 +261,7 @@ public abstract class AbstractNioChannel extends AbstractChannel {
// Schedule connect timeout.
int connectTimeoutMillis = config().getConnectTimeoutMillis();
if (connectTimeoutMillis > 0) {
connectTimeoutFuture = eventLoop().schedule(new OneTimeTask() {
connectTimeoutFuture = eventLoop().schedule(new Runnable() {
@Override
public void run() {
ChannelPromise connectPromise = AbstractNioChannel.this.connectPromise;
@ -20,7 +20,6 @@ import io.netty.channel.Channel;
import io.netty.channel.ChannelPromise;
import io.netty.channel.EventLoop;
import io.netty.channel.ThreadPerChannelEventLoop;
import io.netty.util.internal.OneTimeTask;

import java.net.SocketAddress;

@ -123,7 +122,7 @@ public abstract class AbstractOioChannel extends AbstractChannel {
if (eventLoop.inEventLoop()) {
this.readPending = readPending;
} else {
eventLoop.execute(new OneTimeTask() {
eventLoop.execute(new Runnable() {
@Override
public void run() {
AbstractOioChannel.this.readPending = readPending;

@ -22,7 +22,6 @@ import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.Promise;
import io.netty.util.internal.EmptyArrays;
import io.netty.util.internal.OneTimeTask;

import java.nio.channels.ClosedChannelException;
import java.util.ArrayDeque;

@ -202,7 +201,7 @@ public final class FixedChannelPool extends SimpleChannelPool {
if (executor.inEventLoop()) {
acquire0(promise);
} else {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
acquire0(promise);

@ -398,7 +397,7 @@ public final class FixedChannelPool extends SimpleChannelPool {

@Override
public void close() {
executor.execute(new OneTimeTask() {
executor.execute(new Runnable() {
@Override
public void run() {
if (!closed) {

@ -26,7 +26,6 @@ import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.Promise;
import io.netty.util.internal.EmptyArrays;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.PlatformDependent;

import java.util.Deque;

@ -143,7 +142,7 @@ public class SimpleChannelPool implements ChannelPool {
if (loop.inEventLoop()) {
doHealthCheck(ch, promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
doHealthCheck(ch, promise);

@ -226,7 +225,7 @@ public class SimpleChannelPool implements ChannelPool {
if (loop.inEventLoop()) {
doReleaseChannel(channel, promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
doReleaseChannel(channel, promise);

@ -30,7 +30,6 @@ import io.netty.channel.socket.DefaultSocketChannelConfig;
import io.netty.channel.socket.ServerSocketChannel;
import io.netty.channel.socket.SocketChannelConfig;
import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -161,7 +160,7 @@ public class NioSocketChannel extends AbstractNioByteChannel implements io.netty
public ChannelFuture shutdownOutput(final ChannelPromise promise) {
Executor closeExecutor = ((NioSocketChannelUnsafe) unsafe()).prepareToClose();
if (closeExecutor != null) {
closeExecutor.execute(new OneTimeTask() {
closeExecutor.execute(new Runnable() {
@Override
public void run() {
shutdownOutput0(promise);

@ -172,7 +171,7 @@ public class NioSocketChannel extends AbstractNioByteChannel implements io.netty
if (loop.inEventLoop()) {
shutdownOutput0(promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
shutdownOutput0(promise);

@ -192,7 +191,7 @@ public class NioSocketChannel extends AbstractNioByteChannel implements io.netty
public ChannelFuture shutdownInput(final ChannelPromise promise) {
Executor closeExecutor = ((NioSocketChannelUnsafe) unsafe()).prepareToClose();
if (closeExecutor != null) {
closeExecutor.execute(new OneTimeTask() {
closeExecutor.execute(new Runnable() {
@Override
public void run() {
shutdownInput0(promise);

@ -203,7 +202,7 @@ public class NioSocketChannel extends AbstractNioByteChannel implements io.netty
if (loop.inEventLoop()) {
shutdownInput0(promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
shutdownInput0(promise);

@ -223,7 +222,7 @@ public class NioSocketChannel extends AbstractNioByteChannel implements io.netty
public ChannelFuture shutdown(final ChannelPromise promise) {
Executor closeExecutor = ((NioSocketChannelUnsafe) unsafe()).prepareToClose();
if (closeExecutor != null) {
closeExecutor.execute(new OneTimeTask() {
closeExecutor.execute(new Runnable() {
@Override
public void run() {
shutdown0(promise);

@ -234,7 +233,7 @@ public class NioSocketChannel extends AbstractNioByteChannel implements io.netty
if (loop.inEventLoop()) {
shutdown0(promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
shutdown0(promise);

@ -25,7 +25,6 @@ import io.netty.channel.EventLoop;
import io.netty.channel.oio.OioByteStreamChannel;
import io.netty.channel.socket.ServerSocketChannel;
import io.netty.channel.socket.SocketChannel;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

@ -161,7 +160,7 @@ public class OioSocketChannel extends OioByteStreamChannel implements SocketChan
if (loop.inEventLoop()) {
shutdownOutput0(promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
shutdownOutput0(promise);

@ -186,7 +185,7 @@ public class OioSocketChannel extends OioByteStreamChannel implements SocketChan
if (loop.inEventLoop()) {
shutdownInput0(promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
shutdownInput0(promise);

@ -211,7 +210,7 @@ public class OioSocketChannel extends OioByteStreamChannel implements SocketChan
if (loop.inEventLoop()) {
shutdown0(promise);
} else {
loop.execute(new OneTimeTask() {
loop.execute(new Runnable() {
@Override
public void run() {
shutdown0(promise);

@ -17,7 +17,6 @@
package io.netty.bootstrap;

import io.netty.channel.Channel;
import io.netty.channel.ChannelException;
import io.netty.channel.ChannelFactory;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

@ -38,7 +37,6 @@ import io.netty.resolver.AbstractAddressResolver;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.Promise;
import io.netty.util.internal.OneTimeTask;
import org.junit.AfterClass;
import org.junit.Test;

@ -340,7 +338,7 @@ public class BootstrapTest {
@Override
protected void doResolve(
final SocketAddress unresolvedAddress, final Promise<SocketAddress> promise) {
executor().execute(new OneTimeTask() {
executor().execute(new Runnable() {
@Override
public void run() {
if (success) {

@ -356,7 +354,7 @@ public class BootstrapTest {
protected void doResolveAll(
final SocketAddress unresolvedAddress, final Promise<List<SocketAddress>> promise)
throws Exception {
executor().execute(new OneTimeTask() {
executor().execute(new Runnable() {
@Override
public void run() {
if (success) {

@ -35,7 +35,6 @@ import io.netty.channel.SingleThreadEventLoop;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.Promise;
import io.netty.util.internal.OneTimeTask;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import org.junit.AfterClass;

@ -366,7 +365,7 @@ public class LocalChannelTest {

final Channel ccCpy = cc;
// Make sure a write operation is executed in the eventloop
cc.pipeline().lastContext().executor().execute(new OneTimeTask() {
cc.pipeline().lastContext().executor().execute(new Runnable() {
@Override
public void run() {
ChannelPromise promise = ccCpy.newPromise();

@ -430,7 +429,7 @@ public class LocalChannelTest {

final Channel ccCpy = cc;
// Make sure a write operation is executed in the eventloop
cc.pipeline().lastContext().executor().execute(new OneTimeTask() {
cc.pipeline().lastContext().executor().execute(new Runnable() {
@Override
public void run() {
ChannelPromise promise = ccCpy.newPromise();

@ -512,7 +511,7 @@ public class LocalChannelTest {

final Channel ccCpy = cc;
// Make sure a write operation is executed in the eventloop
cc.pipeline().lastContext().executor().execute(new OneTimeTask() {
cc.pipeline().lastContext().executor().execute(new Runnable() {
@Override
public void run() {
ChannelPromise promise = ccCpy.newPromise();

@ -594,7 +593,7 @@ public class LocalChannelTest {

final Channel ccCpy = cc;
// Make sure a write operation is executed in the eventloop
cc.pipeline().lastContext().executor().execute(new OneTimeTask() {
cc.pipeline().lastContext().executor().execute(new Runnable() {
@Override
public void run() {
ChannelPromise promise = ccCpy.newPromise();

@ -674,7 +673,7 @@ public class LocalChannelTest {

final Channel ccCpy = cc;
// Make sure a write operation is executed in the eventloop
cc.pipeline().lastContext().executor().execute(new OneTimeTask() {
cc.pipeline().lastContext().executor().execute(new Runnable() {
@Override
public void run() {
ChannelPromise promise = ccCpy.newPromise();

@ -755,14 +754,14 @@ public class LocalChannelTest {
ccCpy.closeFuture().addListener(clientChannelCloseLatch);

// Make sure a write operation is executed in the eventloop
cc.pipeline().lastContext().executor().execute(new OneTimeTask() {
cc.pipeline().lastContext().executor().execute(new Runnable() {
@Override
public void run() {
ccCpy.writeAndFlush(data.retainedDuplicate(), ccCpy.newPromise())
.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
serverChannelCpy.eventLoop().execute(new OneTimeTask() {
serverChannelCpy.eventLoop().execute(new Runnable() {
@Override
public void run() {
// The point of this test is to write while the peer is closed, so we should