/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.util;

import io.netty.util.internal.MpscLinkedQueueNode;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

import java.util.Collections;
import java.util.HashSet;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
/**
 * A {@link Timer} optimized for approximated I/O timeout scheduling.
 *
 * <h3>Tick Duration</h3>
 *
 * As described with 'approximated', this timer does not execute the scheduled
 * {@link TimerTask} on time. {@link HashedWheelTimer}, on every tick, will
 * check if there are any {@link TimerTask}s behind the schedule and execute
 * them.
 * <p>
 * You can increase or decrease the accuracy of the execution timing by
 * specifying a smaller or larger tick duration in the constructor. In most
 * network applications, I/O timeout does not need to be accurate. Therefore,
 * the default tick duration is 100 milliseconds and you will not need to try
 * different configurations in most cases.
 *
 * <h3>Ticks per Wheel (Wheel Size)</h3>
 *
 * {@link HashedWheelTimer} maintains a data structure called 'wheel'.
 * Simply put, a wheel is a hash table of {@link TimerTask}s whose hash
 * function is 'deadline of the task'. The default number of ticks per wheel
 * (i.e. the size of the wheel) is 512. You could specify a larger value
 * if you are going to schedule a lot of timeouts.
 *
 * <h3>Do not create many instances.</h3>
 *
 * {@link HashedWheelTimer} creates a new thread whenever it is instantiated and
 * started. Therefore, you should make sure to create only one instance and
 * share it across your application. One of the common mistakes that makes
 * your application unresponsive is to create a new instance for every connection.
 *
 * <h3>Implementation Details</h3>
 *
 * {@link HashedWheelTimer} is based on
 * <a href="http://cseweb.ucsd.edu/users/varghese/">George Varghese</a> and
 * Tony Lauck's paper,
 * <a href="http://cseweb.ucsd.edu/users/varghese/PAPERS/twheel.ps.Z">'Hashed
 * and Hierarchical Timing Wheels: data structures to efficiently implement a
 * timer facility'</a>. More comprehensive slides are located
 * <a href="http://www.cse.wustl.edu/~cdgill/courses/cs6874/TimingWheels.ppt">here</a>.
 */
public class HashedWheelTimer implements Timer {

    static final InternalLogger logger =
            InternalLoggerFactory.getInstance(HashedWheelTimer.class);

    private static final ResourceLeakDetector<HashedWheelTimer> leakDetector =
            new ResourceLeakDetector<HashedWheelTimer>(
                    HashedWheelTimer.class, 1, Runtime.getRuntime().availableProcessors() * 4);

    private static final AtomicIntegerFieldUpdater<HashedWheelTimer> WORKER_STATE_UPDATER;
    static {
        AtomicIntegerFieldUpdater<HashedWheelTimer> workerStateUpdater =
                PlatformDependent.newAtomicIntegerFieldUpdater(HashedWheelTimer.class, "workerState");
        if (workerStateUpdater == null) {
            workerStateUpdater = AtomicIntegerFieldUpdater.newUpdater(HashedWheelTimer.class, "workerState");
        }
        WORKER_STATE_UPDATER = workerStateUpdater;
    }

    private final ResourceLeak leak;
    private final Worker worker = new Worker();
    private final Thread workerThread;

    public static final int WORKER_STATE_INIT = 0;
    public static final int WORKER_STATE_STARTED = 1;
    public static final int WORKER_STATE_SHUTDOWN = 2;
    @SuppressWarnings({ "unused", "FieldMayBeFinal", "RedundantFieldInitialization" })
    private volatile int workerState = WORKER_STATE_INIT; // 0 - init, 1 - started, 2 - shut down

    private final long tickDuration;
    private final HashedWheelBucket[] wheel;
    private final int mask;
    private final CountDownLatch startTimeInitialized = new CountDownLatch(1);
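    // newTimeout() and cancel() only append to these MPSC queues; the worker thread drains them on
    // each tick, so callers never need to synchronize on the wheel itself.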
    private final Queue<HashedWheelTimeout> timeouts = PlatformDependent.newMpscQueue();
    private final Queue<Runnable> cancelledTimeouts = PlatformDependent.newMpscQueue();

    private volatile long startTime;

    /**
     * Creates a new timer with the default thread factory
     * ({@link Executors#defaultThreadFactory()}), default tick duration, and
     * default number of ticks per wheel.
     */
    public HashedWheelTimer() {
        this(Executors.defaultThreadFactory());
    }

    /**
     * Creates a new timer with the default thread factory
     * ({@link Executors#defaultThreadFactory()}) and default number of ticks
     * per wheel.
     *
     * @param tickDuration  the duration between ticks
     * @param unit          the time unit of the {@code tickDuration}
     * @throws NullPointerException     if {@code unit} is {@code null}
     * @throws IllegalArgumentException if {@code tickDuration} is <= 0
     */
    public HashedWheelTimer(long tickDuration, TimeUnit unit) {
        this(Executors.defaultThreadFactory(), tickDuration, unit);
    }

    /**
     * Creates a new timer with the default thread factory
     * ({@link Executors#defaultThreadFactory()}).
     *
     * @param tickDuration  the duration between ticks
     * @param unit          the time unit of the {@code tickDuration}
     * @param ticksPerWheel the size of the wheel
     * @throws NullPointerException     if {@code unit} is {@code null}
     * @throws IllegalArgumentException if either of {@code tickDuration} and {@code ticksPerWheel} is <= 0
     */
    public HashedWheelTimer(long tickDuration, TimeUnit unit, int ticksPerWheel) {
        this(Executors.defaultThreadFactory(), tickDuration, unit, ticksPerWheel);
    }

    /**
     * Creates a new timer with the default tick duration and default number of
     * ticks per wheel.
     *
     * @param threadFactory a {@link ThreadFactory} that creates a
     *                      background {@link Thread} which is dedicated to
     *                      {@link TimerTask} execution.
     * @throws NullPointerException if {@code threadFactory} is {@code null}
     */
    public HashedWheelTimer(ThreadFactory threadFactory) {
        this(threadFactory, 100, TimeUnit.MILLISECONDS);
    }

    /**
     * Creates a new timer with the default number of ticks per wheel.
     *
     * @param threadFactory a {@link ThreadFactory} that creates a
     *                      background {@link Thread} which is dedicated to
     *                      {@link TimerTask} execution.
     * @param tickDuration  the duration between ticks
     * @param unit          the time unit of the {@code tickDuration}
     * @throws NullPointerException     if either of {@code threadFactory} and {@code unit} is {@code null}
     * @throws IllegalArgumentException if {@code tickDuration} is <= 0
     */
    public HashedWheelTimer(
            ThreadFactory threadFactory, long tickDuration, TimeUnit unit) {
        this(threadFactory, tickDuration, unit, 512);
    }

    /**
     * Creates a new timer.
     *
     * @param threadFactory a {@link ThreadFactory} that creates a
     *                      background {@link Thread} which is dedicated to
     *                      {@link TimerTask} execution.
     * @param tickDuration  the duration between ticks
     * @param unit          the time unit of the {@code tickDuration}
     * @param ticksPerWheel the size of the wheel
     * @throws NullPointerException     if either of {@code threadFactory} and {@code unit} is {@code null}
     * @throws IllegalArgumentException if either of {@code tickDuration} and {@code ticksPerWheel} is <= 0
     */
    public HashedWheelTimer(
            ThreadFactory threadFactory,
            long tickDuration, TimeUnit unit, int ticksPerWheel) {

        if (threadFactory == null) {
            throw new NullPointerException("threadFactory");
        }
        if (unit == null) {
            throw new NullPointerException("unit");
        }
        if (tickDuration <= 0) {
            throw new IllegalArgumentException("tickDuration must be greater than 0: " + tickDuration);
        }
        if (ticksPerWheel <= 0) {
            throw new IllegalArgumentException("ticksPerWheel must be greater than 0: " + ticksPerWheel);
        }

        // Normalize ticksPerWheel to power of two and initialize the wheel.
        wheel = createWheel(ticksPerWheel);
        mask = wheel.length - 1;

        // Convert tickDuration to nanos.
        this.tickDuration = unit.toNanos(tickDuration);

        // Prevent overflow.
        if (this.tickDuration >= Long.MAX_VALUE / wheel.length) {
            throw new IllegalArgumentException(String.format(
                    "tickDuration: %d (expected: 0 < tickDuration in nanos < %d)",
                    tickDuration, Long.MAX_VALUE / wheel.length));
        }
        workerThread = threadFactory.newThread(worker);

        leak = leakDetector.open(this);
    }

    private static HashedWheelBucket[] createWheel(int ticksPerWheel) {
        if (ticksPerWheel <= 0) {
            throw new IllegalArgumentException(
                    "ticksPerWheel must be greater than 0: " + ticksPerWheel);
        }
        if (ticksPerWheel > 1073741824) {
            throw new IllegalArgumentException(
                    "ticksPerWheel may not be greater than 2^30: " + ticksPerWheel);
        }

        ticksPerWheel = normalizeTicksPerWheel(ticksPerWheel);
        HashedWheelBucket[] wheel = new HashedWheelBucket[ticksPerWheel];
        for (int i = 0; i < wheel.length; i ++) {
            wheel[i] = new HashedWheelBucket();
        }
        return wheel;
    }
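
    /**
     * Rounds {@code ticksPerWheel} up to the next power of two so that bucket indices can be
     * computed with a cheap bit mask instead of a modulo operation.
     */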
    private static int normalizeTicksPerWheel(int ticksPerWheel) {
        int normalizedTicksPerWheel = 1;
        while (normalizedTicksPerWheel < ticksPerWheel) {
            normalizedTicksPerWheel <<= 1;
        }
        return normalizedTicksPerWheel;
    }

    /**
     * Starts the background thread explicitly. The background thread will
     * start automatically on demand even if you did not call this method.
     *
     * @throws IllegalStateException if this timer has been
     *                               {@linkplain #stop() stopped} already
     */
    public void start() {
        switch (WORKER_STATE_UPDATER.get(this)) {
            case WORKER_STATE_INIT:
                if (WORKER_STATE_UPDATER.compareAndSet(this, WORKER_STATE_INIT, WORKER_STATE_STARTED)) {
                    workerThread.start();
                }
                break;
            case WORKER_STATE_STARTED:
                break;
            case WORKER_STATE_SHUTDOWN:
                throw new IllegalStateException("cannot be started once stopped");
            default:
                throw new Error("Invalid WorkerState");
        }

        // Wait until the startTime is initialized by the worker.
        while (startTime == 0) {
            try {
                startTimeInitialized.await();
            } catch (InterruptedException ignore) {
                // Ignore - it will be ready very soon.
            }
        }
    }

    @Override
    public Set<Timeout> stop() {
        if (Thread.currentThread() == workerThread) {
            throw new IllegalStateException(
                    HashedWheelTimer.class.getSimpleName() +
                            ".stop() cannot be called from " +
                            TimerTask.class.getSimpleName());
        }

        if (!WORKER_STATE_UPDATER.compareAndSet(this, WORKER_STATE_STARTED, WORKER_STATE_SHUTDOWN)) {
            // workerState can be 0 or 2 at this moment - let it always be 2.
            WORKER_STATE_UPDATER.set(this, WORKER_STATE_SHUTDOWN);

            if (leak != null) {
                leak.close();
            }

            return Collections.emptySet();
        }

        boolean interrupted = false;
        while (workerThread.isAlive()) {
            workerThread.interrupt();
            try {
                workerThread.join(100);
            } catch (InterruptedException ignored) {
                interrupted = true;
            }
        }

        if (interrupted) {
            Thread.currentThread().interrupt();
        }

        if (leak != null) {
            leak.close();
        }
        return worker.unprocessedTimeouts();
    }

    @Override
    public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) {
        if (task == null) {
            throw new NullPointerException("task");
        }
        if (unit == null) {
            throw new NullPointerException("unit");
        }
        start();

        // Add the timeout to the timeout queue which will be processed on the next tick.
        // During processing all the queued HashedWheelTimeouts will be added to the correct HashedWheelBucket.
        long deadline = System.nanoTime() + unit.toNanos(delay) - startTime;
        HashedWheelTimeout timeout = new HashedWheelTimeout(this, task, deadline);
        timeouts.add(timeout);
        return timeout;
    }
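
    /**
     * The worker that drives the wheel. On every tick it transfers newly submitted timeouts from the
     * MPSC queue into their buckets, processes pending cancellations and expires all timeouts of the
     * current bucket whose deadline has been reached.
     */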
    private final class Worker implements Runnable {
        private final Set<Timeout> unprocessedTimeouts = new HashSet<Timeout>();

        private long tick;

        @Override
        public void run() {
            // Initialize the startTime.
            startTime = System.nanoTime();
            if (startTime == 0) {
                // We use 0 as an indicator for the uninitialized value here, so make sure it's not 0 when initialized.
                startTime = 1;
            }

            // Notify the other threads waiting for the initialization at start().
            startTimeInitialized.countDown();

            do {
                final long deadline = waitForNextTick();
                if (deadline > 0) {
                    int idx = (int) (tick & mask);
                    processCancelledTasks();
                    HashedWheelBucket bucket = wheel[idx];
                    transferTimeoutsToBuckets();
                    bucket.expireTimeouts(deadline);
                    tick++;
                }
            } while (WORKER_STATE_UPDATER.get(HashedWheelTimer.this) == WORKER_STATE_STARTED);

            // Fill the unprocessedTimeouts so we can return them from stop() method.
            for (HashedWheelBucket bucket: wheel) {
                bucket.clearTimeouts(unprocessedTimeouts);
            }
            for (;;) {
                HashedWheelTimeout timeout = timeouts.poll();
                if (timeout == null) {
                    break;
                }
                if (!timeout.isCancelled()) {
                    unprocessedTimeouts.add(timeout);
                }
            }
            processCancelledTasks();
        }

        private void transferTimeoutsToBuckets() {
            // transfer only max. 100000 timeouts per tick to prevent a thread from stalling the workerThread
            // when it just adds new timeouts in a loop.
            for (int i = 0; i < 100000; i++) {
                HashedWheelTimeout timeout = timeouts.poll();
                if (timeout == null) {
                    // all processed
                    break;
                }
                if (timeout.state() == HashedWheelTimeout.ST_CANCELLED) {
                    // Was cancelled in the meantime.
                    continue;
                }
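
                // 'calculated' is the absolute tick on which the deadline falls; the bucket index is that
                // tick masked by the wheel size (a power of two), and remainingRounds counts how many full
                // revolutions of the wheel must still pass before this timeout may expire.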
                long calculated = timeout.deadline / tickDuration;
                timeout.remainingRounds = (calculated - tick) / wheel.length;

                final long ticks = Math.max(calculated, tick); // Ensure we don't schedule for the past.
                int stopIndex = (int) (ticks & mask);

                HashedWheelBucket bucket = wheel[stopIndex];
                bucket.addTimeout(timeout);
            }
        }

        private void processCancelledTasks() {
            for (;;) {
                Runnable task = cancelledTimeouts.poll();
                if (task == null) {
                    // all processed
                    break;
                }
                try {
                    task.run();
                } catch (Throwable t) {
                    if (logger.isWarnEnabled()) {
                        logger.warn("An exception was thrown while processing a cancellation task", t);
                    }
                }
            }
        }

        /**
         * Calculates the goal nanoTime from startTime and the current tick number, then waits until
         * that goal has been reached.
         *
         * @return {@code Long.MIN_VALUE} if a shutdown request was received while waiting,
         *         the current time otherwise (with {@code Long.MIN_VALUE} changed by +1)
         */
        private long waitForNextTick() {
            long deadline = tickDuration * (tick + 1);

            for (;;) {
                final long currentTime = System.nanoTime() - startTime;
                long sleepTimeMs = (deadline - currentTime + 999999) / 1000000;

                if (sleepTimeMs <= 0) {
                    if (currentTime == Long.MIN_VALUE) {
                        return -Long.MAX_VALUE;
                    } else {
                        return currentTime;
                    }
                }

                // Check if we run on Windows, as if that's the case we will need
                // to round the sleepTime as a workaround for a bug that only affects
                // the JVM if it runs on Windows.
                //
                // See https://github.com/netty/netty/issues/356
                if (PlatformDependent.isWindows()) {
                    sleepTimeMs = sleepTimeMs / 10 * 10;
                }

                try {
                    Thread.sleep(sleepTimeMs);
                } catch (InterruptedException ignored) {
                    if (WORKER_STATE_UPDATER.get(HashedWheelTimer.this) == WORKER_STATE_SHUTDOWN) {
                        return Long.MIN_VALUE;
                    }
                }
            }
        }
|
2014-05-02 09:52:59 +02:00
|
|
|
|
|
|
|
public Set<Timeout> unprocessedTimeouts() {
|
|
|
|
return Collections.unmodifiableSet(unprocessedTimeouts);
|
|
|
|
}
|
2009-01-19 16:05:04 +01:00
|
|
|
}
|
|
|
|
|
2014-06-03 06:03:36 +02:00
|
|
|
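
    /**
     * A scheduled {@link Timeout}. It also acts as the node of the doubly-linked list used by
     * {@link HashedWheelBucket}, so no extra wrapper object is allocated when a timeout is scheduled.
     */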
    private static final class HashedWheelTimeout extends MpscLinkedQueueNode<Timeout>
            implements Timeout {

        private static final int ST_INIT = 0;
        private static final int ST_CANCELLED = 1;
        private static final int ST_EXPIRED = 2;
        private static final AtomicIntegerFieldUpdater<HashedWheelTimeout> STATE_UPDATER;

        static {
            AtomicIntegerFieldUpdater<HashedWheelTimeout> updater =
                    PlatformDependent.newAtomicIntegerFieldUpdater(HashedWheelTimeout.class, "state");
            if (updater == null) {
                updater = AtomicIntegerFieldUpdater.newUpdater(HashedWheelTimeout.class, "state");
            }
            STATE_UPDATER = updater;
        }

        private final HashedWheelTimer timer;
        private final TimerTask task;
        private final long deadline;

        @SuppressWarnings({"unused", "FieldMayBeFinal", "RedundantFieldInitialization" })
        private volatile int state = ST_INIT;

        // remainingRounds will be calculated and set by Worker.transferTimeoutsToBuckets() before the
        // HashedWheelTimeout is added to the correct HashedWheelBucket.
        long remainingRounds;

        // This will be used to chain timeouts in HashedWheelBucket via a doubly-linked list.
        // As only the workerThread will act on it there is no need for synchronization / volatile.
        HashedWheelTimeout next;
        HashedWheelTimeout prev;

        // The bucket to which the timeout was added
        HashedWheelBucket bucket;

        HashedWheelTimeout(HashedWheelTimer timer, TimerTask task, long deadline) {
            this.timer = timer;
            this.task = task;
            this.deadline = deadline;
        }

        @Override
        public Timer timer() {
            return timer;
        }

        @Override
        public TimerTask task() {
            return task;
        }

        @Override
        public boolean cancel() {
            // Only update the state; it will be removed from the HashedWheelBucket on the next tick.
            if (!compareAndSetState(ST_INIT, ST_CANCELLED)) {
                return false;
            }
            // If a task should be cancelled we put a new Runnable into another queue which will
            // be processed on each tick. So this means that we will have a GC latency of max. 1 tick duration
            // which is good enough. This way we can again make use of our MpscLinkedQueue and so minimize the
            // locking / overhead as much as possible.
            //
            // It is important that we not just add the HashedWheelTimeout itself again as it extends
            // MpscLinkedQueueNode and so may still be used as a tombstone.
            timer.cancelledTimeouts.add(new Runnable() {
                @Override
                public void run() {
                    HashedWheelBucket bucket = HashedWheelTimeout.this.bucket;
                    if (bucket != null) {
                        bucket.remove(HashedWheelTimeout.this);
                    }
                }
            });
            return true;
        }

        public boolean compareAndSetState(int expected, int state) {
            return STATE_UPDATER.compareAndSet(this, expected, state);
        }

        public int state() {
            return state;
        }

        @Override
        public boolean isCancelled() {
            return state() == ST_CANCELLED;
        }

        @Override
        public boolean isExpired() {
            return state() == ST_EXPIRED;
        }

        @Override
        public HashedWheelTimeout value() {
            return this;
        }

        public void expire() {
            if (!compareAndSetState(ST_INIT, ST_EXPIRED)) {
                return;
            }

            try {
                task.run(this);
            } catch (Throwable t) {
                if (logger.isWarnEnabled()) {
                    logger.warn("An exception was thrown by " + TimerTask.class.getSimpleName() + '.', t);
                }
            }
        }

        @Override
        public String toString() {
            final long currentTime = System.nanoTime();
            long remaining = deadline - currentTime + timer.startTime;

            StringBuilder buf = new StringBuilder(192)
                    .append(StringUtil.simpleClassName(this))
                    .append('(')
                    .append("deadline: ");
            if (remaining > 0) {
                buf.append(remaining)
                        .append(" ns later");
            } else if (remaining < 0) {
                buf.append(-remaining)
                        .append(" ns ago");
            } else {
                buf.append("now");
            }

            if (isCancelled()) {
                buf.append(", cancelled");
            }

            return buf.append(", task: ")
                    .append(task())
                    .append(')')
                    .toString();
        }
    }
|
2014-05-02 09:52:59 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Bucket that stores HashedWheelTimeouts. These are stored in a linked-list like datastructure to allow easy
|
|
|
|
* removal of HashedWheelTimeouts in the middle. Also the HashedWheelTimeout act as nodes themself and so no
|
|
|
|
* extra object creation is needed.
|
|
|
|
*/
|
|
|
|
private static final class HashedWheelBucket {
|
|
|
|
// Used for the linked-list datastructure
|
|
|
|
private HashedWheelTimeout head;
|
|
|
|
private HashedWheelTimeout tail;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Add {@link HashedWheelTimeout} to this bucket.
|
|
|
|
*/
|
|
|
|
public void addTimeout(HashedWheelTimeout timeout) {
|
2014-06-05 21:06:46 +02:00
|
|
|
assert timeout.bucket == null;
|
|
|
|
timeout.bucket = this;
|
2014-05-02 09:52:59 +02:00
|
|
|
if (head == null) {
|
|
|
|
head = tail = timeout;
|
|
|
|
} else {
|
|
|
|
tail.next = timeout;
|
|
|
|
timeout.prev = tail;
|
|
|
|
tail = timeout;
|
|
|
|
}
|
|
|
|
}

        /**
         * Expire all {@link HashedWheelTimeout}s for the given {@code deadline}.
         */
        public void expireTimeouts(long deadline) {
            HashedWheelTimeout timeout = head;

            // process all timeouts
            while (timeout != null) {
                boolean remove = false;
                if (timeout.remainingRounds <= 0) {
                    if (timeout.deadline <= deadline) {
                        timeout.expire();
                    } else {
                        // The timeout was placed into a wrong slot. This should never happen.
                        throw new IllegalStateException(String.format(
                                "timeout.deadline (%d) > deadline (%d)", timeout.deadline, deadline));
                    }
                    remove = true;
                } else if (timeout.isCancelled()) {
                    remove = true;
                } else {
                    timeout.remainingRounds --;
                }
                // store reference to next as we may null out timeout.next in the remove block.
                HashedWheelTimeout next = timeout.next;
                if (remove) {
                    remove(timeout);
                }
                timeout = next;
            }
        }

        public void remove(HashedWheelTimeout timeout) {
            HashedWheelTimeout next = timeout.next;
            // remove timeout that was either processed or cancelled by updating the linked-list
            if (timeout.prev != null) {
                timeout.prev.next = next;
            }
            if (timeout.next != null) {
                timeout.next.prev = timeout.prev;
            }

            if (timeout == head) {
                // if timeout is also the tail we need to adjust the entry too
                if (timeout == tail) {
                    tail = null;
                    head = null;
                } else {
                    head = next;
                }
            } else if (timeout == tail) {
                // if the timeout is the tail modify the tail to be the prev node.
                tail = timeout.prev;
            }
            // null out prev, next and bucket to allow for GC.
            timeout.prev = null;
            timeout.next = null;
            timeout.bucket = null;
        }

        /**
         * Clear this bucket and return all not expired / cancelled {@link Timeout}s.
         */
        public void clearTimeouts(Set<Timeout> set) {
            for (;;) {
                HashedWheelTimeout timeout = pollTimeout();
                if (timeout == null) {
                    return;
                }
                if (timeout.isExpired() || timeout.isCancelled()) {
                    continue;
                }
                set.add(timeout);
            }
        }
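
        /**
         * Unlinks and returns the first timeout of this bucket, or {@code null} if the bucket is empty.
         */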
        private HashedWheelTimeout pollTimeout() {
            HashedWheelTimeout head = this.head;
            if (head == null) {
                return null;
            }
            HashedWheelTimeout next = head.next;
            if (next == null) {
                tail = this.head = null;
            } else {
                this.head = next;
                next.prev = null;
            }

            // null out prev, next and bucket to allow for GC.
            head.next = null;
            head.prev = null;
            head.bucket = null;
            return head;
        }
    }
}