Minimize memory footprint of HashedWheelTimer and reduce context switching
Motivation:

At the moment there are two issues with HashedWheelTimer:
* its memory footprint is quite heavy (about 250 KB for an empty instance)
* the way added Timeouts are handled is inefficient in terms of locking, so a lot of context switching and contention can occur

Modification:

Rewrite HashedWheelTimer to store the submitted Timeouts in an optimized bucket implementation and to hand them over via an MPSC queue. Volatile writes are thereby reduced to a minimum, and the memory footprint of the buckets themselves shrinks considerably because each bucket is a double-linked list of the timeouts themselves. Beside this, we use Atomic*FieldUpdater wherever possible to further improve memory footprint and performance.

Result:

Lower memory footprint and better performance.
This commit is contained in:
parent: d69ad2f85c
commit: 2f7d60f234
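The core of the change is visible in the newTimeout() and Worker.run() hunks below: producer threads only append to a multi-producer single-consumer queue, and the single worker thread drains that queue into the wheel once per tick. Here is a minimal sketch of that handover pattern; all names are illustrative, and a JDK ConcurrentLinkedQueue stands in for Netty's MPSC queue to keep the sketch self-contained:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

// Minimal sketch of the handover pattern, not the Netty implementation:
// many producer threads enqueue, one worker thread drains per tick.
final class HandoverSketch {
    // Netty uses an MPSC queue here; ConcurrentLinkedQueue keeps the sketch self-contained.
    private final Queue<Runnable> pending = new ConcurrentLinkedQueue<Runnable>();

    // Called from any thread: a single queue append, no locks on the wheel.
    void submit(Runnable timeout) {
        pending.add(timeout);
    }

    // Called only from the worker thread, once per tick: drain and place
    // into buckets without any synchronization on the buckets themselves.
    void drainOneTick() {
        for (Runnable timeout; (timeout = pending.poll()) != null;) {
            timeout.run(); // stand-in for "add to the right HashedWheelBucket"
        }
    }
}

Because the wheel buckets are only ever touched by the worker thread, they need no locks or volatile fields at all.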
io/netty/util/HashedWheelTimer.java

@@ -15,24 +15,21 @@
  */
 package io.netty.util;
 
+import io.netty.util.internal.MpscLinkedQueue;
 import io.netty.util.internal.PlatformDependent;
 import io.netty.util.internal.StringUtil;
 import io.netty.util.internal.logging.InternalLogger;
 import io.netty.util.internal.logging.InternalLoggerFactory;
 
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
+import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 
 /**
  * A {@link Timer} optimized for approximated I/O timeout scheduling.
@@ -84,22 +81,32 @@ public class HashedWheelTimer implements Timer {
             new ResourceLeakDetector<HashedWheelTimer>(
                     HashedWheelTimer.class, 1, Runtime.getRuntime().availableProcessors() * 4);
 
+    private static final AtomicIntegerFieldUpdater<HashedWheelTimer> WORKER_STATE_UPDATER;
+    static {
+        AtomicIntegerFieldUpdater<HashedWheelTimer> workerStateUpdater =
+                PlatformDependent.newAtomicIntegerFieldUpdater(HashedWheelTimer.class, "workerState");
+        if (workerStateUpdater == null) {
+            workerStateUpdater = AtomicIntegerFieldUpdater.newUpdater(HashedWheelTimer.class, "workerState");
+        }
+        WORKER_STATE_UPDATER = workerStateUpdater;
+    }
+
     private final ResourceLeak leak;
     private final Worker worker = new Worker();
-    final Thread workerThread;
+    private final Thread workerThread;
 
     public static final int WORKER_STATE_INIT = 0;
     public static final int WORKER_STATE_STARTED = 1;
     public static final int WORKER_STATE_SHUTDOWN = 2;
-    final AtomicInteger workerState = new AtomicInteger(); // 0 - init, 1 - started, 2 - shut down
+    @SuppressWarnings({ "unused", "FieldMayBeFinal", "RedundantFieldInitialization" })
+    private volatile int workerState = WORKER_STATE_INIT; // 0 - init, 1 - started, 2 - shut down
 
-    final long tickDuration;
-    final Set<HashedWheelTimeout>[] wheel;
-    final int mask;
-    final ReadWriteLock lock = new ReentrantReadWriteLock();
-    final CountDownLatch startTimeInitialized = new CountDownLatch(1);
-    volatile long startTime;
-    volatile long tick;
+    private final long tickDuration;
+    private final HashedWheelBucket[] wheel;
+    private final int mask;
+    private final CountDownLatch startTimeInitialized = new CountDownLatch(1);
+    private final Queue<HashedWheelTimeout> timeouts = PlatformDependent.newMpscQueue();
+    private volatile long startTime;
 
     /**
      * Creates a new timer with the default thread factory
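The static-initializer block above replaces a per-instance AtomicInteger with a plain volatile int plus one shared AtomicIntegerFieldUpdater: the updater is a static singleton, so each timer instance pays only for the int field instead of a separate AtomicInteger object. PlatformDependent.newAtomicIntegerFieldUpdater is Netty's own variant and, as the fallback branch shows, may return null, in which case the JDK updater is used. A minimal sketch of the pattern with a hypothetical class:

import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

// Hypothetical example class, not part of the commit.
final class StateHolder {
    // One shared updater serves all instances of StateHolder.
    private static final AtomicIntegerFieldUpdater<StateHolder> STATE =
            AtomicIntegerFieldUpdater.newUpdater(StateHolder.class, "state");

    // Each instance carries only a plain volatile int, no extra object.
    private volatile int state; // 0 - init, 1 - started, 2 - shut down

    boolean start() {
        // Same CAS semantics as AtomicInteger, without the per-instance allocation.
        return STATE.compareAndSet(this, 0, 1);
    }
}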
@@ -209,13 +216,13 @@ public class HashedWheelTimer implements Timer {
                     "tickDuration: %d (expected: 0 < tickDuration in nanos < %d",
                     tickDuration, Long.MAX_VALUE / wheel.length));
         }
 
         workerThread = threadFactory.newThread(worker);
 
         leak = leakDetector.open(this);
     }
 
     @SuppressWarnings("unchecked")
-    private static Set<HashedWheelTimeout>[] createWheel(int ticksPerWheel) {
+    private static HashedWheelBucket[] createWheel(int ticksPerWheel) {
         if (ticksPerWheel <= 0) {
             throw new IllegalArgumentException(
                     "ticksPerWheel must be greater than 0: " + ticksPerWheel);
@@ -226,10 +233,9 @@ public class HashedWheelTimer implements Timer {
         }
 
         ticksPerWheel = normalizeTicksPerWheel(ticksPerWheel);
-        Set<HashedWheelTimeout>[] wheel = new Set[ticksPerWheel];
+        HashedWheelBucket[] wheel = new HashedWheelBucket[ticksPerWheel];
         for (int i = 0; i < wheel.length; i ++) {
-            wheel[i] = Collections.newSetFromMap(
-                    PlatformDependent.<HashedWheelTimeout, Boolean>newConcurrentHashMap());
+            wheel[i] = new HashedWheelBucket();
         }
         return wheel;
     }
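createWheel() still relies on normalizeTicksPerWheel() rounding the wheel length up to a power of two, which is what makes the mask-based indexing seen later (ticks & mask) valid. A small illustration of why that works; this is a sketch, not the Netty code:

// With a power-of-two length, (ticks & mask) == (ticks % length),
// but without the cost of a division on every tick.
public final class WheelIndexing {
    static int normalize(int ticksPerWheel) {
        int n = 1;
        while (n < ticksPerWheel) {
            n <<= 1; // round up to the next power of two
        }
        return n;
    }

    public static void main(String[] args) {
        int length = normalize(100);        // 128
        int mask = length - 1;              // 127 = 0b1111111
        long tick = 600;
        System.out.println(tick & mask);    // 88
        System.out.println(tick % length);  // 88, same bucket
    }
}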
@@ -250,9 +256,9 @@ public class HashedWheelTimer implements Timer {
      *         {@linkplain #stop() stopped} already
      */
     public void start() {
-        switch (workerState.get()) {
+        switch (WORKER_STATE_UPDATER.get(this)) {
             case WORKER_STATE_INIT:
-                if (workerState.compareAndSet(WORKER_STATE_INIT, WORKER_STATE_STARTED)) {
+                if (WORKER_STATE_UPDATER.compareAndSet(this, WORKER_STATE_INIT, WORKER_STATE_STARTED)) {
                     workerThread.start();
                 }
                 break;
@@ -283,9 +289,9 @@ public class HashedWheelTimer implements Timer {
                     TimerTask.class.getSimpleName());
         }
 
-        if (!workerState.compareAndSet(WORKER_STATE_STARTED, WORKER_STATE_SHUTDOWN)) {
+        if (!WORKER_STATE_UPDATER.compareAndSet(this, WORKER_STATE_STARTED, WORKER_STATE_SHUTDOWN)) {
             // workerState can be 0 or 2 at this moment - let it always be 2.
-            workerState.set(WORKER_STATE_SHUTDOWN);
+            WORKER_STATE_UPDATER.set(this, WORKER_STATE_SHUTDOWN);
 
             if (leak != null) {
                 leak.close();
@@ -311,49 +317,31 @@ public class HashedWheelTimer implements Timer {
         if (leak != null) {
             leak.close();
         }
-
-        Set<Timeout> unprocessedTimeouts = new HashSet<Timeout>();
-        for (Set<HashedWheelTimeout> bucket: wheel) {
-            unprocessedTimeouts.addAll(bucket);
-            bucket.clear();
-        }
-
-        return Collections.unmodifiableSet(unprocessedTimeouts);
+        return worker.unprocessedTimeouts();
     }
 
     @Override
     public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) {
-        start();
-
         if (task == null) {
             throw new NullPointerException("task");
         }
         if (unit == null) {
             throw new NullPointerException("unit");
         }
+        start();
+
+        // Add the timeout to the timeout queue which will be processed on the next tick.
+        // During processing all the queued HashedWheelTimeouts will be added to the correct HashedWheelBucket.
         long deadline = System.nanoTime() + unit.toNanos(delay) - startTime;
-
-        // Add the timeout to the wheel.
-        HashedWheelTimeout timeout;
-        lock.readLock().lock();
-        try {
-            timeout = new HashedWheelTimeout(task, deadline);
-            if (workerState.get() == WORKER_STATE_SHUTDOWN) {
-                throw new IllegalStateException("Cannot enqueue after shutdown");
-            }
-            wheel[timeout.stopIndex].add(timeout);
-        } finally {
-            lock.readLock().unlock();
-        }
-
+        HashedWheelTimeout timeout = new HashedWheelTimeout(this, task, deadline);
+        timeouts.add(timeout);
         return timeout;
     }
 
     private final class Worker implements Runnable {
+        private final Set<Timeout> unprocessedTimeouts = new HashSet<Timeout>();
 
-        Worker() {
-        }
+        private long tick;
 
         @Override
         public void run() {
@@ -367,68 +355,50 @@ public class HashedWheelTimer implements Timer {
             // Notify the other threads waiting for the initialization at start().
             startTimeInitialized.countDown();
 
-            List<HashedWheelTimeout> expiredTimeouts = new ArrayList<HashedWheelTimeout>();
-
             do {
                 final long deadline = waitForNextTick();
                 if (deadline > 0) {
-                    fetchExpiredTimeouts(expiredTimeouts, deadline);
-                    notifyExpiredTimeouts(expiredTimeouts);
+                    transferTimeoutsToBuckets();
+                    HashedWheelBucket bucket =
+                            wheel[(int) (tick & mask)];
+                    bucket.expireTimeouts(deadline);
+                    tick++;
                 }
-            } while (workerState.get() == WORKER_STATE_STARTED);
-        }
-
-        private void fetchExpiredTimeouts(
-                List<HashedWheelTimeout> expiredTimeouts, long deadline) {
-
-            // Find the expired timeouts and decrease the round counter
-            // if necessary. Note that we don't send the notification
-            // immediately to make sure the listeners are called without
-            // an exclusive lock.
-            lock.writeLock().lock();
-            try {
-                fetchExpiredTimeouts(expiredTimeouts, wheel[(int) (tick & mask)].iterator(), deadline);
-            } finally {
-                // Note that the tick is updated only while the writer lock is held,
-                // so that newTimeout() and consequently new HashedWheelTimeout() never see an old value
-                // while the reader lock is held.
-                tick ++;
-                lock.writeLock().unlock();
-            }
-        }
-
-        private void fetchExpiredTimeouts(
-                List<HashedWheelTimeout> expiredTimeouts,
-                Iterator<HashedWheelTimeout> i, long deadline) {
-
-            while (i.hasNext()) {
-                HashedWheelTimeout timeout = i.next();
-                if (timeout.remainingRounds <= 0) {
-                    i.remove();
-                    if (timeout.deadline <= deadline) {
-                        expiredTimeouts.add(timeout);
-                    } else {
-                        // The timeout was placed into a wrong slot. This should never happen.
-                        throw new Error(String.format(
-                                "timeout.deadline (%d) > deadline (%d)", timeout.deadline, deadline));
-                    }
-                } else {
-                    timeout.remainingRounds --;
-                }
-            }
-        }
-
-        private void notifyExpiredTimeouts(
-                List<HashedWheelTimeout> expiredTimeouts) {
-            // Notify the expired timeouts.
-            for (int i = expiredTimeouts.size() - 1; i >= 0; i --) {
-                expiredTimeouts.get(i).expire();
-            }
-
-            // Clean up the temporary list.
-            expiredTimeouts.clear();
-        }
+            } while (WORKER_STATE_UPDATER.get(HashedWheelTimer.this) == WORKER_STATE_STARTED);
+
+            // Fill the unprocessedTimeouts so we can return them from stop() method.
+            for (HashedWheelBucket bucket: wheel) {
+                bucket.clearTimeouts(unprocessedTimeouts);
+            }
+            for (;;) {
+                HashedWheelTimeout timeout = timeouts.poll();
+                if (timeout == null) {
+                    break;
+                }
+                unprocessedTimeouts.add(timeout);
+            }
+        }
+
+        private void transferTimeoutsToBuckets() {
+            // transfer only max. 100000 timeouts per tick to prevent a thread to stale the workerThread when it just
+            // adds new timeouts in a loop.
+            for (int i = 0; i < 100000; i++) {
+                HashedWheelTimeout timeout = timeouts.poll();
+                if (timeout == null) {
+                    // all processed
+                    break;
+                }
+                long calculated = timeout.deadline / tickDuration;
+                long remainingRounds = (calculated - tick) / wheel.length;
+                timeout.remainingRounds = remainingRounds;
+
+                final long ticks = Math.max(calculated, tick); // Ensure we don't schedule for past.
+                int stopIndex = (int) (ticks & mask);
+
+                HashedWheelBucket bucket = wheel[stopIndex];
+                bucket.addTimeout(timeout);
+            }
+        }
 
         /**
          * calculate goal nanoTime from startTime and current tick number,
         * then wait until that goal has been reached.
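To make transferTimeoutsToBuckets() concrete, here is its placement arithmetic applied to example numbers; the values are illustrative, not defaults:

// Illustrative values: 100 ms ticks, 512-slot wheel, worker at tick 3.
public final class PlacementExample {
    public static void main(String[] args) {
        long tickDuration = 100_000_000L;            // tick length in nanoseconds
        int wheelLength = 512;
        int mask = wheelLength - 1;                  // valid because 512 is a power of two
        long tick = 3;                               // ticks processed so far

        long deadline = 60_000_000_000L;             // timeout due 60 s after startTime
        long calculated = deadline / tickDuration;   // 600: absolute tick on which it expires
        long remainingRounds = (calculated - tick) / wheelLength; // 1 full revolution to wait
        long ticks = Math.max(calculated, tick);     // never schedule into the past
        int stopIndex = (int) (ticks & mask);        // 600 & 511 = 88: target bucket

        System.out.println(remainingRounds + " round(s), bucket " + stopIndex);
    }
}

The worker visits bucket 88 at ticks 88, 600, 1112, and so on; the timeout survives the visit at tick 88 (remainingRounds drops to 0) and expires on the visit at tick 600.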
@@ -462,39 +432,60 @@ public class HashedWheelTimer implements Timer {
                 try {
                     Thread.sleep(sleepTimeMs);
                 } catch (InterruptedException e) {
-                    if (workerState.get() == WORKER_STATE_SHUTDOWN) {
+                    if (WORKER_STATE_UPDATER.get(HashedWheelTimer.this) == WORKER_STATE_SHUTDOWN) {
                         return Long.MIN_VALUE;
                     }
                 }
             }
         }
+
+        public Set<Timeout> unprocessedTimeouts() {
+            return Collections.unmodifiableSet(unprocessedTimeouts);
+        }
     }
 
-    private final class HashedWheelTimeout implements Timeout {
+    private static final class HashedWheelTimeout extends MpscLinkedQueue.Node<Timeout>
+            implements Timeout {
 
         private static final int ST_INIT = 0;
         private static final int ST_CANCELLED = 1;
         private static final int ST_EXPIRED = 2;
+        private static final AtomicIntegerFieldUpdater<HashedWheelTimeout> STATE_UPDATER;
+
+        static {
+            AtomicIntegerFieldUpdater<HashedWheelTimeout> updater =
+                    PlatformDependent.newAtomicIntegerFieldUpdater(HashedWheelTimeout.class, "state");
+            if (updater == null) {
+                updater = AtomicIntegerFieldUpdater.newUpdater(HashedWheelTimeout.class, "state");
+            }
+            STATE_UPDATER = updater;
+        }
+
+        private final HashedWheelTimer timer;
         private final TimerTask task;
-        final long deadline;
-        final int stopIndex;
-        volatile long remainingRounds;
-        private final AtomicInteger state = new AtomicInteger(ST_INIT);
+        private final long deadline;
 
-        HashedWheelTimeout(TimerTask task, long deadline) {
+        @SuppressWarnings({"unused", "FieldMayBeFinal", "RedundantFieldInitialization" })
+        private volatile int state = ST_INIT;
+
+        // remainingRounds will be calculated and set by Worker.transferTimeoutsToBuckets() before the
+        // HashedWheelTimeout will be added to the correct HashedWheelBucket.
+        long remainingRounds;
+
+        // This will be used to chain timeouts in HashedWheelTimerBucket via a double-linked-list.
+        // As only the workerThread will act on it there is no need for synchronization / volatile.
+        HashedWheelTimeout next;
+        HashedWheelTimeout prev;
+
+        HashedWheelTimeout(HashedWheelTimer timer, TimerTask task, long deadline) {
+            this.timer = timer;
             this.task = task;
             this.deadline = deadline;
-
-            long calculated = deadline / tickDuration;
-            final long ticks = Math.max(calculated, tick); // Ensure we don't schedule for past.
-            stopIndex = (int) (ticks & mask);
-            remainingRounds = (calculated - tick) / wheel.length;
         }
 
         @Override
         public Timer timer() {
-            return HashedWheelTimer.this;
+            return timer;
         }
 
         @Override
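Note what the new field layout buys: HashedWheelTimeout now carries its own prev/next pointers, so it acts as the node of an intrusive double-linked list. The buckets added further below can therefore link and unlink timeouts without allocating wrapper nodes, and removing a known node is O(1). A reduced sketch of that unlink step follows; the class is hypothetical, and the real bucket code additionally patches its head/tail fields:

// Hypothetical reduced example of the intrusive-list unlink used by the buckets.
final class IntrusiveNode {
    IntrusiveNode prev;
    IntrusiveNode next;

    // Single-threaded unlink, as only the worker thread touches these links.
    void unlink() {
        if (prev != null) {
            prev.next = next;
        }
        if (next != null) {
            next.prev = prev;
        }
        // null out the links so neighbours can be garbage-collected
        prev = null;
        next = null;
    }
}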
@@ -504,26 +495,30 @@ public class HashedWheelTimer implements Timer {
 
         @Override
         public boolean cancel() {
-            if (!state.compareAndSet(ST_INIT, ST_CANCELLED)) {
+            // only update the state it will be removed from HashedWheelBucket on next tick.
+            if (!STATE_UPDATER.compareAndSet(this, ST_INIT, ST_CANCELLED)) {
                 return false;
             }
-
-            wheel[stopIndex].remove(this);
             return true;
         }
 
         @Override
         public boolean isCancelled() {
-            return state.get() == ST_CANCELLED;
+            return STATE_UPDATER.get(this) == ST_CANCELLED;
         }
 
         @Override
         public boolean isExpired() {
-            return state.get() != ST_INIT;
+            return STATE_UPDATER.get(this) != ST_INIT;
+        }
+
+        @Override
+        public HashedWheelTimeout value() {
+            return this;
         }
 
         public void expire() {
-            if (!state.compareAndSet(ST_INIT, ST_EXPIRED)) {
+            if (!STATE_UPDATER.compareAndSet(this, ST_INIT, ST_EXPIRED)) {
                 return;
             }
 
@@ -539,7 +534,7 @@ public class HashedWheelTimer implements Timer {
         @Override
         public String toString() {
             final long currentTime = System.nanoTime();
-            long remaining = deadline - currentTime + startTime;
+            long remaining = deadline - currentTime + timer.startTime;
 
             StringBuilder buf = new StringBuilder(192);
             buf.append(StringUtil.simpleClassName(this));
@@ -566,4 +561,117 @@ public class HashedWheelTimer implements Timer {
             return buf.append(')').toString();
         }
     }
+
+    /**
+     * Bucket that stores HashedWheelTimeouts. These are stored in a linked-list like datastructure to allow easy
+     * removal of HashedWheelTimeouts in the middle. Also the HashedWheelTimeout act as nodes themself and so no
+     * extra object creation is needed.
+     */
+    private static final class HashedWheelBucket {
+
+        // Used for the linked-list datastructure
+        private HashedWheelTimeout head;
+        private HashedWheelTimeout tail;
+
+        /**
+         * Add {@link HashedWheelTimeout} to this bucket.
+         */
+        public void addTimeout(HashedWheelTimeout timeout) {
+            if (head == null) {
+                head = tail = timeout;
+            } else {
+                tail.next = timeout;
+                timeout.prev = tail;
+                tail = timeout;
+            }
+        }
+
+        /**
+         * Expire all {@link HashedWheelTimeout}s for the given {@code deadline}.
+         */
+        public void expireTimeouts(long deadline) {
+            HashedWheelTimeout timeout = head;
+
+            // process all timeouts
+            while (timeout != null) {
+                boolean remove = false;
+                if (timeout.remainingRounds <= 0) {
+                    if (timeout.deadline <= deadline) {
+                        timeout.expire();
+                    } else {
+                        // The timeout was placed into a wrong slot. This should never happen.
+                        throw new IllegalStateException(String.format(
+                                "timeout.deadline (%d) > deadline (%d)", timeout.deadline, deadline));
+                    }
+                    remove = true;
+                } else if (timeout.isCancelled()) {
+                    remove = true;
+                } else {
+                    timeout.remainingRounds --;
+                }
+                // store reference to next as we may null out timeout.next in the remove block.
+                HashedWheelTimeout next = timeout.next;
+                if (remove) {
+                    // remove timeout that was either processed or cancelled by updating the linked-list
+                    if (timeout.prev != null) {
+                        timeout.prev.next = timeout.next;
+                    }
+                    if (timeout.next != null) {
+                        timeout.next.prev = timeout.prev;
+                    }
+
+                    if (timeout == head) {
+                        // if timeout is head we need to replace the head with the next entry
+                        head = next;
+                        if (timeout == tail) {
+                            // if timeout is also the tail we need to adjust the entry too
+                            tail = timeout.next;
+                        }
+                    } else if (timeout == tail) {
+                        // if the timeout is the tail modify the tail to be the prev node.
+                        tail = timeout.prev;
+                    }
+                    // null out prev and next to allow for GC.
+                    timeout.prev = null;
+                    timeout.next = null;
+                }
+                timeout = next;
+            }
+        }
+
+        /**
+         * Clear this bucket and return all not expired / cancelled {@link Timeout}s.
+         */
+        public void clearTimeouts(Set<Timeout> set) {
+            for (;;) {
+                HashedWheelTimeout timeout = pollTimeout();
+                if (timeout == null) {
+                    return;
+                }
+                if (timeout.isExpired() || timeout.isCancelled()) {
+                    continue;
+                }
+                set.add(timeout);
+            }
+        }
+
+        private HashedWheelTimeout pollTimeout() {
+            HashedWheelTimeout head = this.head;
+            if (head == null) {
+                return null;
+            }
+            HashedWheelTimeout next = head.next;
+            if (next == null) {
+                tail = this.head = null;
+            } else {
+                this.head = next;
+                next.prev = null;
+            }
+
+            // null out prev and next to allow for GC.
+            head.next = null;
+            head.prev = null;
+            return head;
+        }
+    }
 }
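None of this changes the public Timer API; for orientation, here is a small usage example of the rewritten timer (the task body and delay are arbitrary):

import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import io.netty.util.Timer;
import io.netty.util.TimerTask;

import java.util.Set;
import java.util.concurrent.TimeUnit;

public final class TimerExample {
    public static void main(String[] args) {
        Timer timer = new HashedWheelTimer();

        // Schedule a one-shot task; this now only enqueues onto the MPSC queue.
        Timeout timeout = timer.newTimeout(new TimerTask() {
            @Override
            public void run(Timeout t) {
                System.out.println("timed out");
            }
        }, 5, TimeUnit.SECONDS);

        // cancel() only flips the state; the worker unlinks the timeout
        // from its bucket on the next tick.
        timeout.cancel();

        // stop() returns the timeouts that never ran.
        Set<Timeout> unprocessed = timer.stop();
        System.out.println(unprocessed.size() + " unprocessed");
    }
}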
io/netty/util/internal/MpscLinkedQueue.java

@@ -39,7 +39,7 @@ import java.util.concurrent.atomic.AtomicReference;
  *
  */
 @SuppressWarnings("serial")
-final class MpscLinkedQueue extends AtomicReference<OneTimeTask> implements Queue<Runnable> {
+public final class MpscLinkedQueue<T> extends AtomicReference<MpscLinkedQueue.Node<T>> implements Queue<T> {
     private static final long tailOffset;
 
     static {
@@ -54,74 +54,75 @@ final class MpscLinkedQueue extends AtomicReference<OneTimeTask> implements Queu
     // Extends AtomicReference for the "head" slot (which is the one that is appended to)
     // since Unsafe does not expose XCHG operation intrinsically
     @SuppressWarnings({ "unused", "FieldMayBeFinal" })
-    private volatile OneTimeTask tail;
+    private volatile Node<T> tail;
 
     MpscLinkedQueue() {
-        final OneTimeTask task = new OneTimeTaskAdapter(null);
+        final Node<T> task = new DefaultNode<T>(null);
         tail = task;
         set(task);
     }
 
+    @SuppressWarnings("unchecked")
     @Override
-    public boolean add(Runnable runnable) {
-        if (runnable instanceof OneTimeTask) {
-            OneTimeTask node = (OneTimeTask) runnable;
+    public boolean add(T value) {
+        if (value instanceof Node) {
+            Node<T> node = (Node<T>) value;
             node.setNext(null);
             getAndSet(node).setNext(node);
         } else {
-            final OneTimeTask n = new OneTimeTaskAdapter(runnable);
+            final Node<T> n = new DefaultNode<T>(value);
             getAndSet(n).setNext(n);
         }
         return true;
     }
 
     @Override
-    public boolean offer(Runnable runnable) {
-        return add(runnable);
+    public boolean offer(T value) {
+        return add(value);
     }
 
     @Override
-    public Runnable remove() {
-        Runnable task = poll();
-        if (task == null) {
+    public T remove() {
+        T v = poll();
+        if (v == null) {
             throw new NoSuchElementException();
         }
-        return task;
+        return v;
     }
 
     @Override
-    public Runnable poll() {
-        final OneTimeTask next = peekTask();
+    public T poll() {
+        final Node<T> next = peekNode();
         if (next == null) {
             return null;
         }
-        final OneTimeTask ret = next;
+        final Node<T> ret = next;
         PlatformDependent.putOrderedObject(this, tailOffset, next);
-        return unwrapIfNeeded(ret);
+        return ret.value();
     }
 
     @Override
-    public Runnable element() {
-        final OneTimeTask next = peekTask();
+    public T element() {
+        final Node<T> next = peekNode();
         if (next == null) {
             throw new NoSuchElementException();
         }
-        return unwrapIfNeeded(next);
+        return next.value();
     }
 
     @Override
-    public Runnable peek() {
-        final OneTimeTask next = peekTask();
+    public T peek() {
+        final Node<T> next = peekNode();
         if (next == null) {
             return null;
         }
-        return unwrapIfNeeded(next);
+        return next.value();
     }
 
     @Override
     public int size() {
         int count = 0;
-        OneTimeTask n = peekTask();
+        Node<T> n = peekNode();
         for (;;) {
             if (n == null) {
                 break;
@@ -133,10 +134,10 @@ final class MpscLinkedQueue extends AtomicReference<OneTimeTask> implements Queu
     }
 
     @SuppressWarnings("unchecked")
-    private OneTimeTask peekTask() {
+    private Node<T> peekNode() {
         for (;;) {
-            final OneTimeTask tail = (OneTimeTask) PlatformDependent.getObjectVolatile(this, tailOffset);
-            final OneTimeTask next = tail.next();
+            final Node<T> tail = (Node<T>) PlatformDependent.getObjectVolatile(this, tailOffset);
+            final Node<T> next = tail.next();
             if (next != null || get() == tail) {
                 return next;
             }
@@ -150,12 +151,12 @@ final class MpscLinkedQueue extends AtomicReference<OneTimeTask> implements Queu
 
     @Override
     public boolean contains(Object o) {
-        OneTimeTask n = peekTask();
+        Node<T> n = peekNode();
         for (;;) {
             if (n == null) {
                 break;
             }
-            if (unwrapIfNeeded(n) == o) {
+            if (n.value() == o) {
                 return true;
             }
             n = n.next();
@@ -164,7 +165,7 @@ final class MpscLinkedQueue extends AtomicReference<OneTimeTask> implements Queu
     }
 
     @Override
-    public Iterator<Runnable> iterator() {
+    public Iterator<T> iterator() {
         throw new UnsupportedOperationException();
     }
 
@@ -194,8 +195,8 @@ final class MpscLinkedQueue extends AtomicReference<OneTimeTask> implements Queu
     }
 
     @Override
-    public boolean addAll(Collection<? extends Runnable> c) {
-        for (Runnable r: c) {
+    public boolean addAll(Collection<? extends T> c) {
+        for (T r: c) {
             add(r);
         }
         return false;
@@ -220,26 +221,50 @@ final class MpscLinkedQueue extends AtomicReference<OneTimeTask> implements Queu
         }
     }
 
-    /**
-     * Unwrap {@link OneTimeTask} if needed and so return the proper queued task.
-     */
-    private static Runnable unwrapIfNeeded(OneTimeTask task) {
-        if (task instanceof OneTimeTaskAdapter) {
-            return ((OneTimeTaskAdapter) task).task;
-        }
-        return task;
-    }
-
-    private static final class OneTimeTaskAdapter extends OneTimeTask {
-        private final Runnable task;
+    private static final class DefaultNode<T> extends Node<T> {
+        private final T value;
 
-        OneTimeTaskAdapter(Runnable task) {
-            this.task = task;
+        DefaultNode(T value) {
+            this.value = value;
         }
 
         @Override
-        public void run() {
-            task.run();
+        public T value() {
+            return value;
         }
     }
+
+    public abstract static class Node<T> {
+
+        private static final long nextOffset;
+
+        static {
+            if (PlatformDependent0.hasUnsafe()) {
+                try {
+                    nextOffset = PlatformDependent.objectFieldOffset(
+                            Node.class.getDeclaredField("tail"));
+                } catch (Throwable t) {
+                    throw new ExceptionInInitializerError(t);
+                }
+            } else {
+                nextOffset = -1;
+            }
+        }
+
+        @SuppressWarnings("unused")
+        private volatile Node<T> tail;
+
+        // Only use from MpscLinkedQueue and so we are sure Unsafe is present
+        @SuppressWarnings("unchecked")
+        final Node<T> next() {
+            return (Node<T>) PlatformDependent.getObjectVolatile(this, nextOffset);
+        }
+
+        // Only use from MpscLinkedQueue and so we are sure Unsafe is present
+        final void setNext(final Node<T> newNext) {
+            PlatformDependent.putOrderedObject(this, nextOffset, newNext);
+        }
+
+        public abstract T value();
+    }
 }
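The enqueue above hinges on one atomic exchange: the queue extends AtomicReference so that getAndSet(node) atomically claims the append slot, after which the producer links the previous node to the new one. Here is a self-contained sketch of that classic stub-node MPSC design; it is simplified, and the real class goes through Unsafe and PlatformDependent for the next-pointer and tail writes:

import java.util.concurrent.atomic.AtomicReference;

// Simplified multi-producer single-consumer queue built on one atomic exchange.
@SuppressWarnings("serial")
final class MpscSketch<T> extends AtomicReference<MpscSketch.Node<T>> {

    static final class Node<T> {
        final T value;
        volatile Node<T> next;
        Node(T value) {
            this.value = value;
        }
    }

    private Node<T> tail; // only read and advanced by the single consumer

    MpscSketch() {
        Node<T> stub = new Node<T>(null);
        tail = stub;
        set(stub); // the AtomicReference itself holds the last appended node
    }

    // Any number of threads may call this concurrently.
    void add(T value) {
        Node<T> n = new Node<T>(value);
        // getAndSet is an atomic exchange: claim the append slot, then link.
        getAndSet(n).next = n;
    }

    // Must only be called from the single consumer thread.
    T poll() {
        Node<T> next = tail.next; // null until the producer's link is published
        if (next == null) {
            return null;
        }
        tail = next;
        return next.value;
    }
}

Because the exchange never spins or blocks, producers do not contend with each other or with the consumer, which is exactly what removes the lock-induced context switching described in the commit message.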
io/netty/util/internal/OneTimeTask.java

@@ -23,34 +23,10 @@ import io.netty.util.concurrent.EventExecutor;
  *
  * <strong>It is important this will not be reused. After submitted it is not allowed to get submitted again!</strong>
  */
-public abstract class OneTimeTask implements Runnable {
-
-    private static final long nextOffset;
-
-    static {
-        if (PlatformDependent0.hasUnsafe()) {
-            try {
-                nextOffset = PlatformDependent.objectFieldOffset(
-                        OneTimeTask.class.getDeclaredField("tail"));
-            } catch (Throwable t) {
-                throw new ExceptionInInitializerError(t);
-            }
-        } else {
-            nextOffset = -1;
-        }
-    }
-
-    @SuppressWarnings("unused")
-    private volatile OneTimeTask tail;
-
-    // Only use from MpscLinkedQueue and so we are sure Unsafe is present
-    @SuppressWarnings("unchecked")
-    final OneTimeTask next() {
-        return (OneTimeTask) PlatformDependent.getObjectVolatile(this, nextOffset);
-    }
-
-    // Only use from MpscLinkedQueue and so we are sure Unsafe is present
-    final void setNext(final OneTimeTask newNext) {
-        PlatformDependent.putOrderedObject(this, nextOffset, newNext);
-    }
+public abstract class OneTimeTask extends MpscLinkedQueue.Node<Runnable> implements Runnable {
+
+    @Override
+    public Runnable value() {
+        return this;
+    }
 }
io/netty/util/internal/PlatformDependent.java

@@ -379,11 +379,11 @@ public final class PlatformDependent {
      * Create a new {@link Queue} which is safe to use for multiple producers (different threads) and a single
      * consumer (one thread!).
      */
-    public static Queue<Runnable> newMpscQueue() {
+    public static <T> Queue<T> newMpscQueue() {
         if (hasUnsafe()) {
-            return new MpscLinkedQueue();
+            return new MpscLinkedQueue<T>();
         } else {
-            return new ConcurrentLinkedQueue<Runnable>();
+            return new ConcurrentLinkedQueue<T>();
         }
     }