Keep the number of scheduled tasks for DefaultDnsCache at a minimum (#8187)
Motivation:

We currently remove all cached entries for a hostname once the lowest TTL is reached, but we schedule a separate removal task for each of the cached entries. This is wasteful.

Modifications:

- Reimplement the TTL handling so that a new removal task is only scheduled if the requested TTL is actually lower than the TTL of the task that is already scheduled.
- Ensure we only remove the Entries from the internal map if they were not replaced in the meantime.

Result:

Less overhead in terms of scheduled tasks for the DefaultDnsCache.
commit 2fa7a0aa57
parent bd25fd03e3
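The scheduling rule described above can be illustrated in isolation. The sketch below is not the Netty implementation (the class name ExpirationScheduler and the plain ScheduledExecutorService are assumptions for illustration; in the actual change the logic lives in the Entries inner class shown in the diff): it schedules a new expiration task only when nothing is pending yet or when the requested TTL would fire sooner than the task that is already scheduled, and it cancels the now-redundant task after a successful CAS.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

// Illustrative sketch only, not the Netty code.
final class ExpirationScheduler {
    private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    private final AtomicReference<ScheduledFuture<?>> pending = new AtomicReference<ScheduledFuture<?>>();

    void scheduleIfNeeded(Runnable expireTask, int ttlSeconds) {
        for (;;) {
            ScheduledFuture<?> old = pending.get();
            // Keep the existing task if it already fires no later than the new TTL.
            if (old != null && old.getDelay(TimeUnit.SECONDS) <= ttlSeconds) {
                return;
            }
            ScheduledFuture<?> fresh = executor.schedule(expireTask, ttlSeconds, TimeUnit.SECONDS);
            if (pending.compareAndSet(old, fresh)) {
                if (old != null) {
                    old.cancel(true); // the later-firing task is now redundant
                }
                return;
            }
            // Another thread installed a task concurrently; discard ours and re-evaluate.
            fresh.cancel(true);
        }
    }
}

Repeated calls with a higher TTL are no-ops; only a lower TTL replaces (and cancels) the pending task, which is what keeps the scheduled-task count per hostname at one instead of one per cached entry. The retry loop accepts a rare extra schedule/cancel instead of a synchronized block, the same trade-off the diff's comments call out.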
@@ -17,7 +17,6 @@ package io.netty.resolver.dns;
 
 import io.netty.channel.EventLoop;
 import io.netty.handler.codec.dns.DnsRecord;
-import io.netty.util.concurrent.ScheduledFuture;
 import io.netty.util.internal.PlatformDependent;
 import io.netty.util.internal.StringUtil;
 import io.netty.util.internal.UnstableApi;
@@ -29,8 +28,11 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Delayed;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 
 import static io.netty.util.internal.ObjectUtil.checkNotNull;
 import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
@@ -41,12 +43,55 @@ import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
  */
 @UnstableApi
 public class DefaultDnsCache implements DnsCache {
 
-    private final ConcurrentMap<String, Entries> resolveCache = PlatformDependent.newConcurrentHashMap();
-
     // Two years are supported by all our EventLoop implementations and so safe to use as maximum.
     // See also: https://github.com/netty/netty/commit/b47fb817991b42ec8808c7d26538f3f2464e1fa6
     private static final int MAX_SUPPORTED_TTL_SECS = (int) TimeUnit.DAYS.toSeconds(365 * 2);
+
+    private static final ScheduledFuture<?> CANCELLED = new ScheduledFuture<Object>() {
+
+        @Override
+        public boolean cancel(boolean mayInterruptIfRunning) {
+            return false;
+        }
+
+        @Override
+        public long getDelay(TimeUnit unit) {
+            // We ignore unit and always return the minimum value to ensure the TTL of the cancelled marker is
+            // the smallest.
+            return Long.MIN_VALUE;
+        }
+
+        @Override
+        public int compareTo(Delayed o) {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean isCancelled() {
+            return true;
+        }
+
+        @Override
+        public boolean isDone() {
+            return true;
+        }
+
+        @Override
+        public Object get() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public Object get(long timeout, TimeUnit unit) {
+            throw new UnsupportedOperationException();
+        }
+    };
+
+    private static final AtomicReferenceFieldUpdater<DefaultDnsCache.Entries, ScheduledFuture> FUTURE_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(
+                    DefaultDnsCache.Entries.class, ScheduledFuture.class, "expirationFuture");
+
+    private final ConcurrentMap<String, Entries> resolveCache = PlatformDependent.newConcurrentHashMap();
     private final int minTtl;
     private final int maxTtl;
     private final int negativeTtl;
@@ -146,7 +191,7 @@ public class DefaultDnsCache implements DnsCache {
             return e;
         }
         cache0(appendDot(hostname), e,
-                Math.max(minTtl, Math.min(MAX_SUPPORTED_TTL_SECS, (int) Math.min(maxTtl, originalTtl))), loop);
+                Math.max(minTtl, (int) Math.min(maxTtl, originalTtl)), loop);
         return e;
     }
 
@@ -161,45 +206,20 @@ public class DefaultDnsCache implements DnsCache {
             return e;
         }
 
-        cache0(appendDot(hostname), e, Math.min(MAX_SUPPORTED_TTL_SECS, negativeTtl), loop);
+        cache0(appendDot(hostname), e, negativeTtl, loop);
         return e;
     }
 
     private void cache0(String hostname, DefaultDnsCacheEntry e, int ttl, EventLoop loop) {
         Entries entries = resolveCache.get(hostname);
         if (entries == null) {
-            entries = new Entries(e);
+            entries = new Entries(hostname);
             Entries oldEntries = resolveCache.putIfAbsent(hostname, entries);
             if (oldEntries != null) {
                 entries = oldEntries;
             }
         }
-        entries.add(e);
-
-        scheduleCacheExpiration(hostname, e, ttl, loop);
-    }
-
-    private void scheduleCacheExpiration(final String hostname, final DefaultDnsCacheEntry e,
-                                         int ttl,
-                                         EventLoop loop) {
-        e.scheduleExpiration(loop, new Runnable() {
-            @Override
-            public void run() {
-                // We always remove all entries for a hostname once one entry expire. This is not the
-                // most efficient to do but this way we can guarantee that if a DnsResolver
-                // be configured to prefer one ip family over the other we will not return unexpected
-                // results to the enduser if one of the A or AAAA records has different TTL settings.
-                //
-                // As a TTL is just a hint of the maximum time a cache is allowed to cache stuff it's
-                // completely fine to remove the entry even if the TTL is not reached yet.
-                //
-                // See https://github.com/netty/netty/issues/7329
-                Entries entries = resolveCache.remove(hostname);
-                if (entries != null) {
-                    entries.clearAndCancel();
-                }
-            }
-        }, ttl, TimeUnit.SECONDS);
+        entries.add(e, ttl, loop);
     }
 
     @Override
@@ -217,7 +237,6 @@ public class DefaultDnsCache implements DnsCache {
         private final String hostname;
         private final InetAddress address;
         private final Throwable cause;
-        private volatile ScheduledFuture<?> expirationFuture;
 
         DefaultDnsCacheEntry(String hostname, InetAddress address) {
             this.hostname = checkNotNull(hostname, "hostname");
@@ -241,16 +260,8 @@ public class DefaultDnsCache implements DnsCache {
             return cause;
         }
 
-        void scheduleExpiration(EventLoop loop, Runnable task, long delay, TimeUnit unit) {
-            assert expirationFuture == null : "expiration task scheduled already";
-            expirationFuture = loop.schedule(task, delay, unit);
-        }
-
-        void cancelExpiration() {
-            ScheduledFuture<?> expirationFuture = this.expirationFuture;
-            if (expirationFuture != null) {
-                expirationFuture.cancel(false);
-            }
+        String hostname() {
+            return hostname;
         }
 
         @Override
@@ -264,13 +275,18 @@ public class DefaultDnsCache implements DnsCache {
     }
 
     // Directly extend AtomicReference for intrinsics and also to keep memory overhead low.
-    private static final class Entries extends AtomicReference<List<DefaultDnsCacheEntry>> {
+    private final class Entries extends AtomicReference<List<DefaultDnsCacheEntry>> implements Runnable {
 
-        Entries(DefaultDnsCacheEntry entry) {
-            super(Collections.singletonList(entry));
+        private final String hostname;
+        // Needs to be package-private to be able to access it via the AtomicReferenceFieldUpdater
+        volatile ScheduledFuture<?> expirationFuture;
+
+        Entries(String hostname) {
+            super(Collections.<DefaultDnsCacheEntry>emptyList());
+            this.hostname = hostname;
         }
 
-        void add(DefaultDnsCacheEntry e) {
+        void add(DefaultDnsCacheEntry e, int ttl, EventLoop loop) {
            if (e.cause() == null) {
                for (;;) {
                    List<DefaultDnsCacheEntry> entries = get();
@@ -278,8 +294,9 @@ public class DefaultDnsCache implements DnsCache {
                     final DefaultDnsCacheEntry firstEntry = entries.get(0);
                     if (firstEntry.cause() != null) {
                         assert entries.size() == 1;
+
                         if (compareAndSet(entries, Collections.singletonList(e))) {
-                            firstEntry.cancelExpiration();
+                            scheduleCacheExpirationIfNeeded(ttl, loop);
                             return;
                         } else {
                             // Need to try again as CAS failed
@@ -289,33 +306,58 @@ public class DefaultDnsCache implements DnsCache {
 
                        // Create a new List for COW semantics
                        List<DefaultDnsCacheEntry> newEntries = new ArrayList<DefaultDnsCacheEntry>(entries.size() + 1);
-                        DefaultDnsCacheEntry replacedEntry = null;
-                        for (int i = 0; i < entries.size(); i++) {
+                        int i = 0;
+                        do {
                            DefaultDnsCacheEntry entry = entries.get(i);
                            // Only add old entry if the address is not the same as the one we try to add as well.
                            // In this case we will skip it and just add the new entry as this may have
                            // more up-to-date data and cancel the old after we were able to update the cache.
                            if (!e.address().equals(entry.address())) {
                                newEntries.add(entry);
-                            } else {
-                                assert replacedEntry == null;
-                                replacedEntry = entry;
                            }
-                        }
+                        } while (++i < entries.size());
                        newEntries.add(e);
-                        if (compareAndSet(entries, newEntries)) {
-                            if (replacedEntry != null) {
-                                replacedEntry.cancelExpiration();
-                            }
+                        if (compareAndSet(entries, Collections.unmodifiableList(newEntries))) {
+                            scheduleCacheExpirationIfNeeded(ttl, loop);
                            return;
                        }
                    } else if (compareAndSet(entries, Collections.singletonList(e))) {
+                        scheduleCacheExpirationIfNeeded(ttl, loop);
                        return;
                    }
                }
            } else {
-                List<DefaultDnsCacheEntry> entries = getAndSet(Collections.singletonList(e));
-                cancelExpiration(entries);
+                set(Collections.singletonList(e));
+                scheduleCacheExpirationIfNeeded(ttl, loop);
+            }
+        }
+
+        private void scheduleCacheExpirationIfNeeded(int ttl, EventLoop loop) {
+            for (;;) {
+                // We currently don't calculate a new TTL when we need to retry the CAS as we don't expect this to
+                // be invoked very concurrently and also we use SECONDS anyway. If this ever becomes a problem
+                // we can reconsider.
+                ScheduledFuture<?> oldFuture = FUTURE_UPDATER.get(this);
+                if (oldFuture == null || oldFuture.getDelay(TimeUnit.SECONDS) > ttl) {
+                    ScheduledFuture<?> newFuture = loop.schedule(this, ttl, TimeUnit.SECONDS);
+                    // It is possible that
+                    // 1. task will fire in between this line, or
+                    // 2. multiple timers may be set if there is concurrency
+                    // (1) Shouldn't be a problem because we will fail the CAS and then the next loop will see CANCELLED
+                    //     so the ttl will not be less, and we will bail out of the loop.
+                    // (2) This is a trade-off to avoid concurrency resulting in contention on a synchronized block.
+                    if (FUTURE_UPDATER.compareAndSet(this, oldFuture, newFuture)) {
+                        if (oldFuture != null) {
+                            oldFuture.cancel(true);
+                        }
+                        break;
+                    } else {
+                        // There was something else scheduled in the meantime... Cancel and try again.
+                        newFuture.cancel(true);
+                    }
+                } else {
+                    break;
+                }
            }
        }
 
@@ -325,15 +367,28 @@ public class DefaultDnsCache implements DnsCache {
                return false;
            }
 
-            cancelExpiration(entries);
+            ScheduledFuture<?> expirationFuture = FUTURE_UPDATER.getAndSet(this, CANCELLED);
+            if (expirationFuture != null) {
+                expirationFuture.cancel(false);
+            }
+
            return true;
        }
 
-        private static void cancelExpiration(List<DefaultDnsCacheEntry> entryList) {
-            final int numEntries = entryList.size();
-            for (int i = 0; i < numEntries; i++) {
-                entryList.get(i).cancelExpiration();
-            }
+        @Override
+        public void run() {
+            // We always remove all entries for a hostname once one entry expire. This is not the
+            // most efficient to do but this way we can guarantee that if a DnsResolver
+            // be configured to prefer one ip family over the other we will not return unexpected
+            // results to the enduser if one of the A or AAAA records has different TTL settings.
+            //
+            // As a TTL is just a hint of the maximum time a cache is allowed to cache stuff it's
+            // completely fine to remove the entry even if the TTL is not reached yet.
+            //
+            // See https://github.com/netty/netty/issues/7329
+            resolveCache.remove(hostname, this);
+
+            clearAndCancel();
        }
    }
 
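A detail of the diff that is easy to miss: clearAndCancel() swaps the pending expiration future for the CANCELLED marker, whose getDelay() returns Long.MIN_VALUE. Because a new task is only scheduled when the pending task's delay is greater than the requested TTL, the marker turns any later scheduling attempt on a removed Entries instance into a no-op, which the diff's own comment ("the next loop will see CANCELLED so the ttl will not be less") relies on. The following self-contained sketch (illustrative names, not the Netty code) demonstrates just that guard:

import java.util.concurrent.Delayed;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public final class CancelledMarkerDemo {

    // Sentinel future: reports a delay smaller than any real TTL, so a
    // "pendingDelay > ttl" check can never succeed again once it is installed.
    private static final ScheduledFuture<?> CANCELLED = new ScheduledFuture<Object>() {
        @Override public long getDelay(TimeUnit unit) { return Long.MIN_VALUE; }
        @Override public int compareTo(Delayed o) { throw new UnsupportedOperationException(); }
        @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
        @Override public boolean isCancelled() { return true; }
        @Override public boolean isDone() { return true; }
        @Override public Object get() { throw new UnsupportedOperationException(); }
        @Override public Object get(long timeout, TimeUnit unit) { throw new UnsupportedOperationException(); }
    };

    private final AtomicReference<ScheduledFuture<?>> pending = new AtomicReference<ScheduledFuture<?>>();

    // Mirrors the decision in scheduleCacheExpirationIfNeeded(): schedule only when
    // nothing is pending or the new TTL would fire sooner than the pending task.
    boolean wouldSchedule(int ttlSeconds) {
        ScheduledFuture<?> old = pending.get();
        return old == null || old.getDelay(TimeUnit.SECONDS) > ttlSeconds;
    }

    // Mirrors clearAndCancel(): after this, wouldSchedule() never returns true again.
    void markRemoved() {
        pending.set(CANCELLED);
    }

    public static void main(String[] args) {
        CancelledMarkerDemo demo = new CancelledMarkerDemo();
        System.out.println(demo.wouldSchedule(30)); // true: nothing pending yet
        demo.markRemoved();
        System.out.println(demo.wouldSchedule(30)); // false: Long.MIN_VALUE is never greater than 30
    }
}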