/*
 * Copyright 2016 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.resolver.dns;

import io.netty.channel.EventLoop;
import io.netty.handler.codec.dns.DnsRecord;
import io.netty.util.concurrent.ScheduledFuture;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.UnstableApi;

import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import static io.netty.util.internal.ObjectUtil.checkNotNull;
import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;

/**
 * Default implementation of {@link DnsCache}, backed by a {@link ConcurrentMap}.
 * If any additional {@link DnsRecord} is used, no caching takes place.
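 * <p>
 * A minimal usage sketch (illustrative only; it assumes the {@code DnsNameResolverBuilder#resolveCache(DnsCache)}
 * setter and {@code NioDatagramChannel} as available in Netty 4.1, and the TTL values shown are made up):
 * <pre>{@code
 * DnsCache cache = new DefaultDnsCache(0, 300, 30); // honour server TTLs up to 300s, cache failures for 30s
 * DnsNameResolver resolver = new DnsNameResolverBuilder(eventLoop)
 *         .channelType(NioDatagramChannel.class)
 *         .resolveCache(cache)
 *         .build();
 * }</pre>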
 */
@UnstableApi
public class DefaultDnsCache implements DnsCache {

    private final ConcurrentMap<String, Entries> resolveCache = PlatformDependent.newConcurrentHashMap();

    // Two years are supported by all our EventLoop implementations and are therefore safe to use as the maximum.
    // See also: https://github.com/netty/netty/commit/b47fb817991b42ec8808c7d26538f3f2464e1fa6
    private static final int MAX_SUPPORTED_TTL_SECS = (int) TimeUnit.DAYS.toSeconds(365 * 2);
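    // For reference: 365 * 2 = 730 days = 63,072,000 seconds, which fits comfortably in an int.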
    private final int minTtl;
    private final int maxTtl;
    private final int negativeTtl;

    /**
     * Create a cache that respects the TTL returned by the DNS server
     * and doesn't cache negative responses.
     */
    public DefaultDnsCache() {
        this(0, MAX_SUPPORTED_TTL_SECS, 0);
    }

    /**
     * Create a cache.
     * @param minTtl the minimum TTL, in seconds
     * @param maxTtl the maximum TTL, in seconds
     * @param negativeTtl the TTL for failed queries, in seconds
     */
    public DefaultDnsCache(int minTtl, int maxTtl, int negativeTtl) {
        this.minTtl = Math.min(MAX_SUPPORTED_TTL_SECS, checkPositiveOrZero(minTtl, "minTtl"));
        this.maxTtl = Math.min(MAX_SUPPORTED_TTL_SECS, checkPositiveOrZero(maxTtl, "maxTtl"));
        if (minTtl > maxTtl) {
            throw new IllegalArgumentException(
                    "minTtl: " + minTtl + ", maxTtl: " + maxTtl + " (expected: 0 <= minTtl <= maxTtl)");
        }
        this.negativeTtl = checkPositiveOrZero(negativeTtl, "negativeTtl");
    }

    /**
     * Returns the minimum TTL of the cached DNS resource records (in seconds).
     *
     * @see #maxTtl()
     */
    public int minTtl() {
        return minTtl;
    }

    /**
     * Returns the maximum TTL of the cached DNS resource records (in seconds).
     *
     * @see #minTtl()
     */
    public int maxTtl() {
        return maxTtl;
    }

    /**
     * Returns the TTL of the cache for the failed DNS queries (in seconds). The default value is {@code 0}, which
     * disables the cache for negative results.
     */
    public int negativeTtl() {
        return negativeTtl;
    }

    @Override
    public void clear() {
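        // Keep sweeping until the map is observed empty: entries added concurrently while we iterate
        // are picked up by the next pass of the outer loop.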
        while (!resolveCache.isEmpty()) {
            for (Iterator<Map.Entry<String, Entries>> i = resolveCache.entrySet().iterator(); i.hasNext();) {
                Map.Entry<String, Entries> e = i.next();
                i.remove();

                e.getValue().clearAndCancel();
            }
        }
    }

    @Override
    public boolean clear(String hostname) {
        checkNotNull(hostname, "hostname");
        Entries entries = resolveCache.remove(hostname);
        return entries != null && entries.clearAndCancel();
    }

    private static boolean emptyAdditionals(DnsRecord[] additionals) {
        return additionals == null || additionals.length == 0;
    }

    @Override
    public List<? extends DnsCacheEntry> get(String hostname, DnsRecord[] additionals) {
        checkNotNull(hostname, "hostname");
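        // As stated in the class javadoc, lookups that carry additional records are never served from the cache.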
        if (!emptyAdditionals(additionals)) {
            return Collections.<DnsCacheEntry>emptyList();
        }

        Entries entries = resolveCache.get(hostname);
        return entries == null ? null : entries.get();
    }

    @Override
    public DnsCacheEntry cache(String hostname, DnsRecord[] additionals,
                               InetAddress address, long originalTtl, EventLoop loop) {
        checkNotNull(hostname, "hostname");
        checkNotNull(address, "address");
        checkNotNull(loop, "loop");
        final DefaultDnsCacheEntry e = new DefaultDnsCacheEntry(hostname, address);
        if (maxTtl == 0 || !emptyAdditionals(additionals)) {
            return e;
        }
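        // Clamp the TTL reported by the server into [minTtl, maxTtl] and never exceed MAX_SUPPORTED_TTL_SECS
        // before scheduling the expiration of this entry.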
        cache0(e, Math.max(minTtl, Math.min(MAX_SUPPORTED_TTL_SECS, (int) Math.min(maxTtl, originalTtl))), loop);
        return e;
    }

    @Override
    public DnsCacheEntry cache(String hostname, DnsRecord[] additionals, Throwable cause, EventLoop loop) {
        checkNotNull(hostname, "hostname");
        checkNotNull(cause, "cause");
        checkNotNull(loop, "loop");

        final DefaultDnsCacheEntry e = new DefaultDnsCacheEntry(hostname, cause);
        if (negativeTtl == 0 || !emptyAdditionals(additionals)) {
            return e;
        }
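        // Negative results are cached for negativeTtl seconds, again capped at MAX_SUPPORTED_TTL_SECS.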
        cache0(e, Math.min(MAX_SUPPORTED_TTL_SECS, negativeTtl), loop);
        return e;
    }

    private void cache0(DefaultDnsCacheEntry e, int ttl, EventLoop loop) {
        Entries entries = resolveCache.get(e.hostname());
        if (entries == null) {
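            // No Entries registered yet for this hostname. Another thread may race us here, so use
            // putIfAbsent and fall back to the Entries instance that won the race.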
            entries = new Entries(e);
            Entries oldEntries = resolveCache.putIfAbsent(e.hostname(), entries);
            if (oldEntries != null) {
                entries = oldEntries;
            }
        }
        entries.add(e);

        scheduleCacheExpiration(e, ttl, loop);
    }

    private void scheduleCacheExpiration(final DefaultDnsCacheEntry e,
                                         int ttl,
                                         EventLoop loop) {
        e.scheduleExpiration(loop, new Runnable() {
            @Override
            public void run() {
                // We always remove all entries for a hostname once one entry expires. This is not the
                // most efficient way to do it, but it guarantees that, if a DnsResolver is configured
                // to prefer one IP family over the other, we will not return unexpected results to the
                // end user when the A and AAAA records have different TTL settings.
                //
                // As a TTL is just a hint of the maximum time a cache is allowed to hold an entry, it is
                // completely fine to remove the entry even if its TTL has not been reached yet.
                //
                // See https://github.com/netty/netty/issues/7329
                Entries entries = resolveCache.remove(e.hostname);
                if (entries != null) {
                    entries.clearAndCancel();
                }
            }
        }, ttl, TimeUnit.SECONDS);
    }

    @Override
    public String toString() {
        return new StringBuilder()
                .append("DefaultDnsCache(minTtl=")
                .append(minTtl).append(", maxTtl=")
                .append(maxTtl).append(", negativeTtl=")
                .append(negativeTtl).append(", cached resolved hostname=")
                .append(resolveCache.size()).append(")")
                .toString();
    }

    private static final class DefaultDnsCacheEntry implements DnsCacheEntry {
        private final String hostname;
        private final InetAddress address;
        private final Throwable cause;
        private volatile ScheduledFuture<?> expirationFuture;

        DefaultDnsCacheEntry(String hostname, InetAddress address) {
            this.hostname = checkNotNull(hostname, "hostname");
            this.address = checkNotNull(address, "address");
            cause = null;
        }

        DefaultDnsCacheEntry(String hostname, Throwable cause) {
            this.hostname = checkNotNull(hostname, "hostname");
            this.cause = checkNotNull(cause, "cause");
            address = null;
        }

        @Override
        public InetAddress address() {
            return address;
        }

        @Override
        public Throwable cause() {
            return cause;
        }

        String hostname() {
            return hostname;
        }

        void scheduleExpiration(EventLoop loop, Runnable task, long delay, TimeUnit unit) {
            assert expirationFuture == null : "expiration task scheduled already";
            expirationFuture = loop.schedule(task, delay, unit);
        }

        void cancelExpiration() {
            ScheduledFuture<?> expirationFuture = this.expirationFuture;
            if (expirationFuture != null) {
                expirationFuture.cancel(false);
            }
        }

        @Override
        public String toString() {
            if (cause != null) {
                return hostname + '/' + cause;
            } else {
                return address.toString();
            }
        }
    }

    // Directly extend AtomicReference for intrinsics and also to keep memory overhead low.
    private static final class Entries extends AtomicReference<List<DefaultDnsCacheEntry>> {

        Entries(DefaultDnsCacheEntry entry) {
            super(Collections.singletonList(entry));
        }

        void add(DefaultDnsCacheEntry e) {
            if (e.cause() == null) {
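                // Positive entry: update the immutable entry list copy-on-write style and retry the CAS
                // until it succeeds.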
                for (;;) {
                    List<DefaultDnsCacheEntry> entries = get();
                    if (!entries.isEmpty()) {
                        final DefaultDnsCacheEntry firstEntry = entries.get(0);
                        if (firstEntry.cause() != null) {
                            assert entries.size() == 1;
                            if (compareAndSet(entries, Collections.singletonList(e))) {
                                firstEntry.cancelExpiration();
                                return;
                            } else {
                                // Need to try again as CAS failed
                                continue;
                            }
                        }

                        // Create a new List for COW semantics
                        List<DefaultDnsCacheEntry> newEntries = new ArrayList<DefaultDnsCacheEntry>(entries.size() + 1);
                        DefaultDnsCacheEntry replacedEntry = null;
                        for (int i = 0; i < entries.size(); i++) {
                            DefaultDnsCacheEntry entry = entries.get(i);
                            // Only keep an old entry if its address differs from the one we are adding. If the
                            // address is the same we skip the old entry, add the new one (which may carry more
                            // up-to-date data) and cancel the old one once the cache has been updated.
                            if (!e.address().equals(entry.address())) {
                                newEntries.add(entry);
                            } else {
                                assert replacedEntry == null;
                                replacedEntry = entry;
                            }
                        }
                        newEntries.add(e);
                        if (compareAndSet(entries, newEntries)) {
                            if (replacedEntry != null) {
                                replacedEntry.cancelExpiration();
                            }
                            return;
                        }
                    } else if (compareAndSet(entries, Collections.singletonList(e))) {
                        return;
                    }
                }
            } else {
                List<DefaultDnsCacheEntry> entries = getAndSet(Collections.singletonList(e));
                cancelExpiration(entries);
            }
        }

        boolean clearAndCancel() {
            List<DefaultDnsCacheEntry> entries = getAndSet(Collections.<DefaultDnsCacheEntry>emptyList());
            if (entries.isEmpty()) {
                return false;
            }

            cancelExpiration(entries);
            return true;
        }

        private static void cancelExpiration(List<DefaultDnsCacheEntry> entryList) {
            final int numEntries = entryList.size();
            for (int i = 0; i < numEntries; i++) {
                entryList.get(i).cancelExpiration();
            }
        }
    }
}