diff --git a/common/src/main/java/io/netty/util/internal/chmv8/ConcurrentHashMapV8.java b/common/src/main/java/io/netty/util/internal/chmv8/ConcurrentHashMapV8.java
index c1f309b55c..6cb2a7f860 100644
--- a/common/src/main/java/io/netty/util/internal/chmv8/ConcurrentHashMapV8.java
+++ b/common/src/main/java/io/netty/util/internal/chmv8/ConcurrentHashMapV8.java
@@ -22,11 +22,16 @@
package io.netty.util.internal.chmv8;
+import java.io.ObjectStreamField;
import java.io.Serializable;
-import java.util.AbstractMap;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
import java.util.Arrays;
import java.util.Collection;
+import java.util.ConcurrentModificationException;
import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
@@ -34,7 +39,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.AbstractQueuedSynchronizer;
+import java.util.concurrent.locks.LockSupport;
+import java.util.concurrent.locks.ReentrantLock;
/**
* A hash table supporting full concurrency of retrievals and
@@ -60,7 +66,7 @@ import java.util.concurrent.locks.AbstractQueuedSynchronizer;
* Iterators and Enumerations return elements reflecting the state of
* the hash table at some point at or since the creation of the
* iterator/enumeration. They do not throw {@link
- * java.util.ConcurrentModificationException}. However, iterators are designed
+ * ConcurrentModificationException}. However, iterators are designed
* to be used by only one thread at a time. Bear in mind that the
* results of aggregate status methods including {@code size}, {@code
* isEmpty}, and {@code containsValue} are typically useful only when
@@ -88,45 +94,37 @@ import java.util.concurrent.locks.AbstractQueuedSynchronizer;
* expected {@code concurrencyLevel} as an additional hint for
* internal sizing. Note that using many keys with exactly the same
* {@code hashCode()} is a sure way to slow down performance of any
- * hash table.
+ * hash table. To ameliorate impact, when keys are {@link Comparable},
+ * this class may use comparison order among keys to help break ties.
*
- * A {@link java.util.Set} projection of a ConcurrentHashMapV8 may be created
+ * A {@link Set} projection of a ConcurrentHashMapV8 may be created
* (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
* (using {@link #keySet(Object)} when only keys are of interest, and the
* mapped values are (perhaps transiently) not used or all take the
* same mapping value.
*
- * A ConcurrentHashMapV8 can be used as a scalable frequency map (a
- * form of histogram or multiset) by using {@link LongAdder} values
- * and initializing via {@link #computeIfAbsent}. For example, to add
- * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you
- * can use {@code freqs.computeIfAbsent(k -> new
- * LongAdder()).increment();}
- *
* This class and its views and iterators implement all of the
- * optional methods of the {@link java.util.Map} and {@link java.util.Iterator}
+ * optional methods of the {@link Map} and {@link Iterator}
* interfaces.
*
- * Like {@link java.util.Hashtable} but unlike {@link java.util.HashMap}, this class
+ * Like {@link Hashtable} but unlike {@link HashMap}, this class
* does not allow {@code null} to be used as a key or value.
*
- * ConcurrentHashMapV8s support sequential and parallel
- * bulk operations. (Parallel forms use the {@link
- * ForkJoinPool#commonPool()}). Tasks that may be used in other
- * contexts are available in class {@link io.netty.util.internal.chmv8.ConcurrentHashMapV8.ForkJoinTasks}. These
- * operations are designed to be safely, and often sensibly, applied
- * even with maps that are being concurrently updated by other
- * threads; for example, when computing a snapshot summary of the
- * values in a shared registry. There are three kinds of operation,
- * each with four forms, accepting functions with Keys, Values,
- * Entries, and (Key, Value) arguments and/or return values. Because
- * the elements of a ConcurrentHashMapV8 are not ordered in any
- * particular way, and may be processed in different orders in
- * different parallel executions, the correctness of supplied
- * functions should not depend on any ordering, or on any other
- * objects or values that may transiently change while computation is
- * in progress; and except for forEach actions, should ideally be
- * side-effect-free.
+ * ConcurrentHashMapV8s support a set of sequential and parallel bulk
+ * operations that are designed
+ * to be safely, and often sensibly, applied even with maps that are
+ * being concurrently updated by other threads; for example, when
+ * computing a snapshot summary of the values in a shared registry.
+ * There are three kinds of operation, each with four forms, accepting
+ * functions with Keys, Values, Entries, and (Key, Value) arguments
+ * and/or return values. Because the elements of a ConcurrentHashMapV8
+ * are not ordered in any particular way, and may be processed in
+ * different orders in different parallel executions, the correctness
+ * of supplied functions should not depend on any ordering, or on any
+ * other objects or values that may transiently change while
+ * computation is in progress; and except for forEach actions, should
+ * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
+ * objects do not support method {@code setValue}.
*
*
* - forEach: Perform a given action on each element.
@@ -153,9 +151,20 @@ import java.util.concurrent.locks.AbstractQueuedSynchronizer;
 * - Reductions to scalar doubles, longs, and ints, using a
* given basis value.
*
+ *
*
*
- *
+ *
+ * These bulk operations accept a {@code parallelismThreshold}
+ * argument. Methods proceed sequentially if the current map size is
+ * estimated to be less than the given threshold. Using a value of
+ * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
+ * of {@code 1} results in maximal parallelism by partitioning into
+ * enough subtasks to fully utilize the {@link
+ * ForkJoinPool#commonPool()} that is used for all parallel
+ * computations. Normally, you would initially choose one of these
+ * extreme values, and then measure performance of using in-between
+ * values that trade off overhead versus throughput.
*
 * The concurrency properties of bulk operations follow
* from those of ConcurrentHashMapV8: Any non-null result returned
@@ -223,79 +232,63 @@ import java.util.concurrent.locks.AbstractQueuedSynchronizer;
 * @param <V> the type of mapped values
*/
@SuppressWarnings("all")
-public final class ConcurrentHashMapV8<K,V>
- implements ConcurrentMap<K,V>, Serializable {
+public class ConcurrentHashMapV8<K,V>
+ implements ConcurrentMap<K,V>, Serializable {
private static final long serialVersionUID = 7249069246763182397L;
/**
- * A partitionable iterator. A Spliterator can be traversed
- * directly, but can also be partitioned (before traversal) by
- * creating another Spliterator that covers a non-overlapping
- * portion of the elements, and so may be amenable to parallel
- * execution.
- *
- * This interface exports a subset of expected JDK8
- * functionality.
- *
- * Sample usage: Here is one (of the several) ways to compute
- * the sum of the values held in a map using the ForkJoin
- * framework. As illustrated here, Spliterators are well suited to
- * designs in which a task repeatedly splits off half its work
- * into forked subtasks until small enough to process directly,
- * and then joins these subtasks. Variants of this style can also
- * be used in completion-based designs.
- *
- *
- * {@code ConcurrentHashMapV8<String,Long> m = ...
- * // split as if have 8 * parallelism, for load balance
- * int n = m.size();
- * int p = aForkJoinPool.getParallelism() * 8;
- * int split = (n < p)? n : p;
- * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
- * // ...
- * static class SumValues extends RecursiveTask<Long> {
- * final Spliterator<Long> s;
- * final int split; // split while > 1
- * final SumValues nextJoin; // records forked subtasks to join
- * SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
- * this.s = s; this.split = split; this.nextJoin = nextJoin;
- * }
- * public Long compute() {
- * long sum = 0;
- * SumValues subtasks = null; // fork subtasks
- * for (int sp = split >>> 1; sp > 0; sp >>>= 1)
- * (subtasks = new SumValues(s.split(), sp, subtasks)).fork();
- * while (s.hasNext()) // directly process remaining elements
- * sum += s.next();
- * for (SumValues t = subtasks; t != null; t = t.nextJoin)
- * sum += t.join(); // collect subtask results
- * return sum;
- * }
- * }
- * }
+ * An object for traversing and partitioning elements of a source.
+ * This interface provides a subset of the functionality of JDK8
+ * java.util.Spliterator.
*/
- public static interface Spliterator<T> extends Iterator<T> {
+ public static interface ConcurrentHashMapSpliterator<T> {
/**
- * Returns a Spliterator covering approximately half of the
- * elements, guaranteed not to overlap with those subsequently
- * returned by this Spliterator. After invoking this method,
- * the current Spliterator will not produce any of
- * the elements of the returned Spliterator, but the two
- * Spliterators together will produce all of the elements that
- * would have been produced by this Spliterator had this
- * method not been called. The exact number of elements
- * produced by the returned Spliterator is not guaranteed, and
- * may be zero (i.e., with {@code hasNext()} reporting {@code
- * false}) if this Spliterator cannot be further split.
- *
- * @return a Spliterator covering approximately half of the
- * elements
- * @throws IllegalStateException if this Spliterator has
- * already commenced traversing elements
+ * If possible, returns a new spliterator covering
+ * approximately one half of the elements, which will not be
+ * covered by this spliterator. Returns null if cannot be
+ * split.
*/
- Spliterator<T> split();
+ ConcurrentHashMapSpliterator<T> trySplit();
+ /**
+ * Returns an estimate of the number of elements covered by
+ * this Spliterator.
+ */
+ long estimateSize();
+
+ /** Applies the action to each untraversed element */
+ void forEachRemaining(Action<? super T> action);
+ /** If an element remains, applies the action and returns true. */
+ boolean tryAdvance(Action<? super T> action);
}
+ // Sams
+ /** Interface describing a void action of one argument */
+ public interface Action<A> { void apply(A a); }
+ /** Interface describing a void action of two arguments */
+ public interface BiAction<A,B> { void apply(A a, B b); }
+ /** Interface describing a function of one argument */
+ public interface Fun<A,T> { T apply(A a); }
+ /** Interface describing a function of two arguments */
+ public interface BiFun<A,B,T> { T apply(A a, B b); }
+ /** Interface describing a function mapping its argument to a double */
+ public interface ObjectToDouble<A> { double apply(A a); }
+ /** Interface describing a function mapping its argument to a long */
+ public interface ObjectToLong<A> { long apply(A a); }
+ /** Interface describing a function mapping its argument to an int */
+ public interface ObjectToInt<A> { int apply(A a); }
+ /** Interface describing a function mapping two arguments to a double */
+ public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); }
+ /** Interface describing a function mapping two arguments to a long */
+ public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
+ /** Interface describing a function mapping two arguments to an int */
+ public interface ObjectByObjectToInt<A,B> { int apply(A a, B b); }
+ /** Interface describing a function mapping two doubles to a double */
+ public interface DoubleByDoubleToDouble { double apply(double a, double b); }
+ /** Interface describing a function mapping two longs to a long */
+ public interface LongByLongToLong { long apply(long a, long b); }
+ /** Interface describing a function mapping two ints to an int */
+ public interface IntByIntToInt { int apply(int a, int b); }
+
/*
* Overview:
*
@@ -306,16 +299,21 @@ public final class ConcurrentHashMapV8
* the same or better than java.util.HashMap, and to support high
* initial insertion rates on an empty table by many threads.
*
- * Each key-value mapping is held in a Node. Because Node key
- * fields can contain special values, they are defined using plain
- * Object types (not type "K"). This leads to a lot of explicit
- * casting (and many explicit warning suppressions to tell
- * compilers not to complain about it). It also allows some of the
- * public methods to be factored into a smaller number of internal
- * methods (although sadly not so for the five variants of
- * put-related operations). The validation-based approach
- * explained below leads to a lot of code sprawl because
- * retry-control precludes factoring into smaller methods.
+ * This map usually acts as a binned (bucketed) hash table. Each
+ * key-value mapping is held in a Node. Most nodes are instances
+ * of the basic Node class with hash, key, value, and next
+ * fields. However, various subclasses exist: TreeNodes are
+ * arranged in balanced trees, not lists. TreeBins hold the roots
+ * of sets of TreeNodes. ForwardingNodes are placed at the heads
+ * of bins during resizing. ReservationNodes are used as
+ * placeholders while establishing values in computeIfAbsent and
+ * related methods. The types TreeBin, ForwardingNode, and
+ * ReservationNode do not hold normal user keys, values, or
+ * hashes, and are readily distinguishable during search etc
+ * because they have negative hash fields and null key and value
+ * fields. (These special nodes are either uncommon or transient,
+ * so the impact of carrying around some unused fields is
+ * insignificant.)
*
* The table is lazily initialized to a power-of-two size upon the
* first insertion. Each bin in the table normally contains a
@@ -323,17 +321,12 @@ public final class ConcurrentHashMapV8
* Table accesses require volatile/atomic reads, writes, and
* CASes. Because there is no other way to arrange this without
* adding further indirections, we use intrinsics
- * (sun.misc.Unsafe) operations. The lists of nodes within bins
- * are always accurately traversable under volatile reads, so long
- * as lookups check hash code and non-nullness of value before
- * checking key equality.
+ * (sun.misc.Unsafe) operations.
*
* We use the top (sign) bit of Node hash fields for control
* purposes -- it is available anyway because of addressing
- * constraints. Nodes with negative hash fields are forwarding
- * nodes to either TreeBins or resized tables. The lower 31 bits
- * of each normal Node's hash field contain a transformation of
- * the key's hash code.
+ * constraints. Nodes with negative hash fields are specially
+ * handled or ignored in map methods.
*
* Insertion (via put or its variants) of the first node in an
* empty bin is performed by just CASing it to the bin. This is
@@ -350,10 +343,7 @@ public final class ConcurrentHashMapV8
* validate that it is still the first node after locking it, and
* retry if not. Because new nodes are always appended to lists,
* once a node is first in a bin, it remains first until deleted
- * or the bin becomes invalidated (upon resizing). However,
- * operations that only conditionally update may inspect nodes
- * until the point of update. This is a converse of sorts to the
- * lazy locking technique described by Herlihy & Shavit.
+ * or the bin becomes invalidated (upon resizing).
*
* The main disadvantage of per-bin locks is that other update
* operations on other nodes in a bin list protected by the same
@@ -386,15 +376,12 @@ public final class ConcurrentHashMapV8
* sometimes deviate significantly from uniform randomness. This
* includes the case when N > (1<<30), so some keys MUST collide.
* Similarly for dumb or hostile usages in which multiple keys are
- * designed to have identical hash codes. Also, although we guard
- * against the worst effects of this (see method spread), sets of
- * hashes may differ only in bits that do not impact their bin
- * index for a given power-of-two mask. So we use a secondary
- * strategy that applies when the number of nodes in a bin exceeds
- * a threshold, and at least one of the keys implements
- * Comparable. These TreeBins use a balanced tree to hold nodes
- * (a specialized form of red-black trees), bounding search time
- * to O(log N). Each search step in a TreeBin is around twice as
+ * designed to have identical hash codes or ones that differ only
+ * in masked-out high bits. So we use a secondary strategy that
+ * applies when the number of nodes in a bin exceeds a
+ * threshold. These TreeBins use a balanced tree to hold nodes (a
+ * specialized form of red-black trees), bounding search time to
+ * O(log N). Each search step in a TreeBin is at least twice as
* slow as in a regular list, but given that N cannot exceed
* (1<<64) (before running out of addresses) this bounds search
* steps, lock hold times, etc, to reasonable constants (roughly
@@ -467,9 +454,41 @@ public final class ConcurrentHashMapV8
* bin already holding two or more nodes. Under uniform hash
* distributions, the probability of this occurring at threshold
* is around 13%, meaning that only about 1 in 8 puts check
- * threshold (and after resizing, many fewer do so). The bulk
- * putAll operation further reduces contention by only committing
- * count updates upon these size checks.
+ * threshold (and after resizing, many fewer do so).
+ *
+ * TreeBins use a special form of comparison for search and
+ * related operations (which is the main reason we cannot use
+ * existing collections such as TreeMaps). TreeBins contain
+ * Comparable elements, but may contain others, as well as
+ * elements that are Comparable but not necessarily Comparable
+ * for the same T, so we cannot invoke compareTo among them. To
+ * handle this, the tree is ordered primarily by hash value, then
+ * by Comparable.compareTo order if applicable. On lookup at a
+ * node, if elements are not comparable or compare as 0 then both
+ * left and right children may need to be searched in the case of
+ * tied hash values. (This corresponds to the full list search
+ * that would be necessary if all elements were non-Comparable and
+ * had tied hashes.) The red-black balancing code is updated from
+ * pre-jdk-collections
+ * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
+ * based in turn on Cormen, Leiserson, and Rivest "Introduction to
+ * Algorithms" (CLR).
+ *
+ * TreeBins also require an additional locking mechanism. While
+ * list traversal is always possible by readers even during
+ * updates, tree traversal is not, mainly because of tree-rotations
+ * that may change the root node and/or its linkages. TreeBins
+ * include a simple read-write lock mechanism parasitic on the
+ * main bin-synchronization strategy: Structural adjustments
+ * associated with an insertion or removal are already bin-locked
+ * (and so cannot conflict with other writers) but must wait for
+ * ongoing readers to finish. Since there can be only one such
+ * waiter, we use a simple scheme using a single "waiter" field to
+ * block writers. However, readers need never block. If the root
+ * lock is held, they proceed along the slow traversal path (via
+ * next-pointers) until the lock becomes available or the list is
+ * exhausted, whichever comes first. These cases are not fast, but
+ * maximize aggregate expected throughput.
*
* Maintaining API and serialization compatibility with previous
* versions of this class introduces several oddities. Mainly: We
@@ -479,6 +498,13 @@ public final class ConcurrentHashMapV8
* time that we can guarantee to honor it.) We also declare an
* unused "Segment" class that is instantiated in minimal form
* only when serializing.
+ *
+ * This file is organized to make things a little easier to follow
+ * while reading than they might otherwise: First the main static
+ * declarations and utilities, then fields, then main public
+ * methods (with a few factorings of multiple public methods into
+ * internal ones), then sizing methods, trees, traversers, and
+ * bulk operations.
*/
/* ---------------- Constants -------------- */
@@ -521,10 +547,28 @@ public final class ConcurrentHashMapV8
/**
* The bin count threshold for using a tree rather than list for a
- * bin. The value reflects the approximate break-even point for
- * using tree-based operations.
+ * bin. Bins are converted to trees when adding an element to a
+ * bin with at least this many nodes. The value must be greater
+ * than 2, and should be at least 8 to mesh with assumptions in
+ * tree removal about conversion back to plain bins upon
+ * shrinkage.
*/
- private static final int TREE_THRESHOLD = 8;
+ static final int TREEIFY_THRESHOLD = 8;
+
+ /**
+ * The bin count threshold for untreeifying a (split) bin during a
+ * resize operation. Should be less than TREEIFY_THRESHOLD, and at
+ * most 6 to mesh with shrinkage detection under removal.
+ */
+ static final int UNTREEIFY_THRESHOLD = 6;
+
+ /**
+ * The smallest table capacity for which bins may be treeified.
+ * (Otherwise the table is resized if too many nodes in a bin.)
+ * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
+ * conflicts between resizing and treeification thresholds.
+ */
+ static final int MIN_TREEIFY_CAPACITY = 64;
/**
* Minimum number of rebinnings per transfer step. Ranges are
@@ -538,50 +582,178 @@ public final class ConcurrentHashMapV8
/*
* Encodings for Node hash fields. See above for explanation.
*/
- static final int MOVED = 0x80000000; // hash field for forwarding nodes
+ static final int MOVED = -1; // hash for forwarding nodes
+ static final int TREEBIN = -2; // hash for roots of trees
+ static final int RESERVED = -3; // hash for transient reservations
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
/** Number of CPUS, to place bounds on some sizings */
static final int NCPU = Runtime.getRuntime().availableProcessors();
- /* ---------------- Counters -------------- */
+ /** For serialization compatibility. */
+ private static final ObjectStreamField[] serialPersistentFields = {
+ new ObjectStreamField("segments", Segment[].class),
+ new ObjectStreamField("segmentMask", Integer.TYPE),
+ new ObjectStreamField("segmentShift", Integer.TYPE)
+ };
- // Adapted from LongAdder and Striped64.
- // See their internal docs for explanation.
+ /* ---------------- Nodes -------------- */
- // A padded cell for distributing counts
- static final class CounterCell {
- volatile long p0, p1, p2, p3, p4, p5, p6;
- volatile long value;
- volatile long q0, q1, q2, q3, q4, q5, q6;
- CounterCell(long x) { value = x; }
+ /**
+ * Key-value entry. This class is never exported out as a
+ * user-mutable Map.Entry (i.e., one supporting setValue; see
+ * MapEntry below), but can be used for read-only traversals used
+ * in bulk tasks. Subclasses of Node with a negative hash field
+ * are special, and contain null keys and values (but are never
+ * exported). Otherwise, keys and vals are never null.
+ */
+ static class Node<K,V> implements Map.Entry<K,V> {
+ final int hash;
+ final K key;
+ volatile V val;
+ volatile Node<K,V> next;
+
+ Node(int hash, K key, V val, Node<K,V> next) {
+ this.hash = hash;
+ this.key = key;
+ this.val = val;
+ this.next = next;
+ }
+
+ public final K getKey() { return key; }
+ public final V getValue() { return val; }
+ public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
+ public final String toString(){ return key + "=" + val; }
+ public final V setValue(V value) {
+ throw new UnsupportedOperationException();
+ }
+
+ public final boolean equals(Object o) {
+ Object k, v, u; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ (k == key || k.equals(key)) &&
+ (v == (u = val) || v.equals(u)));
+ }
+
+ /**
+ * Virtualized support for map.get(); overridden in subclasses.
+ */
+ Node<K,V> find(int h, Object k) {
+ Node<K,V> e = this;
+ if (k != null) {
+ do {
+ K ek;
+ if (e.hash == h &&
+ ((ek = e.key) == k || (ek != null && k.equals(ek))))
+ return e;
+ } while ((e = e.next) != null);
+ }
+ return null;
+ }
+ }
+
+ /* ---------------- Static utilities -------------- */
+
+ /**
+ * Spreads (XORs) higher bits of hash to lower and also forces top
+ * bit to 0. Because the table uses power-of-two masking, sets of
+ * hashes that vary only in bits above the current mask will
+ * always collide. (Among known examples are sets of Float keys
+ * holding consecutive whole numbers in small tables.) So we
+ * apply a transform that spreads the impact of higher bits
+ * downward. There is a tradeoff between speed, utility, and
+ * quality of bit-spreading. Because many common sets of hashes
+ * are already reasonably distributed (so don't benefit from
+ * spreading), and because we use trees to handle large sets of
+ * collisions in bins, we just XOR some shifted bits in the
+ * cheapest possible way to reduce systematic lossage, as well as
+ * to incorporate impact of the highest bits that would otherwise
+ * never be used in index calculations because of table bounds.
+ */
+ static final int spread(int h) {
+ return (h ^ (h >>> 16)) & HASH_BITS;
}
/**
- * Holder for the thread-local hash code determining which
- * CounterCell to use. The code is initialized via the
- * counterHashCodeGenerator, but may be moved upon collisions.
+ * Returns a power of two table size for the given desired capacity.
+ * See Hackers Delight, sec 3.2
*/
- static final class CounterHashCode {
- int code;
+ private static final int tableSizeFor(int c) {
+ int n = c - 1;
+ n |= n >>> 1;
+ n |= n >>> 2;
+ n |= n >>> 4;
+ n |= n >>> 8;
+ n |= n >>> 16;
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
/**
- * Generates initial value for per-thread CounterHashCodes
+ * Returns x's Class if it is of the form "class C implements
+ * Comparable", else null.
*/
- static final AtomicInteger counterHashCodeGenerator = new AtomicInteger();
+ static Class<?> comparableClassFor(Object x) {
+ if (x instanceof Comparable) {
+ Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
+ if ((c = x.getClass()) == String.class) // bypass checks
+ return c;
+ if ((ts = c.getGenericInterfaces()) != null) {
+ for (int i = 0; i < ts.length; ++i) {
+ if (((t = ts[i]) instanceof ParameterizedType) &&
+ ((p = (ParameterizedType)t).getRawType() ==
+ Comparable.class) &&
+ (as = p.getActualTypeArguments()) != null &&
+ as.length == 1 && as[0] == c) // type arg is c
+ return c;
+ }
+ }
+ }
+ return null;
+ }
/**
- * Increment for counterHashCodeGenerator. See class ThreadLocal
- * for explanation.
+ * Returns k.compareTo(x) if x matches kc (k's screened comparable
+ * class), else 0.
*/
- static final int SEED_INCREMENT = 0x61c88647;
+ @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
+ static int compareComparables(Class<?> kc, Object k, Object x) {
+ return (x == null || x.getClass() != kc ? 0 :
+ ((Comparable)k).compareTo(x));
+ }
- /**
- * Per-thread counter hash codes. Shared across all instances.
+ /* ---------------- Table element access -------------- */
+
+ /*
+ * Volatile access methods are used for table elements as well as
+ * elements of in-progress next table while resizing. All uses of
+ * the tab arguments must be null checked by callers. All callers
+ * also paranoically precheck that tab's length is not zero (or an
+ * equivalent check), thus ensuring that any index argument taking
+ * the form of a hash value anded with (length - 1) is a valid
+ * index. Note that, to be correct wrt arbitrary concurrency
+ * errors by users, these checks must operate on local variables,
+ * which accounts for some odd-looking inline assignments below.
+ * Note that calls to setTabAt always occur within locked regions,
+ * and so in principle require only release ordering, not
+ * full volatile semantics, but are currently coded as volatile
+ * writes to be conservative.
*/
- static final ThreadLocal<CounterHashCode> threadCounterHashCode =
- new ThreadLocal<CounterHashCode>();
+
+ @SuppressWarnings("unchecked")
+ static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
+ return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
+ }
+
+ static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
+ Node<K,V> c, Node<K,V> v) {
+ return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
+ }
+
+ static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
+ U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
+ }
/* ---------------- Fields -------------- */
@@ -589,12 +761,12 @@ public final class ConcurrentHashMapV8
* The array of bins. Lazily initialized upon first insertion.
* Size is always a power of two. Accessed directly by iterators.
*/
- transient volatile Node<V>[] table;
+ transient volatile Node<K,V>[] table;
/**
* The next table to use; non-null only while resizing.
*/
- private transient volatile Node<V>[] nextTable;
+ private transient volatile Node<K,V>[] nextTable;
/**
* Base counter value, used mainly when there is no contention,
@@ -624,9 +796,9 @@ public final class ConcurrentHashMapV8
private transient volatile int transferOrigin;
/**
- * Spinlock (locked via CAS) used when resizing and/or creating Cells.
+ * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
*/
- private transient volatile int counterBusy;
+ private transient volatile int cellsBusy;
/**
* Table of counter cells. When non-null, size is a power of 2.
@@ -638,1775 +810,6 @@ public final class ConcurrentHashMapV8
 private transient ValuesView<K,V> values;
 private transient EntrySetView<K,V> entrySet;
- /** For serialization compatibility. Null unless serialized; see below */
- private Segment<K,V>[] segments;
-
- /* ---------------- Table element access -------------- */
-
- /*
- * Volatile access methods are used for table elements as well as
- * elements of in-progress next table while resizing. Uses are
- * null checked by callers, and implicitly bounds-checked, relying
- * on the invariants that tab arrays have non-zero size, and all
- * indices are masked with (tab.length - 1) which is never
- * negative and always less than length. Note that, to be correct
- * wrt arbitrary concurrency errors by users, bounds checks must
- * operate on local variables, which accounts for some odd-looking
- * inline assignments below.
- */
-
- @SuppressWarnings("unchecked") static final Node tabAt
- (Node[] tab, int i) { // used by Traverser
- return (Node)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
- }
-
- private static final <V> boolean casTabAt
- (Node<V>[] tab, int i, Node<V> c, Node<V> v) {
- return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
- }
-
- private static final <V> void setTabAt
- (Node<V>[] tab, int i, Node<V> v) {
- U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
- }
-
- /* ---------------- Nodes -------------- */
-
- /**
- * Key-value entry. Note that this is never exported out as a
- * user-visible Map.Entry (see MapEntry below). Nodes with a hash
- * field of MOVED are special, and do not contain user keys or
- * values. Otherwise, keys are never null, and null val fields
- * indicate that a node is in the process of being deleted or
- * created. For purposes of read-only access, a key may be read
- * before a val, but can only be used after checking val to be
- * non-null.
- */
- static class Node<V> {
- final int hash;
- final Object key;
- volatile V val;
- volatile Node<V> next;
-
- Node(int hash, Object key, V val, Node<V> next) {
- this.hash = hash;
- this.key = key;
- this.val = val;
- this.next = next;
- }
- }
-
- /* ---------------- TreeBins -------------- */
-
- /**
- * Nodes for use in TreeBins
- */
- static final class TreeNode<V> extends Node<V> {
- TreeNode<V> parent; // red-black tree links
- TreeNode<V> left;
- TreeNode<V> right;
- TreeNode<V> prev; // needed to unlink next upon deletion
- boolean red;
-
- TreeNode(int hash, Object key, V val, Node<V> next, TreeNode<V> parent) {
- super(hash, key, val, next);
- this.parent = parent;
- }
- }
-
- /**
- * A specialized form of red-black tree for use in bins
- * whose size exceeds a threshold.
- *
- * TreeBins use a special form of comparison for search and
- * related operations (which is the main reason we cannot use
- * existing collections such as TreeMaps). TreeBins contain
- * Comparable elements, but may contain others, as well as
- * elements that are Comparable but not necessarily Comparable
- * for the same T, so we cannot invoke compareTo among them. To
- * handle this, the tree is ordered primarily by hash value, then
- * by getClass().getName() order, and then by Comparator order
- * among elements of the same class. On lookup at a node, if
- * elements are not comparable or compare as 0, both left and
- * right children may need to be searched in the case of tied hash
- * values. (This corresponds to the full list search that would be
- * necessary if all elements were non-Comparable and had tied
- * hashes.) The red-black balancing code is updated from
- * pre-jdk-collections
- * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
- * based in turn on Cormen, Leiserson, and Rivest "Introduction to
- * Algorithms" (CLR).
- *
- * TreeBins also maintain a separate locking discipline than
- * regular bins. Because they are forwarded via special MOVED
- * nodes at bin heads (which can never change once established),
- * we cannot use those nodes as locks. Instead, TreeBin
- * extends AbstractQueuedSynchronizer to support a simple form of
- * read-write lock. For update operations and table validation,
- * the exclusive form of lock behaves in the same way as bin-head
- * locks. However, lookups use shared read-lock mechanics to allow
- * multiple readers in the absence of writers. Additionally,
- * these lookups do not ever block: While the lock is not
- * available, they proceed along the slow traversal path (via
- * next-pointers) until the lock becomes available or the list is
- * exhausted, whichever comes first. (These cases are not fast,
- * but maximize aggregate expected throughput.) The AQS mechanics
- * for doing this are straightforward. The lock state is held as
- * AQS getState(). Read counts are negative; the write count (1)
- * is positive. There are no signalling preferences among readers
- * and writers. Since we don't need to export full Lock API, we
- * just override the minimal AQS methods and use them directly.
- */
- static final class TreeBin<V> extends AbstractQueuedSynchronizer {
- private static final long serialVersionUID = 2249069246763182397L;
- transient TreeNode<V> root; // root of tree
- transient TreeNode<V> first; // head of next-pointer list
-
- /* AQS overrides */
- public final boolean isHeldExclusively() { return getState() > 0; }
- public final boolean tryAcquire(int ignore) {
- if (compareAndSetState(0, 1)) {
- setExclusiveOwnerThread(Thread.currentThread());
- return true;
- }
- return false;
- }
- public final boolean tryRelease(int ignore) {
- setExclusiveOwnerThread(null);
- setState(0);
- return true;
- }
- public final int tryAcquireShared(int ignore) {
- for (int c;;) {
- if ((c = getState()) > 0)
- return -1;
- if (compareAndSetState(c, c -1))
- return 1;
- }
- }
- public final boolean tryReleaseShared(int ignore) {
- int c;
- do {} while (!compareAndSetState(c = getState(), c + 1));
- return c == -1;
- }
-
- /** From CLR */
- private void rotateLeft(TreeNode<V> p) {
- if (p != null) {
- TreeNode<V> r = p.right, pp, rl;
- if ((rl = p.right = r.left) != null)
- rl.parent = p;
- if ((pp = r.parent = p.parent) == null)
- root = r;
- else if (pp.left == p)
- pp.left = r;
- else
- pp.right = r;
- r.left = p;
- p.parent = r;
- }
- }
-
- /** From CLR */
- private void rotateRight(TreeNode<V> p) {
- if (p != null) {
- TreeNode<V> l = p.left, pp, lr;
- if ((lr = p.left = l.right) != null)
- lr.parent = p;
- if ((pp = l.parent = p.parent) == null)
- root = l;
- else if (pp.right == p)
- pp.right = l;
- else
- pp.left = l;
- l.right = p;
- p.parent = l;
- }
- }
-
- /**
- * Returns the TreeNode (or null if not found) for the given key
- * starting at given root.
- */
- @SuppressWarnings("unchecked") final TreeNode getTreeNode
- (int h, Object k, TreeNode p) {
- Class> c = k.getClass();
- while (p != null) {
- int dir, ph; Object pk; Class> pc;
- if ((ph = p.hash) == h) {
- if ((pk = p.key) == k || k.equals(pk))
- return p;
- if (c != (pc = pk.getClass()) ||
- !(k instanceof Comparable) ||
- (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
- if ((dir = (c == pc) ? 0 :
- c.getName().compareTo(pc.getName())) == 0) {
- TreeNode r = null, pl, pr; // check both sides
- if ((pr = p.right) != null && h >= pr.hash &&
- (r = getTreeNode(h, k, pr)) != null)
- return r;
- else if ((pl = p.left) != null && h <= pl.hash)
- dir = -1;
- else // nothing there
- return null;
- }
- }
- }
- else
- dir = (h < ph) ? -1 : 1;
- p = (dir > 0) ? p.right : p.left;
- }
- return null;
- }
-
- /**
- * Wrapper for getTreeNode used by CHM.get. Tries to obtain
- * read-lock to call getTreeNode, but during failure to get
- * lock, searches along next links.
- */
- final V getValue(int h, Object k) {
- Node r = null;
- int c = getState(); // Must read lock state first
- for (Node e = first; e != null; e = e.next) {
- if (c <= 0 && compareAndSetState(c, c - 1)) {
- try {
- r = getTreeNode(h, k, root);
- } finally {
- releaseShared(0);
- }
- break;
- }
- else if (e.hash == h && k.equals(e.key)) {
- r = e;
- break;
- }
- else
- c = getState();
- }
- return r == null ? null : r.val;
- }
-
- /**
- * Finds or adds a node.
- * @return null if added
- */
- @SuppressWarnings("unchecked") final TreeNode putTreeNode
- (int h, Object k, V v) {
- Class> c = k.getClass();
- TreeNode pp = root, p = null;
- int dir = 0;
- while (pp != null) { // find existing node or leaf to insert at
- int ph; Object pk; Class<?> pc;
- p = pp;
- if ((ph = p.hash) == h) {
- if ((pk = p.key) == k || k.equals(pk))
- return p;
- if (c != (pc = pk.getClass()) ||
- !(k instanceof Comparable) ||
- (dir = ((Comparable)k).compareTo((Comparable)pk)) == 0) {
- TreeNode s = null, r = null, pr;
- if ((dir = (c == pc) ? 0 :
- c.getName().compareTo(pc.getName())) == 0) {
- if ((pr = p.right) != null && h >= pr.hash &&
- (r = getTreeNode(h, k, pr)) != null)
- return r;
- else // continue left
- dir = -1;
- }
- else if ((pr = p.right) != null && h >= pr.hash)
- s = pr;
- if (s != null && (r = getTreeNode(h, k, s)) != null)
- return r;
- }
- }
- else
- dir = (h < ph) ? -1 : 1;
- pp = (dir > 0) ? p.right : p.left;
- }
-
- TreeNode f = first;
- TreeNode x = first = new TreeNode(h, k, v, f, p);
- if (p == null)
- root = x;
- else { // attach and rebalance; adapted from CLR
- TreeNode xp, xpp;
- if (f != null)
- f.prev = x;
- if (dir <= 0)
- p.left = x;
- else
- p.right = x;
- x.red = true;
- while (x != null && (xp = x.parent) != null && xp.red &&
- (xpp = xp.parent) != null) {
- TreeNode xppl = xpp.left;
- if (xp == xppl) {
- TreeNode y = xpp.right;
- if (y != null && y.red) {
- y.red = false;
- xp.red = false;
- xpp.red = true;
- x = xpp;
- }
- else {
- if (x == xp.right) {
- rotateLeft(x = xp);
- xpp = (xp = x.parent) == null ? null : xp.parent;
- }
- if (xp != null) {
- xp.red = false;
- if (xpp != null) {
- xpp.red = true;
- rotateRight(xpp);
- }
- }
- }
- }
- else {
- TreeNode y = xppl;
- if (y != null && y.red) {
- y.red = false;
- xp.red = false;
- xpp.red = true;
- x = xpp;
- }
- else {
- if (x == xp.left) {
- rotateRight(x = xp);
- xpp = (xp = x.parent) == null ? null : xp.parent;
- }
- if (xp != null) {
- xp.red = false;
- if (xpp != null) {
- xpp.red = true;
- rotateLeft(xpp);
- }
- }
- }
- }
- }
- TreeNode r = root;
- if (r != null && r.red)
- r.red = false;
- }
- return null;
- }
-
- /**
- * Removes the given node, that must be present before this
- * call. This is messier than typical red-black deletion code
- * because we cannot swap the contents of an interior node
- * with a leaf successor that is pinned by "next" pointers
- * that are accessible independently of lock. So instead we
- * swap the tree linkages.
- */
- final void deleteTreeNode(TreeNode p) {
- TreeNode next = (TreeNode)p.next; // unlink traversal pointers
- TreeNode pred = p.prev;
- if (pred == null)
- first = next;
- else
- pred.next = next;
- if (next != null)
- next.prev = pred;
- TreeNode replacement;
- TreeNode pl = p.left;
- TreeNode pr = p.right;
- if (pl != null && pr != null) {
- TreeNode s = pr, sl;
- while ((sl = s.left) != null) // find successor
- s = sl;
- boolean c = s.red; s.red = p.red; p.red = c; // swap colors
- TreeNode sr = s.right;
- TreeNode pp = p.parent;
- if (s == pr) { // p was s's direct parent
- p.parent = s;
- s.right = p;
- }
- else {
- TreeNode sp = s.parent;
- if ((p.parent = sp) != null) {
- if (s == sp.left)
- sp.left = p;
- else
- sp.right = p;
- }
- if ((s.right = pr) != null)
- pr.parent = s;
- }
- p.left = null;
- if ((p.right = sr) != null)
- sr.parent = p;
- if ((s.left = pl) != null)
- pl.parent = s;
- if ((s.parent = pp) == null)
- root = s;
- else if (p == pp.left)
- pp.left = s;
- else
- pp.right = s;
- replacement = sr;
- }
- else
- replacement = (pl != null) ? pl : pr;
- TreeNode pp = p.parent;
- if (replacement == null) {
- if (pp == null) {
- root = null;
- return;
- }
- replacement = p;
- }
- else {
- replacement.parent = pp;
- if (pp == null)
- root = replacement;
- else if (p == pp.left)
- pp.left = replacement;
- else
- pp.right = replacement;
- p.left = p.right = p.parent = null;
- }
- if (!p.red) { // rebalance, from CLR
- TreeNode x = replacement;
- while (x != null) {
- TreeNode xp, xpl;
- if (x.red || (xp = x.parent) == null) {
- x.red = false;
- break;
- }
- if (x == (xpl = xp.left)) {
- TreeNode sib = xp.right;
- if (sib != null && sib.red) {
- sib.red = false;
- xp.red = true;
- rotateLeft(xp);
- sib = (xp = x.parent) == null ? null : xp.right;
- }
- if (sib == null)
- x = xp;
- else {
- TreeNode sl = sib.left, sr = sib.right;
- if ((sr == null || !sr.red) &&
- (sl == null || !sl.red)) {
- sib.red = true;
- x = xp;
- }
- else {
- if (sr == null || !sr.red) {
- if (sl != null)
- sl.red = false;
- sib.red = true;
- rotateRight(sib);
- sib = (xp = x.parent) == null ?
- null : xp.right;
- }
- if (sib != null) {
- sib.red = (xp == null) ? false : xp.red;
- if ((sr = sib.right) != null)
- sr.red = false;
- }
- if (xp != null) {
- xp.red = false;
- rotateLeft(xp);
- }
- x = root;
- }
- }
- }
- else { // symmetric
- TreeNode sib = xpl;
- if (sib != null && sib.red) {
- sib.red = false;
- xp.red = true;
- rotateRight(xp);
- sib = (xp = x.parent) == null ? null : xp.left;
- }
- if (sib == null)
- x = xp;
- else {
- TreeNode sl = sib.left, sr = sib.right;
- if ((sl == null || !sl.red) &&
- (sr == null || !sr.red)) {
- sib.red = true;
- x = xp;
- }
- else {
- if (sl == null || !sl.red) {
- if (sr != null)
- sr.red = false;
- sib.red = true;
- rotateLeft(sib);
- sib = (xp = x.parent) == null ?
- null : xp.left;
- }
- if (sib != null) {
- sib.red = (xp == null) ? false : xp.red;
- if ((sl = sib.left) != null)
- sl.red = false;
- }
- if (xp != null) {
- xp.red = false;
- rotateRight(xp);
- }
- x = root;
- }
- }
- }
- }
- }
- if (p == replacement && (pp = p.parent) != null) {
- if (p == pp.left) // detach pointers
- pp.left = null;
- else if (p == pp.right)
- pp.right = null;
- p.parent = null;
- }
- }
- }
-
- /* ---------------- Collision reduction methods -------------- */
-
- /**
- * Spreads higher bits to lower, and also forces top bit to 0.
- * Because the table uses power-of-two masking, sets of hashes
- * that vary only in bits above the current mask will always
- * collide. (Among known examples are sets of Float keys holding
- * consecutive whole numbers in small tables.) To counter this,
- * we apply a transform that spreads the impact of higher bits
- * downward. There is a tradeoff between speed, utility, and
- * quality of bit-spreading. Because many common sets of hashes
- * are already reasonably distributed across bits (so don't benefit
- * from spreading), and because we use trees to handle large sets
- * of collisions in bins, we don't need excessively high quality.
- */
- private static final int spread(int h) {
- h ^= (h >>> 18) ^ (h >>> 12);
- return (h ^ (h >>> 10)) & HASH_BITS;
- }
-
- /**
- * Replaces a list bin with a tree bin if key is comparable. Call
- * only when locked.
- */
- private final void replaceWithTreeBin(Node[] tab, int index, Object key) {
- if (key instanceof Comparable) {
- TreeBin t = new TreeBin();
- for (Node e = tabAt(tab, index); e != null; e = e.next)
- t.putTreeNode(e.hash, e.key, e.val);
- setTabAt(tab, index, new Node(MOVED, t, null, null));
- }
- }
-
- /* ---------------- Internal access and update methods -------------- */
-
- /** Implementation for get and containsKey */
- @SuppressWarnings("unchecked") private final V internalGet(Object k) {
- int h = spread(k.hashCode());
- retry: for (Node[] tab = table; tab != null;) {
- Node e; Object ek; V ev; int eh; // locals to read fields once
- for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) {
- if ((eh = e.hash) < 0) {
- if ((ek = e.key) instanceof TreeBin) // search TreeBin
- return ((TreeBin<V>)ek).getValue(h, k);
- else { // restart with new table
- tab = (Node[])ek;
- continue retry;
- }
- }
- else if (eh == h && (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek)))
- return ev;
- }
- break;
- }
- return null;
- }
-
- /**
- * Implementation for the four public remove/replace methods:
- * Replaces node value with v, conditional upon match of cv if
- * non-null. If resulting value is null, delete.
- */
- @SuppressWarnings("unchecked") private final V internalReplace
- (Object k, V v, Object cv) {
- int h = spread(k.hashCode());
- V oldVal = null;
- for (Node[] tab = table;;) {
- Node f; int i, fh; Object fk;
- if (tab == null ||
- (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
- break;
- else if ((fh = f.hash) < 0) {
- if ((fk = f.key) instanceof TreeBin) {
- TreeBin<V> t = (TreeBin<V>)fk;
- boolean validated = false;
- boolean deleted = false;
- t.acquire(0);
- try {
- if (tabAt(tab, i) == f) {
- validated = true;
- TreeNode<V> p = t.getTreeNode(h, k, t.root);
- if (p != null) {
- V pv = p.val;
- if (cv == null || cv == pv || cv.equals(pv)) {
- oldVal = pv;
- if ((p.val = v) == null) {
- deleted = true;
- t.deleteTreeNode(p);
- }
- }
- }
- }
- } finally {
- t.release(0);
- }
- if (validated) {
- if (deleted)
- addCount(-1L, -1);
- break;
- }
- }
- else
- tab = (Node[])fk;
- }
- else if (fh != h && f.next == null) // precheck
- break; // rules out possible existence
- else {
- boolean validated = false;
- boolean deleted = false;
- synchronized (f) {
- if (tabAt(tab, i) == f) {
- validated = true;
- for (Node e = f, pred = null;;) {
- Object ek; V ev;
- if (e.hash == h &&
- ((ev = e.val) != null) &&
- ((ek = e.key) == k || k.equals(ek))) {
- if (cv == null || cv == ev || cv.equals(ev)) {
- oldVal = ev;
- if ((e.val = v) == null) {
- deleted = true;
- Node en = e.next;
- if (pred != null)
- pred.next = en;
- else
- setTabAt(tab, i, en);
- }
- }
- break;
- }
- pred = e;
- if ((e = e.next) == null)
- break;
- }
- }
- }
- if (validated) {
- if (deleted)
- addCount(-1L, -1);
- break;
- }
- }
- }
- return oldVal;
- }
-
- /*
- * Internal versions of insertion methods
- * All have the same basic structure as the first (internalPut):
- * 1. If table uninitialized, create
- * 2. If bin empty, try to CAS new node
- * 3. If bin stale, use new table
- * 4. if bin converted to TreeBin, validate and relay to TreeBin methods
- * 5. Lock and validate; if valid, scan and add or update
- *
- * The putAll method differs mainly in attempting to pre-allocate
- * enough table space, and also more lazily performs count updates
- * and checks.
- *
- * Most of the function-accepting methods can't be factored nicely
- * because they require different functional forms, so instead
- * sprawl out similar mechanics.
- */
-
- /** Implementation for put and putIfAbsent */
- @SuppressWarnings("unchecked") private final V internalPut
- (K k, V v, boolean onlyIfAbsent) {
- if (k == null || v == null) throw new NullPointerException();
- int h = spread(k.hashCode());
- int len = 0;
- for (Node[] tab = table;;) {
- int i, fh; Node f; Object fk; V fv;
- if (tab == null)
- tab = initTable();
- else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
- if (casTabAt(tab, i, null, new Node(h, k, v, null)))
- break; // no lock when adding to empty bin
- }
- else if ((fh = f.hash) < 0) {
- if ((fk = f.key) instanceof TreeBin) {
- TreeBin<V> t = (TreeBin<V>)fk;
- V oldVal = null;
- t.acquire(0);
- try {
- if (tabAt(tab, i) == f) {
- len = 2;
- TreeNode<V> p = t.putTreeNode(h, k, v);
- if (p != null) {
- oldVal = p.val;
- if (!onlyIfAbsent)
- p.val = v;
- }
- }
- } finally {
- t.release(0);
- }
- if (len != 0) {
- if (oldVal != null)
- return oldVal;
- break;
- }
- }
- else
- tab = (Node[])fk;
- }
- else if (onlyIfAbsent && fh == h && (fv = f.val) != null &&
- ((fk = f.key) == k || k.equals(fk))) // peek while nearby
- return fv;
- else {
- V oldVal = null;
- synchronized (f) {
- if (tabAt(tab, i) == f) {
- len = 1;
- for (Node e = f;; ++len) {
- Object ek; V ev;
- if (e.hash == h &&
- (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek))) {
- oldVal = ev;
- if (!onlyIfAbsent)
- e.val = v;
- break;
- }
- Node last = e;
- if ((e = e.next) == null) {
- last.next = new Node(h, k, v, null);
- if (len >= TREE_THRESHOLD)
- replaceWithTreeBin(tab, i, k);
- break;
- }
- }
- }
- }
- if (len != 0) {
- if (oldVal != null)
- return oldVal;
- break;
- }
- }
- }
- addCount(1L, len);
- return null;
- }
-
- /** Implementation for computeIfAbsent */
- @SuppressWarnings("unchecked") private final V internalComputeIfAbsent
- (K k, Fun<? super K, ? extends V> mf) {
- if (k == null || mf == null)
- throw new NullPointerException();
- int h = spread(k.hashCode());
- V val = null;
- int len = 0;
- for (Node[] tab = table;;) {
- Node f; int i; Object fk;
- if (tab == null)
- tab = initTable();
- else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
- Node node = new Node(h, k, null, null);
- synchronized (node) {
- if (casTabAt(tab, i, null, node)) {
- len = 1;
- try {
- if ((val = mf.apply(k)) != null)
- node.val = val;
- } finally {
- if (val == null)
- setTabAt(tab, i, null);
- }
- }
- }
- if (len != 0)
- break;
- }
- else if (f.hash < 0) {
- if ((fk = f.key) instanceof TreeBin) {
- TreeBin<V> t = (TreeBin<V>)fk;
- boolean added = false;
- t.acquire(0);
- try {
- if (tabAt(tab, i) == f) {
- len = 1;
- TreeNode<V> p = t.getTreeNode(h, k, t.root);
- if (p != null)
- val = p.val;
- else if ((val = mf.apply(k)) != null) {
- added = true;
- len = 2;
- t.putTreeNode(h, k, val);
- }
- }
- } finally {
- t.release(0);
- }
- if (len != 0) {
- if (!added)
- return val;
- break;
- }
- }
- else
- tab = (Node[])fk;
- }
- else {
- for (Node e = f; e != null; e = e.next) { // prescan
- Object ek; V ev;
- if (e.hash == h && (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek)))
- return ev;
- }
- boolean added = false;
- synchronized (f) {
- if (tabAt(tab, i) == f) {
- len = 1;
- for (Node e = f;; ++len) {
- Object ek; V ev;
- if (e.hash == h &&
- (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek))) {
- val = ev;
- break;
- }
- Node last = e;
- if ((e = e.next) == null) {
- if ((val = mf.apply(k)) != null) {
- added = true;
- last.next = new Node(h, k, val, null);
- if (len >= TREE_THRESHOLD)
- replaceWithTreeBin(tab, i, k);
- }
- break;
- }
- }
- }
- }
- if (len != 0) {
- if (!added)
- return val;
- break;
- }
- }
- }
- if (val != null)
- addCount(1L, len);
- return val;
- }
-
- /** Implementation for compute */
- @SuppressWarnings("unchecked") private final V internalCompute
- (K k, boolean onlyIfPresent,
- BiFun<? super K, ? super V, ? extends V> mf) {
- if (k == null || mf == null)
- throw new NullPointerException();
- int h = spread(k.hashCode());
- V val = null;
- int delta = 0;
- int len = 0;
- for (Node[] tab = table;;) {
- Node f; int i, fh; Object fk;
- if (tab == null)
- tab = initTable();
- else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
- if (onlyIfPresent)
- break;
- Node node = new Node(h, k, null, null);
- synchronized (node) {
- if (casTabAt(tab, i, null, node)) {
- try {
- len = 1;
- if ((val = mf.apply(k, null)) != null) {
- node.val = val;
- delta = 1;
- }
- } finally {
- if (delta == 0)
- setTabAt(tab, i, null);
- }
- }
- }
- if (len != 0)
- break;
- }
- else if ((fh = f.hash) < 0) {
- if ((fk = f.key) instanceof TreeBin) {
- TreeBin<V> t = (TreeBin<V>)fk;
- t.acquire(0);
- try {
- if (tabAt(tab, i) == f) {
- len = 1;
- TreeNode<V> p = t.getTreeNode(h, k, t.root);
- if (p == null && onlyIfPresent)
- break;
- V pv = (p == null) ? null : p.val;
- if ((val = mf.apply(k, pv)) != null) {
- if (p != null)
- p.val = val;
- else {
- len = 2;
- delta = 1;
- t.putTreeNode(h, k, val);
- }
- }
- else if (p != null) {
- delta = -1;
- t.deleteTreeNode(p);
- }
- }
- } finally {
- t.release(0);
- }
- if (len != 0)
- break;
- }
- else
- tab = (Node[])fk;
- }
- else {
- synchronized (f) {
- if (tabAt(tab, i) == f) {
- len = 1;
- for (Node e = f, pred = null;; ++len) {
- Object ek; V ev;
- if (e.hash == h &&
- (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek))) {
- val = mf.apply(k, ev);
- if (val != null)
- e.val = val;
- else {
- delta = -1;
- Node en = e.next;
- if (pred != null)
- pred.next = en;
- else
- setTabAt(tab, i, en);
- }
- break;
- }
- pred = e;
- if ((e = e.next) == null) {
- if (!onlyIfPresent &&
- (val = mf.apply(k, null)) != null) {
- pred.next = new Node(h, k, val, null);
- delta = 1;
- if (len >= TREE_THRESHOLD)
- replaceWithTreeBin(tab, i, k);
- }
- break;
- }
- }
- }
- }
- if (len != 0)
- break;
- }
- }
- if (delta != 0)
- addCount((long)delta, len);
- return val;
- }
-
- /** Implementation for merge */
- @SuppressWarnings("unchecked") private final V internalMerge
- (K k, V v, BiFun<? super V, ? super V, ? extends V> mf) {
- if (k == null || v == null || mf == null)
- throw new NullPointerException();
- int h = spread(k.hashCode());
- V val = null;
- int delta = 0;
- int len = 0;
- for (Node[] tab = table;;) {
- int i; Node f; Object fk; V fv;
- if (tab == null)
- tab = initTable();
- else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
- if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
- delta = 1;
- val = v;
- break;
- }
- }
- else if (f.hash < 0) {
- if ((fk = f.key) instanceof TreeBin) {
- TreeBin<V> t = (TreeBin<V>)fk;
- t.acquire(0);
- try {
- if (tabAt(tab, i) == f) {
- len = 1;
- TreeNode<V> p = t.getTreeNode(h, k, t.root);
- val = (p == null) ? v : mf.apply(p.val, v);
- if (val != null) {
- if (p != null)
- p.val = val;
- else {
- len = 2;
- delta = 1;
- t.putTreeNode(h, k, val);
- }
- }
- else if (p != null) {
- delta = -1;
- t.deleteTreeNode(p);
- }
- }
- } finally {
- t.release(0);
- }
- if (len != 0)
- break;
- }
- else
- tab = (Node[])fk;
- }
- else {
- synchronized (f) {
- if (tabAt(tab, i) == f) {
- len = 1;
- for (Node e = f, pred = null;; ++len) {
- Object ek; V ev;
- if (e.hash == h &&
- (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek))) {
- val = mf.apply(ev, v);
- if (val != null)
- e.val = val;
- else {
- delta = -1;
- Node en = e.next;
- if (pred != null)
- pred.next = en;
- else
- setTabAt(tab, i, en);
- }
- break;
- }
- pred = e;
- if ((e = e.next) == null) {
- val = v;
- pred.next = new Node(h, k, val, null);
- delta = 1;
- if (len >= TREE_THRESHOLD)
- replaceWithTreeBin(tab, i, k);
- break;
- }
- }
- }
- }
- if (len != 0)
- break;
- }
- }
- if (delta != 0)
- addCount((long)delta, len);
- return val;
- }
-
- /** Implementation for putAll */
- @SuppressWarnings("unchecked") private final void internalPutAll
- (Map<? extends K, ? extends V> m) {
- tryPresize(m.size());
- long delta = 0L; // number of uncommitted additions
- boolean npe = false; // to throw exception on exit for nulls
- try { // to clean up counts on other exceptions
- for (Entry<?, ? extends V> entry : m.entrySet()) {
- Object k; V v;
- if (entry == null || (k = entry.getKey()) == null ||
- (v = entry.getValue()) == null) {
- npe = true;
- break;
- }
- int h = spread(k.hashCode());
- for (Node[] tab = table;;) {
- int i; Node f; int fh; Object fk;
- if (tab == null)
- tab = initTable();
- else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
- if (casTabAt(tab, i, null, new Node(h, k, v, null))) {
- ++delta;
- break;
- }
- }
- else if ((fh = f.hash) < 0) {
- if ((fk = f.key) instanceof TreeBin) {
- TreeBin<V> t = (TreeBin<V>)fk;
- boolean validated = false;
- t.acquire(0);
- try {
- if (tabAt(tab, i) == f) {
- validated = true;
- TreeNode<V> p = t.getTreeNode(h, k, t.root);
- if (p != null)
- p.val = v;
- else {
- t.putTreeNode(h, k, v);
- ++delta;
- }
- }
- } finally {
- t.release(0);
- }
- if (validated)
- break;
- }
- else
- tab = (Node<V>[])fk;
- }
- else {
- int len = 0;
- synchronized (f) {
- if (tabAt(tab, i) == f) {
- len = 1;
- for (Node<V> e = f;; ++len) {
- Object ek; V ev;
- if (e.hash == h &&
- (ev = e.val) != null &&
- ((ek = e.key) == k || k.equals(ek))) {
- e.val = v;
- break;
- }
- Node<V> last = e;
- if ((e = e.next) == null) {
- ++delta;
- last.next = new Node<V>(h, k, v, null);
- if (len >= TREE_THRESHOLD)
- replaceWithTreeBin(tab, i, k);
- break;
- }
- }
- }
- }
- if (len != 0) {
- if (len > 1) {
- addCount(delta, len);
- delta = 0L;
- }
- break;
- }
- }
- }
- }
- } finally {
- if (delta != 0L)
- addCount(delta, 2);
- }
- if (npe)
- throw new NullPointerException();
- }
-
- /**
- * Implementation for clear. Steps through each bin, removing all
- * nodes.
- */
- @SuppressWarnings("unchecked") private final void internalClear() {
- long delta = 0L; // negative number of deletions
- int i = 0;
- Node<V>[] tab = table;
- while (tab != null && i < tab.length) {
- Node<V> f = tabAt(tab, i);
- if (f == null)
- ++i;
- else if (f.hash < 0) {
- Object fk;
- if ((fk = f.key) instanceof TreeBin) {
- TreeBin<V> t = (TreeBin<V>)fk;
- t.acquire(0);
- try {
- if (tabAt(tab, i) == f) {
- for (Node<V> p = t.first; p != null; p = p.next) {
- if (p.val != null) { // (currently always true)
- p.val = null;
- --delta;
- }
- }
- t.first = null;
- t.root = null;
- ++i;
- }
- } finally {
- t.release(0);
- }
- }
- else
- tab = (Node<V>[])fk;
- }
- else {
- synchronized (f) {
- if (tabAt(tab, i) == f) {
- for (Node<V> e = f; e != null; e = e.next) {
- if (e.val != null) { // (currently always true)
- e.val = null;
- --delta;
- }
- }
- setTabAt(tab, i, null);
- ++i;
- }
- }
- }
- }
- if (delta != 0L)
- addCount(delta, -1);
- }
-
- /* ---------------- Table Initialization and Resizing -------------- */
-
- /**
- * Returns a power of two table size for the given desired capacity.
- * See Hacker's Delight, sec 3.2
- */
- private static final int tableSizeFor(int c) {
- int n = c - 1;
- n |= n >>> 1;
- n |= n >>> 2;
- n |= n >>> 4;
- n |= n >>> 8;
- n |= n >>> 16;
- return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
- }
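The shift-or cascade above is the classic Hacker's Delight rounding-up-to-a-power-of-two trick. As an illustrative standalone sketch (the `MAXIMUM_CAPACITY` clamp from the real method is elided here), each step doubles the run of one-bits below the highest set bit, so after five steps every lower bit is set and `n + 1` is the next power of two:

```java
// Illustrative sketch of tableSizeFor()'s bit smearing; clamp elided.
public final class TableSizeForDemo {
    static int tableSizeFor(int c) {
        int n = c - 1;      // so exact powers of two are not doubled
        n |= n >>> 1;       // e.g. 0b10000 -> 0b11000
        n |= n >>> 2;       //      0b11000 -> 0b11110
        n |= n >>> 4;       //      0b11110 -> 0b11111
        n |= n >>> 8;       // no-ops for small n, needed for larger ones
        n |= n >>> 16;
        return n + 1;       // 0b11111 + 1 = 0b100000
    }

    public static void main(String[] args) {
        System.out.println(tableSizeFor(17)); // 32
        System.out.println(tableSizeFor(32)); // 32, thanks to the c - 1
    }
}
```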
-
- /**
- * Initializes table, using the size recorded in sizeCtl.
- */
- @SuppressWarnings("unchecked") private final Node[] initTable() {
- Node[] tab; int sc;
- while ((tab = table) == null) {
- if ((sc = sizeCtl) < 0)
- Thread.yield(); // lost initialization race; just spin
- else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
- try {
- if ((tab = table) == null) {
- int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
- @SuppressWarnings("rawtypes") Node[] tb = new Node[n];
- table = tab = (Node<V>[])tb;
- sc = n - (n >>> 2);
- }
- } finally {
- sizeCtl = sc;
- }
- break;
- }
- }
- return tab;
- }
-
- /**
- * Adds to count, and if table is too small and not already
- * resizing, initiates transfer. If already resizing, helps
- * perform transfer if work is available. Rechecks occupancy
- * after a transfer to see if another resize is already needed
- * because resizings are lagging additions.
- *
- * @param x the count to add
- * @param check if <0, don't check resize, if <= 1 only check if uncontended
- */
- private final void addCount(long x, int check) {
- CounterCell[] as; long b, s;
- if ((as = counterCells) != null ||
- !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
- CounterHashCode hc; CounterCell a; long v; int m;
- boolean uncontended = true;
- if ((hc = threadCounterHashCode.get()) == null ||
- as == null || (m = as.length - 1) < 0 ||
- (a = as[m & hc.code]) == null ||
- !(uncontended =
- U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
- fullAddCount(x, hc, uncontended);
- return;
- }
- if (check <= 1)
- return;
- s = sumCount();
- }
- if (check >= 0) {
- Node<V>[] tab, nt; int sc;
- while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
- tab.length < MAXIMUM_CAPACITY) {
- if (sc < 0) {
- if (sc == -1 || transferIndex <= transferOrigin ||
- (nt = nextTable) == null)
- break;
- if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
- transfer(tab, nt);
- }
- else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
- transfer(tab, null);
- s = sumCount();
- }
- }
- }
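The counting half of addCount mirrors the LongAdder design: a CAS on a shared `baseCount`, falling back to per-thread `CounterCell`s when that CAS is contended, with `sumCount()` folding base plus cells back together. A minimal sketch of the idea follows; `StripedCounter` is a hypothetical class invented for illustration, and the real code additionally manages lazy cell creation and cell-table growth in fullAddCount below:

```java
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongArray;

// Sketch of the striped-counter idea behind addCount()/sumCount():
// try a cheap CAS on a shared base; on contention, spread updates
// over per-thread cells and sum everything on read.
final class StripedCounter {
    private final AtomicLong base = new AtomicLong();
    private final AtomicLongArray cells =
        new AtomicLongArray(Runtime.getRuntime().availableProcessors());

    void add(long x) {
        long b = base.get();
        if (!base.compareAndSet(b, b + x)) {           // contended: use a cell
            int i = ThreadLocalRandom.current().nextInt(cells.length());
            cells.addAndGet(i, x);                     // hashed "probe" slot
        }
    }

    long sum() {            // like sumCount(): racy, but weakly consistent
        long s = base.get();
        for (int i = 0; i < cells.length(); ++i)
            s += cells.get(i);
        return s;
    }
}
```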
-
- /**
- * Tries to presize table to accommodate the given number of elements.
- *
- * @param size number of elements (doesn't need to be perfectly accurate)
- */
- @SuppressWarnings("unchecked") private final void tryPresize(int size) {
- int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
- tableSizeFor(size + (size >>> 1) + 1);
- int sc;
- while ((sc = sizeCtl) >= 0) {
- Node<V>[] tab = table; int n;
- if (tab == null || (n = tab.length) == 0) {
- n = (sc > c) ? sc : c;
- if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
- try {
- if (table == tab) {
- @SuppressWarnings("rawtypes") Node[] tb = new Node[n];
- table = (Node<V>[])tb;
- sc = n - (n >>> 2);
- }
- } finally {
- sizeCtl = sc;
- }
- }
- }
- else if (c <= sc || n >= MAXIMUM_CAPACITY)
- break;
- else if (tab == table &&
- U.compareAndSwapInt(this, SIZECTL, sc, -2))
- transfer(tab, null);
- }
- }
-
- /**
- * Moves and/or copies the nodes in each bin to the new table. See
- * above for explanation.
- */
- @SuppressWarnings("unchecked") private final void transfer
- (Node<V>[] tab, Node<V>[] nextTab) {
- int n = tab.length, stride;
- if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
- stride = MIN_TRANSFER_STRIDE; // subdivide range
- if (nextTab == null) { // initiating
- try {
- @SuppressWarnings("rawtypes") Node[] tb = new Node[n << 1];
- nextTab = (Node<V>[])tb;
- } catch (Throwable ex) { // try to cope with OOME
- sizeCtl = Integer.MAX_VALUE;
- return;
- }
- nextTable = nextTab;
- transferOrigin = n;
- transferIndex = n;
- Node<V> rev = new Node<V>(MOVED, tab, null, null);
- for (int k = n; k > 0;) { // progressively reveal ready slots
- int nextk = (k > stride) ? k - stride : 0;
- for (int m = nextk; m < k; ++m)
- nextTab[m] = rev;
- for (int m = n + nextk; m < n + k; ++m)
- nextTab[m] = rev;
- U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
- }
- }
- int nextn = nextTab.length;
- Node<V> fwd = new Node<V>(MOVED, nextTab, null, null);
- boolean advance = true;
- for (int i = 0, bound = 0;;) {
- int nextIndex, nextBound; Node<V> f; Object fk;
- while (advance) {
- if (--i >= bound)
- advance = false;
- else if ((nextIndex = transferIndex) <= transferOrigin) {
- i = -1;
- advance = false;
- }
- else if (U.compareAndSwapInt
- (this, TRANSFERINDEX, nextIndex,
- nextBound = (nextIndex > stride ?
- nextIndex - stride : 0))) {
- bound = nextBound;
- i = nextIndex - 1;
- advance = false;
- }
- }
- if (i < 0 || i >= n || i + n >= nextn) {
- for (int sc;;) {
- if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
- if (sc == -1) {
- nextTable = null;
- table = nextTab;
- sizeCtl = (n << 1) - (n >>> 1);
- }
- return;
- }
- }
- }
- else if ((f = tabAt(tab, i)) == null) {
- if (casTabAt(tab, i, null, fwd)) {
- setTabAt(nextTab, i, null);
- setTabAt(nextTab, i + n, null);
- advance = true;
- }
- }
- else if (f.hash >= 0) {
- synchronized (f) {
- if (tabAt(tab, i) == f) {
- int runBit = f.hash & n;
- Node<V> lastRun = f, lo = null, hi = null;
- for (Node<V> p = f.next; p != null; p = p.next) {
- int b = p.hash & n;
- if (b != runBit) {
- runBit = b;
- lastRun = p;
- }
- }
- if (runBit == 0)
- lo = lastRun;
- else
- hi = lastRun;
- for (Node<V> p = f; p != lastRun; p = p.next) {
- int ph = p.hash;
- Object pk = p.key; V pv = p.val;
- if ((ph & n) == 0)
- lo = new Node<V>(ph, pk, pv, lo);
- else
- hi = new Node<V>(ph, pk, pv, hi);
- }
- setTabAt(nextTab, i, lo);
- setTabAt(nextTab, i + n, hi);
- setTabAt(tab, i, fwd);
- advance = true;
- }
- }
- }
- else if ((fk = f.key) instanceof TreeBin) {
- TreeBin<V> t = (TreeBin<V>)fk;
- t.acquire(0);
- try {
- if (tabAt(tab, i) == f) {
- TreeBin<V> lt = new TreeBin<V>();
- TreeBin<V> ht = new TreeBin<V>();
- int lc = 0, hc = 0;
- for (Node<V> e = t.first; e != null; e = e.next) {
- int h = e.hash;
- Object k = e.key; V v = e.val;
- if ((h & n) == 0) {
- ++lc;
- lt.putTreeNode(h, k, v);
- }
- else {
- ++hc;
- ht.putTreeNode(h, k, v);
- }
- }
- Node<V> ln, hn; // throw away trees if too small
- if (lc < TREE_THRESHOLD) {
- ln = null;
- for (Node<V> p = lt.first; p != null; p = p.next)
- ln = new Node<V>(p.hash, p.key, p.val, ln);
- }
- else
- ln = new Node<V>(MOVED, lt, null, null);
- setTabAt(nextTab, i, ln);
- if (hc < TREE_THRESHOLD) {
- hn = null;
- for (Node<V> p = ht.first; p != null; p = p.next)
- hn = new Node<V>(p.hash, p.key, p.val, hn);
- }
- else
- hn = new Node<V>(MOVED, ht, null, null);
- setTabAt(nextTab, i + n, hn);
- setTabAt(tab, i, fwd);
- advance = true;
- }
- } finally {
- t.release(0);
- }
- }
- else
- advance = true; // already processed
- }
- }
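The rebinning in transfer relies on the table length n being a power of two: a key in bin `i = h & (n - 1)` of the old table can only land in bin `i` or bin `i + n` of the doubled table, chosen by the single hash bit `h & n` (the `lastRun` scan above just reuses the longest tail of nodes that already agree on that bit). A toy illustration with arbitrary example values:

```java
// Illustration of the lo/hi split used by transfer(): after doubling a
// power-of-two table of length n, a node moves from old bin i to new bin
// i or i + n, selected by the single bit (h & n).
public final class SplitDemo {
    public static void main(String[] args) {
        int n = 16;                  // old table length
        int h = 0b110101;            // some spread hash (53)
        int i = h & (n - 1);         // old index: 5
        int newIndex = ((h & n) == 0) ? i : i + n;
        System.out.println(i + " -> " + newIndex); // 5 -> 21, bit 16 is set
    }
}
```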
-
- /* ---------------- Counter support -------------- */
-
- final long sumCount() {
- CounterCell[] as = counterCells; CounterCell a;
- long sum = baseCount;
- if (as != null) {
- for (int i = 0; i < as.length; ++i) {
- if ((a = as[i]) != null)
- sum += a.value;
- }
- }
- return sum;
- }
-
- // See LongAdder version for explanation
- private final void fullAddCount(long x, CounterHashCode hc,
- boolean wasUncontended) {
- int h;
- if (hc == null) {
- hc = new CounterHashCode();
- int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT);
- h = hc.code = (s == 0) ? 1 : s; // Avoid zero
- threadCounterHashCode.set(hc);
- }
- else
- h = hc.code;
- boolean collide = false; // True if last slot nonempty
- for (;;) {
- CounterCell[] as; CounterCell a; int n; long v;
- if ((as = counterCells) != null && (n = as.length) > 0) {
- if ((a = as[(n - 1) & h]) == null) {
- if (counterBusy == 0) { // Try to attach new Cell
- CounterCell r = new CounterCell(x); // Optimistic create
- if (counterBusy == 0 &&
- U.compareAndSwapInt(this, COUNTERBUSY, 0, 1)) {
- boolean created = false;
- try { // Recheck under lock
- CounterCell[] rs; int m, j;
- if ((rs = counterCells) != null &&
- (m = rs.length) > 0 &&
- rs[j = (m - 1) & h] == null) {
- rs[j] = r;
- created = true;
- }
- } finally {
- counterBusy = 0;
- }
- if (created)
- break;
- continue; // Slot is now non-empty
- }
- }
- collide = false;
- }
- else if (!wasUncontended) // CAS already known to fail
- wasUncontended = true; // Continue after rehash
- else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
- break;
- else if (counterCells != as || n >= NCPU)
- collide = false; // At max size or stale
- else if (!collide)
- collide = true;
- else if (counterBusy == 0 &&
- U.compareAndSwapInt(this, COUNTERBUSY, 0, 1)) {
- try {
- if (counterCells == as) {// Expand table unless stale
- CounterCell[] rs = new CounterCell[n << 1];
- for (int i = 0; i < n; ++i)
- rs[i] = as[i];
- counterCells = rs;
- }
- } finally {
- counterBusy = 0;
- }
- collide = false;
- continue; // Retry with expanded table
- }
- h ^= h << 13; // Rehash
- h ^= h >>> 17;
- h ^= h << 5;
- }
- else if (counterBusy == 0 && counterCells == as &&
- U.compareAndSwapInt(this, COUNTERBUSY, 0, 1)) {
- boolean init = false;
- try { // Initialize table
- if (counterCells == as) {
- CounterCell[] rs = new CounterCell[2];
- rs[h & 1] = new CounterCell(x);
- counterCells = rs;
- init = true;
- }
- } finally {
- counterBusy = 0;
- }
- if (init)
- break;
- }
- else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
- break; // Fall back on using base
- }
- hc.code = h; // Record index for next time
- }
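The three shift-xor lines marked `// Rehash` above are Marsaglia's xorshift step, used here to move a contended thread onto a different counter cell. Isolated as a sketch (same 13/17/5 shift triple; this is not the map's own code, just the step on its own):

```java
// Sketch of the xorshift probe rehash used by fullAddCount(): any nonzero
// seed cycles through all 2^32 - 1 nonzero ints before repeating.
static int rehash(int h) {
    h ^= h << 13;
    h ^= h >>> 17;
    h ^= h << 5;
    return h;   // nonzero in -> nonzero out
}
```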
-
- /* ---------------- Table Traversal -------------- */
-
- /**
- * Encapsulates traversal for methods such as containsValue; also
- * serves as a base class for other iterators and bulk tasks.
- *
- * At each step, the iterator snapshots the key ("nextKey") and
- * value ("nextVal") of a valid node (i.e., one that, at point of
- * snapshot, has a non-null user value). Because val fields can
- * change (including to null, indicating deletion), field nextVal
- * might not be accurate at point of use, but still maintains the
- * weak consistency property of holding a value that was once
- * valid. To support iterator.remove, the nextKey field is not
- * updated (nulled out) when the iterator cannot advance.
- *
- * Internal traversals directly access these fields, as in:
- * {@code while (it.advance() != null) { process(it.nextKey); }}
- *
- * Exported iterators must track whether the iterator has advanced
- * (in hasNext vs next) (by setting/checking/nulling field
- * nextVal), and then extract key, value, or key-value pairs as
- * return values of next().
- *
- * The iterator visits each still-valid node that was reachable
- * upon iterator construction exactly once. It might miss some that
- * were added to a bin after the bin was visited, which is OK wrt
- * consistency guarantees. Maintaining this property in the face
- * of possible ongoing resizes requires a fair amount of
- * bookkeeping state that is difficult to optimize away amidst
- * volatile accesses. Even so, traversal maintains reasonable
- * throughput.
- *
- * Normally, iteration proceeds bin-by-bin traversing lists.
- * However, if the table has been resized, then all future steps
- * must traverse both the bin at the current index as well as at
- * (index + baseSize); and so on for further resizings. To
- * paranoically cope with potential sharing by users of iterators
- * across threads, iteration terminates if a bounds check fails
- * for a table read.
- *
- * This class extends CountedCompleter to streamline parallel
- * iteration in bulk operations. This adds only a few fields of
- * space overhead, small enough not to matter in the cases where
- * it is not needed. Because CountedCompleter is
- * Serializable, but iterators need not be, we need to add warning
- * suppressions.
- */
- @SuppressWarnings("serial") static class Traverser
- extends CountedCompleter {
- final ConcurrentHashMapV8 map;
- Node next; // the next entry to use
- Object nextKey; // cached key field of next
- V nextVal; // cached val field of next
- Node<V>[] tab; // current table; updated if resized
- int index; // index of bin to use next
- int baseIndex; // current index of initial table
- int baseLimit; // index bound for initial table
- int baseSize; // initial table size
- int batch; // split control
-
- /** Creates iterator for all entries in the table. */
- Traverser(ConcurrentHashMapV8<K, V> map) {
- this.map = map;
- }
-
- /** Creates iterator for split() methods and task constructors */
- Traverser(ConcurrentHashMapV8<K, V> map, Traverser<K, V, ?> it, int batch) {
- super(it);
- this.batch = batch;
- if ((this.map = map) != null && it != null) { // split parent
- Node<V>[] t;
- if ((t = it.tab) == null &&
- (t = it.tab = map.table) != null)
- it.baseLimit = it.baseSize = t.length;
- this.tab = t;
- this.baseSize = it.baseSize;
- int hi = this.baseLimit = it.baseLimit;
- it.baseLimit = this.index = this.baseIndex =
- (hi + it.baseIndex + 1) >>> 1;
- }
- }
-
- /**
- * Advances next; returns nextVal or null if terminated.
- * See above for explanation.
- */
- @SuppressWarnings("unchecked") final V advance() {
- Node<V> e = next;
- V ev = null;
- outer: do {
- if (e != null) // advance past used/skipped node
- e = e.next;
- while (e == null) { // get to next non-null bin
- ConcurrentHashMapV8<K, V> m;
- Node<V>[] t; int b, i, n; Object ek; // must use locals
- if ((t = tab) != null)
- n = t.length;
- else if ((m = map) != null && (t = tab = m.table) != null)
- n = baseLimit = baseSize = t.length;
- else
- break outer;
- if ((b = baseIndex) >= baseLimit ||
- (i = index) < 0 || i >= n)
- break outer;
- if ((e = tabAt(t, i)) != null && e.hash < 0) {
- if ((ek = e.key) instanceof TreeBin)
- e = ((TreeBin<V>)ek).first;
- else {
- tab = (Node<V>[])ek;
- continue; // restarts due to null val
- }
- } // visit upper slots if present
- index = (i += baseSize) < n ? i : (baseIndex = b + 1);
- }
- nextKey = e.key;
- } while ((ev = e.val) == null); // skip deleted or special nodes
- next = e;
- return nextVal = ev;
- }
-
- public final void remove() {
- Object k = nextKey;
- if (k == null && (advance() == null || (k = nextKey) == null))
- throw new IllegalStateException();
- map.internalReplace(k, null, null);
- }
-
- public final boolean hasNext() {
- return nextVal != null || advance() != null;
- }
-
- public final boolean hasMoreElements() { return hasNext(); }
-
- public void compute() { } // default no-op CountedCompleter body
-
- /**
- * Returns a batch value > 0 if this task should (and must) be
- * split; if so, it adds to the pending count and, in any case,
- * updates the batch value. The initial batch value is approx
- * exp2 of the number of times (minus one) to split task by
- * two before executing leaf action. This value is faster to
- * compute and more convenient to use as a guide to splitting
- * than is the depth, since it is used while dividing by two
- * anyway.
- */
- final int preSplit() {
- ConcurrentHashMapV8<K, V> m; int b; Node<V>[] t; ForkJoinPool pool;
- if ((b = batch) < 0 && (m = map) != null) { // force initialization
- if ((t = tab) == null && (t = tab = m.table) != null)
- baseLimit = baseSize = t.length;
- if (t != null) {
- long n = m.sumCount();
- int par = ((pool = getPool()) == null) ?
- ForkJoinPool.getCommonPoolParallelism() :
- pool.getParallelism();
- int sp = par << 3; // slack of 8
- b = (n <= 0L) ? 0 : (n < (long)sp) ? (int)n : sp;
- }
- }
- b = (b <= 1 || baseIndex == baseLimit) ? 0 : (b >>> 1);
- if ((batch = b) > 0)
- addToPendingCount(1);
- return b;
- }
-
- }
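For orientation, internal callers drive a Traverser with the advance()/nextKey idiom quoted in the class comment. A hypothetical helper showing the shape of such a loop (`forEachKey` is not part of the class, just an illustration against the package-private types above):

```java
// Hypothetical internal-style use of Traverser: advance() returns the
// next value snapshot, or null once the traversal is exhausted.
static <K, V> void forEachKey(ConcurrentHashMapV8<K, V> map) {
    Traverser<K, V, Void> it = new Traverser<K, V, Void>(map);
    while (it.advance() != null)
        System.out.println(it.nextKey);   // key snapshot of current node
}
```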
/* ---------------- Public operations -------------- */
@@ -2430,8 +833,8 @@ public final class ConcurrentHashMapV8
if (initialCapacity < 0)
throw new IllegalArgumentException();
int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
- MAXIMUM_CAPACITY :
- tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
+ MAXIMUM_CAPACITY :
+ tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
this.sizeCtl = cap;
}
@@ -2442,7 +845,7 @@ public final class ConcurrentHashMapV8
*/
public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
this.sizeCtl = DEFAULT_CAPACITY;
- internalPutAll(m);
+ putAll(m);
}
/**
@@ -2490,42 +893,11 @@ public final class ConcurrentHashMapV8
initialCapacity = concurrencyLevel; // as estimated threads
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
int cap = (size >= (long)MAXIMUM_CAPACITY) ?
- MAXIMUM_CAPACITY : tableSizeFor((int)size);
+ MAXIMUM_CAPACITY : tableSizeFor((int)size);
this.sizeCtl = cap;
}
- /**
- * Creates a new {@link java.util.Set} backed by a ConcurrentHashMapV8
- * from the given type to {@code Boolean.TRUE}.
- *
- * @return the new set
- */
- public static <K> KeySetView<K, Boolean> newKeySet() {
- return new KeySetView<K, Boolean>(new ConcurrentHashMapV8<K, Boolean>(),
- Boolean.TRUE);
- }
-
- /**
- * Creates a new {@link java.util.Set} backed by a ConcurrentHashMapV8
- * from the given type to {@code Boolean.TRUE}.
- *
- * @param initialCapacity The implementation performs internal
- * sizing to accommodate this many elements.
- * @throws IllegalArgumentException if the initial capacity of
- * elements is negative
- * @return the new set
- */
- public static <K> KeySetView<K, Boolean> newKeySet(int initialCapacity) {
- return new KeySetView<K, Boolean>
- (new ConcurrentHashMapV8<K, Boolean>(initialCapacity), Boolean.TRUE);
- }
-
- /**
- * {@inheritDoc}
- */
- public boolean isEmpty() {
- return sumCount() <= 0L; // ignore transient negative values
- }
+ // Original (since JDK1.2) Map methods
/**
* {@inheritDoc}
@@ -2534,21 +906,14 @@ public final class ConcurrentHashMapV8