1 /*
2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3  *
4  * This code is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License version 2 only, as
6  * published by the Free Software Foundation.  Oracle designates this
7  * particular file as subject to the "Classpath" exception as provided
8  * by Oracle in the LICENSE file that accompanied this code.
9  *
10  * This code is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13  * version 2 for more details (a copy is included in the LICENSE file that
14  * accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License version
17  * 2 along with this work; if not, write to the Free Software Foundation,
18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19  *
20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21  * or visit www.oracle.com if you need additional information or have any
22  * questions.
23  */
24 
25 /*
26  * This file is available under and governed by the GNU General Public
27  * License version 2 only, as published by the Free Software Foundation.
28  * However, the following notice accompanied the original version of this
29  * file:
30  *
31  * Written by Doug Lea with assistance from members of JCP JSR-166
32  * Expert Group and released to the public domain, as explained at
33  * http://creativecommons.org/publicdomain/zero/1.0/
34  */
35 
36 package java.util.concurrent;
37 
38 import java.io.ObjectStreamField;
39 import java.io.Serializable;
40 import java.lang.reflect.ParameterizedType;
41 import java.lang.reflect.Type;
42 import java.util.AbstractMap;
43 import java.util.Arrays;
44 import java.util.Collection;
45 import java.util.Enumeration;
46 import java.util.HashMap;
47 import java.util.Hashtable;
48 import java.util.Iterator;
49 import java.util.Map;
50 import java.util.NoSuchElementException;
51 import java.util.Set;
52 import java.util.Spliterator;
53 import java.util.concurrent.atomic.AtomicReference;
54 import java.util.concurrent.locks.LockSupport;
55 import java.util.concurrent.locks.ReentrantLock;
56 import java.util.function.BiConsumer;
57 import java.util.function.BiFunction;
58 import java.util.function.Consumer;
59 import java.util.function.DoubleBinaryOperator;
60 import java.util.function.Function;
61 import java.util.function.IntBinaryOperator;
62 import java.util.function.LongBinaryOperator;
63 import java.util.function.Predicate;
64 import java.util.function.ToDoubleBiFunction;
65 import java.util.function.ToDoubleFunction;
66 import java.util.function.ToIntBiFunction;
67 import java.util.function.ToIntFunction;
68 import java.util.function.ToLongBiFunction;
69 import java.util.function.ToLongFunction;
70 import java.util.stream.Stream;
71 
72 // BEGIN android-note
73 // removed link to collections framework docs
74 // END android-note
75 
76 /**
77  * A hash table supporting full concurrency of retrievals and
78  * high expected concurrency for updates. This class obeys the
79  * same functional specification as {@link java.util.Hashtable}, and
80  * includes versions of methods corresponding to each method of
81  * {@code Hashtable}. However, even though all operations are
82  * thread-safe, retrieval operations do <em>not</em> entail locking,
83  * and there is <em>not</em> any support for locking the entire table
84  * in a way that prevents all access.  This class is fully
85  * interoperable with {@code Hashtable} in programs that rely on its
86  * thread safety but not on its synchronization details.
87  *
88  * <p>Retrieval operations (including {@code get}) generally do not
89  * block, so may overlap with update operations (including {@code put}
90  * and {@code remove}). Retrievals reflect the results of the most
91  * recently <em>completed</em> update operations holding upon their
92  * onset. (More formally, an update operation for a given key bears a
93  * <em>happens-before</em> relation with any (non-null) retrieval for
94  * that key reporting the updated value.)  For aggregate operations
95  * such as {@code putAll} and {@code clear}, concurrent retrievals may
96  * reflect insertion or removal of only some entries.  Similarly,
97  * Iterators, Spliterators and Enumerations return elements reflecting the
98  * state of the hash table at some point at or since the creation of the
99  * iterator/enumeration.  They do <em>not</em> throw {@link
100  * java.util.ConcurrentModificationException ConcurrentModificationException}.
101  * However, iterators are designed to be used by only one thread at a time.
102  * Bear in mind that the results of aggregate status methods including
103  * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
104  * useful only when a map is not undergoing concurrent updates in other threads.
105  * Otherwise the results of these methods reflect transient states
106  * that may be adequate for monitoring or estimation purposes, but not
107  * for program control.
108  *
109  * <p>The table is dynamically expanded when there are too many
110  * collisions (i.e., keys that have distinct hash codes but fall into
111  * the same slot modulo the table size), with the expected average
112  * effect of maintaining roughly two bins per mapping (corresponding
113  * to a 0.75 load factor threshold for resizing). There may be much
114  * variance around this average as mappings are added and removed, but
115  * overall, this maintains a commonly accepted time/space tradeoff for
116  * hash tables.  However, resizing this or any other kind of hash
117  * table may be a relatively slow operation. When possible, it is a
118  * good idea to provide a size estimate as an optional {@code
119  * initialCapacity} constructor argument. An additional optional
120  * {@code loadFactor} constructor argument provides a further means of
121  * customizing initial table capacity by specifying the table density
122  * to be used in calculating the amount of space to allocate for the
123  * given number of elements.  Also, for compatibility with previous
124  * versions of this class, constructors may optionally specify an
125  * expected {@code concurrencyLevel} as an additional hint for
126  * internal sizing.  Note that using many keys with exactly the same
127  * {@code hashCode()} is a sure way to slow down performance of any
128  * hash table. To ameliorate impact, when keys are {@link Comparable},
129  * this class may use comparison order among keys to help break ties.
130  *
131  * <p>A {@link Set} projection of a ConcurrentHashMap may be created
132  * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
133  * (using {@link #keySet(Object)}) when only keys are of interest, and the
134  * mapped values are (perhaps transiently) not used or all take the
135  * same mapping value.
136  *
137  * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
138  * form of histogram or multiset) by using {@link
139  * java.util.concurrent.atomic.LongAdder} values and initializing via
140  * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
141  * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
142  * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
143  *
144  * <p>This class and its views and iterators implement all of the
145  * <em>optional</em> methods of the {@link Map} and {@link Iterator}
146  * interfaces.
147  *
148  * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
149  * does <em>not</em> allow {@code null} to be used as a key or value.
150  *
151  * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
152  * operations that, unlike most {@link Stream} methods, are designed
153  * to be safely, and often sensibly, applied even with maps that are
154  * being concurrently updated by other threads; for example, when
155  * computing a snapshot summary of the values in a shared registry.
156  * There are three kinds of operation, each with four forms, accepting
157  * functions with keys, values, entries, and (key, value) pairs as
158  * arguments and/or return values. Because the elements of a
159  * ConcurrentHashMap are not ordered in any particular way, and may be
160  * processed in different orders in different parallel executions, the
161  * correctness of supplied functions should not depend on any
162  * ordering, or on any other objects or values that may transiently
163  * change while computation is in progress; and except for forEach
164  * actions, should ideally be side-effect-free. Bulk operations on
165  * {@link java.util.Map.Entry} objects do not support method {@code
166  * setValue}.
167  *
168  * <ul>
169  * <li>forEach: Performs a given action on each element.
170  * A variant form applies a given transformation on each element
171  * before performing the action.
172  *
173  * <li>search: Returns the first available non-null result of
174  * applying a given function on each element; skipping further
175  * search when a result is found.
176  *
177  * <li>reduce: Accumulates each element.  The supplied reduction
178  * function cannot rely on ordering (more formally, it should be
179  * both associative and commutative).  There are five variants:
180  *
181  * <ul>
182  *
183  * <li>Plain reductions. (There is not a form of this method for
184  * (key, value) function arguments since there is no corresponding
185  * return type.)
186  *
187  * <li>Mapped reductions that accumulate the results of a given
188  * function applied to each element.
189  *
190  * <li>Reductions to scalar doubles, longs, and ints, using a
191  * given basis value.
192  *
193  * </ul>
194  * </ul>
195  *
196  * <p>These bulk operations accept a {@code parallelismThreshold}
197  * argument. Methods proceed sequentially if the current map size is
198  * estimated to be less than the given threshold. Using a value of
199  * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
200  * of {@code 1} results in maximal parallelism by partitioning into
201  * enough subtasks to fully utilize the {@link
202  * ForkJoinPool#commonPool()} that is used for all parallel
203  * computations. Normally, you would initially choose one of these
204  * extreme values, and then measure performance of using in-between
205  * values that trade off overhead versus throughput.
206  *
207  * <p>The concurrency properties of bulk operations follow
208  * from those of ConcurrentHashMap: Any non-null result returned
209  * from {@code get(key)} and related access methods bears a
210  * happens-before relation with the associated insertion or
211  * update.  The result of any bulk operation reflects the
212  * composition of these per-element relations (but is not
213  * necessarily atomic with respect to the map as a whole unless it
214  * is somehow known to be quiescent).  Conversely, because keys
215  * and values in the map are never null, null serves as a reliable
216  * atomic indicator of the current lack of any result.  To
217  * maintain this property, null serves as an implicit basis for
218  * all non-scalar reduction operations. For the double, long, and
219  * int versions, the basis should be one that, when combined with
220  * any other value, returns that other value (more formally, it
221  * should be the identity element for the reduction). Most common
222  * reductions have these properties; for example, computing a sum
223  * with basis 0 or a minimum with basis MAX_VALUE.
224  *
225  * <p>Search and transformation functions provided as arguments
226  * should similarly return null to indicate the lack of any result
227  * (in which case it is not used). In the case of mapped
228  * reductions, this also enables transformations to serve as
229  * filters, returning null (or, in the case of primitive
230  * specializations, the identity basis) if the element should not
231  * be combined. You can create compound transformations and
232  * filterings by composing them yourself under this "null means
233  * there is nothing there now" rule before using them in search or
234  * reduce operations.
235  *
236  * <p>Methods accepting and/or returning Entry arguments maintain
237  * key-value associations. They may be useful for example when
238  * finding the key for the greatest value. Note that "plain" Entry
239  * arguments can be supplied using {@code new
240  * AbstractMap.SimpleEntry(k,v)}.
241  *
242  * <p>Bulk operations may complete abruptly, throwing an
243  * exception encountered in the application of a supplied
244  * function. Bear in mind when handling such exceptions that other
245  * concurrently executing functions could also have thrown
246  * exceptions, or would have done so if the first exception had
247  * not occurred.
248  *
249  * <p>Speedups for parallel compared to sequential forms are common
250  * but not guaranteed.  Parallel operations involving brief functions
251  * on small maps may execute more slowly than sequential forms if the
252  * underlying work to parallelize the computation is more expensive
253  * than the computation itself.  Similarly, parallelization may not
254  * lead to much actual parallelism if all processors are busy
255  * performing unrelated tasks.
256  *
257  * <p>All arguments to all task methods must be non-null.
258  *
259  * @since 1.5
260  * @author Doug Lea
261  * @param <K> the type of keys maintained by this map
262  * @param <V> the type of mapped values
263  */
264 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
265     implements ConcurrentMap<K,V>, Serializable {
266     private static final long serialVersionUID = 7249069246763182397L;
267 
268     /*
269      * Overview:
270      *
271      * The primary design goal of this hash table is to maintain
272      * concurrent readability (typically method get(), but also
273      * iterators and related methods) while minimizing update
274      * contention. Secondary goals are to keep space consumption about
275      * the same or better than java.util.HashMap, and to support high
276      * initial insertion rates on an empty table by many threads.
277      *
278      * This map usually acts as a binned (bucketed) hash table.  Each
279      * key-value mapping is held in a Node.  Most nodes are instances
280      * of the basic Node class with hash, key, value, and next
281      * fields. However, various subclasses exist: TreeNodes are
282      * arranged in balanced trees, not lists.  TreeBins hold the roots
283      * of sets of TreeNodes. ForwardingNodes are placed at the heads
284      * of bins during resizing. ReservationNodes are used as
285      * placeholders while establishing values in computeIfAbsent and
286      * related methods.  The types TreeBin, ForwardingNode, and
287      * ReservationNode do not hold normal user keys, values, or
288      * hashes, and are readily distinguishable during search etc
289      * because they have negative hash fields and null key and value
290      * fields. (These special nodes are either uncommon or transient,
291      * so the impact of carrying around some unused fields is
292      * insignificant.)
293      *
294      * The table is lazily initialized to a power-of-two size upon the
295      * first insertion.  Each bin in the table normally contains a
296      * list of Nodes (most often, the list has only zero or one Node).
297      * Table accesses require volatile/atomic reads, writes, and
298      * CASes.  Because there is no other way to arrange this without
299      * adding further indirections, we use intrinsics
300      * (sun.misc.Unsafe) operations.
301      *
302      * We use the top (sign) bit of Node hash fields for control
303      * purposes -- it is available anyway because of addressing
304      * constraints.  Nodes with negative hash fields are specially
305      * handled or ignored in map methods.
306      *
307      * Insertion (via put or its variants) of the first node in an
308      * empty bin is performed by just CASing it to the bin.  This is
309      * by far the most common case for put operations under most
310      * key/hash distributions.  Other update operations (insert,
311      * delete, and replace) require locks.  We do not want to waste
312      * the space required to associate a distinct lock object with
313      * each bin, so instead use the first node of a bin list itself as
314      * a lock. Locking support for these locks relies on builtin
315      * "synchronized" monitors.
316      *
317      * Using the first node of a list as a lock does not by itself
318      * suffice though: When a node is locked, any update must first
319      * validate that it is still the first node after locking it, and
320      * retry if not. Because new nodes are always appended to lists,
321      * once a node is first in a bin, it remains first until deleted
322      * or the bin becomes invalidated (upon resizing).
323      *
324      * The main disadvantage of per-bin locks is that other update
325      * operations on other nodes in a bin list protected by the same
326      * lock can stall, for example when user equals() or mapping
327      * functions take a long time.  However, statistically, under
328      * random hash codes, this is not a common problem.  Ideally, the
329      * frequency of nodes in bins follows a Poisson distribution
330      * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
331      * parameter of about 0.5 on average, given the resizing threshold
332      * of 0.75, although with a large variance because of resizing
333      * granularity. Ignoring variance, the expected occurrences of
334      * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
335      * first values are:
336      *
337      * 0:    0.60653066
338      * 1:    0.30326533
339      * 2:    0.07581633
340      * 3:    0.01263606
341      * 4:    0.00157952
342      * 5:    0.00015795
343      * 6:    0.00001316
344      * 7:    0.00000094
345      * 8:    0.00000006
346      * more: less than 1 in ten million
347      *
348      * Lock contention probability for two threads accessing distinct
349      * elements is roughly 1 / (8 * #elements) under random hashes.
350      *
351      * Actual hash code distributions encountered in practice
352      * sometimes deviate significantly from uniform randomness.  This
353      * includes the case when N > (1<<30), so some keys MUST collide.
354      * Similarly for dumb or hostile usages in which multiple keys are
355  * designed to have identical hash codes or ones that differ only
356      * in masked-out high bits. So we use a secondary strategy that
357      * applies when the number of nodes in a bin exceeds a
358      * threshold. These TreeBins use a balanced tree to hold nodes (a
359      * specialized form of red-black trees), bounding search time to
360      * O(log N).  Each search step in a TreeBin is at least twice as
361      * slow as in a regular list, but given that N cannot exceed
362      * (1<<64) (before running out of addresses) this bounds search
363      * steps, lock hold times, etc, to reasonable constants (roughly
364      * 100 nodes inspected per operation worst case) so long as keys
365      * are Comparable (which is very common -- String, Long, etc).
366      * TreeBin nodes (TreeNodes) also maintain the same "next"
367      * traversal pointers as regular nodes, so can be traversed in
368      * iterators in the same way.
369      *
370      * The table is resized when occupancy exceeds a percentage
371      * threshold (nominally, 0.75, but see below).  Any thread
372      * noticing an overfull bin may assist in resizing after the
373      * initiating thread allocates and sets up the replacement array.
374      * However, rather than stalling, these other threads may proceed
375      * with insertions etc.  The use of TreeBins shields us from the
376      * worst case effects of overfilling while resizes are in
377      * progress.  Resizing proceeds by transferring bins, one by one,
378      * from the table to the next table. However, threads claim small
379      * blocks of indices to transfer (via field transferIndex) before
380      * doing so, reducing contention.  A generation stamp in field
381      * sizeCtl ensures that resizings do not overlap. Because we are
382      * using power-of-two expansion, the elements from each bin must
383      * either stay at same index, or move with a power of two
384      * offset. We eliminate unnecessary node creation by catching
385      * cases where old nodes can be reused because their next fields
386      * won't change.  On average, only about one-sixth of them need
387      * cloning when a table doubles. The nodes they replace will be
388      * garbage collectable as soon as they are no longer referenced by
389      * any reader thread that may be in the midst of concurrently
390      * traversing table.  Upon transfer, the old table bin contains
391      * only a special forwarding node (with hash field "MOVED") that
392      * contains the next table as its key. On encountering a
393      * forwarding node, access and update operations restart, using
394      * the new table.
395      *
396      * Each bin transfer requires its bin lock, which can stall
397      * waiting for locks while resizing. However, because other
398      * threads can join in and help resize rather than contend for
399      * locks, average aggregate waits become shorter as resizing
400      * progresses.  The transfer operation must also ensure that all
401      * accessible bins in both the old and new table are usable by any
402      * traversal.  This is arranged in part by proceeding from the
403      * last bin (table.length - 1) up towards the first.  Upon seeing
404      * a forwarding node, traversals (see class Traverser) arrange to
405      * move to the new table without revisiting nodes.  To ensure that
406      * no intervening nodes are skipped even when moved out of order,
407      * a stack (see class TableStack) is created on first encounter of
408      * a forwarding node during a traversal, to maintain its place if
409      * later processing the current table. The need for these
410      * save/restore mechanics is relatively rare, but when one
411      * forwarding node is encountered, typically many more will be.
412      * So Traversers use a simple caching scheme to avoid creating so
413      * many new TableStack nodes. (Thanks to Peter Levart for
414      * suggesting use of a stack here.)
415      *
416      * The traversal scheme also applies to partial traversals of
417      * ranges of bins (via an alternate Traverser constructor)
418      * to support partitioned aggregate operations.  Also, read-only
419      * operations give up if ever forwarded to a null table, which
420      * provides support for shutdown-style clearing, which is also not
421      * currently implemented.
422      *
423      * Lazy table initialization minimizes footprint until first use,
424      * and also avoids resizings when the first operation is from a
425      * putAll, constructor with map argument, or deserialization.
426      * These cases attempt to override the initial capacity settings,
427      * but harmlessly fail to take effect in cases of races.
428      *
429      * The element count is maintained using a specialization of
430      * LongAdder. We need to incorporate a specialization rather than
431      * just use a LongAdder in order to access implicit
432      * contention-sensing that leads to creation of multiple
433      * CounterCells.  The counter mechanics avoid contention on
434      * updates but can encounter cache thrashing if read too
435      * frequently during concurrent access. To avoid reading so often,
436      * resizing under contention is attempted only upon adding to a
437      * bin already holding two or more nodes. Under uniform hash
438      * distributions, the probability of this occurring at threshold
439      * is around 13%, meaning that only about 1 in 8 puts check
440      * threshold (and after resizing, many fewer do so).
441      *
442      * TreeBins use a special form of comparison for search and
443      * related operations (which is the main reason we cannot use
444      * existing collections such as TreeMaps). TreeBins contain
445      * Comparable elements, but may contain others, as well as
446      * elements that are Comparable but not necessarily Comparable for
447      * the same T, so we cannot invoke compareTo among them. To handle
448      * this, the tree is ordered primarily by hash value, then by
449      * Comparable.compareTo order if applicable.  On lookup at a node,
450      * if elements are not comparable or compare as 0 then both left
451      * and right children may need to be searched in the case of tied
452      * hash values. (This corresponds to the full list search that
453      * would be necessary if all elements were non-Comparable and had
454      * tied hashes.) On insertion, to keep a total ordering (or as
455      * close as is required here) across rebalancings, we compare
456      * classes and identityHashCodes as tie-breakers. The red-black
457      * balancing code is updated from pre-jdk-collections
458      * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
459      * based in turn on Cormen, Leiserson, and Rivest "Introduction to
460      * Algorithms" (CLR).
461      *
462      * TreeBins also require an additional locking mechanism.  While
463      * list traversal is always possible by readers even during
464      * updates, tree traversal is not, mainly because of tree-rotations
465      * that may change the root node and/or its linkages.  TreeBins
466      * include a simple read-write lock mechanism parasitic on the
467      * main bin-synchronization strategy: Structural adjustments
468      * associated with an insertion or removal are already bin-locked
469      * (and so cannot conflict with other writers) but must wait for
470      * ongoing readers to finish. Since there can be only one such
471      * waiter, we use a simple scheme using a single "waiter" field to
472      * block writers.  However, readers need never block.  If the root
473      * lock is held, they proceed along the slow traversal path (via
474      * next-pointers) until the lock becomes available or the list is
475      * exhausted, whichever comes first. These cases are not fast, but
476      * maximize aggregate expected throughput.
477      *
478      * Maintaining API and serialization compatibility with previous
479      * versions of this class introduces several oddities. Mainly: We
480      * leave untouched but unused constructor arguments referring to
481      * concurrencyLevel. We accept a loadFactor constructor argument,
482      * but apply it only to initial table capacity (which is the only
483      * time that we can guarantee to honor it.) We also declare an
484      * unused "Segment" class that is instantiated in minimal form
485      * only when serializing.
486      *
487      * Also, solely for compatibility with previous versions of this
488      * class, it extends AbstractMap, even though all of its methods
489      * are overridden, so it is just useless baggage.
490      *
491      * This file is organized to make things a little easier to follow
492      * while reading than they might otherwise: First the main static
493      * declarations and utilities, then fields, then main public
494      * methods (with a few factorings of multiple public methods into
495      * internal ones), then sizing methods, trees, traversers, and
496      * bulk operations.
497      */
498 
499     /* ---------------- Constants -------------- */
500 
501     /**
502      * The largest possible table capacity.  This value must be
503      * exactly 1<<30 to stay within Java array allocation and indexing
504      * bounds for power of two table sizes, and is further required
505      * because the top two bits of 32bit hash fields are used for
506      * control purposes.
507      */
508     private static final int MAXIMUM_CAPACITY = 1 << 30;
509 
510     /**
511      * The default initial table capacity.  Must be a power of 2
512      * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
513      */
514     private static final int DEFAULT_CAPACITY = 16;
515 
516     /**
517      * The largest possible (non-power of two) array size.
518      * Needed by toArray and related methods.
519      */
520     static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
521 
522     /**
523      * The default concurrency level for this table. Unused but
524      * defined for compatibility with previous versions of this class.
525      */
526     private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
527 
528     /**
529      * The load factor for this table. Overrides of this value in
530      * constructors affect only the initial table capacity.  The
531      * actual floating point value isn't normally used -- it is
532      * simpler to use expressions such as {@code n - (n >>> 2)} for
533      * the associated resizing threshold.
534      */
535     private static final float LOAD_FACTOR = 0.75f;
536 
537     /**
538      * The bin count threshold for using a tree rather than list for a
539      * bin.  Bins are converted to trees when adding an element to a
540      * bin with at least this many nodes. The value must be greater
541      * than 2, and should be at least 8 to mesh with assumptions in
542      * tree removal about conversion back to plain bins upon
543      * shrinkage.
544      */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
     * conflicts between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads.  This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention.  The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;

    /**
     * The number of bits used for generation stamp in sizeCtl.
     * Must be at least 6 for 32bit arrays.
     */
    private static final int RESIZE_STAMP_BITS = 16;

    /**
     * The maximum number of threads that can help resize.
     * Must fit in 32 - RESIZE_STAMP_BITS bits.
     */
    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;

    /**
     * The bit shift for recording size stamp in sizeCtl.
     */
    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

    /*
     * Encodings for Node hash fields. See above for explanation.
     * Normal node hashes are always non-negative (spread() masks with
     * HASH_BITS), so negative values are free to mark special nodes.
     */
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();

    /**
     * Serialized pseudo-fields, provided only for jdk7 compatibility.
     * @serialField segments Segment[]
     *   The segments, each of which is a specialized hash table.
     * @serialField segmentMask int
     *   Mask value for indexing into segments. The upper bits of a
     *   key's hash code are used to choose the segment.
     * @serialField segmentShift int
     *   Shift value for indexing within segments.
     */
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE),
    };
614 
615     /* ---------------- Nodes -------------- */
616 
    /**
     * Key-value entry.  This class is never exported out as a
     * user-mutable Map.Entry (i.e., one supporting setValue; see
     * MapEntry below), but can be used for read-only traversals used
     * in bulk tasks.  Subclasses of Node with a negative hash field
     * are special, and contain null keys and values (but are never
     * exported).  Otherwise, keys and vals are never null.
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;           // spread hash; negative only in special subclasses
        final K key;
        volatile V val;           // volatile so unlocked readers see updates
        volatile Node<K,V> next;  // volatile link for lock-free chain traversal

        Node(int hash, K key, V val, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        public final K getKey()     { return key; }
        public final V getValue()   { return val; }
        public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
        public final String toString() {
            return Helpers.mapEntryToString(key, val);
        }

        // Read-only entry: mutation must go through the enclosing map.
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        public final boolean equals(Object o) {
            Object k, v, u; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == (u = val) || v.equals(u)));
        }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         */
        Node<K,V> find(int h, Object k) {
            // Linear scan of this bin's chain, matching hash first, then key.
            Node<K,V> e = this;
            if (k != null) {
                do {
                    K ek;
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                } while ((e = e.next) != null);
            }
            return null;
        }
    }
673 
674     /* ---------------- Static utilities -------------- */
675 
676     /**
677      * Spreads (XORs) higher bits of hash to lower and also forces top
678      * bit to 0. Because the table uses power-of-two masking, sets of
679      * hashes that vary only in bits above the current mask will
680      * always collide. (Among known examples are sets of Float keys
681      * holding consecutive whole numbers in small tables.)  So we
682      * apply a transform that spreads the impact of higher bits
683      * downward. There is a tradeoff between speed, utility, and
684      * quality of bit-spreading. Because many common sets of hashes
685      * are already reasonably distributed (so don't benefit from
686      * spreading), and because we use trees to handle large sets of
687      * collisions in bins, we just XOR some shifted bits in the
688      * cheapest possible way to reduce systematic lossage, as well as
689      * to incorporate impact of the highest bits that would otherwise
690      * never be used in index calculations because of table bounds.
691      */
spread(int h)692     static final int spread(int h) {
693         return (h ^ (h >>> 16)) & HASH_BITS;
694     }
695 
696     /**
697      * Returns a power of two table size for the given desired capacity.
698      * See Hackers Delight, sec 3.2
699      */
tableSizeFor(int c)700     private static final int tableSizeFor(int c) {
701         int n = c - 1;
702         n |= n >>> 1;
703         n |= n >>> 2;
704         n |= n >>> 4;
705         n |= n >>> 8;
706         n |= n >>> 16;
707         return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
708     }
709 
710     /**
711      * Returns x's Class if it is of the form "class C implements
712      * Comparable<C>", else null.
713      */
comparableClassFor(Object x)714     static Class<?> comparableClassFor(Object x) {
715         if (x instanceof Comparable) {
716             Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
717             if ((c = x.getClass()) == String.class) // bypass checks
718                 return c;
719             if ((ts = c.getGenericInterfaces()) != null) {
720                 for (int i = 0; i < ts.length; ++i) {
721                     if (((t = ts[i]) instanceof ParameterizedType) &&
722                         ((p = (ParameterizedType)t).getRawType() ==
723                          Comparable.class) &&
724                         (as = p.getActualTypeArguments()) != null &&
725                         as.length == 1 && as[0] == c) // type arg is c
726                         return c;
727                 }
728             }
729         }
730         return null;
731     }
732 
733     /**
734      * Returns k.compareTo(x) if x matches kc (k's screened comparable
735      * class), else 0.
736      */
737     @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
compareComparables(Class<?> kc, Object k, Object x)738     static int compareComparables(Class<?> kc, Object k, Object x) {
739         return (x == null || x.getClass() != kc ? 0 :
740                 ((Comparable)k).compareTo(x));
741     }
742 
743     /* ---------------- Table element access -------------- */
744 
745     /*
746      * Volatile access methods are used for table elements as well as
747      * elements of in-progress next table while resizing.  All uses of
748      * the tab arguments must be null checked by callers.  All callers
749      * also paranoically precheck that tab's length is not zero (or an
750      * equivalent check), thus ensuring that any index argument taking
751      * the form of a hash value anded with (length - 1) is a valid
752      * index.  Note that, to be correct wrt arbitrary concurrency
753      * errors by users, these checks must operate on local variables,
754      * which accounts for some odd-looking inline assignments below.
755      * Note that calls to setTabAt always occur within locked regions,
756      * and so in principle require only release ordering, not
757      * full volatile semantics, but are currently coded as volatile
758      * writes to be conservative.
759      */
760 
    // Volatile read of tab[i]; callers must null-check tab and ensure i is
    // in range (see the access-methods comment above).
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }
765 
    // Atomically installs v at tab[i] iff the slot currently holds c.
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }
770 
    // Volatile write of tab[i]; per the comment above, always called while
    // holding the bin lock.
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }
774 
    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads).  Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // Views, created lazily by their accessors. Non-volatile: the racy
    // publication there is tolerated since any instance works.
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;
824 
825 
826     /* ---------------- Public operations -------------- */
827 
    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMap() {
        // Nothing to do: sizeCtl stays 0, which means "use the default
        // capacity" when the table is lazily created on first insertion.
    }
833 
834     /**
835      * Creates a new, empty map with an initial table size
836      * accommodating the specified number of elements without the need
837      * to dynamically resize.
838      *
839      * @param initialCapacity The implementation performs internal
840      * sizing to accommodate this many elements.
841      * @throws IllegalArgumentException if the initial capacity of
842      * elements is negative
843      */
ConcurrentHashMap(int initialCapacity)844     public ConcurrentHashMap(int initialCapacity) {
845         if (initialCapacity < 0)
846             throw new IllegalArgumentException();
847         int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
848                    MAXIMUM_CAPACITY :
849                    tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
850         this.sizeCtl = cap;
851     }
852 
    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     * @throws NullPointerException if the specified map is null
     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }
862 
    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1); // assume a single updating thread
    }
881 
882     /**
883      * Creates a new, empty map with an initial table size based on
884      * the given number of elements ({@code initialCapacity}), table
885      * density ({@code loadFactor}), and number of concurrently
886      * updating threads ({@code concurrencyLevel}).
887      *
888      * @param initialCapacity the initial capacity. The implementation
889      * performs internal sizing to accommodate this many elements,
890      * given the specified load factor.
891      * @param loadFactor the load factor (table density) for
892      * establishing the initial table size
893      * @param concurrencyLevel the estimated number of concurrently
894      * updating threads. The implementation may use this value as
895      * a sizing hint.
896      * @throws IllegalArgumentException if the initial capacity is
897      * negative or the load factor or concurrencyLevel are
898      * nonpositive
899      */
ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel)900     public ConcurrentHashMap(int initialCapacity,
901                              float loadFactor, int concurrencyLevel) {
902         if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
903             throw new IllegalArgumentException();
904         if (initialCapacity < concurrencyLevel)   // Use at least as many bins
905             initialCapacity = concurrencyLevel;   // as estimated threads
906         long size = (long)(1.0 + (long)initialCapacity / loadFactor);
907         int cap = (size >= (long)MAXIMUM_CAPACITY) ?
908             MAXIMUM_CAPACITY : tableSizeFor((int)size);
909         this.sizeCtl = cap;
910     }
911 
912     // Original (since JDK1.2) Map methods
913 
914     /**
915      * {@inheritDoc}
916      */
size()917     public int size() {
918         long n = sumCount();
919         return ((n < 0L) ? 0 :
920                 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
921                 (int)n);
922     }
923 
    /**
     * {@inheritDoc}
     */
    public boolean isEmpty() {
        // The sum may be transiently negative while counter updates race
        // with removals; any non-positive value means "no mappings".
        return sumCount() <= 0L; // ignore transient negative values
    }
930 
    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}.  (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            // Common case: the bin's first node is the match.
            if ((eh = e.hash) == h) {
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            // Negative hash marks a special node (forwarding node during a
            // resize, tree bin, or reservation); delegate to its find().
            else if (eh < 0)
                return (p = e.find(h, key)) != null ? p.val : null;
            // Plain linked-list bin: scan the remainder of the chain.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }
961 
962     /**
963      * Tests if the specified object is a key in this table.
964      *
965      * @param  key possible key
966      * @return {@code true} if and only if the specified object
967      *         is a key in this table, as determined by the
968      *         {@code equals} method; {@code false} otherwise
969      * @throws NullPointerException if the specified key is null
970      */
containsKey(Object key)971     public boolean containsKey(Object key) {
972         return get(key) != null;
973     }
974 
    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value. Note: This method may require a full traversal
     * of the map, and is much slower than method {@code containsKey}.
     *
     * @param value value whose presence in this map is to be tested
     * @return {@code true} if this map maps one or more keys to the
     *         specified value
     * @throws NullPointerException if the specified value is null
     */
    public boolean containsValue(Object value) {
        if (value == null)
            throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            // Full scan of every bin via Traverser.
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V v;
                // Identity check first, then equals (value is known non-null).
                if ((v = p.val) == value || (v != null && value.equals(v)))
                    return true;
            }
        }
        return false;
    }
999 
    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        return putVal(key, value, false); // false: always replace an existing value
    }
1016 
    /**
     * Implementation for put and putIfAbsent.
     *
     * @param key the (non-null) key
     * @param value the (non-null) value
     * @param onlyIfAbsent if true, keep an existing value rather than replace it
     * @return the previous value, or null if there was no mapping
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0; // nodes seen in the bin; stays 0 on the lock-free CAS path
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable(); // lazy table creation, then retry
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                // Empty bin: try to CAS in a new node without locking.
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: help, then retry
            else {
                V oldVal = null;
                synchronized (f) { // lock the bin via its first node
                    // Re-check the head: another thread may have changed the bin
                    // between our read and acquiring the lock.
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Linked-list bin: replace matching key or append at tail.
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: delegate the insertion/lookup to the tree.
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i); // long chain: convert bin to a tree
                    if (oldVal != null)
                        return oldVal;      // replaced in place: count unchanged
                    break;
                }
            }
        }
        addCount(1L, binCount); // a mapping was added; may trigger a resize
        return null;
    }
1083 
1084     /**
1085      * Copies all of the mappings from the specified map to this one.
1086      * These mappings replace any mappings that this map had for any of the
1087      * keys currently in the specified map.
1088      *
1089      * @param m mappings to be stored in this map
1090      */
putAll(Map<? extends K, ? extends V> m)1091     public void putAll(Map<? extends K, ? extends V> m) {
1092         tryPresize(m.size());
1093         for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1094             putVal(e.getKey(), e.getValue(), false);
1095     }
1096 
    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param  key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        // null replacement value means "delete"; null cv means unconditional.
        return replaceNode(key, null, null);
    }
1109 
    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null.  If resulting value is null, delete.
     *
     * @param key the key whose node to replace or remove
     * @param value the new value, or null to delete the mapping
     * @param cv if non-null, only act when the current value matches it
     * @return the previous value on success, else null
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break; // no table or empty bin: nothing to replace
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: help, then retry
            else {
                V oldVal = null;
                boolean validated = false; // false => bin changed under us; retry loop
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            // Linked-list bin: update in place or unlink.
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;            // replace
                                        else if (pred != null)
                                            pred.next = e.next;       // unlink interior node
                                        else
                                            setTabAt(tab, i, e.next); // unlink bin head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    // removeTreeNode returning true signals the
                                    // bin should revert to a plain list.
                                    else if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1); // a mapping was removed
                        return oldVal;
                    }
                    break;
                }
            }
        }
        return null;
    }
1187 
    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i; // bin already empty
            else if ((fh = f.hash) == MOVED) {
                // Resize in progress: help it finish, then rescan from the
                // start of the (new) table.
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Count the discarded nodes; a tree bin's nodes are
                        // reachable through its 'first' chain.
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }
1222 
1223     /**
1224      * Returns a {@link Set} view of the keys contained in this map.
1225      * The set is backed by the map, so changes to the map are
1226      * reflected in the set, and vice-versa. The set supports element
1227      * removal, which removes the corresponding mapping from this map,
1228      * via the {@code Iterator.remove}, {@code Set.remove},
1229      * {@code removeAll}, {@code retainAll}, and {@code clear}
1230      * operations.  It does not support the {@code add} or
1231      * {@code addAll} operations.
1232      *
1233      * <p> The set returned by this method is guaranteed to an instance of
1234      * {@link KeySetView}.
1235      *
1236      * <p>The view's iterators and spliterators are
1237      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1238      *
1239      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1240      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1241      *
1242      * @return the set view
1243      */
1244     // NOTE: The upstream version of this function returns KeySetView (See http://b/28099367).
keySet()1245     public Set<K> keySet() {
1246         KeySetView<K,V> ks;
1247         return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1248     }
1249 
1250     /**
1251      * Returns a {@link Collection} view of the values contained in this map.
1252      * The collection is backed by the map, so changes to the map are
1253      * reflected in the collection, and vice-versa.  The collection
1254      * supports element removal, which removes the corresponding
1255      * mapping from this map, via the {@code Iterator.remove},
1256      * {@code Collection.remove}, {@code removeAll},
1257      * {@code retainAll}, and {@code clear} operations.  It does not
1258      * support the {@code add} or {@code addAll} operations.
1259      *
1260      * <p>The view's iterators and spliterators are
1261      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1262      *
1263      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1264      * and {@link Spliterator#NONNULL}.
1265      *
1266      * @return the collection view
1267      */
values()1268     public Collection<V> values() {
1269         ValuesView<K,V> vs;
1270         return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1271     }
1272 
1273     /**
1274      * Returns a {@link Set} view of the mappings contained in this map.
1275      * The set is backed by the map, so changes to the map are
1276      * reflected in the set, and vice-versa.  The set supports element
1277      * removal, which removes the corresponding mapping from the map,
1278      * via the {@code Iterator.remove}, {@code Set.remove},
1279      * {@code removeAll}, {@code retainAll}, and {@code clear}
1280      * operations.
1281      *
1282      * <p>The view's iterators and spliterators are
1283      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1284      *
1285      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1286      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1287      *
1288      * @return the set view
1289      */
entrySet()1290     public Set<Map.Entry<K,V>> entrySet() {
1291         EntrySetView<K,V> es;
1292         return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1293     }
1294 
1295     /**
1296      * Returns the hash code value for this {@link Map}, i.e.,
1297      * the sum of, for each key-value pair in the map,
1298      * {@code key.hashCode() ^ value.hashCode()}.
1299      *
1300      * @return the hash code value for this map
1301      */
hashCode()1302     public int hashCode() {
1303         int h = 0;
1304         Node<K,V>[] t;
1305         if ((t = table) != null) {
1306             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1307             for (Node<K,V> p; (p = it.advance()) != null; )
1308                 h += p.key.hashCode() ^ p.val.hashCode();
1309         }
1310         return h;
1311     }
1312 
1313     /**
1314      * Returns a string representation of this map.  The string
1315      * representation consists of a list of key-value mappings (in no
1316      * particular order) enclosed in braces ("{@code {}}").  Adjacent
1317      * mappings are separated by the characters {@code ", "} (comma
1318      * and space).  Each key-value mapping is rendered as the key
1319      * followed by an equals sign ("{@code =}") followed by the
1320      * associated value.
1321      *
1322      * @return a string representation of this map
1323      */
toString()1324     public String toString() {
1325         Node<K,V>[] t;
1326         int f = (t = table) == null ? 0 : t.length;
1327         Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1328         StringBuilder sb = new StringBuilder();
1329         sb.append('{');
1330         Node<K,V> p;
1331         if ((p = it.advance()) != null) {
1332             for (;;) {
1333                 K k = p.key;
1334                 V v = p.val;
1335                 sb.append(k == this ? "(this Map)" : k);
1336                 sb.append('=');
1337                 sb.append(v == this ? "(this Map)" : v);
1338                 if ((p = it.advance()) == null)
1339                     break;
1340                 sb.append(',').append(' ');
1341             }
1342         }
1343         return sb.append('}').toString();
1344     }
1345 
1346     /**
1347      * Compares the specified object with this map for equality.
1348      * Returns {@code true} if the given object is a map with the same
1349      * mappings as this map.  This operation may return misleading
1350      * results if either map is concurrently modified during execution
1351      * of this method.
1352      *
1353      * @param o object to be compared for equality with this map
1354      * @return {@code true} if the specified object is equal to this map
1355      */
equals(Object o)1356     public boolean equals(Object o) {
1357         if (o != this) {
1358             if (!(o instanceof Map))
1359                 return false;
1360             Map<?,?> m = (Map<?,?>) o;
1361             Node<K,V>[] t;
1362             int f = (t = table) == null ? 0 : t.length;
1363             Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1364             for (Node<K,V> p; (p = it.advance()) != null; ) {
1365                 V val = p.val;
1366                 Object v = m.get(p.key);
1367                 if (v == null || (v != val && !v.equals(val)))
1368                     return false;
1369             }
1370             for (Map.Entry<?,?> e : m.entrySet()) {
1371                 Object mk, mv, v;
1372                 if ((mk = e.getKey()) == null ||
1373                     (mv = e.getValue()) == null ||
1374                     (v = get(mk)) == null ||
1375                     (mv != v && !mv.equals(v)))
1376                     return false;
1377             }
1378         }
1379         return true;
1380     }
1381 
1382     /**
1383      * Stripped-down version of helper class used in previous version,
1384      * declared for the sake of serialization compatibility.
1385      */
1386     static class Segment<K,V> extends ReentrantLock implements Serializable {
1387         private static final long serialVersionUID = 2249069246763182397L;
1388         final float loadFactor;
Segment(float lf)1389         Segment(float lf) { this.loadFactor = lf; }
1390     }
1391 
1392     /**
1393      * Saves the state of the {@code ConcurrentHashMap} instance to a
1394      * stream (i.e., serializes it).
1395      * @param s the stream
1396      * @throws java.io.IOException if an I/O error occurs
1397      * @serialData
1398      * the serialized fields, followed by the key (Object) and value
1399      * (Object) for each key-value mapping, followed by a null pair.
1400      * The key-value mappings are emitted in no particular order.
1401      */
writeObject(java.io.ObjectOutputStream s)1402     private void writeObject(java.io.ObjectOutputStream s)
1403         throws java.io.IOException {
1404         // For serialization compatibility
1405         // Emulate segment calculation from previous version of this class
1406         int sshift = 0;
1407         int ssize = 1;
1408         while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1409             ++sshift;
1410             ssize <<= 1;
1411         }
1412         int segmentShift = 32 - sshift;
1413         int segmentMask = ssize - 1;
1414         @SuppressWarnings("unchecked")
1415         Segment<K,V>[] segments = (Segment<K,V>[])
1416             new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1417         for (int i = 0; i < segments.length; ++i)
1418             segments[i] = new Segment<K,V>(LOAD_FACTOR);
1419         java.io.ObjectOutputStream.PutField streamFields = s.putFields();
1420         streamFields.put("segments", segments);
1421         streamFields.put("segmentShift", segmentShift);
1422         streamFields.put("segmentMask", segmentMask);
1423         s.writeFields();
1424 
1425         Node<K,V>[] t;
1426         if ((t = table) != null) {
1427             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1428             for (Node<K,V> p; (p = it.advance()) != null; ) {
1429                 s.writeObject(p.key);
1430                 s.writeObject(p.val);
1431             }
1432         }
1433         s.writeObject(null);
1434         s.writeObject(null);
1435     }
1436 
1437     /**
1438      * Reconstitutes the instance from a stream (that is, deserializes it).
1439      * @param s the stream
1440      * @throws ClassNotFoundException if the class of a serialized object
1441      *         could not be found
1442      * @throws java.io.IOException if an I/O error occurs
1443      */
readObject(java.io.ObjectInputStream s)1444     private void readObject(java.io.ObjectInputStream s)
1445         throws java.io.IOException, ClassNotFoundException {
1446         /*
1447          * To improve performance in typical cases, we create nodes
1448          * while reading, then place in table once size is known.
1449          * However, we must also validate uniqueness and deal with
1450          * overpopulated bins while doing so, which requires
1451          * specialized versions of putVal mechanics.
1452          */
1453         sizeCtl = -1; // force exclusion for table construction
1454         s.defaultReadObject();
1455         long size = 0L;
1456         Node<K,V> p = null;
1457         for (;;) {
1458             @SuppressWarnings("unchecked")
1459             K k = (K) s.readObject();
1460             @SuppressWarnings("unchecked")
1461             V v = (V) s.readObject();
1462             if (k != null && v != null) {
1463                 p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1464                 ++size;
1465             }
1466             else
1467                 break;
1468         }
1469         if (size == 0L)
1470             sizeCtl = 0;
1471         else {
1472             int n;
1473             if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1474                 n = MAXIMUM_CAPACITY;
1475             else {
1476                 int sz = (int)size;
1477                 n = tableSizeFor(sz + (sz >>> 1) + 1);
1478             }
1479             @SuppressWarnings("unchecked")
1480             Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1481             int mask = n - 1;
1482             long added = 0L;
1483             while (p != null) {
1484                 boolean insertAtFront;
1485                 Node<K,V> next = p.next, first;
1486                 int h = p.hash, j = h & mask;
1487                 if ((first = tabAt(tab, j)) == null)
1488                     insertAtFront = true;
1489                 else {
1490                     K k = p.key;
1491                     if (first.hash < 0) {
1492                         TreeBin<K,V> t = (TreeBin<K,V>)first;
1493                         if (t.putTreeVal(h, k, p.val) == null)
1494                             ++added;
1495                         insertAtFront = false;
1496                     }
1497                     else {
1498                         int binCount = 0;
1499                         insertAtFront = true;
1500                         Node<K,V> q; K qk;
1501                         for (q = first; q != null; q = q.next) {
1502                             if (q.hash == h &&
1503                                 ((qk = q.key) == k ||
1504                                  (qk != null && k.equals(qk)))) {
1505                                 insertAtFront = false;
1506                                 break;
1507                             }
1508                             ++binCount;
1509                         }
1510                         if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1511                             insertAtFront = false;
1512                             ++added;
1513                             p.next = first;
1514                             TreeNode<K,V> hd = null, tl = null;
1515                             for (q = p; q != null; q = q.next) {
1516                                 TreeNode<K,V> t = new TreeNode<K,V>
1517                                     (q.hash, q.key, q.val, null, null);
1518                                 if ((t.prev = tl) == null)
1519                                     hd = t;
1520                                 else
1521                                     tl.next = t;
1522                                 tl = t;
1523                             }
1524                             setTabAt(tab, j, new TreeBin<K,V>(hd));
1525                         }
1526                     }
1527                 }
1528                 if (insertAtFront) {
1529                     ++added;
1530                     p.next = first;
1531                     setTabAt(tab, j, p);
1532                 }
1533                 p = next;
1534             }
1535             table = tab;
1536             sizeCtl = n - (n >>> 2);
1537             baseCount = added;
1538         }
1539     }
1540 
1541     // ConcurrentMap methods
1542 
1543     /**
1544      * {@inheritDoc}
1545      *
1546      * @return the previous value associated with the specified key,
1547      *         or {@code null} if there was no mapping for the key
1548      * @throws NullPointerException if the specified key or value is null
1549      */
putIfAbsent(K key, V value)1550     public V putIfAbsent(K key, V value) {
1551         return putVal(key, value, true);
1552     }
1553 
1554     /**
1555      * {@inheritDoc}
1556      *
1557      * @throws NullPointerException if the specified key is null
1558      */
remove(Object key, Object value)1559     public boolean remove(Object key, Object value) {
1560         if (key == null)
1561             throw new NullPointerException();
1562         return value != null && replaceNode(key, null, value) != null;
1563     }
1564 
1565     /**
1566      * {@inheritDoc}
1567      *
1568      * @throws NullPointerException if any of the arguments are null
1569      */
replace(K key, V oldValue, V newValue)1570     public boolean replace(K key, V oldValue, V newValue) {
1571         if (key == null || oldValue == null || newValue == null)
1572             throw new NullPointerException();
1573         return replaceNode(key, newValue, oldValue) != null;
1574     }
1575 
1576     /**
1577      * {@inheritDoc}
1578      *
1579      * @return the previous value associated with the specified key,
1580      *         or {@code null} if there was no mapping for the key
1581      * @throws NullPointerException if the specified key or value is null
1582      */
replace(K key, V value)1583     public V replace(K key, V value) {
1584         if (key == null || value == null)
1585             throw new NullPointerException();
1586         return replaceNode(key, value, null);
1587     }
1588 
1589     // Overrides of JDK8+ Map extension method defaults
1590 
1591     /**
1592      * Returns the value to which the specified key is mapped, or the
1593      * given default value if this map contains no mapping for the
1594      * key.
1595      *
1596      * @param key the key whose associated value is to be returned
1597      * @param defaultValue the value to return if this map contains
1598      * no mapping for the given key
1599      * @return the mapping for the key, if present; else the default value
1600      * @throws NullPointerException if the specified key is null
1601      */
getOrDefault(Object key, V defaultValue)1602     public V getOrDefault(Object key, V defaultValue) {
1603         V v;
1604         return (v = get(key)) == null ? defaultValue : v;
1605     }
1606 
forEach(BiConsumer<? super K, ? super V> action)1607     public void forEach(BiConsumer<? super K, ? super V> action) {
1608         if (action == null) throw new NullPointerException();
1609         Node<K,V>[] t;
1610         if ((t = table) != null) {
1611             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1612             for (Node<K,V> p; (p = it.advance()) != null; ) {
1613                 action.accept(p.key, p.val);
1614             }
1615         }
1616     }
1617 
replaceAll(BiFunction<? super K, ? super V, ? extends V> function)1618     public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1619         if (function == null) throw new NullPointerException();
1620         Node<K,V>[] t;
1621         if ((t = table) != null) {
1622             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1623             for (Node<K,V> p; (p = it.advance()) != null; ) {
1624                 V oldValue = p.val;
1625                 for (K key = p.key;;) {
1626                     V newValue = function.apply(key, oldValue);
1627                     if (newValue == null)
1628                         throw new NullPointerException();
1629                     if (replaceNode(key, newValue, oldValue) != null ||
1630                         (oldValue = get(key)) == null)
1631                         break;
1632                 }
1633             }
1634         }
1635     }
1636 
1637     /**
1638      * Helper method for EntrySetView.removeIf.
1639      */
removeEntryIf(Predicate<? super Entry<K,V>> function)1640     boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1641         if (function == null) throw new NullPointerException();
1642         Node<K,V>[] t;
1643         boolean removed = false;
1644         if ((t = table) != null) {
1645             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1646             for (Node<K,V> p; (p = it.advance()) != null; ) {
1647                 K k = p.key;
1648                 V v = p.val;
1649                 Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1650                 if (function.test(e) && replaceNode(k, null, v) != null)
1651                     removed = true;
1652             }
1653         }
1654         return removed;
1655     }
1656 
1657     /**
1658      * Helper method for ValuesView.removeIf.
1659      */
removeValueIf(Predicate<? super V> function)1660     boolean removeValueIf(Predicate<? super V> function) {
1661         if (function == null) throw new NullPointerException();
1662         Node<K,V>[] t;
1663         boolean removed = false;
1664         if ((t = table) != null) {
1665             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1666             for (Node<K,V> p; (p = it.advance()) != null; ) {
1667                 K k = p.key;
1668                 V v = p.val;
1669                 if (function.test(v) && replaceNode(k, null, v) != null)
1670                     removed = true;
1671             }
1672         }
1673         return removed;
1674     }
1675 
1676     /**
1677      * If the specified key is not already associated with a value,
1678      * attempts to compute its value using the given mapping function
1679      * and enters it into this map unless {@code null}.  The entire
1680      * method invocation is performed atomically, so the function is
1681      * applied at most once per key.  Some attempted update operations
1682      * on this map by other threads may be blocked while computation
1683      * is in progress, so the computation should be short and simple,
1684      * and must not attempt to update any other mappings of this map.
1685      *
1686      * @param key key with which the specified value is to be associated
1687      * @param mappingFunction the function to compute a value
1688      * @return the current (existing or computed) value associated with
1689      *         the specified key, or null if the computed value is null
1690      * @throws NullPointerException if the specified key or mappingFunction
1691      *         is null
1692      * @throws IllegalStateException if the computation detectably
1693      *         attempts a recursive update to this map that would
1694      *         otherwise never complete
1695      * @throws RuntimeException or Error if the mappingFunction does so,
1696      *         in which case the mapping is left unestablished
1697      */
computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction)1698     public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1699         if (key == null || mappingFunction == null)
1700             throw new NullPointerException();
1701         int h = spread(key.hashCode());
1702         V val = null;
1703         int binCount = 0;
1704         for (Node<K,V>[] tab = table;;) {
1705             Node<K,V> f; int n, i, fh;
1706             if (tab == null || (n = tab.length) == 0)
1707                 tab = initTable();
1708             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1709                 Node<K,V> r = new ReservationNode<K,V>();
1710                 synchronized (r) {
1711                     if (casTabAt(tab, i, null, r)) {
1712                         binCount = 1;
1713                         Node<K,V> node = null;
1714                         try {
1715                             if ((val = mappingFunction.apply(key)) != null)
1716                                 node = new Node<K,V>(h, key, val, null);
1717                         } finally {
1718                             setTabAt(tab, i, node);
1719                         }
1720                     }
1721                 }
1722                 if (binCount != 0)
1723                     break;
1724             }
1725             else if ((fh = f.hash) == MOVED)
1726                 tab = helpTransfer(tab, f);
1727             else {
1728                 boolean added = false;
1729                 synchronized (f) {
1730                     if (tabAt(tab, i) == f) {
1731                         if (fh >= 0) {
1732                             binCount = 1;
1733                             for (Node<K,V> e = f;; ++binCount) {
1734                                 K ek;
1735                                 if (e.hash == h &&
1736                                     ((ek = e.key) == key ||
1737                                      (ek != null && key.equals(ek)))) {
1738                                     val = e.val;
1739                                     break;
1740                                 }
1741                                 Node<K,V> pred = e;
1742                                 if ((e = e.next) == null) {
1743                                     if ((val = mappingFunction.apply(key)) != null) {
1744                                         if (pred.next != null)
1745                                             throw new IllegalStateException("Recursive update");
1746                                         added = true;
1747                                         pred.next = new Node<K,V>(h, key, val, null);
1748                                     }
1749                                     break;
1750                                 }
1751                             }
1752                         }
1753                         else if (f instanceof TreeBin) {
1754                             binCount = 2;
1755                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1756                             TreeNode<K,V> r, p;
1757                             if ((r = t.root) != null &&
1758                                 (p = r.findTreeNode(h, key, null)) != null)
1759                                 val = p.val;
1760                             else if ((val = mappingFunction.apply(key)) != null) {
1761                                 added = true;
1762                                 t.putTreeVal(h, key, val);
1763                             }
1764                         }
1765                         else if (f instanceof ReservationNode)
1766                             throw new IllegalStateException("Recursive update");
1767                     }
1768                 }
1769                 if (binCount != 0) {
1770                     if (binCount >= TREEIFY_THRESHOLD)
1771                         treeifyBin(tab, i);
1772                     if (!added)
1773                         return val;
1774                     break;
1775                 }
1776             }
1777         }
1778         if (val != null)
1779             addCount(1L, binCount);
1780         return val;
1781     }
1782 
1783     /**
1784      * If the value for the specified key is present, attempts to
1785      * compute a new mapping given the key and its current mapped
1786      * value.  The entire method invocation is performed atomically.
1787      * Some attempted update operations on this map by other threads
1788      * may be blocked while computation is in progress, so the
1789      * computation should be short and simple, and must not attempt to
1790      * update any other mappings of this map.
1791      *
1792      * @param key key with which a value may be associated
1793      * @param remappingFunction the function to compute a value
1794      * @return the new value associated with the specified key, or null if none
1795      * @throws NullPointerException if the specified key or remappingFunction
1796      *         is null
1797      * @throws IllegalStateException if the computation detectably
1798      *         attempts a recursive update to this map that would
1799      *         otherwise never complete
1800      * @throws RuntimeException or Error if the remappingFunction does so,
1801      *         in which case the mapping is unchanged
1802      */
computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)1803     public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1804         if (key == null || remappingFunction == null)
1805             throw new NullPointerException();
1806         int h = spread(key.hashCode());
1807         V val = null;
1808         int delta = 0;
1809         int binCount = 0;
1810         for (Node<K,V>[] tab = table;;) {
1811             Node<K,V> f; int n, i, fh;
1812             if (tab == null || (n = tab.length) == 0)
1813                 tab = initTable();
1814             else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1815                 break;
1816             else if ((fh = f.hash) == MOVED)
1817                 tab = helpTransfer(tab, f);
1818             else {
1819                 synchronized (f) {
1820                     if (tabAt(tab, i) == f) {
1821                         if (fh >= 0) {
1822                             binCount = 1;
1823                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1824                                 K ek;
1825                                 if (e.hash == h &&
1826                                     ((ek = e.key) == key ||
1827                                      (ek != null && key.equals(ek)))) {
1828                                     val = remappingFunction.apply(key, e.val);
1829                                     if (val != null)
1830                                         e.val = val;
1831                                     else {
1832                                         delta = -1;
1833                                         Node<K,V> en = e.next;
1834                                         if (pred != null)
1835                                             pred.next = en;
1836                                         else
1837                                             setTabAt(tab, i, en);
1838                                     }
1839                                     break;
1840                                 }
1841                                 pred = e;
1842                                 if ((e = e.next) == null)
1843                                     break;
1844                             }
1845                         }
1846                         else if (f instanceof TreeBin) {
1847                             binCount = 2;
1848                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1849                             TreeNode<K,V> r, p;
1850                             if ((r = t.root) != null &&
1851                                 (p = r.findTreeNode(h, key, null)) != null) {
1852                                 val = remappingFunction.apply(key, p.val);
1853                                 if (val != null)
1854                                     p.val = val;
1855                                 else {
1856                                     delta = -1;
1857                                     if (t.removeTreeNode(p))
1858                                         setTabAt(tab, i, untreeify(t.first));
1859                                 }
1860                             }
1861                         }
1862                         else if (f instanceof ReservationNode)
1863                             throw new IllegalStateException("Recursive update");
1864                     }
1865                 }
1866                 if (binCount != 0)
1867                     break;
1868             }
1869         }
1870         if (delta != 0)
1871             addCount((long)delta, binCount);
1872         return val;
1873     }
1874 
1875     /**
1876      * Attempts to compute a mapping for the specified key and its
1877      * current mapped value (or {@code null} if there is no current
1878      * mapping). The entire method invocation is performed atomically.
1879      * Some attempted update operations on this map by other threads
1880      * may be blocked while computation is in progress, so the
1881      * computation should be short and simple, and must not attempt to
1882      * update any other mappings of this Map.
1883      *
1884      * @param key key with which the specified value is to be associated
1885      * @param remappingFunction the function to compute a value
1886      * @return the new value associated with the specified key, or null if none
1887      * @throws NullPointerException if the specified key or remappingFunction
1888      *         is null
1889      * @throws IllegalStateException if the computation detectably
1890      *         attempts a recursive update to this map that would
1891      *         otherwise never complete
1892      * @throws RuntimeException or Error if the remappingFunction does so,
1893      *         in which case the mapping is unchanged
1894      */
compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)1895     public V compute(K key,
1896                      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1897         if (key == null || remappingFunction == null)
1898             throw new NullPointerException();
1899         int h = spread(key.hashCode());
1900         V val = null;
1901         int delta = 0;
1902         int binCount = 0;
1903         for (Node<K,V>[] tab = table;;) {
1904             Node<K,V> f; int n, i, fh;
1905             if (tab == null || (n = tab.length) == 0)
1906                 tab = initTable();
1907             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1908                 Node<K,V> r = new ReservationNode<K,V>();
1909                 synchronized (r) {
1910                     if (casTabAt(tab, i, null, r)) {
1911                         binCount = 1;
1912                         Node<K,V> node = null;
1913                         try {
1914                             if ((val = remappingFunction.apply(key, null)) != null) {
1915                                 delta = 1;
1916                                 node = new Node<K,V>(h, key, val, null);
1917                             }
1918                         } finally {
1919                             setTabAt(tab, i, node);
1920                         }
1921                     }
1922                 }
1923                 if (binCount != 0)
1924                     break;
1925             }
1926             else if ((fh = f.hash) == MOVED)
1927                 tab = helpTransfer(tab, f);
1928             else {
1929                 synchronized (f) {
1930                     if (tabAt(tab, i) == f) {
1931                         if (fh >= 0) {
1932                             binCount = 1;
1933                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1934                                 K ek;
1935                                 if (e.hash == h &&
1936                                     ((ek = e.key) == key ||
1937                                      (ek != null && key.equals(ek)))) {
1938                                     val = remappingFunction.apply(key, e.val);
1939                                     if (val != null)
1940                                         e.val = val;
1941                                     else {
1942                                         delta = -1;
1943                                         Node<K,V> en = e.next;
1944                                         if (pred != null)
1945                                             pred.next = en;
1946                                         else
1947                                             setTabAt(tab, i, en);
1948                                     }
1949                                     break;
1950                                 }
1951                                 pred = e;
1952                                 if ((e = e.next) == null) {
1953                                     val = remappingFunction.apply(key, null);
1954                                     if (val != null) {
1955                                         if (pred.next != null)
1956                                             throw new IllegalStateException("Recursive update");
1957                                         delta = 1;
1958                                         pred.next =
1959                                             new Node<K,V>(h, key, val, null);
1960                                     }
1961                                     break;
1962                                 }
1963                             }
1964                         }
1965                         else if (f instanceof TreeBin) {
1966                             binCount = 1;
1967                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1968                             TreeNode<K,V> r, p;
1969                             if ((r = t.root) != null)
1970                                 p = r.findTreeNode(h, key, null);
1971                             else
1972                                 p = null;
1973                             V pv = (p == null) ? null : p.val;
1974                             val = remappingFunction.apply(key, pv);
1975                             if (val != null) {
1976                                 if (p != null)
1977                                     p.val = val;
1978                                 else {
1979                                     delta = 1;
1980                                     t.putTreeVal(h, key, val);
1981                                 }
1982                             }
1983                             else if (p != null) {
1984                                 delta = -1;
1985                                 if (t.removeTreeNode(p))
1986                                     setTabAt(tab, i, untreeify(t.first));
1987                             }
1988                         }
1989                         else if (f instanceof ReservationNode)
1990                             throw new IllegalStateException("Recursive update");
1991                     }
1992                 }
1993                 if (binCount != 0) {
1994                     if (binCount >= TREEIFY_THRESHOLD)
1995                         treeifyBin(tab, i);
1996                     break;
1997                 }
1998             }
1999         }
2000         if (delta != 0)
2001             addCount((long)delta, binCount);
2002         return val;
2003     }
2004 
    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}. The entire
     * method invocation is performed atomically.  Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     *         remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
        if (key == null || value == null || remappingFunction == null)
            throw new NullPointerException();
        int h = spread(key.hashCode());
        V val = null;       // value ultimately associated with key (null if removed/none)
        int delta = 0;      // net change in mapping count: +1 added, -1 removed
        int binCount = 0;   // nodes traversed in the bin; drives treeification
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
                // Empty bin: try to install the value directly via CAS;
                // on failure another thread changed the bin, so retry.
                if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
                    delta = 1;
                    val = value;
                    break;
                }
            }
            else if ((fh = f.hash) == MOVED)
                // Table is being resized; help finish the transfer, then retry.
                tab = helpTransfer(tab, f);
            else {
                // Lock the bin head; re-check it is still the head before updating.
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {          // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f, pred = null;; ++binCount) {
                                K ek;
                                if (e.hash == h &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    // Key present: remap existing and given values.
                                    val = remappingFunction.apply(e.val, value);
                                    if (val != null)
                                        e.val = val;
                                    else {
                                        // Null result: unlink the node.
                                        delta = -1;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null) {
                                    // Key absent: append a node holding the given value.
                                    delta = 1;
                                    val = value;
                                    pred.next =
                                        new Node<K,V>(h, key, val, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            binCount = 2;       // nonzero marker for a tree bin
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r = t.root;
                            TreeNode<K,V> p = (r == null) ? null :
                                r.findTreeNode(h, key, null);
                            // Absent -> use given value; present -> remap.
                            val = (p == null) ? value :
                                remappingFunction.apply(p.val, value);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeVal(h, key, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                // Untreeify if removal shrank the tree enough.
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                        else if (f instanceof ReservationNode)
                            // Placeholder from computeIfAbsent/compute in this bin.
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    break;
                }
            }
        }
        if (delta != 0)
            addCount((long)delta, binCount);
        return val;
    }
2115 
2116     // Hashtable legacy methods
2117 
2118     /**
2119      * Tests if some key maps into the specified value in this table.
2120      *
2121      * <p>Note that this method is identical in functionality to
2122      * {@link #containsValue(Object)}, and exists solely to ensure
2123      * full compatibility with class {@link java.util.Hashtable},
2124      * which supported this method prior to introduction of the
2125      * Java Collections Framework.
2126      *
2127      * @param  value a value to search for
2128      * @return {@code true} if and only if some key maps to the
2129      *         {@code value} argument in this table as
2130      *         determined by the {@code equals} method;
2131      *         {@code false} otherwise
2132      * @throws NullPointerException if the specified value is null
2133      */
contains(Object value)2134     public boolean contains(Object value) {
2135         return containsValue(value);
2136     }
2137 
2138     /**
2139      * Returns an enumeration of the keys in this table.
2140      *
2141      * @return an enumeration of the keys in this table
2142      * @see #keySet()
2143      */
keys()2144     public Enumeration<K> keys() {
2145         Node<K,V>[] t;
2146         int f = (t = table) == null ? 0 : t.length;
2147         return new KeyIterator<K,V>(t, f, 0, f, this);
2148     }
2149 
2150     /**
2151      * Returns an enumeration of the values in this table.
2152      *
2153      * @return an enumeration of the values in this table
2154      * @see #values()
2155      */
elements()2156     public Enumeration<V> elements() {
2157         Node<K,V>[] t;
2158         int f = (t = table) == null ? 0 : t.length;
2159         return new ValueIterator<K,V>(t, f, 0, f, this);
2160     }
2161 
2162     // ConcurrentHashMap-only methods
2163 
2164     /**
2165      * Returns the number of mappings. This method should be used
2166      * instead of {@link #size} because a ConcurrentHashMap may
2167      * contain more mappings than can be represented as an int. The
2168      * value returned is an estimate; the actual count may differ if
2169      * there are concurrent insertions or removals.
2170      *
2171      * @return the number of mappings
2172      * @since 1.8
2173      */
mappingCount()2174     public long mappingCount() {
2175         long n = sumCount();
2176         return (n < 0L) ? 0L : n; // ignore transient negative values
2177     }
2178 
2179     /**
2180      * Creates a new {@link Set} backed by a ConcurrentHashMap
2181      * from the given type to {@code Boolean.TRUE}.
2182      *
2183      * @param <K> the element type of the returned set
2184      * @return the new set
2185      * @since 1.8
2186      */
newKeySet()2187     public static <K> KeySetView<K,Boolean> newKeySet() {
2188         return new KeySetView<K,Boolean>
2189             (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2190     }
2191 
2192     /**
2193      * Creates a new {@link Set} backed by a ConcurrentHashMap
2194      * from the given type to {@code Boolean.TRUE}.
2195      *
2196      * @param initialCapacity The implementation performs internal
2197      * sizing to accommodate this many elements.
2198      * @param <K> the element type of the returned set
2199      * @return the new set
2200      * @throws IllegalArgumentException if the initial capacity of
2201      * elements is negative
2202      * @since 1.8
2203      */
newKeySet(int initialCapacity)2204     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2205         return new KeySetView<K,Boolean>
2206             (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2207     }
2208 
2209     /**
2210      * Returns a {@link Set} view of the keys in this map, using the
2211      * given common mapped value for any additions (i.e., {@link
2212      * Collection#add} and {@link Collection#addAll(Collection)}).
2213      * This is of course only appropriate if it is acceptable to use
2214      * the same value for all additions from this view.
2215      *
2216      * @param mappedValue the mapped value to use for any additions
2217      * @return the set view
2218      * @throws NullPointerException if the mappedValue is null
2219      */
keySet(V mappedValue)2220     public KeySetView<K,V> keySet(V mappedValue) {
2221         if (mappedValue == null)
2222             throw new NullPointerException();
2223         return new KeySetView<K,V>(this, mappedValue);
2224     }
2225 
    /* ---------------- Special Nodes -------------- */

    /**
     * A node inserted at head of bins during transfer operations.
     * Its hash is the sentinel MOVED; readers that hit one re-route
     * their lookup into {@code nextTable}.
     */
    static final class ForwardingNode<K,V> extends Node<K,V> {
        // The new table to which this bin's entries have been moved.
        final Node<K,V>[] nextTable;
        ForwardingNode(Node<K,V>[] tab) {
            super(MOVED, null, null, null);  // MOVED hash marks a forwarding node
            this.nextTable = tab;
        }

        Node<K,V> find(int h, Object k) {
            // loop to avoid arbitrarily deep recursion on forwarding nodes
            outer: for (Node<K,V>[] tab = nextTable;;) {
                Node<K,V> e; int n;
                if (k == null || tab == null || (n = tab.length) == 0 ||
                    (e = tabAt(tab, (n - 1) & h)) == null)
                    return null;
                // Walk the bin in tab looking for the key.
                for (;;) {
                    int eh; K ek;
                    if ((eh = e.hash) == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    if (eh < 0) {
                        // Negative hash: special node. If it forwards again,
                        // restart the scan in the newer table; otherwise
                        // delegate to that node's own find.
                        if (e instanceof ForwardingNode) {
                            tab = ((ForwardingNode<K,V>)e).nextTable;
                            continue outer;
                        }
                        else
                            return e.find(h, k);
                    }
                    if ((e = e.next) == null)
                        return null;
                }
            }
        }
    }
2264 
    /**
     * A place-holder node used in computeIfAbsent and compute.
     * It reserves a bin while a mapping function runs; it never
     * carries a key or value itself.
     */
    static final class ReservationNode<K,V> extends Node<K,V> {
        ReservationNode() {
            super(RESERVED, null, null, null);  // RESERVED hash; no key/value
        }

        Node<K,V> find(int h, Object k) {
            // A reservation holds no mapping, so lookups never match it.
            return null;
        }
    }
2277 
2278     /* ---------------- Table Initialization and Resizing -------------- */
2279 
2280     /**
2281      * Returns the stamp bits for resizing a table of size n.
2282      * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2283      */
resizeStamp(int n)2284     static final int resizeStamp(int n) {
2285         return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2286     }
2287 
    /**
     * Initializes table, using the size recorded in sizeCtl.
     * Exactly one thread wins the CAS to -1 and allocates; losers
     * spin via Thread.yield until the table is published.
     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                // Won the race: sizeCtl == -1 marks initialization in progress.
                try {
                    if ((tab = table) == null || tab.length == 0) {
                        // Positive sizeCtl carries the requested initial capacity.
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        sc = n - (n >>> 2);  // next resize threshold: 0.75 * n
                    }
                } finally {
                    sizeCtl = sc;  // publish threshold (even if allocation threw)
                }
                break;
            }
        }
        return tab;
    }
2313 
    /**
     * Adds to count, and if table is too small and not already
     * resizing, initiates transfer. If already resizing, helps
     * perform transfer if work is available.  Rechecks occupancy
     * after a transfer to see if another resize is already needed
     * because resizings are lagging additions.
     *
     * @param x the count to add
     * @param check if <0, don't check resize, if <= 1 only check if uncontended
     */
    private final void addCount(long x, int check) {
        CounterCell[] as; long b, s;
        // Fast path: CAS the base count. Once counter cells exist, or
        // when the CAS loses, spread the update across CounterCells.
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            CounterCell a; long v; int m;
            boolean uncontended = true;
            if (as == null || (m = as.length - 1) < 0 ||
                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                // No usable cell or contended CAS: slow path creates or
                // expands the cell table and retries until applied.
                fullAddCount(x, uncontended);
                return;
            }
            if (check <= 1)
                return;
            s = sumCount();
        }
        if (check >= 0) {
            // While the estimated count stays at or above the sizeCtl
            // threshold, start a resize or help the one in progress.
            Node<K,V>[] tab, nt; int n, sc;
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   (n = tab.length) < MAXIMUM_CAPACITY) {
                int rs = resizeStamp(n);
                if (sc < 0) {
                    // Resize already running: bail out if it is finishing,
                    // saturated with workers, or has no work left; else
                    // register as one more worker (sizeCtl + 1).
                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                        transferIndex <= 0)
                        break;
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                        transfer(tab, nt);
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                             (rs << RESIZE_STAMP_SHIFT) + 2))
                    // First resizer: install the stamped negative sizeCtl.
                    transfer(tab, null);
                s = sumCount();  // recheck occupancy after helping
            }
        }
    }
2361 
    /**
     * Helps transfer if a resize is in progress.
     *
     * @param tab the table in which the forwarding node was seen
     * @param f the node at the bin head (expected to be a ForwardingNode)
     * @return the table on which the caller should retry its operation
     */
    final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
        Node<K,V>[] nextTab; int sc;
        if (tab != null && (f instanceof ForwardingNode) &&
            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
            int rs = resizeStamp(tab.length);
            // Keep trying to join while this particular resize (same
            // nextTable and table) is still active.
            while (nextTab == nextTable && table == tab &&
                   (sc = sizeCtl) < 0) {
                // Stop if the stamp doesn't match this resize, it is
                // finishing/saturated, or no bins remain to claim.
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || transferIndex <= 0)
                    break;
                // Register as an additional worker and help.
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                    transfer(tab, nextTab);
                    break;
                }
            }
            return nextTab;
        }
        return table;
    }
2384 
    /**
     * Tries to presize table to accommodate the given number of elements.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        // Target capacity: 1.5*size + 1 rounded up to a power of two,
        // capped at MAXIMUM_CAPACITY.
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) {   // negative sizeCtl: init/resize in progress
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                // Table not yet created: initialize it here using the same
                // sizeCtl == -1 protocol as initTable.
                n = (sc > c) ? sc : c;
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings("unchecked")
                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                            table = nt;
                            sc = n - (n >>> 2);  // 0.75 * n threshold
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break;  // already large enough, or cannot grow further
            else if (tab == table) {
                // Start a resize as the first (and stamped) worker.
                int rs = resizeStamp(n);
                if (U.compareAndSwapInt(this, SIZECTL, sc,
                                        (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);
            }
        }
    }
2421 
    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation. Workers claim descending ranges of
     * {@code stride} bins via CAS on transferIndex; each processed
     * bin in the old table is replaced by a ForwardingNode.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        // Each worker claims "stride" consecutive bins at a time.
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                // Allocate the doubled table.
                @SuppressWarnings("unchecked")
                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;  // give up: disable further resizing
                return;
            }
            nextTable = nextTab;
            transferIndex = n;            // bins are claimed from high index down
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        boolean finishing = false; // to ensure sweep before committing nextTab
        for (int i = 0, bound = 0;;) {
            Node<K,V> f; int fh;
            // Claim the next bin, or CAS transferIndex to claim a new range.
            while (advance) {
                int nextIndex, nextBound;
                if (--i >= bound || finishing)
                    advance = false;
                else if ((nextIndex = transferIndex) <= 0) {
                    i = -1;               // no bins left to claim
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                int sc;
                if (finishing) {
                    // Last worker after the recheck sweep: publish the new
                    // table and set threshold to 0.75 * (2n).
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1);
                    return;
                }
                // Deregister this worker (sizeCtl - 1).
                if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                        return;           // other workers still active
                    finishing = advance = true;
                    i = n; // recheck before commit
                }
            }
            else if ((f = tabAt(tab, i)) == null)
                // Empty bin: just install the forwarding node.
                advance = casTabAt(tab, i, null, fwd);
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // Split the bin into a "low" list (stays at index i)
                        // and a "high" list (moves to i + n), keyed on hash bit n.
                        Node<K,V> ln, hn;
                        if (fh >= 0) {
                            // Linked bin: locate the trailing run whose nodes
                            // all fall in the same half, so that run can be
                            // reused without copying.
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            // Clone nodes before lastRun onto the proper list.
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);   // forward the old bin
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            // Tree bin: partition nodes into lo/hi doubly-linked
                            // lists, counting each side to decide tree vs. list.
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            // Untreeify small splits; reuse the original bin (t)
                            // unchanged when one side received every node.
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                    }
                }
            }
        }
    }
2558 
    /* ---------------- Counter support -------------- */

    /**
     * A padded cell for distributing counts.  Adapted from LongAdder
     * and Striped64.  See their internal docs for explanation.
     */
    //@jdk.internal.vm.annotation.Contended // Android-removed
    static final class CounterCell {
        // This cell's contribution to the map's element count;
        // updated via CAS in addCount/fullAddCount.
        volatile long value;
        CounterCell(long x) { value = x; }
    }
2570 
sumCount()2571     final long sumCount() {
2572         CounterCell[] as = counterCells; CounterCell a;
2573         long sum = baseCount;
2574         if (as != null) {
2575             for (int i = 0; i < as.length; ++i) {
2576                 if ((a = as[i]) != null)
2577                     sum += a.value;
2578             }
2579         }
2580         return sum;
2581     }
2582 
    // See LongAdder version for explanation
    /**
     * Slow path of addCount: initializes or grows the CounterCell
     * table, or applies the pending delta {@code x} to a cell or to
     * baseCount, looping until exactly one update succeeds.
     * cellsBusy acts as a spinlock (0 free, 1 held via CAS) guarding
     * creation/expansion of the cell table.
     */
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true;
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;  // release spinlock
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;                      // delta applied to the cell
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;             // first failure; rehash once more
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                // Move to a different cell for the next attempt.
                h = ThreadLocalRandom.advanceProbe(h);
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }
2664 
2665     /* ---------------- Conversion from/to TreeBins -------------- */
2666 
    /**
     * Replaces all linked nodes in bin at given index unless table is
     * too small, in which case resizes instead.
     *
     * <p>The bin head is locked while the list is copied, and the
     * original slot is rechecked under the lock so that a concurrent
     * removal/resize that changed the bin aborts the conversion.
     *
     * @param tab   the table (no-op if null)
     * @param index the bin index to treeify
     */
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n;
        if (tab != null) {
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
                tryPresize(n << 1);             // too small: grow instead
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                synchronized (b) {              // lock the bin head
                    if (tabAt(tab, index) == b) { // recheck: still same bin
                        TreeNode<K,V> hd = null, tl = null;
                        // Copy list nodes into a doubly-linked TreeNode chain;
                        // tree structure itself is built by the TreeBin ctor.
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        // Publish the TreeBin only after it is fully built.
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
2696 
2697     /**
2698      * Returns a list on non-TreeNodes replacing those in given list.
2699      */
untreeify(Node<K,V> b)2700     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2701         Node<K,V> hd = null, tl = null;
2702         for (Node<K,V> q = b; q != null; q = q.next) {
2703             Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2704             if (tl == null)
2705                 hd = p;
2706             else
2707                 tl.next = p;
2708             tl = p;
2709         }
2710         return hd;
2711     }
2712 
2713     /* ---------------- TreeNodes -------------- */
2714 
    /**
     * Nodes for use in TreeBins. Extends Node with red-black tree
     * linkage plus a {@code prev} pointer so a node can be unlinked
     * from the traversal ("next") list upon deletion.
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         *
         * @param h  the key's hash
         * @param k  the key (search is a no-op if null)
         * @param kc cached comparable class of k, or null if not yet computed
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)          // order by hash first
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;                   // exact key match
                    else if (pl == null)            // hash collision: one child
                        p = pr;
                    else if (pr == null)
                        p = pl;
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;    // use Comparable order
                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
                        return q;                   // unordered: search right...
                    else
                        p = pl;                     // ...then continue left
                } while (p != null);
            }
            return null;
        }
    }
2768 
2769     /* ---------------- TreeBins -------------- */
2770 
    /**
     * TreeNodes used at the heads of bins. TreeBins do not hold user
     * keys or values, but instead point to list of TreeNodes and
     * their root. They also maintain a parasitic read-write lock
     * forcing writers (who hold bin lock) to wait for readers (who do
     * not) to complete before tree restructuring operations.
     */
    static final class TreeBin<K,V> extends Node<K,V> {
        TreeNode<K,V> root;            // root of the red-black tree
        volatile TreeNode<K,V> first;  // head of the "next"-linked node list
        volatile Thread waiter;        // thread parked awaiting the write lock
        volatile int lockState;        // rw-lock state; encoding below
        // values for lockState
        static final int WRITER = 1; // set while holding write lock
        static final int WAITER = 2; // set when waiting for write lock
        static final int READER = 4; // increment value for setting read lock

        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable. We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit.
         *
         * <p>Never returns 0: falls back to identityHashCode comparison,
         * arbitrarily resolving remaining ties as -1.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        /**
         * Creates bin with initial set of nodes headed by b.
         * Builds the red-black tree by inserting each node of the
         * "next"-linked list in turn.
         */
        TreeBin(TreeNode<K,V> b) {
            super(TREEBIN, null, null, null); // sentinel hash marks this bin
            this.first = b;
            TreeNode<K,V> r = null;           // running tree root
            for (TreeNode<K,V> x = b, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;      // clear any stale tree links
                if (r == null) {
                    x.parent = null;
                    x.red = false;            // root is always black
                    r = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    // Standard BST insertion ordered by hash, then
                    // Comparable order, then tieBreakOrder.
                    for (TreeNode<K,V> p = r;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);
                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            r = balanceInsertion(r, x);
                            break;
                        }
                    }
                }
            }
            this.root = r;
            assert checkInvariants(root);
        }

        /**
         * Acquires write lock for tree restructuring.
         * Fast path: a single CAS from the fully-unlocked state.
         */
        private final void lockRoot() {
            if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
                contendedLock(); // offload to separate method
        }

        /**
         * Releases write lock for tree restructuring.
         * A plain volatile write suffices; no readers/waiters bits can
         * be set while the writer holds the lock (waiters re-CAS).
         */
        private final void unlockRoot() {
            lockState = 0;
        }

        /**
         * Possibly blocks awaiting root lock.  Spins, setting the
         * WAITER bit and parking once readers are seen, until the
         * WRITER bit can be CASed in.
         */
        private final void contendedLock() {
            boolean waiting = false;
            for (int s;;) {
                if (((s = lockState) & ~WAITER) == 0) {
                    // No writer and no readers: try to take the write lock.
                    if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
                        if (waiting)
                            waiter = null;   // clear our waiter registration
                        return;
                    }
                }
                else if ((s & WAITER) == 0) {
                    // Busy: announce ourselves so the last reader unparks us.
                    if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
                        waiting = true;
                        waiter = Thread.currentThread();
                    }
                }
                else if (waiting)
                    LockSupport.park(this);  // wait for unpark by last reader
            }
        }

        /**
         * Returns matching node or null if none. Tries to search
         * using tree comparisons from root, but continues linear
         * search when lock not available.
         */
        final Node<K,V> find(int h, Object k) {
            if (k != null) {
                for (Node<K,V> e = first; e != null; ) {
                    int s; K ek;
                    if (((s = lockState) & (WAITER|WRITER)) != 0) {
                        // Writer active or pending: fall back to scanning the
                        // linked list, which stays valid during restructuring.
                        if (e.hash == h &&
                            ((ek = e.key) == k || (ek != null && k.equals(ek))))
                            return e;
                        e = e.next;
                    }
                    else if (U.compareAndSwapInt(this, LOCKSTATE, s,
                                                 s + READER)) {
                        // Acquired a read lock: safe to search the tree.
                        TreeNode<K,V> r, p;
                        try {
                            p = ((r = root) == null ? null :
                                 r.findTreeNode(h, k, null));
                        } finally {
                            Thread w;
                            // Release read lock; if we were the last reader
                            // and a writer is waiting, wake it up.
                            if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
                                (READER|WAITER) && (w = waiter) != null)
                                LockSupport.unpark(w);
                        }
                        return p;
                    }
                }
            }
            return null;
        }

        /**
         * Finds or adds a node.  Caller holds the bin lock; the tree
         * write lock is taken only when insertion requires rebalancing.
         * @return null if added
         */
        final TreeNode<K,V> putTreeVal(int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false;
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if (p == null) {
                    // Empty bin: new node is both list head and tree root.
                    first = root = new TreeNode<K,V>(h, k, v, null, null);
                    break;
                }
                else if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                    return p;                 // existing mapping found
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    // Untotally-ordered tie: search both subtrees once for
                    // an equal key before falling back to tie-break order.
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    // Insert at leaf: link into list (at head) and tree.
                    TreeNode<K,V> x, f = first;
                    first = x = new TreeNode<K,V>(h, k, v, f, xp);
                    if (f != null)
                        f.prev = x;
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    if (!xp.red)
                        x.red = true;         // red child of black parent: done
                    else {
                        // Rebalancing restructures the tree, so exclude
                        // concurrent tree readers via the write lock.
                        lockRoot();
                        try {
                            root = balanceInsertion(root, x);
                        } finally {
                            unlockRoot();
                        }
                    }
                    break;
                }
            }
            assert checkInvariants(root);
            return null;
        }

        /**
         * Removes the given node, that must be present before this
         * call.  This is messier than typical red-black deletion code
         * because we cannot swap the contents of an interior node
         * with a leaf successor that is pinned by "next" pointers
         * that are accessible independently of lock. So instead we
         * swap the tree linkages.
         *
         * @return true if now too small, so should be untreeified
         */
        final boolean removeTreeNode(TreeNode<K,V> p) {
            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
            TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
            TreeNode<K,V> r, rl;
            if (pred == null)
                first = next;
            else
                pred.next = next;
            if (next != null)
                next.prev = pred;
            if (first == null) {
                root = null;
                return true;
            }
            if ((r = root) == null || r.right == null || // too small
                (rl = r.left) == null || rl.left == null)
                return true;
            lockRoot();                   // exclude tree readers while unlinking
            try {
                TreeNode<K,V> replacement;
                TreeNode<K,V> pl = p.left;
                TreeNode<K,V> pr = p.right;
                if (pl != null && pr != null) {
                    // Two children: swap tree linkage (not contents) of p
                    // with its in-order successor s.
                    TreeNode<K,V> s = pr, sl;
                    while ((sl = s.left) != null) // find successor
                        s = sl;
                    boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                    TreeNode<K,V> sr = s.right;
                    TreeNode<K,V> pp = p.parent;
                    if (s == pr) { // p was s's direct parent
                        p.parent = s;
                        s.right = p;
                    }
                    else {
                        TreeNode<K,V> sp = s.parent;
                        if ((p.parent = sp) != null) {
                            if (s == sp.left)
                                sp.left = p;
                            else
                                sp.right = p;
                        }
                        if ((s.right = pr) != null)
                            pr.parent = s;
                    }
                    p.left = null;
                    if ((p.right = sr) != null)
                        sr.parent = p;
                    if ((s.left = pl) != null)
                        pl.parent = s;
                    if ((s.parent = pp) == null)
                        r = s;
                    else if (p == pp.left)
                        pp.left = s;
                    else
                        pp.right = s;
                    if (sr != null)
                        replacement = sr;
                    else
                        replacement = p;
                }
                else if (pl != null)
                    replacement = pl;
                else if (pr != null)
                    replacement = pr;
                else
                    replacement = p;     // leaf: detached after rebalancing
                if (replacement != p) {
                    TreeNode<K,V> pp = replacement.parent = p.parent;
                    if (pp == null)
                        r = replacement;
                    else if (p == pp.left)
                        pp.left = replacement;
                    else
                        pp.right = replacement;
                    p.left = p.right = p.parent = null;
                }

                // Removing a red node cannot violate black-height invariants.
                root = (p.red) ? r : balanceDeletion(r, replacement);

                if (p == replacement) {  // detach pointers
                    TreeNode<K,V> pp;
                    if ((pp = p.parent) != null) {
                        if (p == pp.left)
                            pp.left = null;
                        else if (p == pp.right)
                            pp.right = null;
                        p.parent = null;
                    }
                }
            } finally {
                unlockRoot();
            }
            assert checkInvariants(root);
            return false;
        }

        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        /**
         * Rotates the subtree rooted at p to the left; returns the
         * (possibly changed) tree root.
         */
        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false;  // r became root: must be black
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }

        /**
         * Rotates the subtree rooted at p to the right; returns the
         * (possibly changed) tree root.
         */
        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false;  // l became root: must be black
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

        /**
         * Restores red-black invariants after inserting x; returns the
         * (possibly changed) tree root.
         */
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;                     // new nodes start red
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;            // x is root: color black, done
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;              // black parent: no violation
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        // Red uncle: recolor and continue from grandparent.
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else { // mirror image of the case above
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        /**
         * Restores red-black invariants after detaching a node whose
         * place is taken by x; returns the (possibly changed) root.
         */
        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;            // x is root: color black, done
                    return x;
                }
                else if (x.red) {
                    x.red = false;            // red node absorbs the deficit
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        // Red sibling: rotate to get a black sibling.
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            // Sibling with black children: recolor, move up.
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;         // done
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Checks invariants recursively for the tree of Nodes rooted at t:
         * list links are mutually consistent, child/parent links agree,
         * hashes are BST-ordered, and no red node has two red children.
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }

        private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
        private static final long LOCKSTATE;   // field offset for CAS on lockState
        static {
            try {
                LOCKSTATE = U.objectFieldOffset
                    (TreeBin.class.getDeclaredField("lockState"));
            } catch (ReflectiveOperationException e) {
                throw new Error(e);
            }
        }
    }
3314 
3315     /* ----------------Table Traversal -------------- */
3316 
    /**
     * Records the table, its length, and current traversal index for a
     * traverser that must process a region of a forwarded table before
     * proceeding with current table.
     */
    static final class TableStack<K,V> {
        int length;            // saved length of tab
        int index;             // saved traversal index into tab
        Node<K,V>[] tab;       // saved table
        TableStack<K,V> next;  // link to next saved frame (stack discipline)
    }
3328 
3329     /**
3330      * Encapsulates traversal for methods such as containsValue; also
3331      * serves as a base class for other iterators and spliterators.
3332      *
3333      * Method advance visits once each still-valid node that was
3334      * reachable upon iterator construction. It might miss some that
3335      * were added to a bin after the bin was visited, which is OK wrt
3336      * consistency guarantees. Maintaining this property in the face
3337      * of possible ongoing resizes requires a fair amount of
3338      * bookkeeping state that is difficult to optimize away amidst
3339      * volatile accesses.  Even so, traversal maintains reasonable
3340      * throughput.
3341      *
3342      * Normally, iteration proceeds bin-by-bin traversing lists.
3343      * However, if the table has been resized, then all future steps
3344      * must traverse both the bin at the current index as well as at
3345      * (index + baseSize); and so on for further resizings. To
3346      * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds check fails
3348      * for a table read.
3349      */
3350     static class Traverser<K,V> {
3351         Node<K,V>[] tab;        // current table; updated if resized
3352         Node<K,V> next;         // the next entry to use
3353         TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
3354         int index;              // index of bin to use next
3355         int baseIndex;          // current index of initial table
3356         int baseLimit;          // index bound for initial table
3357         final int baseSize;     // initial table size
3358 
        /**
         * Creates a traverser over tab, visiting base indices from
         * index (inclusive) to limit (exclusive); size is the length
         * of the initial table, used to locate forwarded slots.
         */
        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }
3366 
        /**
         * Advances if possible, returning next valid node, or null if none.
         * Follows ForwardingNodes into the next table (saving state so
         * the remainder of the old table is still visited) and descends
         * into TreeBins via their linked node list.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n;  // must use locals in checks
                if (e != null)
                    return next = e;      // continue within current bin
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;   // exhausted (or stale bounds)
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // Negative hash marks a special node.
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n); // revisit this region later
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first; // traverse via list
                    else
                        e = null;         // e.g. reservation node
                }
                if (stack != null)
                    recoverState(n);      // possibly pop saved table state
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }
3399 
3400         /**
3401          * Saves traversal state upon encountering a forwarding node.
3402          */
pushState(Node<K,V>[] t, int i, int n)3403         private void pushState(Node<K,V>[] t, int i, int n) {
3404             TableStack<K,V> s = spare;  // reuse if possible
3405             if (s != null)
3406                 spare = s.next;
3407             else
3408                 s = new TableStack<K,V>();
3409             s.tab = t;
3410             s.length = n;
3411             s.index = i;
3412             s.next = stack;
3413             stack = s;
3414         }
3415 
3416         /**
3417          * Possibly pops traversal state.
3418          *
3419          * @param n length of current table
3420          */
recoverState(int n)3421         private void recoverState(int n) {
3422             TableStack<K,V> s; int len;
3423             while ((s = stack) != null && (index += (len = s.length)) >= n) {
3424                 n = len;
3425                 index = s.index;
3426                 tab = s.tab;
3427                 s.tab = null;
3428                 TableStack<K,V> next = s.next;
3429                 s.next = spare; // save for reuse
3430                 stack = next;
3431                 spare = s;
3432             }
3433             if (s == null && (index += baseSize) >= n)
3434                 index = ++baseIndex;
3435         }
3436     }
3437 
3438     /**
3439      * Base of key, value, and entry Iterators. Adds fields to
3440      * Traverser to support iterator.remove.
3441      */
3442     static class BaseIterator<K,V> extends Traverser<K,V> {
3443         final ConcurrentHashMap<K,V> map;
3444         Node<K,V> lastReturned;
BaseIterator(Node<K,V>[] tab, int size, int index, int limit, ConcurrentHashMap<K,V> map)3445         BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3446                     ConcurrentHashMap<K,V> map) {
3447             super(tab, size, index, limit);
3448             this.map = map;
3449             advance();
3450         }
3451 
hasNext()3452         public final boolean hasNext() { return next != null; }
hasMoreElements()3453         public final boolean hasMoreElements() { return next != null; }
3454 
remove()3455         public final void remove() {
3456             Node<K,V> p;
3457             if ((p = lastReturned) == null)
3458                 throw new IllegalStateException();
3459             lastReturned = null;
3460             map.replaceNode(p.key, null, null);
3461         }
3462     }
3463 
3464     static final class KeyIterator<K,V> extends BaseIterator<K,V>
3465         implements Iterator<K>, Enumeration<K> {
KeyIterator(Node<K,V>[] tab, int index, int size, int limit, ConcurrentHashMap<K,V> map)3466         KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3467                     ConcurrentHashMap<K,V> map) {
3468             super(tab, index, size, limit, map);
3469         }
3470 
next()3471         public final K next() {
3472             Node<K,V> p;
3473             if ((p = next) == null)
3474                 throw new NoSuchElementException();
3475             K k = p.key;
3476             lastReturned = p;
3477             advance();
3478             return k;
3479         }
3480 
nextElement()3481         public final K nextElement() { return next(); }
3482     }
3483 
3484     static final class ValueIterator<K,V> extends BaseIterator<K,V>
3485         implements Iterator<V>, Enumeration<V> {
ValueIterator(Node<K,V>[] tab, int index, int size, int limit, ConcurrentHashMap<K,V> map)3486         ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3487                       ConcurrentHashMap<K,V> map) {
3488             super(tab, index, size, limit, map);
3489         }
3490 
next()3491         public final V next() {
3492             Node<K,V> p;
3493             if ((p = next) == null)
3494                 throw new NoSuchElementException();
3495             V v = p.val;
3496             lastReturned = p;
3497             advance();
3498             return v;
3499         }
3500 
nextElement()3501         public final V nextElement() { return next(); }
3502     }
3503 
3504     static final class EntryIterator<K,V> extends BaseIterator<K,V>
3505         implements Iterator<Map.Entry<K,V>> {
EntryIterator(Node<K,V>[] tab, int index, int size, int limit, ConcurrentHashMap<K,V> map)3506         EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3507                       ConcurrentHashMap<K,V> map) {
3508             super(tab, index, size, limit, map);
3509         }
3510 
next()3511         public final Map.Entry<K,V> next() {
3512             Node<K,V> p;
3513             if ((p = next) == null)
3514                 throw new NoSuchElementException();
3515             K k = p.key;
3516             V v = p.val;
3517             lastReturned = p;
3518             advance();
3519             return new MapEntry<K,V>(k, v, map);
3520         }
3521     }
3522 
3523     /**
3524      * Exported Entry for EntryIterator.
3525      */
3526     static final class MapEntry<K,V> implements Map.Entry<K,V> {
3527         final K key; // non-null
3528         V val;       // non-null
3529         final ConcurrentHashMap<K,V> map;
MapEntry(K key, V val, ConcurrentHashMap<K,V> map)3530         MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3531             this.key = key;
3532             this.val = val;
3533             this.map = map;
3534         }
getKey()3535         public K getKey()        { return key; }
getValue()3536         public V getValue()      { return val; }
hashCode()3537         public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
toString()3538         public String toString() {
3539             return Helpers.mapEntryToString(key, val);
3540         }
3541 
equals(Object o)3542         public boolean equals(Object o) {
3543             Object k, v; Map.Entry<?,?> e;
3544             return ((o instanceof Map.Entry) &&
3545                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3546                     (v = e.getValue()) != null &&
3547                     (k == key || k.equals(key)) &&
3548                     (v == val || v.equals(val)));
3549         }
3550 
3551         /**
3552          * Sets our entry's value and writes through to the map. The
3553          * value to return is somewhat arbitrary here. Since we do not
3554          * necessarily track asynchronous changes, the most recent
3555          * "previous" value could be different from what we return (or
3556          * could even have been removed, in which case the put will
3557          * re-establish). We do not and cannot guarantee more.
3558          */
setValue(V value)3559         public V setValue(V value) {
3560             if (value == null) throw new NullPointerException();
3561             V v = val;
3562             val = value;
3563             map.put(key, value);
3564             return v;
3565         }
3566     }
3567 
3568     static final class KeySpliterator<K,V> extends Traverser<K,V>
3569         implements Spliterator<K> {
3570         long est;               // size estimate
KeySpliterator(Node<K,V>[] tab, int size, int index, int limit, long est)3571         KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3572                        long est) {
3573             super(tab, size, index, limit);
3574             this.est = est;
3575         }
3576 
trySplit()3577         public KeySpliterator<K,V> trySplit() {
3578             int i, f, h;
3579             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3580                 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3581                                         f, est >>>= 1);
3582         }
3583 
forEachRemaining(Consumer<? super K> action)3584         public void forEachRemaining(Consumer<? super K> action) {
3585             if (action == null) throw new NullPointerException();
3586             for (Node<K,V> p; (p = advance()) != null;)
3587                 action.accept(p.key);
3588         }
3589 
tryAdvance(Consumer<? super K> action)3590         public boolean tryAdvance(Consumer<? super K> action) {
3591             if (action == null) throw new NullPointerException();
3592             Node<K,V> p;
3593             if ((p = advance()) == null)
3594                 return false;
3595             action.accept(p.key);
3596             return true;
3597         }
3598 
estimateSize()3599         public long estimateSize() { return est; }
3600 
characteristics()3601         public int characteristics() {
3602             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3603                 Spliterator.NONNULL;
3604         }
3605     }
3606 
3607     static final class ValueSpliterator<K,V> extends Traverser<K,V>
3608         implements Spliterator<V> {
3609         long est;               // size estimate
ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit, long est)3610         ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3611                          long est) {
3612             super(tab, size, index, limit);
3613             this.est = est;
3614         }
3615 
trySplit()3616         public ValueSpliterator<K,V> trySplit() {
3617             int i, f, h;
3618             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3619                 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3620                                           f, est >>>= 1);
3621         }
3622 
forEachRemaining(Consumer<? super V> action)3623         public void forEachRemaining(Consumer<? super V> action) {
3624             if (action == null) throw new NullPointerException();
3625             for (Node<K,V> p; (p = advance()) != null;)
3626                 action.accept(p.val);
3627         }
3628 
tryAdvance(Consumer<? super V> action)3629         public boolean tryAdvance(Consumer<? super V> action) {
3630             if (action == null) throw new NullPointerException();
3631             Node<K,V> p;
3632             if ((p = advance()) == null)
3633                 return false;
3634             action.accept(p.val);
3635             return true;
3636         }
3637 
estimateSize()3638         public long estimateSize() { return est; }
3639 
characteristics()3640         public int characteristics() {
3641             return Spliterator.CONCURRENT | Spliterator.NONNULL;
3642         }
3643     }
3644 
3645     static final class EntrySpliterator<K,V> extends Traverser<K,V>
3646         implements Spliterator<Map.Entry<K,V>> {
3647         final ConcurrentHashMap<K,V> map; // To export MapEntry
3648         long est;               // size estimate
EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit, long est, ConcurrentHashMap<K,V> map)3649         EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3650                          long est, ConcurrentHashMap<K,V> map) {
3651             super(tab, size, index, limit);
3652             this.map = map;
3653             this.est = est;
3654         }
3655 
trySplit()3656         public EntrySpliterator<K,V> trySplit() {
3657             int i, f, h;
3658             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3659                 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3660                                           f, est >>>= 1, map);
3661         }
3662 
forEachRemaining(Consumer<? super Map.Entry<K,V>> action)3663         public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3664             if (action == null) throw new NullPointerException();
3665             for (Node<K,V> p; (p = advance()) != null; )
3666                 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3667         }
3668 
tryAdvance(Consumer<? super Map.Entry<K,V>> action)3669         public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3670             if (action == null) throw new NullPointerException();
3671             Node<K,V> p;
3672             if ((p = advance()) == null)
3673                 return false;
3674             action.accept(new MapEntry<K,V>(p.key, p.val, map));
3675             return true;
3676         }
3677 
estimateSize()3678         public long estimateSize() { return est; }
3679 
characteristics()3680         public int characteristics() {
3681             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3682                 Spliterator.NONNULL;
3683         }
3684     }
3685 
3686     // Parallel bulk operations
3687 
3688     /**
3689      * Computes initial batch value for bulk tasks. The returned value
3690      * is approximately exp2 of the number of times (minus one) to
3691      * split task by two before executing leaf action. This value is
3692      * faster to compute and more convenient to use as a guide to
3693      * splitting than is the depth, since it is used while dividing by
3694      * two anyway.
3695      */
batchFor(long b)3696     final int batchFor(long b) {
3697         long n;
3698         if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3699             return 0;
3700         int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3701         return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3702     }
3703 
3704     /**
3705      * Performs the given action for each (key, value).
3706      *
3707      * @param parallelismThreshold the (estimated) number of elements
3708      * needed for this operation to be executed in parallel
3709      * @param action the action
3710      * @since 1.8
3711      */
forEach(long parallelismThreshold, BiConsumer<? super K,? super V> action)3712     public void forEach(long parallelismThreshold,
3713                         BiConsumer<? super K,? super V> action) {
3714         if (action == null) throw new NullPointerException();
3715         new ForEachMappingTask<K,V>
3716             (null, batchFor(parallelismThreshold), 0, 0, table,
3717              action).invoke();
3718     }
3719 
3720     /**
3721      * Performs the given action for each non-null transformation
3722      * of each (key, value).
3723      *
3724      * @param parallelismThreshold the (estimated) number of elements
3725      * needed for this operation to be executed in parallel
3726      * @param transformer a function returning the transformation
3727      * for an element, or null if there is no transformation (in
3728      * which case the action is not applied)
3729      * @param action the action
3730      * @param <U> the return type of the transformer
3731      * @since 1.8
3732      */
forEach(long parallelismThreshold, BiFunction<? super K, ? super V, ? extends U> transformer, Consumer<? super U> action)3733     public <U> void forEach(long parallelismThreshold,
3734                             BiFunction<? super K, ? super V, ? extends U> transformer,
3735                             Consumer<? super U> action) {
3736         if (transformer == null || action == null)
3737             throw new NullPointerException();
3738         new ForEachTransformedMappingTask<K,V,U>
3739             (null, batchFor(parallelismThreshold), 0, 0, table,
3740              transformer, action).invoke();
3741     }
3742 
3743     /**
3744      * Returns a non-null result from applying the given search
3745      * function on each (key, value), or null if none.  Upon
3746      * success, further element processing is suppressed and the
3747      * results of any other parallel invocations of the search
3748      * function are ignored.
3749      *
3750      * @param parallelismThreshold the (estimated) number of elements
3751      * needed for this operation to be executed in parallel
3752      * @param searchFunction a function returning a non-null
3753      * result on success, else null
3754      * @param <U> the return type of the search function
3755      * @return a non-null result from applying the given search
3756      * function on each (key, value), or null if none
3757      * @since 1.8
3758      */
search(long parallelismThreshold, BiFunction<? super K, ? super V, ? extends U> searchFunction)3759     public <U> U search(long parallelismThreshold,
3760                         BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3761         if (searchFunction == null) throw new NullPointerException();
3762         return new SearchMappingsTask<K,V,U>
3763             (null, batchFor(parallelismThreshold), 0, 0, table,
3764              searchFunction, new AtomicReference<U>()).invoke();
3765     }
3766 
3767     /**
3768      * Returns the result of accumulating the given transformation
3769      * of all (key, value) pairs using the given reducer to
3770      * combine values, or null if none.
3771      *
3772      * @param parallelismThreshold the (estimated) number of elements
3773      * needed for this operation to be executed in parallel
3774      * @param transformer a function returning the transformation
3775      * for an element, or null if there is no transformation (in
3776      * which case it is not combined)
3777      * @param reducer a commutative associative combining function
3778      * @param <U> the return type of the transformer
3779      * @return the result of accumulating the given transformation
3780      * of all (key, value) pairs
3781      * @since 1.8
3782      */
reduce(long parallelismThreshold, BiFunction<? super K, ? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)3783     public <U> U reduce(long parallelismThreshold,
3784                         BiFunction<? super K, ? super V, ? extends U> transformer,
3785                         BiFunction<? super U, ? super U, ? extends U> reducer) {
3786         if (transformer == null || reducer == null)
3787             throw new NullPointerException();
3788         return new MapReduceMappingsTask<K,V,U>
3789             (null, batchFor(parallelismThreshold), 0, 0, table,
3790              null, transformer, reducer).invoke();
3791     }
3792 
3793     /**
3794      * Returns the result of accumulating the given transformation
3795      * of all (key, value) pairs using the given reducer to
3796      * combine values, and the given basis as an identity value.
3797      *
3798      * @param parallelismThreshold the (estimated) number of elements
3799      * needed for this operation to be executed in parallel
3800      * @param transformer a function returning the transformation
3801      * for an element
3802      * @param basis the identity (initial default value) for the reduction
3803      * @param reducer a commutative associative combining function
3804      * @return the result of accumulating the given transformation
3805      * of all (key, value) pairs
3806      * @since 1.8
3807      */
reduceToDouble(long parallelismThreshold, ToDoubleBiFunction<? super K, ? super V> transformer, double basis, DoubleBinaryOperator reducer)3808     public double reduceToDouble(long parallelismThreshold,
3809                                  ToDoubleBiFunction<? super K, ? super V> transformer,
3810                                  double basis,
3811                                  DoubleBinaryOperator reducer) {
3812         if (transformer == null || reducer == null)
3813             throw new NullPointerException();
3814         return new MapReduceMappingsToDoubleTask<K,V>
3815             (null, batchFor(parallelismThreshold), 0, 0, table,
3816              null, transformer, basis, reducer).invoke();
3817     }
3818 
3819     /**
3820      * Returns the result of accumulating the given transformation
3821      * of all (key, value) pairs using the given reducer to
3822      * combine values, and the given basis as an identity value.
3823      *
3824      * @param parallelismThreshold the (estimated) number of elements
3825      * needed for this operation to be executed in parallel
3826      * @param transformer a function returning the transformation
3827      * for an element
3828      * @param basis the identity (initial default value) for the reduction
3829      * @param reducer a commutative associative combining function
3830      * @return the result of accumulating the given transformation
3831      * of all (key, value) pairs
3832      * @since 1.8
3833      */
reduceToLong(long parallelismThreshold, ToLongBiFunction<? super K, ? super V> transformer, long basis, LongBinaryOperator reducer)3834     public long reduceToLong(long parallelismThreshold,
3835                              ToLongBiFunction<? super K, ? super V> transformer,
3836                              long basis,
3837                              LongBinaryOperator reducer) {
3838         if (transformer == null || reducer == null)
3839             throw new NullPointerException();
3840         return new MapReduceMappingsToLongTask<K,V>
3841             (null, batchFor(parallelismThreshold), 0, 0, table,
3842              null, transformer, basis, reducer).invoke();
3843     }
3844 
3845     /**
3846      * Returns the result of accumulating the given transformation
3847      * of all (key, value) pairs using the given reducer to
3848      * combine values, and the given basis as an identity value.
3849      *
3850      * @param parallelismThreshold the (estimated) number of elements
3851      * needed for this operation to be executed in parallel
3852      * @param transformer a function returning the transformation
3853      * for an element
3854      * @param basis the identity (initial default value) for the reduction
3855      * @param reducer a commutative associative combining function
3856      * @return the result of accumulating the given transformation
3857      * of all (key, value) pairs
3858      * @since 1.8
3859      */
reduceToInt(long parallelismThreshold, ToIntBiFunction<? super K, ? super V> transformer, int basis, IntBinaryOperator reducer)3860     public int reduceToInt(long parallelismThreshold,
3861                            ToIntBiFunction<? super K, ? super V> transformer,
3862                            int basis,
3863                            IntBinaryOperator reducer) {
3864         if (transformer == null || reducer == null)
3865             throw new NullPointerException();
3866         return new MapReduceMappingsToIntTask<K,V>
3867             (null, batchFor(parallelismThreshold), 0, 0, table,
3868              null, transformer, basis, reducer).invoke();
3869     }
3870 
3871     /**
3872      * Performs the given action for each key.
3873      *
3874      * @param parallelismThreshold the (estimated) number of elements
3875      * needed for this operation to be executed in parallel
3876      * @param action the action
3877      * @since 1.8
3878      */
forEachKey(long parallelismThreshold, Consumer<? super K> action)3879     public void forEachKey(long parallelismThreshold,
3880                            Consumer<? super K> action) {
3881         if (action == null) throw new NullPointerException();
3882         new ForEachKeyTask<K,V>
3883             (null, batchFor(parallelismThreshold), 0, 0, table,
3884              action).invoke();
3885     }
3886 
3887     /**
3888      * Performs the given action for each non-null transformation
3889      * of each key.
3890      *
3891      * @param parallelismThreshold the (estimated) number of elements
3892      * needed for this operation to be executed in parallel
3893      * @param transformer a function returning the transformation
3894      * for an element, or null if there is no transformation (in
3895      * which case the action is not applied)
3896      * @param action the action
3897      * @param <U> the return type of the transformer
3898      * @since 1.8
3899      */
forEachKey(long parallelismThreshold, Function<? super K, ? extends U> transformer, Consumer<? super U> action)3900     public <U> void forEachKey(long parallelismThreshold,
3901                                Function<? super K, ? extends U> transformer,
3902                                Consumer<? super U> action) {
3903         if (transformer == null || action == null)
3904             throw new NullPointerException();
3905         new ForEachTransformedKeyTask<K,V,U>
3906             (null, batchFor(parallelismThreshold), 0, 0, table,
3907              transformer, action).invoke();
3908     }
3909 
3910     /**
3911      * Returns a non-null result from applying the given search
3912      * function on each key, or null if none. Upon success,
3913      * further element processing is suppressed and the results of
3914      * any other parallel invocations of the search function are
3915      * ignored.
3916      *
3917      * @param parallelismThreshold the (estimated) number of elements
3918      * needed for this operation to be executed in parallel
3919      * @param searchFunction a function returning a non-null
3920      * result on success, else null
3921      * @param <U> the return type of the search function
3922      * @return a non-null result from applying the given search
3923      * function on each key, or null if none
3924      * @since 1.8
3925      */
searchKeys(long parallelismThreshold, Function<? super K, ? extends U> searchFunction)3926     public <U> U searchKeys(long parallelismThreshold,
3927                             Function<? super K, ? extends U> searchFunction) {
3928         if (searchFunction == null) throw new NullPointerException();
3929         return new SearchKeysTask<K,V,U>
3930             (null, batchFor(parallelismThreshold), 0, 0, table,
3931              searchFunction, new AtomicReference<U>()).invoke();
3932     }
3933 
3934     /**
3935      * Returns the result of accumulating all keys using the given
3936      * reducer to combine values, or null if none.
3937      *
3938      * @param parallelismThreshold the (estimated) number of elements
3939      * needed for this operation to be executed in parallel
3940      * @param reducer a commutative associative combining function
3941      * @return the result of accumulating all keys using the given
3942      * reducer to combine values, or null if none
3943      * @since 1.8
3944      */
reduceKeys(long parallelismThreshold, BiFunction<? super K, ? super K, ? extends K> reducer)3945     public K reduceKeys(long parallelismThreshold,
3946                         BiFunction<? super K, ? super K, ? extends K> reducer) {
3947         if (reducer == null) throw new NullPointerException();
3948         return new ReduceKeysTask<K,V>
3949             (null, batchFor(parallelismThreshold), 0, 0, table,
3950              null, reducer).invoke();
3951     }
3952 
3953     /**
3954      * Returns the result of accumulating the given transformation
3955      * of all keys using the given reducer to combine values, or
3956      * null if none.
3957      *
3958      * @param parallelismThreshold the (estimated) number of elements
3959      * needed for this operation to be executed in parallel
3960      * @param transformer a function returning the transformation
3961      * for an element, or null if there is no transformation (in
3962      * which case it is not combined)
3963      * @param reducer a commutative associative combining function
3964      * @param <U> the return type of the transformer
3965      * @return the result of accumulating the given transformation
3966      * of all keys
3967      * @since 1.8
3968      */
reduceKeys(long parallelismThreshold, Function<? super K, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)3969     public <U> U reduceKeys(long parallelismThreshold,
3970                             Function<? super K, ? extends U> transformer,
3971          BiFunction<? super U, ? super U, ? extends U> reducer) {
3972         if (transformer == null || reducer == null)
3973             throw new NullPointerException();
3974         return new MapReduceKeysTask<K,V,U>
3975             (null, batchFor(parallelismThreshold), 0, 0, table,
3976              null, transformer, reducer).invoke();
3977     }
3978 
3979     /**
3980      * Returns the result of accumulating the given transformation
3981      * of all keys using the given reducer to combine values, and
3982      * the given basis as an identity value.
3983      *
3984      * @param parallelismThreshold the (estimated) number of elements
3985      * needed for this operation to be executed in parallel
3986      * @param transformer a function returning the transformation
3987      * for an element
3988      * @param basis the identity (initial default value) for the reduction
3989      * @param reducer a commutative associative combining function
3990      * @return the result of accumulating the given transformation
3991      * of all keys
3992      * @since 1.8
3993      */
reduceKeysToDouble(long parallelismThreshold, ToDoubleFunction<? super K> transformer, double basis, DoubleBinaryOperator reducer)3994     public double reduceKeysToDouble(long parallelismThreshold,
3995                                      ToDoubleFunction<? super K> transformer,
3996                                      double basis,
3997                                      DoubleBinaryOperator reducer) {
3998         if (transformer == null || reducer == null)
3999             throw new NullPointerException();
4000         return new MapReduceKeysToDoubleTask<K,V>
4001             (null, batchFor(parallelismThreshold), 0, 0, table,
4002              null, transformer, basis, reducer).invoke();
4003     }
4004 
4005     /**
4006      * Returns the result of accumulating the given transformation
4007      * of all keys using the given reducer to combine values, and
4008      * the given basis as an identity value.
4009      *
4010      * @param parallelismThreshold the (estimated) number of elements
4011      * needed for this operation to be executed in parallel
4012      * @param transformer a function returning the transformation
4013      * for an element
4014      * @param basis the identity (initial default value) for the reduction
4015      * @param reducer a commutative associative combining function
4016      * @return the result of accumulating the given transformation
4017      * of all keys
4018      * @since 1.8
4019      */
reduceKeysToLong(long parallelismThreshold, ToLongFunction<? super K> transformer, long basis, LongBinaryOperator reducer)4020     public long reduceKeysToLong(long parallelismThreshold,
4021                                  ToLongFunction<? super K> transformer,
4022                                  long basis,
4023                                  LongBinaryOperator reducer) {
4024         if (transformer == null || reducer == null)
4025             throw new NullPointerException();
4026         return new MapReduceKeysToLongTask<K,V>
4027             (null, batchFor(parallelismThreshold), 0, 0, table,
4028              null, transformer, basis, reducer).invoke();
4029     }
4030 
4031     /**
4032      * Returns the result of accumulating the given transformation
4033      * of all keys using the given reducer to combine values, and
4034      * the given basis as an identity value.
4035      *
4036      * @param parallelismThreshold the (estimated) number of elements
4037      * needed for this operation to be executed in parallel
4038      * @param transformer a function returning the transformation
4039      * for an element
4040      * @param basis the identity (initial default value) for the reduction
4041      * @param reducer a commutative associative combining function
4042      * @return the result of accumulating the given transformation
4043      * of all keys
4044      * @since 1.8
4045      */
reduceKeysToInt(long parallelismThreshold, ToIntFunction<? super K> transformer, int basis, IntBinaryOperator reducer)4046     public int reduceKeysToInt(long parallelismThreshold,
4047                                ToIntFunction<? super K> transformer,
4048                                int basis,
4049                                IntBinaryOperator reducer) {
4050         if (transformer == null || reducer == null)
4051             throw new NullPointerException();
4052         return new MapReduceKeysToIntTask<K,V>
4053             (null, batchFor(parallelismThreshold), 0, 0, table,
4054              null, transformer, basis, reducer).invoke();
4055     }
4056 
4057     /**
4058      * Performs the given action for each value.
4059      *
4060      * @param parallelismThreshold the (estimated) number of elements
4061      * needed for this operation to be executed in parallel
4062      * @param action the action
4063      * @since 1.8
4064      */
forEachValue(long parallelismThreshold, Consumer<? super V> action)4065     public void forEachValue(long parallelismThreshold,
4066                              Consumer<? super V> action) {
4067         if (action == null)
4068             throw new NullPointerException();
4069         new ForEachValueTask<K,V>
4070             (null, batchFor(parallelismThreshold), 0, 0, table,
4071              action).invoke();
4072     }
4073 
4074     /**
4075      * Performs the given action for each non-null transformation
4076      * of each value.
4077      *
4078      * @param parallelismThreshold the (estimated) number of elements
4079      * needed for this operation to be executed in parallel
4080      * @param transformer a function returning the transformation
4081      * for an element, or null if there is no transformation (in
4082      * which case the action is not applied)
4083      * @param action the action
4084      * @param <U> the return type of the transformer
4085      * @since 1.8
4086      */
forEachValue(long parallelismThreshold, Function<? super V, ? extends U> transformer, Consumer<? super U> action)4087     public <U> void forEachValue(long parallelismThreshold,
4088                                  Function<? super V, ? extends U> transformer,
4089                                  Consumer<? super U> action) {
4090         if (transformer == null || action == null)
4091             throw new NullPointerException();
4092         new ForEachTransformedValueTask<K,V,U>
4093             (null, batchFor(parallelismThreshold), 0, 0, table,
4094              transformer, action).invoke();
4095     }
4096 
4097     /**
4098      * Returns a non-null result from applying the given search
4099      * function on each value, or null if none.  Upon success,
4100      * further element processing is suppressed and the results of
4101      * any other parallel invocations of the search function are
4102      * ignored.
4103      *
4104      * @param parallelismThreshold the (estimated) number of elements
4105      * needed for this operation to be executed in parallel
4106      * @param searchFunction a function returning a non-null
4107      * result on success, else null
4108      * @param <U> the return type of the search function
4109      * @return a non-null result from applying the given search
4110      * function on each value, or null if none
4111      * @since 1.8
4112      */
searchValues(long parallelismThreshold, Function<? super V, ? extends U> searchFunction)4113     public <U> U searchValues(long parallelismThreshold,
4114                               Function<? super V, ? extends U> searchFunction) {
4115         if (searchFunction == null) throw new NullPointerException();
4116         return new SearchValuesTask<K,V,U>
4117             (null, batchFor(parallelismThreshold), 0, 0, table,
4118              searchFunction, new AtomicReference<U>()).invoke();
4119     }
4120 
4121     /**
4122      * Returns the result of accumulating all values using the
4123      * given reducer to combine values, or null if none.
4124      *
4125      * @param parallelismThreshold the (estimated) number of elements
4126      * needed for this operation to be executed in parallel
4127      * @param reducer a commutative associative combining function
4128      * @return the result of accumulating all values
4129      * @since 1.8
4130      */
reduceValues(long parallelismThreshold, BiFunction<? super V, ? super V, ? extends V> reducer)4131     public V reduceValues(long parallelismThreshold,
4132                           BiFunction<? super V, ? super V, ? extends V> reducer) {
4133         if (reducer == null) throw new NullPointerException();
4134         return new ReduceValuesTask<K,V>
4135             (null, batchFor(parallelismThreshold), 0, 0, table,
4136              null, reducer).invoke();
4137     }
4138 
4139     /**
4140      * Returns the result of accumulating the given transformation
4141      * of all values using the given reducer to combine values, or
4142      * null if none.
4143      *
4144      * @param parallelismThreshold the (estimated) number of elements
4145      * needed for this operation to be executed in parallel
4146      * @param transformer a function returning the transformation
4147      * for an element, or null if there is no transformation (in
4148      * which case it is not combined)
4149      * @param reducer a commutative associative combining function
4150      * @param <U> the return type of the transformer
4151      * @return the result of accumulating the given transformation
4152      * of all values
4153      * @since 1.8
4154      */
reduceValues(long parallelismThreshold, Function<? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)4155     public <U> U reduceValues(long parallelismThreshold,
4156                               Function<? super V, ? extends U> transformer,
4157                               BiFunction<? super U, ? super U, ? extends U> reducer) {
4158         if (transformer == null || reducer == null)
4159             throw new NullPointerException();
4160         return new MapReduceValuesTask<K,V,U>
4161             (null, batchFor(parallelismThreshold), 0, 0, table,
4162              null, transformer, reducer).invoke();
4163     }
4164 
4165     /**
4166      * Returns the result of accumulating the given transformation
4167      * of all values using the given reducer to combine values,
4168      * and the given basis as an identity value.
4169      *
4170      * @param parallelismThreshold the (estimated) number of elements
4171      * needed for this operation to be executed in parallel
4172      * @param transformer a function returning the transformation
4173      * for an element
4174      * @param basis the identity (initial default value) for the reduction
4175      * @param reducer a commutative associative combining function
4176      * @return the result of accumulating the given transformation
4177      * of all values
4178      * @since 1.8
4179      */
reduceValuesToDouble(long parallelismThreshold, ToDoubleFunction<? super V> transformer, double basis, DoubleBinaryOperator reducer)4180     public double reduceValuesToDouble(long parallelismThreshold,
4181                                        ToDoubleFunction<? super V> transformer,
4182                                        double basis,
4183                                        DoubleBinaryOperator reducer) {
4184         if (transformer == null || reducer == null)
4185             throw new NullPointerException();
4186         return new MapReduceValuesToDoubleTask<K,V>
4187             (null, batchFor(parallelismThreshold), 0, 0, table,
4188              null, transformer, basis, reducer).invoke();
4189     }
4190 
4191     /**
4192      * Returns the result of accumulating the given transformation
4193      * of all values using the given reducer to combine values,
4194      * and the given basis as an identity value.
4195      *
4196      * @param parallelismThreshold the (estimated) number of elements
4197      * needed for this operation to be executed in parallel
4198      * @param transformer a function returning the transformation
4199      * for an element
4200      * @param basis the identity (initial default value) for the reduction
4201      * @param reducer a commutative associative combining function
4202      * @return the result of accumulating the given transformation
4203      * of all values
4204      * @since 1.8
4205      */
reduceValuesToLong(long parallelismThreshold, ToLongFunction<? super V> transformer, long basis, LongBinaryOperator reducer)4206     public long reduceValuesToLong(long parallelismThreshold,
4207                                    ToLongFunction<? super V> transformer,
4208                                    long basis,
4209                                    LongBinaryOperator reducer) {
4210         if (transformer == null || reducer == null)
4211             throw new NullPointerException();
4212         return new MapReduceValuesToLongTask<K,V>
4213             (null, batchFor(parallelismThreshold), 0, 0, table,
4214              null, transformer, basis, reducer).invoke();
4215     }
4216 
4217     /**
4218      * Returns the result of accumulating the given transformation
4219      * of all values using the given reducer to combine values,
4220      * and the given basis as an identity value.
4221      *
4222      * @param parallelismThreshold the (estimated) number of elements
4223      * needed for this operation to be executed in parallel
4224      * @param transformer a function returning the transformation
4225      * for an element
4226      * @param basis the identity (initial default value) for the reduction
4227      * @param reducer a commutative associative combining function
4228      * @return the result of accumulating the given transformation
4229      * of all values
4230      * @since 1.8
4231      */
reduceValuesToInt(long parallelismThreshold, ToIntFunction<? super V> transformer, int basis, IntBinaryOperator reducer)4232     public int reduceValuesToInt(long parallelismThreshold,
4233                                  ToIntFunction<? super V> transformer,
4234                                  int basis,
4235                                  IntBinaryOperator reducer) {
4236         if (transformer == null || reducer == null)
4237             throw new NullPointerException();
4238         return new MapReduceValuesToIntTask<K,V>
4239             (null, batchFor(parallelismThreshold), 0, 0, table,
4240              null, transformer, basis, reducer).invoke();
4241     }
4242 
4243     /**
4244      * Performs the given action for each entry.
4245      *
4246      * @param parallelismThreshold the (estimated) number of elements
4247      * needed for this operation to be executed in parallel
4248      * @param action the action
4249      * @since 1.8
4250      */
forEachEntry(long parallelismThreshold, Consumer<? super Map.Entry<K,V>> action)4251     public void forEachEntry(long parallelismThreshold,
4252                              Consumer<? super Map.Entry<K,V>> action) {
4253         if (action == null) throw new NullPointerException();
4254         new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4255                                   action).invoke();
4256     }
4257 
4258     /**
4259      * Performs the given action for each non-null transformation
4260      * of each entry.
4261      *
4262      * @param parallelismThreshold the (estimated) number of elements
4263      * needed for this operation to be executed in parallel
4264      * @param transformer a function returning the transformation
4265      * for an element, or null if there is no transformation (in
4266      * which case the action is not applied)
4267      * @param action the action
4268      * @param <U> the return type of the transformer
4269      * @since 1.8
4270      */
forEachEntry(long parallelismThreshold, Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action)4271     public <U> void forEachEntry(long parallelismThreshold,
4272                                  Function<Map.Entry<K,V>, ? extends U> transformer,
4273                                  Consumer<? super U> action) {
4274         if (transformer == null || action == null)
4275             throw new NullPointerException();
4276         new ForEachTransformedEntryTask<K,V,U>
4277             (null, batchFor(parallelismThreshold), 0, 0, table,
4278              transformer, action).invoke();
4279     }
4280 
4281     /**
4282      * Returns a non-null result from applying the given search
4283      * function on each entry, or null if none.  Upon success,
4284      * further element processing is suppressed and the results of
4285      * any other parallel invocations of the search function are
4286      * ignored.
4287      *
4288      * @param parallelismThreshold the (estimated) number of elements
4289      * needed for this operation to be executed in parallel
4290      * @param searchFunction a function returning a non-null
4291      * result on success, else null
4292      * @param <U> the return type of the search function
4293      * @return a non-null result from applying the given search
4294      * function on each entry, or null if none
4295      * @since 1.8
4296      */
searchEntries(long parallelismThreshold, Function<Map.Entry<K,V>, ? extends U> searchFunction)4297     public <U> U searchEntries(long parallelismThreshold,
4298                                Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4299         if (searchFunction == null) throw new NullPointerException();
4300         return new SearchEntriesTask<K,V,U>
4301             (null, batchFor(parallelismThreshold), 0, 0, table,
4302              searchFunction, new AtomicReference<U>()).invoke();
4303     }
4304 
4305     /**
4306      * Returns the result of accumulating all entries using the
4307      * given reducer to combine values, or null if none.
4308      *
4309      * @param parallelismThreshold the (estimated) number of elements
4310      * needed for this operation to be executed in parallel
4311      * @param reducer a commutative associative combining function
4312      * @return the result of accumulating all entries
4313      * @since 1.8
4314      */
reduceEntries(long parallelismThreshold, BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer)4315     public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4316                                         BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4317         if (reducer == null) throw new NullPointerException();
4318         return new ReduceEntriesTask<K,V>
4319             (null, batchFor(parallelismThreshold), 0, 0, table,
4320              null, reducer).invoke();
4321     }
4322 
4323     /**
4324      * Returns the result of accumulating the given transformation
4325      * of all entries using the given reducer to combine values,
4326      * or null if none.
4327      *
4328      * @param parallelismThreshold the (estimated) number of elements
4329      * needed for this operation to be executed in parallel
4330      * @param transformer a function returning the transformation
4331      * for an element, or null if there is no transformation (in
4332      * which case it is not combined)
4333      * @param reducer a commutative associative combining function
4334      * @param <U> the return type of the transformer
4335      * @return the result of accumulating the given transformation
4336      * of all entries
4337      * @since 1.8
4338      */
reduceEntries(long parallelismThreshold, Function<Map.Entry<K,V>, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)4339     public <U> U reduceEntries(long parallelismThreshold,
4340                                Function<Map.Entry<K,V>, ? extends U> transformer,
4341                                BiFunction<? super U, ? super U, ? extends U> reducer) {
4342         if (transformer == null || reducer == null)
4343             throw new NullPointerException();
4344         return new MapReduceEntriesTask<K,V,U>
4345             (null, batchFor(parallelismThreshold), 0, 0, table,
4346              null, transformer, reducer).invoke();
4347     }
4348 
4349     /**
4350      * Returns the result of accumulating the given transformation
4351      * of all entries using the given reducer to combine values,
4352      * and the given basis as an identity value.
4353      *
4354      * @param parallelismThreshold the (estimated) number of elements
4355      * needed for this operation to be executed in parallel
4356      * @param transformer a function returning the transformation
4357      * for an element
4358      * @param basis the identity (initial default value) for the reduction
4359      * @param reducer a commutative associative combining function
4360      * @return the result of accumulating the given transformation
4361      * of all entries
4362      * @since 1.8
4363      */
reduceEntriesToDouble(long parallelismThreshold, ToDoubleFunction<Map.Entry<K,V>> transformer, double basis, DoubleBinaryOperator reducer)4364     public double reduceEntriesToDouble(long parallelismThreshold,
4365                                         ToDoubleFunction<Map.Entry<K,V>> transformer,
4366                                         double basis,
4367                                         DoubleBinaryOperator reducer) {
4368         if (transformer == null || reducer == null)
4369             throw new NullPointerException();
4370         return new MapReduceEntriesToDoubleTask<K,V>
4371             (null, batchFor(parallelismThreshold), 0, 0, table,
4372              null, transformer, basis, reducer).invoke();
4373     }
4374 
4375     /**
4376      * Returns the result of accumulating the given transformation
4377      * of all entries using the given reducer to combine values,
4378      * and the given basis as an identity value.
4379      *
4380      * @param parallelismThreshold the (estimated) number of elements
4381      * needed for this operation to be executed in parallel
4382      * @param transformer a function returning the transformation
4383      * for an element
4384      * @param basis the identity (initial default value) for the reduction
4385      * @param reducer a commutative associative combining function
4386      * @return the result of accumulating the given transformation
4387      * of all entries
4388      * @since 1.8
4389      */
reduceEntriesToLong(long parallelismThreshold, ToLongFunction<Map.Entry<K,V>> transformer, long basis, LongBinaryOperator reducer)4390     public long reduceEntriesToLong(long parallelismThreshold,
4391                                     ToLongFunction<Map.Entry<K,V>> transformer,
4392                                     long basis,
4393                                     LongBinaryOperator reducer) {
4394         if (transformer == null || reducer == null)
4395             throw new NullPointerException();
4396         return new MapReduceEntriesToLongTask<K,V>
4397             (null, batchFor(parallelismThreshold), 0, 0, table,
4398              null, transformer, basis, reducer).invoke();
4399     }
4400 
4401     /**
4402      * Returns the result of accumulating the given transformation
4403      * of all entries using the given reducer to combine values,
4404      * and the given basis as an identity value.
4405      *
4406      * @param parallelismThreshold the (estimated) number of elements
4407      * needed for this operation to be executed in parallel
4408      * @param transformer a function returning the transformation
4409      * for an element
4410      * @param basis the identity (initial default value) for the reduction
4411      * @param reducer a commutative associative combining function
4412      * @return the result of accumulating the given transformation
4413      * of all entries
4414      * @since 1.8
4415      */
reduceEntriesToInt(long parallelismThreshold, ToIntFunction<Map.Entry<K,V>> transformer, int basis, IntBinaryOperator reducer)4416     public int reduceEntriesToInt(long parallelismThreshold,
4417                                   ToIntFunction<Map.Entry<K,V>> transformer,
4418                                   int basis,
4419                                   IntBinaryOperator reducer) {
4420         if (transformer == null || reducer == null)
4421             throw new NullPointerException();
4422         return new MapReduceEntriesToIntTask<K,V>
4423             (null, batchFor(parallelismThreshold), 0, 0, table,
4424              null, transformer, basis, reducer).invoke();
4425     }
4426 
4427 
4428     /* ----------------Views -------------- */
4429 
4430     /**
4431      * Base class for views.
4432      */
4433     abstract static class CollectionView<K,V,E>
4434         implements Collection<E>, java.io.Serializable {
4435         private static final long serialVersionUID = 7249069246763182397L;
4436         final ConcurrentHashMap<K,V> map;
CollectionView(ConcurrentHashMap<K,V> map)4437         CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }
4438 
4439         /**
4440          * Returns the map backing this view.
4441          *
4442          * @return the map backing this view
4443          */
getMap()4444         public ConcurrentHashMap<K,V> getMap() { return map; }
4445 
4446         /**
4447          * Removes all of the elements from this view, by removing all
4448          * the mappings from the map backing this view.
4449          */
clear()4450         public final void clear()      { map.clear(); }
size()4451         public final int size()        { return map.size(); }
isEmpty()4452         public final boolean isEmpty() { return map.isEmpty(); }
4453 
4454         // implementations below rely on concrete classes supplying these
4455         // abstract methods
4456         /**
4457          * Returns an iterator over the elements in this collection.
4458          *
4459          * <p>The returned iterator is
4460          * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
4461          *
4462          * @return an iterator over the elements in this collection
4463          */
iterator()4464         public abstract Iterator<E> iterator();
contains(Object o)4465         public abstract boolean contains(Object o);
remove(Object o)4466         public abstract boolean remove(Object o);
4467 
4468         private static final String OOME_MSG = "Required array size too large";
4469 
toArray()4470         public final Object[] toArray() {
4471             long sz = map.mappingCount();
4472             if (sz > MAX_ARRAY_SIZE)
4473                 throw new OutOfMemoryError(OOME_MSG);
4474             int n = (int)sz;
4475             Object[] r = new Object[n];
4476             int i = 0;
4477             for (E e : this) {
4478                 if (i == n) {
4479                     if (n >= MAX_ARRAY_SIZE)
4480                         throw new OutOfMemoryError(OOME_MSG);
4481                     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4482                         n = MAX_ARRAY_SIZE;
4483                     else
4484                         n += (n >>> 1) + 1;
4485                     r = Arrays.copyOf(r, n);
4486                 }
4487                 r[i++] = e;
4488             }
4489             return (i == n) ? r : Arrays.copyOf(r, i);
4490         }
4491 
4492         @SuppressWarnings("unchecked")
toArray(T[] a)4493         public final <T> T[] toArray(T[] a) {
4494             long sz = map.mappingCount();
4495             if (sz > MAX_ARRAY_SIZE)
4496                 throw new OutOfMemoryError(OOME_MSG);
4497             int m = (int)sz;
4498             T[] r = (a.length >= m) ? a :
4499                 (T[])java.lang.reflect.Array
4500                 .newInstance(a.getClass().getComponentType(), m);
4501             int n = r.length;
4502             int i = 0;
4503             for (E e : this) {
4504                 if (i == n) {
4505                     if (n >= MAX_ARRAY_SIZE)
4506                         throw new OutOfMemoryError(OOME_MSG);
4507                     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4508                         n = MAX_ARRAY_SIZE;
4509                     else
4510                         n += (n >>> 1) + 1;
4511                     r = Arrays.copyOf(r, n);
4512                 }
4513                 r[i++] = (T)e;
4514             }
4515             if (a == r && i < n) {
4516                 r[i] = null; // null-terminate
4517                 return r;
4518             }
4519             return (i == n) ? r : Arrays.copyOf(r, i);
4520         }
4521 
4522         /**
4523          * Returns a string representation of this collection.
4524          * The string representation consists of the string representations
4525          * of the collection's elements in the order they are returned by
4526          * its iterator, enclosed in square brackets ({@code "[]"}).
4527          * Adjacent elements are separated by the characters {@code ", "}
4528          * (comma and space).  Elements are converted to strings as by
4529          * {@link String#valueOf(Object)}.
4530          *
4531          * @return a string representation of this collection
4532          */
toString()4533         public final String toString() {
4534             StringBuilder sb = new StringBuilder();
4535             sb.append('[');
4536             Iterator<E> it = iterator();
4537             if (it.hasNext()) {
4538                 for (;;) {
4539                     Object e = it.next();
4540                     sb.append(e == this ? "(this Collection)" : e);
4541                     if (!it.hasNext())
4542                         break;
4543                     sb.append(',').append(' ');
4544                 }
4545             }
4546             return sb.append(']').toString();
4547         }
4548 
containsAll(Collection<?> c)4549         public final boolean containsAll(Collection<?> c) {
4550             if (c != this) {
4551                 for (Object e : c) {
4552                     if (e == null || !contains(e))
4553                         return false;
4554                 }
4555             }
4556             return true;
4557         }
4558 
removeAll(Collection<?> c)4559         public final boolean removeAll(Collection<?> c) {
4560             if (c == null) throw new NullPointerException();
4561             boolean modified = false;
4562             for (Iterator<E> it = iterator(); it.hasNext();) {
4563                 if (c.contains(it.next())) {
4564                     it.remove();
4565                     modified = true;
4566                 }
4567             }
4568             return modified;
4569         }
4570 
retainAll(Collection<?> c)4571         public final boolean retainAll(Collection<?> c) {
4572             if (c == null) throw new NullPointerException();
4573             boolean modified = false;
4574             for (Iterator<E> it = iterator(); it.hasNext();) {
4575                 if (!c.contains(it.next())) {
4576                     it.remove();
4577                     modified = true;
4578                 }
4579             }
4580             return modified;
4581         }
4582 
4583     }
4584 
4585     /**
4586      * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4587      * which additions may optionally be enabled by mapping to a
4588      * common value.  This class cannot be directly instantiated.
4589      * See {@link #keySet(Object) keySet(V)},
4590      * {@link #newKeySet() newKeySet()},
4591      * {@link #newKeySet(int) newKeySet(int)}.
4592      *
4593      * @since 1.8
4594      */
4595     public static class KeySetView<K,V> extends CollectionView<K,V,K>
4596         implements Set<K>, java.io.Serializable {
4597         private static final long serialVersionUID = 7249069246763182397L;
4598         private final V value;
KeySetView(ConcurrentHashMap<K,V> map, V value)4599         KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
4600             super(map);
4601             this.value = value;
4602         }
4603 
4604         /**
4605          * Returns the default mapped value for additions,
4606          * or {@code null} if additions are not supported.
4607          *
4608          * @return the default mapped value for additions, or {@code null}
4609          * if not supported
4610          */
getMappedValue()4611         public V getMappedValue() { return value; }
4612 
4613         /**
4614          * {@inheritDoc}
4615          * @throws NullPointerException if the specified key is null
4616          */
contains(Object o)4617         public boolean contains(Object o) { return map.containsKey(o); }
4618 
4619         /**
4620          * Removes the key from this map view, by removing the key (and its
4621          * corresponding value) from the backing map.  This method does
4622          * nothing if the key is not in the map.
4623          *
4624          * @param  o the key to be removed from the backing map
4625          * @return {@code true} if the backing map contained the specified key
4626          * @throws NullPointerException if the specified key is null
4627          */
remove(Object o)4628         public boolean remove(Object o) { return map.remove(o) != null; }
4629 
4630         /**
4631          * @return an iterator over the keys of the backing map
4632          */
iterator()4633         public Iterator<K> iterator() {
4634             Node<K,V>[] t;
4635             ConcurrentHashMap<K,V> m = map;
4636             int f = (t = m.table) == null ? 0 : t.length;
4637             return new KeyIterator<K,V>(t, f, 0, f, m);
4638         }
4639 
4640         /**
4641          * Adds the specified key to this set view by mapping the key to
4642          * the default mapped value in the backing map, if defined.
4643          *
4644          * @param e key to be added
4645          * @return {@code true} if this set changed as a result of the call
4646          * @throws NullPointerException if the specified key is null
4647          * @throws UnsupportedOperationException if no default mapped value
4648          * for additions was provided
4649          */
add(K e)4650         public boolean add(K e) {
4651             V v;
4652             if ((v = value) == null)
4653                 throw new UnsupportedOperationException();
4654             return map.putVal(e, v, true) == null;
4655         }
4656 
4657         /**
4658          * Adds all of the elements in the specified collection to this set,
4659          * as if by calling {@link #add} on each one.
4660          *
4661          * @param c the elements to be inserted into this set
4662          * @return {@code true} if this set changed as a result of the call
4663          * @throws NullPointerException if the collection or any of its
4664          * elements are {@code null}
4665          * @throws UnsupportedOperationException if no default mapped value
4666          * for additions was provided
4667          */
addAll(Collection<? extends K> c)4668         public boolean addAll(Collection<? extends K> c) {
4669             boolean added = false;
4670             V v;
4671             if ((v = value) == null)
4672                 throw new UnsupportedOperationException();
4673             for (K e : c) {
4674                 if (map.putVal(e, v, true) == null)
4675                     added = true;
4676             }
4677             return added;
4678         }
4679 
hashCode()4680         public int hashCode() {
4681             int h = 0;
4682             for (K e : this)
4683                 h += e.hashCode();
4684             return h;
4685         }
4686 
equals(Object o)4687         public boolean equals(Object o) {
4688             Set<?> c;
4689             return ((o instanceof Set) &&
4690                     ((c = (Set<?>)o) == this ||
4691                      (containsAll(c) && c.containsAll(this))));
4692         }
4693 
spliterator()4694         public Spliterator<K> spliterator() {
4695             Node<K,V>[] t;
4696             ConcurrentHashMap<K,V> m = map;
4697             long n = m.sumCount();
4698             int f = (t = m.table) == null ? 0 : t.length;
4699             return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4700         }
4701 
forEach(Consumer<? super K> action)4702         public void forEach(Consumer<? super K> action) {
4703             if (action == null) throw new NullPointerException();
4704             Node<K,V>[] t;
4705             if ((t = map.table) != null) {
4706                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4707                 for (Node<K,V> p; (p = it.advance()) != null; )
4708                     action.accept(p.key);
4709             }
4710         }
4711     }
4712 
4713     /**
4714      * A view of a ConcurrentHashMap as a {@link Collection} of
4715      * values, in which additions are disabled. This class cannot be
4716      * directly instantiated. See {@link #values()}.
4717      */
4718     static final class ValuesView<K,V> extends CollectionView<K,V,V>
4719         implements Collection<V>, java.io.Serializable {
4720         private static final long serialVersionUID = 2249069246763182397L;
ValuesView(ConcurrentHashMap<K,V> map)4721         ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
contains(Object o)4722         public final boolean contains(Object o) {
4723             return map.containsValue(o);
4724         }
4725 
remove(Object o)4726         public final boolean remove(Object o) {
4727             if (o != null) {
4728                 for (Iterator<V> it = iterator(); it.hasNext();) {
4729                     if (o.equals(it.next())) {
4730                         it.remove();
4731                         return true;
4732                     }
4733                 }
4734             }
4735             return false;
4736         }
4737 
iterator()4738         public final Iterator<V> iterator() {
4739             ConcurrentHashMap<K,V> m = map;
4740             Node<K,V>[] t;
4741             int f = (t = m.table) == null ? 0 : t.length;
4742             return new ValueIterator<K,V>(t, f, 0, f, m);
4743         }
4744 
add(V e)4745         public final boolean add(V e) {
4746             throw new UnsupportedOperationException();
4747         }
addAll(Collection<? extends V> c)4748         public final boolean addAll(Collection<? extends V> c) {
4749             throw new UnsupportedOperationException();
4750         }
4751 
removeIf(Predicate<? super V> filter)4752         public boolean removeIf(Predicate<? super V> filter) {
4753             return map.removeValueIf(filter);
4754         }
4755 
spliterator()4756         public Spliterator<V> spliterator() {
4757             Node<K,V>[] t;
4758             ConcurrentHashMap<K,V> m = map;
4759             long n = m.sumCount();
4760             int f = (t = m.table) == null ? 0 : t.length;
4761             return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4762         }
4763 
forEach(Consumer<? super V> action)4764         public void forEach(Consumer<? super V> action) {
4765             if (action == null) throw new NullPointerException();
4766             Node<K,V>[] t;
4767             if ((t = map.table) != null) {
4768                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4769                 for (Node<K,V> p; (p = it.advance()) != null; )
4770                     action.accept(p.val);
4771             }
4772         }
4773     }
4774 
4775     /**
4776      * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4777      * entries.  This class cannot be directly instantiated. See
4778      * {@link #entrySet()}.
4779      */
4780     static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4781         implements Set<Map.Entry<K,V>>, java.io.Serializable {
4782         private static final long serialVersionUID = 2249069246763182397L;
EntrySetView(ConcurrentHashMap<K,V> map)4783         EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4784 
contains(Object o)4785         public boolean contains(Object o) {
4786             Object k, v, r; Map.Entry<?,?> e;
4787             return ((o instanceof Map.Entry) &&
4788                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4789                     (r = map.get(k)) != null &&
4790                     (v = e.getValue()) != null &&
4791                     (v == r || v.equals(r)));
4792         }
4793 
remove(Object o)4794         public boolean remove(Object o) {
4795             Object k, v; Map.Entry<?,?> e;
4796             return ((o instanceof Map.Entry) &&
4797                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4798                     (v = e.getValue()) != null &&
4799                     map.remove(k, v));
4800         }
4801 
4802         /**
4803          * @return an iterator over the entries of the backing map
4804          */
iterator()4805         public Iterator<Map.Entry<K,V>> iterator() {
4806             ConcurrentHashMap<K,V> m = map;
4807             Node<K,V>[] t;
4808             int f = (t = m.table) == null ? 0 : t.length;
4809             return new EntryIterator<K,V>(t, f, 0, f, m);
4810         }
4811 
add(Entry<K,V> e)4812         public boolean add(Entry<K,V> e) {
4813             return map.putVal(e.getKey(), e.getValue(), false) == null;
4814         }
4815 
addAll(Collection<? extends Entry<K,V>> c)4816         public boolean addAll(Collection<? extends Entry<K,V>> c) {
4817             boolean added = false;
4818             for (Entry<K,V> e : c) {
4819                 if (add(e))
4820                     added = true;
4821             }
4822             return added;
4823         }
4824 
removeIf(Predicate<? super Entry<K,V>> filter)4825         public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
4826             return map.removeEntryIf(filter);
4827         }
4828 
hashCode()4829         public final int hashCode() {
4830             int h = 0;
4831             Node<K,V>[] t;
4832             if ((t = map.table) != null) {
4833                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4834                 for (Node<K,V> p; (p = it.advance()) != null; ) {
4835                     h += p.hashCode();
4836                 }
4837             }
4838             return h;
4839         }
4840 
equals(Object o)4841         public final boolean equals(Object o) {
4842             Set<?> c;
4843             return ((o instanceof Set) &&
4844                     ((c = (Set<?>)o) == this ||
4845                      (containsAll(c) && c.containsAll(this))));
4846         }
4847 
spliterator()4848         public Spliterator<Map.Entry<K,V>> spliterator() {
4849             Node<K,V>[] t;
4850             ConcurrentHashMap<K,V> m = map;
4851             long n = m.sumCount();
4852             int f = (t = m.table) == null ? 0 : t.length;
4853             return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4854         }
4855 
forEach(Consumer<? super Map.Entry<K,V>> action)4856         public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4857             if (action == null) throw new NullPointerException();
4858             Node<K,V>[] t;
4859             if ((t = map.table) != null) {
4860                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4861                 for (Node<K,V> p; (p = it.advance()) != null; )
4862                     action.accept(new MapEntry<K,V>(p.key, p.val, map));
4863             }
4864         }
4865 
4866     }
4867 
4868     // -------------------------------------------------------
4869 
4870     /**
4871      * Base class for bulk tasks. Repeats some fields and code from
4872      * class Traverser, because we need to subclass CountedCompleter.
4873      */
4874     @SuppressWarnings("serial")
4875     abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4876         Node<K,V>[] tab;        // same as Traverser
4877         Node<K,V> next;
4878         TableStack<K,V> stack, spare;
4879         int index;
4880         int baseIndex;
4881         int baseLimit;
4882         final int baseSize;
4883         int batch;              // split control
4884 
BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t)4885         BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4886             super(par);
4887             this.batch = b;
4888             this.index = this.baseIndex = i;
4889             if ((this.tab = t) == null)
4890                 this.baseSize = this.baseLimit = 0;
4891             else if (par == null)
4892                 this.baseSize = this.baseLimit = t.length;
4893             else {
4894                 this.baseLimit = f;
4895                 this.baseSize = par.baseSize;
4896             }
4897         }
4898 
4899         /**
4900          * Same as Traverser version.
4901          */
advance()4902         final Node<K,V> advance() {
4903             Node<K,V> e;
4904             if ((e = next) != null)
4905                 e = e.next;
4906             for (;;) {
4907                 Node<K,V>[] t; int i, n;
4908                 if (e != null)
4909                     return next = e;
4910                 if (baseIndex >= baseLimit || (t = tab) == null ||
4911                     (n = t.length) <= (i = index) || i < 0)
4912                     return next = null;
4913                 if ((e = tabAt(t, i)) != null && e.hash < 0) {
4914                     if (e instanceof ForwardingNode) {
4915                         tab = ((ForwardingNode<K,V>)e).nextTable;
4916                         e = null;
4917                         pushState(t, i, n);
4918                         continue;
4919                     }
4920                     else if (e instanceof TreeBin)
4921                         e = ((TreeBin<K,V>)e).first;
4922                     else
4923                         e = null;
4924                 }
4925                 if (stack != null)
4926                     recoverState(n);
4927                 else if ((index = i + baseSize) >= n)
4928                     index = ++baseIndex;
4929             }
4930         }
4931 
pushState(Node<K,V>[] t, int i, int n)4932         private void pushState(Node<K,V>[] t, int i, int n) {
4933             TableStack<K,V> s = spare;
4934             if (s != null)
4935                 spare = s.next;
4936             else
4937                 s = new TableStack<K,V>();
4938             s.tab = t;
4939             s.length = n;
4940             s.index = i;
4941             s.next = stack;
4942             stack = s;
4943         }
4944 
recoverState(int n)4945         private void recoverState(int n) {
4946             TableStack<K,V> s; int len;
4947             while ((s = stack) != null && (index += (len = s.length)) >= n) {
4948                 n = len;
4949                 index = s.index;
4950                 tab = s.tab;
4951                 s.tab = null;
4952                 TableStack<K,V> next = s.next;
4953                 s.next = spare; // save for reuse
4954                 stack = next;
4955                 spare = s;
4956             }
4957             if (s == null && (index += baseSize) >= n)
4958                 index = ++baseIndex;
4959         }
4960     }
4961 
4962     /*
4963      * Task classes. Coded in a regular but ugly format/style to
4964      * simplify checks that each variant differs in the right way from
4965      * others. The null screenings exist because compilers cannot tell
4966      * that we've already null-checked task arguments, so we force
4967      * simplest hoisted bypass to help avoid convoluted traps.
4968      */
    /** Performs the given action for each key, splitting work in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) { // null screening (see task-class comment)
                // Fork subtasks over the upper half of the remaining range
                // while the batch budget permits further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's (possibly narrowed) remaining range.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.key);
                propagateCompletion();
            }
        }
    }
4995 
    /** Performs the given action for each value, splitting work in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super V> action;
            if ((action = this.action) != null) { // null screening (see task-class comment)
                // Fork subtasks over the upper half of the remaining range
                // while the batch budget permits further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's (possibly narrowed) remaining range.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }
5022 
    /** Performs the given action for each entry, splitting work in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) { // null screening (see task-class comment)
                // Fork subtasks over the upper half of the remaining range
                // while the batch budget permits further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's (possibly narrowed) remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }
5049 
    /** Performs the given action for each (key, value) pair, splitting work in parallel. */
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) { // null screening (see task-class comment)
                // Fork subtasks over the upper half of the remaining range
                // while the batch budget permits further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's (possibly narrowed) remaining range.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p.key, p.val);
                propagateCompletion();
            }
        }
    }
5076 
    /**
     * Applies a transformation to each key and performs the given action
     * on each non-null transformed result, splitting work in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) { // null screening (see task-class comment)
                // Fork subtasks over the upper half of the remaining range
                // while the batch budget permits further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation result means "no element for this key".
                    if ((u = transformer.apply(p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5109 
    /**
     * Applies a transformation to each value and performs the given action
     * on each non-null transformed result, splitting work in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) { // null screening (see task-class comment)
                // Fork subtasks over the upper half of the remaining range
                // while the batch budget permits further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation result means "no element for this value".
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5142 
    /**
     * Applies a transformation to each entry and performs the given action
     * on each non-null transformed result, splitting work in parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) { // null screening (see task-class comment)
                // Fork subtasks over the upper half of the remaining range
                // while the batch budget permits further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation result means "no element for this entry".
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5175 
    /**
     * Applies a transformation to each (key, value) pair and performs the
     * given action on each non-null transformed result, splitting work in
     * parallel.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) { // null screening (see task-class comment)
                // Fork subtasks over the upper half of the remaining range
                // while the batch budget permits further splitting.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // A null transformation result means "no element for this mapping".
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5209 
    /**
     * Searches keys with the given function until it returns non-null,
     * recording the first such result and cancelling remaining work.
     */
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) { // null screening (see task-class comment)
                // Fork subtasks while splitting is profitable, quitting
                // early if another task has already found a match.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Exhausted this range without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // First non-null result wins; complete the whole computation.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5253 
    /**
     * Searches values with the given function until it returns non-null,
     * recording the first such result and cancelling remaining work.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) { // null screening (see task-class comment)
                // Fork subtasks while splitting is profitable, quitting
                // early if another task has already found a match.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Exhausted this range without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        // First non-null result wins; complete the whole computation.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5297 
    /**
     * Searches entries with the given function until it returns non-null,
     * recording the first such result and cancelling remaining work.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) { // null screening (see task-class comment)
                // Fork subtasks while splitting is profitable, quitting
                // early if another task has already found a match.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Exhausted this range without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        // First non-null result wins; complete the whole computation.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return; // result is set; nothing more to do in this task
                    }
                }
            }
        }
    }
5341 
    /**
     * Task that concurrently searches (key, value) pairs, stopping as
     * soon as any application of {@code searchFunction} returns a
     * non-null value.  The first such value is published through the
     * shared {@code result} reference, which all sibling tasks poll
     * so they can terminate early.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result; // shared across all subtasks; null until found
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                // Split: repeatedly fork the right half of the remaining
                // range until the batch budget is exhausted, bailing out
                // if some task has already produced a result.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan this task's own portion until a result appears
                // (here or in another task) or the portion is exhausted.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Range exhausted with no match: complete normally.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        // First (winning) result completes the whole tree.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5385 
    /**
     * Task that reduces all keys in its range using the given reducer.
     * Forked right-hand subtasks are kept on a linked {@code rights}
     * stack and their results are merged bottom-up through the
     * CountedCompleter completion chain.  A null {@code result} means
     * the (sub)range contributed no elements.
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;  // reduction of this task's portion; null if empty
        ReduceKeysTask<K,V> rights, nextRight; // stack links of forked subtasks
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Split: fork right halves until the batch budget runs out,
                // pushing each onto the rights stack for later merging.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Sequentially reduce the keys remaining in this portion
                // (null keys are skipped defensively).
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // Merge: for each completed ancestor, fold the results of
                // its finished right subtasks into its own result.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5433 
    /**
     * Task that reduces all values in its range using the given
     * reducer.  Forked right-hand subtasks are kept on a linked
     * {@code rights} stack and merged bottom-up through the
     * CountedCompleter completion chain.  A null {@code result} means
     * the (sub)range contributed no elements.
     */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;  // reduction of this task's portion; null if empty
        ReduceValuesTask<K,V> rights, nextRight; // stack links of forked subtasks
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Split: fork right halves until the batch budget runs out.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Sequentially reduce the values remaining in this portion.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5481 
    /**
     * Task that reduces all entries in its range using the given
     * reducer (nodes themselves serve as the Map.Entry arguments).
     * Forked right-hand subtasks are kept on a linked {@code rights}
     * stack and merged bottom-up through the CountedCompleter
     * completion chain.  A null {@code result} means the (sub)range
     * contributed no elements.
     */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;  // reduction of this task's portion; null if empty
        ReduceEntriesTask<K,V> rights, nextRight; // stack links of forked subtasks
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                // Split: fork right halves until the batch budget runs out.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Sequentially reduce the entries remaining in this portion;
                // each Node is passed directly as a Map.Entry.
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5527 
    /**
     * Task that transforms each key via {@code transformer} and
     * reduces the non-null transformed values with {@code reducer}.
     * Elements whose transformation is null are skipped.  Forked
     * right-hand subtasks are merged bottom-up through the
     * CountedCompleter completion chain; a null {@code result} means
     * no element in the (sub)range produced a value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;  // reduction of this task's portion; null if empty
        MapReduceKeysTask<K,V,U> rights, nextRight; // stack links of forked subtasks
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork right halves until the batch budget runs out.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform-and-reduce the keys remaining in this portion,
                // skipping null transformations.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5581 
    /**
     * Task that transforms each value via {@code transformer} and
     * reduces the non-null transformed values with {@code reducer}.
     * Elements whose transformation is null are skipped.  Forked
     * right-hand subtasks are merged bottom-up through the
     * CountedCompleter completion chain; a null {@code result} means
     * no element in the (sub)range produced a value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;  // reduction of this task's portion; null if empty
        MapReduceValuesTask<K,V,U> rights, nextRight; // stack links of forked subtasks
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork right halves until the batch budget runs out.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform-and-reduce the values remaining in this portion,
                // skipping null transformations.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5635 
    /**
     * Task that transforms each entry (the Node itself, passed as a
     * Map.Entry) via {@code transformer} and reduces the non-null
     * transformed values with {@code reducer}.  Elements whose
     * transformation is null are skipped.  Forked right-hand subtasks
     * are merged bottom-up through the CountedCompleter completion
     * chain; a null {@code result} means no element in the (sub)range
     * produced a value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;  // reduction of this task's portion; null if empty
        MapReduceEntriesTask<K,V,U> rights, nextRight; // stack links of forked subtasks
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork right halves until the batch budget runs out.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform-and-reduce the entries remaining in this portion,
                // skipping null transformations.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5689 
    /**
     * Task that transforms each (key, value) pair via
     * {@code transformer} and reduces the non-null transformed values
     * with {@code reducer}.  Elements whose transformation is null
     * are skipped.  Forked right-hand subtasks are merged bottom-up
     * through the CountedCompleter completion chain; a null
     * {@code result} means no element in the (sub)range produced a
     * value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;  // reduction of this task's portion; null if empty
        MapReduceMappingsTask<K,V,U> rights, nextRight; // stack links of forked subtasks
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork right halves until the batch budget runs out.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform-and-reduce the pairs remaining in this portion,
                // skipping null transformations.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5743 
    /**
     * Task that maps each key to a double and reduces with the given
     * operator, starting from the identity value {@code basis}.
     * Forked right-hand subtasks (each also seeded with the basis)
     * are merged bottom-up through the CountedCompleter completion
     * chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super K> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;  // identity element for the reduction
        double result;       // reduction of this task's portion
        MapReduceKeysToDoubleTask<K,V> rights, nextRight; // stack links of forked subtasks
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super K> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super K> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split: fork right halves until the batch budget runs out;
                // r still equals basis here, so subtasks share the identity.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Transform-and-reduce the keys remaining in this portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5793 
    /**
     * Task that maps each value to a double and reduces with the
     * given operator, starting from the identity value {@code basis}.
     * Forked right-hand subtasks (each also seeded with the basis)
     * are merged bottom-up through the CountedCompleter completion
     * chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;  // identity element for the reduction
        double result;       // reduction of this task's portion
        MapReduceValuesToDoubleTask<K,V> rights, nextRight; // stack links of forked subtasks
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split: fork right halves until the batch budget runs out;
                // r still equals basis here, so subtasks share the identity.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Transform-and-reduce the values remaining in this portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5843 
    /**
     * Task that maps each entry (the Node itself, passed as a
     * Map.Entry) to a double and reduces with the given operator,
     * starting from the identity value {@code basis}.  Forked
     * right-hand subtasks (each also seeded with the basis) are
     * merged bottom-up through the CountedCompleter completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<Map.Entry<K,V>> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;  // identity element for the reduction
        double result;       // reduction of this task's portion
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight; // stack links of forked subtasks
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<Map.Entry<K,V>> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<Map.Entry<K,V>> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split: fork right halves until the batch budget runs out;
                // r still equals basis here, so subtasks share the identity.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Transform-and-reduce the entries remaining in this portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5893 
    /**
     * Task that maps each (key, value) pair to a double and reduces
     * with the given operator, starting from the identity value
     * {@code basis}.  Forked right-hand subtasks (each also seeded
     * with the basis) are merged bottom-up through the
     * CountedCompleter completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleBiFunction<? super K, ? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;  // identity element for the reduction
        double result;       // reduction of this task's portion
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight; // stack links of forked subtasks
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ToDoubleBiFunction<? super K, ? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleBiFunction<? super K, ? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split: fork right halves until the batch budget runs out;
                // r still equals basis here, so subtasks share the identity.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Transform-and-reduce the pairs remaining in this portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
                result = r;
                // Merge finished right subtasks into each completed ancestor.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5943 
    /**
     * Fork/join task computing a long-valued map-reduce over the map's keys:
     * applies {@code transformer} to each key and combines the results with
     * {@code reducer}, starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super K> transformer;
        final LongBinaryOperator reducer;
        final long basis;   // identity element for the reduction
        long result;        // this subtask's reduced value
        // rights: chain of forked right-hand subtasks; nextRight links this
        // task into its parent's rights chain.
        MapReduceKeysToLongTask<K,V> rights, nextRight;
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split off and fork the right half of the remaining range
                // while batch permits; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's remaining portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;  // unlink as we fold
                    }
                }
            }
        }
    }
5993 
    /**
     * Fork/join task computing a long-valued map-reduce over the map's
     * values: applies {@code transformer} to each value and combines the
     * results with {@code reducer}, starting from the identity value
     * {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;   // identity element for the reduction
        long result;        // this subtask's reduced value
        // rights: chain of forked right-hand subtasks; nextRight links this
        // task into its parent's rights chain.
        MapReduceValuesToLongTask<K,V> rights, nextRight;
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split off and fork the right half of the remaining range
                // while batch permits; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's remaining portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;  // unlink as we fold
                    }
                }
            }
        }
    }
6043 
    /**
     * Fork/join task computing a long-valued map-reduce over the map's
     * entries: applies {@code transformer} to each entry node and combines
     * the results with {@code reducer}, starting from the identity value
     * {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer;
        final LongBinaryOperator reducer;
        final long basis;   // identity element for the reduction
        long result;        // this subtask's reduced value
        // rights: chain of forked right-hand subtasks; nextRight links this
        // task into its parent's rights chain.
        MapReduceEntriesToLongTask<K,V> rights, nextRight;
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split off and fork the right half of the remaining range
                // while batch permits; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's remaining portion; the
                // Node itself serves as the Map.Entry passed to transformer.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;  // unlink as we fold
                    }
                }
            }
        }
    }
6093 
    /**
     * Fork/join task computing a long-valued map-reduce over the map's
     * (key, value) pairs: applies {@code transformer} to each mapping and
     * combines the results with {@code reducer}, starting from the identity
     * value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;   // identity element for the reduction
        long result;        // this subtask's reduced value
        // rights: chain of forked right-hand subtasks; nextRight links this
        // task into its parent's rights chain.
        MapReduceMappingsToLongTask<K,V> rights, nextRight;
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split off and fork the right half of the remaining range
                // while batch permits; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's remaining portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;  // unlink as we fold
                    }
                }
            }
        }
    }
6143 
    /**
     * Fork/join task computing an int-valued map-reduce over the map's keys:
     * applies {@code transformer} to each key and combines the results with
     * {@code reducer}, starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super K> transformer;
        final IntBinaryOperator reducer;
        final int basis;   // identity element for the reduction
        int result;        // this subtask's reduced value
        // rights: chain of forked right-hand subtasks; nextRight links this
        // task into its parent's rights chain.
        MapReduceKeysToIntTask<K,V> rights, nextRight;
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ToIntFunction<? super K> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super K> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off and fork the right half of the remaining range
                // while batch permits; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's remaining portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;  // unlink as we fold
                    }
                }
            }
        }
    }
6193 
    /**
     * Fork/join task computing an int-valued map-reduce over the map's
     * values: applies {@code transformer} to each value and combines the
     * results with {@code reducer}, starting from the identity value
     * {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;   // identity element for the reduction
        int result;        // this subtask's reduced value
        // rights: chain of forked right-hand subtasks; nextRight links this
        // task into its parent's rights chain.
        MapReduceValuesToIntTask<K,V> rights, nextRight;
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ToIntFunction<? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off and fork the right half of the remaining range
                // while batch permits; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's remaining portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;  // unlink as we fold
                    }
                }
            }
        }
    }
6243 
    /**
     * Fork/join task computing an int-valued map-reduce over the map's
     * entries: applies {@code transformer} to each entry node and combines
     * the results with {@code reducer}, starting from the identity value
     * {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<Map.Entry<K,V>> transformer;
        final IntBinaryOperator reducer;
        final int basis;   // identity element for the reduction
        int result;        // this subtask's reduced value
        // rights: chain of forked right-hand subtasks; nextRight links this
        // task into its parent's rights chain.
        MapReduceEntriesToIntTask<K,V> rights, nextRight;
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ToIntFunction<Map.Entry<K,V>> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<Map.Entry<K,V>> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off and fork the right half of the remaining range
                // while batch permits; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's remaining portion; the
                // Node itself serves as the Map.Entry passed to transformer.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;  // unlink as we fold
                    }
                }
            }
        }
    }
6293 
    /**
     * Fork/join task computing an int-valued map-reduce over the map's
     * (key, value) pairs: applies {@code transformer} to each mapping and
     * combines the results with {@code reducer}, starting from the identity
     * value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntBiFunction<? super K, ? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;   // identity element for the reduction
        int result;        // this subtask's reduced value
        // rights: chain of forked right-hand subtasks; nextRight links this
        // task into its parent's rights chain.
        MapReduceMappingsToIntTask<K,V> rights, nextRight;
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ToIntBiFunction<? super K, ? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntBiFunction<? super K, ? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Split off and fork the right half of the remaining range
                // while batch permits; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);  // account for the forked child
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce this task's remaining portion.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
                result = r;
                // Fold completed subtasks' results into their parents.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;  // unlink as we fold
                    }
                }
            }
        }
    }
6343 
    // Unsafe mechanics
    private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
    // Raw field offsets of ConcurrentHashMap fields, resolved reflectively
    // below for direct access through Unsafe.
    private static final long SIZECTL;
    private static final long TRANSFERINDEX;
    private static final long BASECOUNT;
    private static final long CELLSBUSY;
    // Offset of CounterCell.value, resolved the same way.
    private static final long CELLVALUE;
    // Base offset and element shift for addressing elements of Node[] arrays.
    private static final int ABASE;
    private static final int ASHIFT;

    static {
        try {
            SIZECTL = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("transferIndex"));
            BASECOUNT = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("cellsBusy"));

            CELLVALUE = U.objectFieldOffset
                (CounterCell.class.getDeclaredField("value"));

            ABASE = U.arrayBaseOffset(Node[].class);
            int scale = U.arrayIndexScale(Node[].class);
            // The element size must be a power of two so that an index can be
            // converted to a byte offset with a shift rather than a multiply.
            if ((scale & (scale - 1)) != 0)
                throw new Error("array index scale not a power of two");
            // ASHIFT = log2(scale)
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (ReflectiveOperationException e) {
            // Cannot recover: offsets are required for all subsequent access.
            throw new Error(e);
        }

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;
    }
6381 }
6382