1 /*
2  * Written by Doug Lea, Bill Scherer, and Michael Scott with
3  * assistance from members of JCP JSR-166 Expert Group and released to
4  * the public domain, as explained at
5  * http://creativecommons.org/publicdomain/zero/1.0/
6  */
7 
8 package java.util.concurrent;
9 
10 import java.util.AbstractQueue;
11 import java.util.Collection;
12 import java.util.Collections;
13 import java.util.Iterator;
14 import java.util.Spliterator;
15 import java.util.Spliterators;
16 import java.util.concurrent.locks.LockSupport;
17 import java.util.concurrent.locks.ReentrantLock;
18 
19 // BEGIN android-note
20 // removed link to collections framework docs
21 // END android-note
22 
23 /**
24  * A {@linkplain BlockingQueue blocking queue} in which each insert
25  * operation must wait for a corresponding remove operation by another
26  * thread, and vice versa.  A synchronous queue does not have any
27  * internal capacity, not even a capacity of one.  You cannot
28  * {@code peek} at a synchronous queue because an element is only
29  * present when you try to remove it; you cannot insert an element
30  * (using any method) unless another thread is trying to remove it;
31  * you cannot iterate as there is nothing to iterate.  The
32  * <em>head</em> of the queue is the element that the first queued
33  * inserting thread is trying to add to the queue; if there is no such
34  * queued thread then no element is available for removal and
35  * {@code poll()} will return {@code null}.  For purposes of other
36  * {@code Collection} methods (for example {@code contains}), a
37  * {@code SynchronousQueue} acts as an empty collection.  This queue
38  * does not permit {@code null} elements.
39  *
40  * <p>Synchronous queues are similar to rendezvous channels used in
41  * CSP and Ada. They are well suited for handoff designs, in which an
42  * object running in one thread must sync up with an object running
43  * in another thread in order to hand it some information, event, or
44  * task.
45  *
46  * <p>This class supports an optional fairness policy for ordering
47  * waiting producer and consumer threads.  By default, this ordering
48  * is not guaranteed. However, a queue constructed with fairness set
49  * to {@code true} grants threads access in FIFO order.
50  *
51  * <p>This class and its iterator implement all of the
52  * <em>optional</em> methods of the {@link Collection} and {@link
53  * Iterator} interfaces.
54  *
55  * @since 1.5
56  * @author Doug Lea and Bill Scherer and Michael Scott
57  * @param <E> the type of elements held in this queue
58  */
59 public class SynchronousQueue<E> extends AbstractQueue<E>
60     implements BlockingQueue<E>, java.io.Serializable {
61     private static final long serialVersionUID = -3223113410248163686L;
62 
63     /*
64      * This class implements extensions of the dual stack and dual
65      * queue algorithms described in "Nonblocking Concurrent Objects
66      * with Condition Synchronization", by W. N. Scherer III and
67      * M. L. Scott.  18th Annual Conf. on Distributed Computing,
68      * Oct. 2004 (see also
69      * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/duals.html).
70      * The (Lifo) stack is used for non-fair mode, and the (Fifo)
71      * queue for fair mode. The performance of the two is generally
72      * similar. Fifo usually supports higher throughput under
73      * contention but Lifo maintains higher thread locality in common
74      * applications.
75      *
76      * A dual queue (and similarly stack) is one that at any given
77      * time either holds "data" -- items provided by put operations,
78      * or "requests" -- slots representing take operations, or is
79      * empty. A call to "fulfill" (i.e., a call requesting an item
80      * from a queue holding data or vice versa) dequeues a
81      * complementary node.  The most interesting feature of these
82      * queues is that any operation can figure out which mode the
83      * queue is in, and act accordingly without needing locks.
84      *
85      * Both the queue and stack extend abstract class Transferer
86      * defining the single method transfer that does a put or a
87      * take. These are unified into a single method because in dual
88      * data structures, the put and take operations are symmetrical,
89      * so nearly all code can be combined. The resulting transfer
90      * methods are on the long side, but are easier to follow than
91      * they would be if broken up into nearly-duplicated parts.
92      *
93      * The queue and stack data structures share many conceptual
94      * similarities but very few concrete details. For simplicity,
95      * they are kept distinct so that they can later evolve
96      * separately.
97      *
98      * The algorithms here differ from the versions in the above paper
99      * in extending them for use in synchronous queues, as well as
100      * dealing with cancellation. The main differences include:
101      *
102      *  1. The original algorithms used bit-marked pointers, but
103      *     the ones here use mode bits in nodes, leading to a number
104      *     of further adaptations.
105      *  2. SynchronousQueues must block threads waiting to become
106      *     fulfilled.
107      *  3. Support for cancellation via timeout and interrupts,
108      *     including cleaning out cancelled nodes/threads
109      *     from lists to avoid garbage retention and memory depletion.
110      *
111      * Blocking is mainly accomplished using LockSupport park/unpark,
112      * except that nodes that appear to be the next ones to become
113      * fulfilled first spin a bit (on multiprocessors only). On very
114      * busy synchronous queues, spinning can dramatically improve
115      * throughput. And on less busy ones, the amount of spinning is
116      * small enough not to be noticeable.
117      *
118      * Cleaning is done in different ways in queues vs stacks.  For
119      * queues, we can almost always remove a node immediately in O(1)
120      * time (modulo retries for consistency checks) when it is
121      * cancelled. But if it may be pinned as the current tail, it must
122      * wait until some subsequent cancellation. For stacks, we need a
123      * potentially O(n) traversal to be sure that we can remove the
124      * node, but this can run concurrently with other threads
125      * accessing the stack.
126      *
127      * While garbage collection takes care of most node reclamation
128      * issues that otherwise complicate nonblocking algorithms, care
129      * is taken to "forget" references to data, other nodes, and
130      * threads that might be held on to long-term by blocked
131      * threads. In cases where setting to null would otherwise
132      * conflict with main algorithms, this is done by changing a
133      * node's link to now point to the node itself. This doesn't arise
134      * much for Stack nodes (because blocked threads do not hang on to
135      * old head pointers), but references in Queue nodes must be
136      * aggressively forgotten to avoid reachability of everything any
137      * node has ever referred to since arrival.
138      */
139 
    /**
     * Shared internal API for dual stacks and queues.
     * A single {@code transfer} method expresses both put and take:
     * in dual data structures the two operations are symmetrical, so
     * the mode is encoded by whether {@code e} is null.
     */
    abstract static class Transferer<E> {
        /**
         * Performs a put or take.
         *
         * @param e if non-null, the item to be handed to a consumer;
         *          if null, requests that transfer return an item
         *          offered by producer.
         * @param timed if this operation should timeout
         * @param nanos the timeout, in nanoseconds
         * @return if non-null, the item provided or received; if null,
         *         the operation failed due to timeout or interrupt --
         *         the caller can distinguish which of these occurred
         *         by checking Thread.interrupted.
         */
        abstract E transfer(E e, boolean timed, long nanos);
    }
159 
    /**
     * The number of times to spin before blocking in timed waits.
     * The value is empirically derived -- it works well across a
     * variety of processors and OSes. Empirically, the best value
     * seems not to vary with number of CPUs (beyond 2) so is just
     * a constant.
     */
    static final int MAX_TIMED_SPINS =
        (Runtime.getRuntime().availableProcessors() < 2) ? 0 : 32; // no spinning on uniprocessors

    /**
     * The number of times to spin before blocking in untimed waits.
     * This is greater than timed value because untimed waits spin
     * faster since they don't need to check times on each spin.
     */
    static final int MAX_UNTIMED_SPINS = MAX_TIMED_SPINS * 16;

    /**
     * The number of nanoseconds for which it is faster to spin
     * rather than to use timed park. A rough estimate suffices.
     */
    static final long SPIN_FOR_TIMEOUT_THRESHOLD = 1000L;
182 
    /** Dual stack, used in non-fair (LIFO) mode. */
    static final class TransferStack<E> extends Transferer<E> {
        /*
         * This extends Scherer-Scott dual stack algorithm, differing,
         * among other ways, by using "covering" nodes rather than
         * bit-marked pointers: Fulfilling operations push on marker
         * nodes (with FULFILLING bit set in mode) to reserve a spot
         * to match a waiting node.
         */

        /* Modes for SNodes, ORed together in node fields */
        /** Node represents an unfulfilled consumer */
        static final int REQUEST    = 0;
        /** Node represents an unfulfilled producer */
        static final int DATA       = 1;
        /** Node is fulfilling another unfulfilled DATA or REQUEST */
        static final int FULFILLING = 2;

        /** Returns true if m has fulfilling bit set. */
        static boolean isFulfilling(int m) { return (m & FULFILLING) != 0; }

        /** Node class for TransferStacks. */
        static final class SNode {
            volatile SNode next;        // next node in stack
            volatile SNode match;       // the node matched to this
            volatile Thread waiter;     // to control park/unpark
            Object item;                // data; or null for REQUESTs
            int mode;
            // Note: item and mode fields don't need to be volatile
            // since they are always written before, and read after,
            // other volatile/atomic operations.

            SNode(Object item) {
                this.item = item;
            }

            boolean casNext(SNode cmp, SNode val) {
                // Cheap volatile-read pre-check skips a CAS that is
                // already known to fail.
                return cmp == next &&
                    U.compareAndSwapObject(this, NEXT, cmp, val);
            }

            /**
             * Tries to match node s to this node, if so, waking up thread.
             * Fulfillers call tryMatch to identify their waiters.
             * Waiters block until they have been matched.
             *
             * @param s the node to match
             * @return true if successfully matched to s
             */
            boolean tryMatch(SNode s) {
                if (match == null &&
                    U.compareAndSwapObject(this, MATCH, null, s)) {
                    Thread w = waiter;
                    if (w != null) {    // waiters need at most one unpark
                        waiter = null;
                        LockSupport.unpark(w);
                    }
                    return true;
                }
                // Lost or skipped the CAS: still succeed if some other
                // thread already matched this node to s.
                return match == s;
            }

            /**
             * Tries to cancel a wait by matching node to itself.
             */
            void tryCancel() {
                U.compareAndSwapObject(this, MATCH, null, this);
            }

            boolean isCancelled() {
                return match == this;   // self-match encodes cancellation
            }

            // Unsafe mechanics
            private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
            private static final long MATCH;
            private static final long NEXT;

            static {
                try {
                    MATCH = U.objectFieldOffset
                        (SNode.class.getDeclaredField("match"));
                    NEXT = U.objectFieldOffset
                        (SNode.class.getDeclaredField("next"));
                } catch (ReflectiveOperationException e) {
                    throw new Error(e);
                }
            }
        }

        /** The head (top) of the stack */
        volatile SNode head;

        boolean casHead(SNode h, SNode nh) {
            // Pre-check head identity before CASing to reduce wasted CAS
            // attempts under contention.
            return h == head &&
                U.compareAndSwapObject(this, HEAD, h, nh);
        }

        /**
         * Creates or resets fields of a node. Called only from transfer
         * where the node to push on stack is lazily created and
         * reused when possible to help reduce intervals between reads
         * and CASes of head and to avoid surges of garbage when CASes
         * to push nodes fail due to contention.
         */
        static SNode snode(SNode s, Object e, SNode next, int mode) {
            if (s == null) s = new SNode(e);
            s.mode = mode;
            s.next = next;
            return s;
        }

        /**
         * Puts or takes an item.
         */
        @SuppressWarnings("unchecked")
        E transfer(E e, boolean timed, long nanos) {
            /*
             * Basic algorithm is to loop trying one of three actions:
             *
             * 1. If apparently empty or already containing nodes of same
             *    mode, try to push node on stack and wait for a match,
             *    returning it, or null if cancelled.
             *
             * 2. If apparently containing node of complementary mode,
             *    try to push a fulfilling node on to stack, match
             *    with corresponding waiting node, pop both from
             *    stack, and return matched item. The matching or
             *    unlinking might not actually be necessary because of
             *    other threads performing action 3:
             *
             * 3. If top of stack already holds another fulfilling node,
             *    help it out by doing its match and/or pop
             *    operations, and then continue. The code for helping
             *    is essentially the same as for fulfilling, except
             *    that it doesn't return the item.
             */

            SNode s = null; // constructed/reused as needed
            int mode = (e == null) ? REQUEST : DATA;

            for (;;) {
                SNode h = head;
                if (h == null || h.mode == mode) {  // empty or same-mode
                    if (timed && nanos <= 0L) {     // can't wait
                        if (h != null && h.isCancelled())
                            casHead(h, h.next);     // pop cancelled node
                        else
                            return null;
                    } else if (casHead(h, s = snode(s, e, h, mode))) {
                        SNode m = awaitFulfill(s, timed, nanos);
                        if (m == s) {               // wait was cancelled
                            clean(s);
                            return null;
                        }
                        if ((h = head) != null && h.next == s)
                            casHead(h, s.next);     // help s's fulfiller
                        return (E) ((mode == REQUEST) ? m.item : s.item);
                    }
                } else if (!isFulfilling(h.mode)) { // try to fulfill
                    if (h.isCancelled())            // already cancelled
                        casHead(h, h.next);         // pop and retry
                    else if (casHead(h, s=snode(s, e, h, FULFILLING|mode))) {
                        for (;;) { // loop until matched or waiters disappear
                            SNode m = s.next;       // m is s's match
                            if (m == null) {        // all waiters are gone
                                casHead(s, null);   // pop fulfill node
                                s = null;           // use new node next time
                                break;              // restart main loop
                            }
                            SNode mn = m.next;
                            if (m.tryMatch(s)) {
                                casHead(s, mn);     // pop both s and m
                                return (E) ((mode == REQUEST) ? m.item : s.item);
                            } else                  // lost match
                                s.casNext(m, mn);   // help unlink
                        }
                    }
                } else {                            // help a fulfiller
                    SNode m = h.next;               // m is h's match
                    if (m == null)                  // waiter is gone
                        casHead(h, null);           // pop fulfilling node
                    else {
                        SNode mn = m.next;
                        if (m.tryMatch(h))          // help match
                            casHead(h, mn);         // pop both h and m
                        else                        // lost match
                            h.casNext(m, mn);       // help unlink
                    }
                }
            }
        }

        /**
         * Spins/blocks until node s is matched by a fulfill operation.
         *
         * @param s the waiting node
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched node, or s if cancelled
         */
        SNode awaitFulfill(SNode s, boolean timed, long nanos) {
            /*
             * When a node/thread is about to block, it sets its waiter
             * field and then rechecks state at least one more time
             * before actually parking, thus covering race vs
             * fulfiller noticing that waiter is non-null so should be
             * woken.
             *
             * When invoked by nodes that appear at the point of call
             * to be at the head of the stack, calls to park are
             * preceded by spins to avoid blocking when producers and
             * consumers are arriving very close in time.  This can
             * happen enough to bother only on multiprocessors.
             *
             * The order of checks for returning out of main loop
             * reflects fact that interrupts have precedence over
             * normal returns, which have precedence over
             * timeouts. (So, on timeout, one last check for match is
             * done before giving up.) Except that calls from untimed
             * SynchronousQueue.{poll/offer} don't check interrupts
             * and don't wait at all, so are trapped in transfer
             * method rather than calling awaitFulfill.
             */
            final long deadline = timed ? System.nanoTime() + nanos : 0L;
            Thread w = Thread.currentThread();
            int spins = shouldSpin(s)
                ? (timed ? MAX_TIMED_SPINS : MAX_UNTIMED_SPINS)
                : 0;
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel();
                SNode m = s.match;
                if (m != null)
                    return m;
                if (timed) {
                    nanos = deadline - System.nanoTime();
                    if (nanos <= 0L) {
                        s.tryCancel();
                        continue;   // re-check match once more before reporting timeout
                    }
                }
                if (spins > 0)
                    spins = shouldSpin(s) ? (spins - 1) : 0;
                else if (s.waiter == null)
                    s.waiter = w; // establish waiter so can park next iter
                else if (!timed)
                    LockSupport.park(this);     // this stack serves as the park blocker
                else if (nanos > SPIN_FOR_TIMEOUT_THRESHOLD)
                    LockSupport.parkNanos(this, nanos);
                // else: remaining time below threshold -- keep looping
                // (spinning) rather than paying the cost of a timed park
            }
        }

        /**
         * Returns true if node s is at head or there is an active
         * fulfiller.
         */
        boolean shouldSpin(SNode s) {
            SNode h = head;
            return (h == s || h == null || isFulfilling(h.mode));
        }

        /**
         * Unlinks s from the stack.
         */
        void clean(SNode s) {
            s.item = null;   // forget item
            s.waiter = null; // forget thread

            /*
             * At worst we may need to traverse entire stack to unlink
             * s. If there are multiple concurrent calls to clean, we
             * might not see s if another thread has already removed
             * it. But we can stop when we see any node known to
             * follow s. We use s.next unless it too is cancelled, in
             * which case we try the node one past. We don't check any
             * further because we don't want to doubly traverse just to
             * find sentinel.
             */

            SNode past = s.next;
            if (past != null && past.isCancelled())
                past = past.next;

            // Absorb cancelled nodes at head
            SNode p;
            while ((p = head) != null && p != past && p.isCancelled())
                casHead(p, p.next);

            // Unsplice embedded nodes
            while (p != null && p != past) {
                SNode n = p.next;
                if (n != null && n.isCancelled())
                    p.casNext(n, n.next);
                else
                    p = n;
            }
        }

        // Unsafe mechanics
        private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
        private static final long HEAD;
        static {
            try {
                HEAD = U.objectFieldOffset
                    (TransferStack.class.getDeclaredField("head"));
            } catch (ReflectiveOperationException e) {
                throw new Error(e);
            }
        }
    }
494 
495     /** Dual Queue */
496     static final class TransferQueue<E> extends Transferer<E> {
497         /*
498          * This extends Scherer-Scott dual queue algorithm, differing,
499          * among other ways, by using modes within nodes rather than
500          * marked pointers. The algorithm is a little simpler than
501          * that for stacks because fulfillers do not need explicit
502          * nodes, and matching is done by CAS'ing QNode.item field
503          * from non-null to null (for put) or vice versa (for take).
504          */
505 
        /** Node class for TransferQueue. */
        static final class QNode {
            volatile QNode next;          // next node in queue
            volatile Object item;         // CAS'ed to or from null
            volatile Thread waiter;       // to control park/unpark
            final boolean isData;         // true for put (data) nodes, false for take (request) nodes

            QNode(Object item, boolean isData) {
                this.item = item;
                this.isData = isData;
            }

            boolean casNext(QNode cmp, QNode val) {
                // Volatile-read pre-check skips a CAS already known to fail.
                return next == cmp &&
                    U.compareAndSwapObject(this, NEXT, cmp, val);
            }

            boolean casItem(Object cmp, Object val) {
                return item == cmp &&
                    U.compareAndSwapObject(this, ITEM, cmp, val);
            }

            /**
             * Tries to cancel by CAS'ing ref to this as item.
             */
            void tryCancel(Object cmp) {
                U.compareAndSwapObject(this, ITEM, cmp, this);
            }

            boolean isCancelled() {
                return item == this;    // self-linked item encodes cancellation
            }

            /**
             * Returns true if this node is known to be off the queue
             * because its next pointer has been forgotten due to
             * an advanceHead operation.
             */
            boolean isOffList() {
                return next == this;
            }

            // Unsafe mechanics
            private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
            private static final long ITEM;
            private static final long NEXT;

            static {
                try {
                    ITEM = U.objectFieldOffset
                        (QNode.class.getDeclaredField("item"));
                    NEXT = U.objectFieldOffset
                        (QNode.class.getDeclaredField("next"));
                } catch (ReflectiveOperationException e) {
                    throw new Error(e);
                }
            }
        }
564 
        /** Head of queue */
        transient volatile QNode head;
        /** Tail of queue */
        transient volatile QNode tail;
        /**
         * Reference to a cancelled node that might not yet have been
         * unlinked from queue because it was the last inserted node
         * when it was cancelled.
         */
        transient volatile QNode cleanMe;
575 
TransferQueue()576         TransferQueue() {
577             QNode h = new QNode(null, false); // initialize to dummy node.
578             head = h;
579             tail = h;
580         }
581 
        /**
         * Tries to cas nh as new head; if successful, unlink
         * old head's next node to avoid garbage retention.
         */
        void advanceHead(QNode h, QNode nh) {
            if (h == head &&
                U.compareAndSwapObject(this, HEAD, h, nh))
                h.next = h; // forget old next; the self-link also marks h as off-list (see QNode.isOffList)
        }
591 
        /**
         * Tries to cas nt as new tail. Failure is benign: some other
         * thread advanced the tail first.
         */
        void advanceTail(QNode t, QNode nt) {
            if (tail == t)
                U.compareAndSwapObject(this, TAIL, t, nt);
        }
599 
        /**
         * Tries to CAS cleanMe slot.
         *
         * @return true if cleanMe was cmp and was replaced by val
         */
        boolean casCleanMe(QNode cmp, QNode val) {
            return cleanMe == cmp &&
                U.compareAndSwapObject(this, CLEANME, cmp, val);
        }
607 
        /**
         * Puts or takes an item.
         *
         * @param e if non-null, a put of item e; if null, a take
         * @param timed if this operation should timeout
         * @param nanos the timeout, in nanoseconds
         * @return the matched item, or null on timeout/interrupt
         */
        @SuppressWarnings("unchecked")
        E transfer(E e, boolean timed, long nanos) {
            /* Basic algorithm is to loop trying to take either of
             * two actions:
             *
             * 1. If queue apparently empty or holding same-mode nodes,
             *    try to add node to queue of waiters, wait to be
             *    fulfilled (or cancelled) and return matching item.
             *
             * 2. If queue apparently contains waiting items, and this
             *    call is of complementary mode, try to fulfill by CAS'ing
             *    item field of waiting node and dequeuing it, and then
             *    returning matching item.
             *
             * In each case, along the way, check for and try to help
             * advance head and tail on behalf of other stalled/slow
             * threads.
             *
             * The loop starts off with a null check guarding against
             * seeing uninitialized head or tail values. This never
             * happens in current SynchronousQueue, but could if
             * callers held non-volatile/final ref to the
             * transferer. The check is here anyway because it places
             * null checks at top of loop, which is usually faster
             * than having them implicitly interspersed.
             */

            QNode s = null; // constructed/reused as needed
            boolean isData = (e != null);

            for (;;) {
                QNode t = tail;
                QNode h = head;
                if (t == null || h == null)         // saw uninitialized value
                    continue;                       // spin

                if (h == t || t.isData == isData) { // empty or same-mode
                    QNode tn = t.next;
                    if (t != tail)                  // inconsistent read
                        continue;
                    if (tn != null) {               // lagging tail
                        advanceTail(t, tn);
                        continue;
                    }
                    if (timed && nanos <= 0L)       // can't wait
                        return null;
                    if (s == null)
                        s = new QNode(e, isData);
                    if (!t.casNext(null, s))        // failed to link in
                        continue;

                    advanceTail(t, s);              // swing tail and wait
                    Object x = awaitFulfill(s, e, timed, nanos);
                    if (x == s) {                   // wait was cancelled
                        clean(t, s);
                        return null;
                    }

                    if (!s.isOffList()) {           // not already unlinked
                        advanceHead(t, s);          // unlink if head
                        if (x != null)              // and forget fields
                            s.item = s;
                        s.waiter = null;
                    }
                    return (x != null) ? (E)x : e;

                } else {                            // complementary-mode
                    QNode m = h.next;               // node to fulfill
                    if (t != tail || m == null || h != head)
                        continue;                   // inconsistent read

                    Object x = m.item;
                    if (isData == (x != null) ||    // m already fulfilled
                        x == m ||                   // m cancelled
                        !m.casItem(x, e)) {         // lost CAS
                        advanceHead(h, m);          // dequeue and retry
                        continue;
                    }

                    advanceHead(h, m);              // successfully fulfilled
                    LockSupport.unpark(m.waiter);   // wake the matched waiter
                    return (x != null) ? (E)x : e;
                }
            }
        }
696 
        /**
         * Spins/blocks until node s is fulfilled.
         *
         * <p>Returns the matched item once another thread changes
         * {@code s.item} away from {@code e}; returns {@code s} itself
         * when the wait was cancelled (interrupt or timeout) -- callers
         * compare the result against {@code s} to detect cancellation.
         *
         * @param s the waiting node
         * @param e the comparison value for checking match
         * @param timed true if timed wait
         * @param nanos timeout value
         * @return matched item, or s if cancelled
         */
        Object awaitFulfill(QNode s, E e, boolean timed, long nanos) {
            /* Same idea as TransferStack.awaitFulfill */
            final long deadline = timed ? System.nanoTime() + nanos : 0L;
            Thread w = Thread.currentThread();
            // Spin only while s is the next node in line to be fulfilled
            // (head.next); otherwise go straight to the park logic.
            int spins = (head.next == s)
                ? (timed ? MAX_TIMED_SPINS : MAX_UNTIMED_SPINS)
                : 0;
            for (;;) {
                if (w.isInterrupted())
                    s.tryCancel(e);             // on interrupt, try to cancel s
                Object x = s.item;
                if (x != e)                     // item changed: fulfilled, or s if cancelled
                    return x;
                if (timed) {
                    nanos = deadline - System.nanoTime();
                    if (nanos <= 0L) {
                        s.tryCancel(e);         // timed out; loop to re-read item
                        continue;
                    }
                }
                if (spins > 0)
                    --spins;
                else if (s.waiter == null)
                    s.waiter = w;               // publish waiter before parking
                else if (!timed)
                    LockSupport.park(this);
                else if (nanos > SPIN_FOR_TIMEOUT_THRESHOLD)
                    LockSupport.parkNanos(this, nanos); // else remaining time too short to park
            }
        }
736 
        /**
         * Gets rid of cancelled node s with original predecessor pred.
         *
         * @param pred the node that preceded s when s was enqueued
         * @param s the cancelled node to unlink
         */
        void clean(QNode pred, QNode s) {
            s.waiter = null; // forget thread
            /*
             * At any given time, exactly one node on list cannot be
             * deleted -- the last inserted node. To accommodate this,
             * if we cannot delete s, we save its predecessor as
             * "cleanMe", deleting the previously saved version
             * first. At least one of node s or the node previously
             * saved can always be deleted, so this always terminates.
             */
            while (pred.next == s) { // Return early if already unlinked
                QNode h = head;
                QNode hn = h.next;   // Absorb cancelled first node as head
                if (hn != null && hn.isCancelled()) {
                    advanceHead(h, hn);
                    continue;
                }
                QNode t = tail;      // Ensure consistent read for tail
                if (t == h)
                    return;          // queue is empty; nothing left to unlink
                QNode tn = t.next;
                if (t != tail)       // tail changed under us; re-read
                    continue;
                if (tn != null) {    // help swing a lagging tail first
                    advanceTail(t, tn);
                    continue;
                }
                if (s != t) {        // If not tail, try to unsplice
                    QNode sn = s.next;
                    if (sn == s || pred.casNext(s, sn))
                        return;
                }
                QNode dp = cleanMe;
                if (dp != null) {    // Try unlinking previous cancelled node
                    QNode d = dp.next;
                    QNode dn;
                    if (d == null ||               // d is gone or
                        d == dp ||                 // d is off list or
                        !d.isCancelled() ||        // d not cancelled or
                        (d != t &&                 // d not tail and
                         (dn = d.next) != null &&  //   has successor
                         dn != d &&                //   that is on list
                         dp.casNext(d, dn)))       // d unspliced
                        casCleanMe(dp, null);
                    if (dp == pred)
                        return;      // s is already saved node
                } else if (casCleanMe(null, pred))
                    return;          // Postpone cleaning s
            }
        }
790 
        // Unsafe mechanics: raw field offsets used for CAS on the
        // volatile head, tail, and cleanMe fields, resolved at class load.
        private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
        private static final long HEAD;
        private static final long TAIL;
        private static final long CLEANME;
        static {
            try {
                HEAD = U.objectFieldOffset
                    (TransferQueue.class.getDeclaredField("head"));
                TAIL = U.objectFieldOffset
                    (TransferQueue.class.getDeclaredField("tail"));
                CLEANME = U.objectFieldOffset
                    (TransferQueue.class.getDeclaredField("cleanMe"));
            } catch (ReflectiveOperationException e) {
                // Cannot happen: the looked-up fields are declared in this class.
                throw new Error(e);
            }
        }
807     }
808 
    /**
     * The transferer. Set only in constructor, but cannot be declared
     * as final without further complicating serialization.  Since
     * this is accessed only at most once per public method, there
     * isn't a noticeable performance penalty for using volatile
     * instead of final here.
     */
    private transient volatile Transferer<E> transferer; // also reassigned by readObject
817 
    /**
     * Creates a {@code SynchronousQueue} with nonfair access policy.
     * Equivalent to {@code SynchronousQueue(false)}.
     */
    public SynchronousQueue() {
        this(false);
    }
824 
825     /**
826      * Creates a {@code SynchronousQueue} with the specified fairness policy.
827      *
828      * @param fair if true, waiting threads contend in FIFO order for
829      *        access; otherwise the order is unspecified.
830      */
SynchronousQueue(boolean fair)831     public SynchronousQueue(boolean fair) {
832         transferer = fair ? new TransferQueue<E>() : new TransferStack<E>();
833     }
834 
835     /**
836      * Adds the specified element to this queue, waiting if necessary for
837      * another thread to receive it.
838      *
839      * @throws InterruptedException {@inheritDoc}
840      * @throws NullPointerException {@inheritDoc}
841      */
put(E e)842     public void put(E e) throws InterruptedException {
843         if (e == null) throw new NullPointerException();
844         if (transferer.transfer(e, false, 0) == null) {
845             Thread.interrupted();
846             throw new InterruptedException();
847         }
848     }
849 
850     /**
851      * Inserts the specified element into this queue, waiting if necessary
852      * up to the specified wait time for another thread to receive it.
853      *
854      * @return {@code true} if successful, or {@code false} if the
855      *         specified waiting time elapses before a consumer appears
856      * @throws InterruptedException {@inheritDoc}
857      * @throws NullPointerException {@inheritDoc}
858      */
offer(E e, long timeout, TimeUnit unit)859     public boolean offer(E e, long timeout, TimeUnit unit)
860         throws InterruptedException {
861         if (e == null) throw new NullPointerException();
862         if (transferer.transfer(e, true, unit.toNanos(timeout)) != null)
863             return true;
864         if (!Thread.interrupted())
865             return false;
866         throw new InterruptedException();
867     }
868 
869     /**
870      * Inserts the specified element into this queue, if another thread is
871      * waiting to receive it.
872      *
873      * @param e the element to add
874      * @return {@code true} if the element was added to this queue, else
875      *         {@code false}
876      * @throws NullPointerException if the specified element is null
877      */
offer(E e)878     public boolean offer(E e) {
879         if (e == null) throw new NullPointerException();
880         return transferer.transfer(e, true, 0) != null;
881     }
882 
883     /**
884      * Retrieves and removes the head of this queue, waiting if necessary
885      * for another thread to insert it.
886      *
887      * @return the head of this queue
888      * @throws InterruptedException {@inheritDoc}
889      */
take()890     public E take() throws InterruptedException {
891         E e = transferer.transfer(null, false, 0);
892         if (e != null)
893             return e;
894         Thread.interrupted();
895         throw new InterruptedException();
896     }
897 
898     /**
899      * Retrieves and removes the head of this queue, waiting
900      * if necessary up to the specified wait time, for another thread
901      * to insert it.
902      *
903      * @return the head of this queue, or {@code null} if the
904      *         specified waiting time elapses before an element is present
905      * @throws InterruptedException {@inheritDoc}
906      */
poll(long timeout, TimeUnit unit)907     public E poll(long timeout, TimeUnit unit) throws InterruptedException {
908         E e = transferer.transfer(null, true, unit.toNanos(timeout));
909         if (e != null || !Thread.interrupted())
910             return e;
911         throw new InterruptedException();
912     }
913 
    /**
     * Retrieves and removes the head of this queue, if another thread
     * is currently making an element available.
     *
     * @return the head of this queue, or {@code null} if no
     *         element is available
     */
    public E poll() {
        // Zero-timeout transfer: succeeds only if a producer is already waiting.
        return transferer.transfer(null, true, 0);
    }
924 
    /**
     * Always returns {@code true}.
     * A {@code SynchronousQueue} has no internal capacity, so it never
     * holds an element.
     *
     * @return {@code true}
     */
    public boolean isEmpty() {
        return true;
    }
934 
    /**
     * Always returns zero.
     * A {@code SynchronousQueue} has no internal capacity, so it never
     * contains elements to count.
     *
     * @return zero
     */
    public int size() {
        return 0;
    }
944 
    /**
     * Always returns zero.
     * A {@code SynchronousQueue} has no internal capacity, so no space
     * is ever available without a waiting consumer.
     *
     * @return zero
     */
    public int remainingCapacity() {
        return 0;
    }
954 
    /**
     * Does nothing.
     * A {@code SynchronousQueue} has no internal capacity, so there is
     * never anything to clear.
     */
    public void clear() {
    }
961 
    /**
     * Always returns {@code false}.
     * A {@code SynchronousQueue} has no internal capacity and so, as a
     * {@code Collection}, acts as if permanently empty.
     *
     * @param o the element
     * @return {@code false}
     */
    public boolean contains(Object o) {
        return false;
    }
972 
    /**
     * Always returns {@code false}.
     * A {@code SynchronousQueue} has no internal capacity, so there is
     * never a stored element to remove.
     *
     * @param o the element to remove
     * @return {@code false}
     */
    public boolean remove(Object o) {
        return false;
    }
983 
    /**
     * Returns {@code false} unless the given collection is empty.
     * A {@code SynchronousQueue} has no internal capacity; an empty
     * collection is trivially contained in any collection.
     *
     * @param c the collection
     * @return {@code false} unless given collection is empty
     */
    public boolean containsAll(Collection<?> c) {
        return c.isEmpty();
    }
994 
    /**
     * Always returns {@code false}.
     * A {@code SynchronousQueue} has no internal capacity, so nothing
     * is ever removed.
     *
     * @param c the collection
     * @return {@code false}
     */
    public boolean removeAll(Collection<?> c) {
        return false;
    }
1005 
    /**
     * Always returns {@code false}.
     * A {@code SynchronousQueue} has no internal capacity, so this
     * never changes the (empty) contents.
     *
     * @param c the collection
     * @return {@code false}
     */
    public boolean retainAll(Collection<?> c) {
        return false;
    }
1016 
    /**
     * Always returns {@code null}.
     * A {@code SynchronousQueue} does not return elements
     * unless actively waited on (an element is present only while a
     * thread is attempting to remove it).
     *
     * @return {@code null}
     */
    public E peek() {
        return null;
    }
1027 
    /**
     * Returns an empty iterator in which {@code hasNext} always returns
     * {@code false}, since there is never anything to iterate.
     *
     * @return an empty iterator
     */
    public Iterator<E> iterator() {
        return Collections.emptyIterator();
    }
1037 
    /**
     * Returns an empty spliterator in which calls to
     * {@link java.util.Spliterator#trySplit()} always return {@code null},
     * since there is never anything to traverse.
     *
     * @return an empty spliterator
     * @since 1.8
     */
    public Spliterator<E> spliterator() {
        return Spliterators.emptySpliterator();
    }
1048 
    /**
     * Returns a zero-length array, reflecting the permanently empty
     * collection view of this queue.
     * @return a zero-length array
     */
    public Object[] toArray() {
        return new Object[0];
    }
1056 
1057     /**
1058      * Sets the zeroth element of the specified array to {@code null}
1059      * (if the array has non-zero length) and returns it.
1060      *
1061      * @param a the array
1062      * @return the specified array
1063      * @throws NullPointerException if the specified array is null
1064      */
toArray(T[] a)1065     public <T> T[] toArray(T[] a) {
1066         if (a.length > 0)
1067             a[0] = null;
1068         return a;
1069     }
1070 
    /**
     * Always returns {@code "[]"}, the string form of an empty collection.
     * @return {@code "[]"}
     */
    public String toString() {
        return "[]";
    }
1078 
1079     /**
1080      * @throws UnsupportedOperationException {@inheritDoc}
1081      * @throws ClassCastException            {@inheritDoc}
1082      * @throws NullPointerException          {@inheritDoc}
1083      * @throws IllegalArgumentException      {@inheritDoc}
1084      */
drainTo(Collection<? super E> c)1085     public int drainTo(Collection<? super E> c) {
1086         if (c == null)
1087             throw new NullPointerException();
1088         if (c == this)
1089             throw new IllegalArgumentException();
1090         int n = 0;
1091         for (E e; (e = poll()) != null;) {
1092             c.add(e);
1093             ++n;
1094         }
1095         return n;
1096     }
1097 
1098     /**
1099      * @throws UnsupportedOperationException {@inheritDoc}
1100      * @throws ClassCastException            {@inheritDoc}
1101      * @throws NullPointerException          {@inheritDoc}
1102      * @throws IllegalArgumentException      {@inheritDoc}
1103      */
drainTo(Collection<? super E> c, int maxElements)1104     public int drainTo(Collection<? super E> c, int maxElements) {
1105         if (c == null)
1106             throw new NullPointerException();
1107         if (c == this)
1108             throw new IllegalArgumentException();
1109         int n = 0;
1110         for (E e; n < maxElements && (e = poll()) != null;) {
1111             c.add(e);
1112             ++n;
1113         }
1114         return n;
1115     }
1116 
    /*
     * To cope with serialization strategy in the 1.5 version of
     * SynchronousQueue, we declare some unused classes and fields
     * that exist solely to enable serializability across versions.
     * These fields are never used, so are initialized only if this
     * object is ever serialized or deserialized.
     */

    @SuppressWarnings("serial")
    static class WaitQueue implements java.io.Serializable { }
    // FIFO marker class: its presence in a stream denotes a fair queue.
    static class LifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3633113410248163686L;
    }
    static class FifoWaitQueue extends WaitQueue {
        private static final long serialVersionUID = -3623113410248163686L;
    }
    // Dummy serialized-form fields, populated only during writeObject.
    private ReentrantLock qlock;
    private WaitQueue waitingProducers;
    private WaitQueue waitingConsumers;
1136 
    /**
     * Saves this queue to a stream (that is, serializes it).
     * Populates the legacy 1.5-era dummy fields so the serialized form
     * stays compatible across versions; fairness is encoded by which
     * {@code WaitQueue} subclass is written out.
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        boolean fair = transferer instanceof TransferQueue;
        if (fair) {
            qlock = new ReentrantLock(true);
            waitingProducers = new FifoWaitQueue();
            waitingConsumers = new FifoWaitQueue();
        }
        else {
            qlock = new ReentrantLock();
            waitingProducers = new LifoWaitQueue();
            waitingConsumers = new LifoWaitQueue();
        }
        s.defaultWriteObject();
    }
1157 
    /**
     * Reconstitutes this queue from a stream (that is, deserializes it).
     * The fairness policy is recovered from the class of the serialized
     * {@code waitingProducers} field (FIFO implies fair).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        if (waitingProducers instanceof FifoWaitQueue)
            transferer = new TransferQueue<E>();
        else
            transferer = new TransferStack<E>();
    }
1173 
    static {
        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        // (the local variable is intentionally unused; only the class
        // reference matters)
        Class<?> ensureLoaded = LockSupport.class;
    }
1179 }
1180