1 use core::cell::UnsafeCell;
2 use core::sync::atomic::{AtomicPtr, AtomicBool};
3 use core::sync::atomic::Ordering::{self, SeqCst};
4 use alloc::sync::{Arc, Weak};
5 
6 use crate::task::{ArcWake, WakerRef, waker_ref};
7 use super::ReadyToRunQueue;
8 use super::abort::abort;
9 
// A single spawned future tracked by `FuturesUnordered`. Each task is
// reference-counted via `Arc` and doubles as its own waker (see the
// `ArcWake` impl below).
pub(super) struct Task<Fut> {
    // The future this task drives. Expected to be `None` by the time the
    // task is dropped (the `Drop` impl aborts otherwise). Only the parent
    // (`super`) module may touch this, and never concurrently from
    // different threads — see the `Send`/`Sync` impls below.
    pub(super) future: UnsafeCell<Option<Fut>>,

    // Next pointer for linked list tracking all active tasks (use
    // `spin_next_all` to read when access is shared across threads, since
    // it may transiently hold a sentinel "pending" value)
    pub(super) next_all: AtomicPtr<Task<Fut>>,

    // Previous task in linked list tracking all active tasks
    pub(super) prev_all: UnsafeCell<*const Task<Fut>>,

    // Length of the linked list tracking all active tasks when this node was
    // inserted (use `spin_next_all` to synchronize before reading when access
    // is shared across threads)
    pub(super) len_all: UnsafeCell<usize>,

    // Next pointer in ready to run queue
    pub(super) next_ready_to_run: AtomicPtr<Task<Fut>>,

    // Queue that we'll be enqueued to when woken. Held weakly so the queue
    // can be freed even while outstanding wakers keep tasks alive; once the
    // upgrade fails, `wake_by_ref` becomes a no-op.
    pub(super) ready_to_run_queue: Weak<ReadyToRunQueue<Fut>>,

    // Whether or not this task is currently in the ready to run queue.
    // Swapped to `true` by `wake_by_ref` so that at most one waker performs
    // the actual enqueue at a time.
    pub(super) queued: AtomicBool,
}
35 
// SAFETY: `Task` can be sent and shared across threads, despite containing
// `UnsafeCell`s and raw pointers, because none of its own methods touch the
// underlying `Fut`.
//
// The parent (`super`) module is trusted not to access `future` (or the
// other `UnsafeCell` fields) concurrently across different threads; the
// `Drop` impl below aborts if that contract is ever violated badly enough
// to leave a future behind at drop time.
unsafe impl<Fut> Send for Task<Fut> {}
unsafe impl<Fut> Sync for Task<Fut> {}
43 
44 impl<Fut> ArcWake for Task<Fut> {
wake_by_ref(arc_self: &Arc<Self>)45     fn wake_by_ref(arc_self: &Arc<Self>) {
46         let inner = match arc_self.ready_to_run_queue.upgrade() {
47             Some(inner) => inner,
48             None => return,
49         };
50 
51         // It's our job to enqueue this task it into the ready to run queue. To
52         // do this we set the `queued` flag, and if successful we then do the
53         // actual queueing operation, ensuring that we're only queued once.
54         //
55         // Once the task is inserted call `wake` to notify the parent task,
56         // as it'll want to come along and run our task later.
57         //
58         // Note that we don't change the reference count of the task here,
59         // we merely enqueue the raw pointer. The `FuturesUnordered`
60         // implementation guarantees that if we set the `queued` flag that
61         // there's a reference count held by the main `FuturesUnordered` queue
62         // still.
63         let prev = arc_self.queued.swap(true, SeqCst);
64         if !prev {
65             inner.enqueue(&**arc_self);
66             inner.waker.wake();
67         }
68     }
69 }
70 
71 impl<Fut> Task<Fut> {
72     /// Returns a waker reference for this task without cloning the Arc.
waker_ref(this: &Arc<Self>) -> WakerRef<'_>73     pub(super) fn waker_ref(this: &Arc<Self>) -> WakerRef<'_> {
74         waker_ref(this)
75     }
76 
77     /// Spins until `next_all` is no longer set to `pending_next_all`.
78     ///
79     /// The temporary `pending_next_all` value is typically overwritten fairly
80     /// quickly after a node is inserted into the list of all futures, so this
81     /// should rarely spin much.
82     ///
83     /// When it returns, the correct `next_all` value is returned.
84     ///
85     /// `Relaxed` or `Acquire` ordering can be used. `Acquire` ordering must be
86     /// used before `len_all` can be safely read.
87     #[inline]
spin_next_all( &self, pending_next_all: *mut Self, ordering: Ordering, ) -> *const Self88     pub(super) fn spin_next_all(
89         &self,
90         pending_next_all: *mut Self,
91         ordering: Ordering,
92     ) -> *const Self {
93         loop {
94             let next = self.next_all.load(ordering);
95             if next != pending_next_all {
96                 return next;
97             }
98         }
99     }
100 }
101 
102 impl<Fut> Drop for Task<Fut> {
drop(&mut self)103     fn drop(&mut self) {
104         // Since `Task<Fut>` is sent across all threads for any lifetime,
105         // regardless of `Fut`, we, to guarantee memory safety, can't actually
106         // touch `Fut` at any time except when we have a reference to the
107         // `FuturesUnordered` itself .
108         //
109         // Consequently it *should* be the case that we always drop futures from
110         // the `FuturesUnordered` instance. This is a bomb, just in case there's
111         // a bug in that logic.
112         unsafe {
113             if (*self.future.get()).is_some() {
114                 abort("future still here when dropping");
115             }
116         }
117     }
118 }
119