//! Methods for custom fork-join scopes, created by the [`scope()`]
//! function. These are a more flexible alternative to [`join()`].
//!
//! [`scope()`]: fn.scope.html
//! [`join()`]: ../fn.join.html

use crate::job::{HeapJob, JobFifo};
use crate::latch::CountLatch;
use crate::registry::{in_worker, Registry, WorkerThread};
use crate::unwind;
use std::any::Any;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::sync::Arc;

#[cfg(test)]
mod test;

/// Represents a fork-join scope which can be used to spawn any number of tasks.
/// See [`scope()`] for more information.
///
///[`scope()`]: fn.scope.html
pub struct Scope<'scope> {
    base: ScopeBase<'scope>,
}

/// Represents a fork-join scope which can be used to spawn any number of tasks.
/// Those spawned from the same thread are prioritized in relative FIFO order.
/// See [`scope_fifo()`] for more information.
///
///[`scope_fifo()`]: fn.scope_fifo.html
pub struct ScopeFifo<'scope> {
    base: ScopeBase<'scope>,
    fifos: Vec<JobFifo>,
}

struct ScopeBase<'scope> {
    /// thread where `scope()` was executed (note that individual jobs
    /// may be executing on different worker threads, though they
    /// should always be within the same pool of threads)
    owner_thread_index: usize,

    /// thread registry where `scope()` was executed.
    registry: Arc<Registry>,

    /// if some job panicked, the error is stored here; it will be
    /// propagated to the one who created the scope
    panic: AtomicPtr<Box<dyn Any + Send + 'static>>,

    /// latch to set when the counter drops to zero (and hence this scope is complete)
    job_completed_latch: CountLatch,

    /// You can think of a scope as containing a list of closures to execute,
    /// all of which outlive `'scope`.  They're not actually required to be
    /// `Sync`, but it's still safe to let the `Scope` implement `Sync` because
    /// the closures are only *moved* across threads to be executed.
    marker: PhantomData<Box<dyn FnOnce(&Scope<'scope>) + Send + Sync + 'scope>>,
}

/// Creates a "fork-join" scope `s` and invokes the closure with a
/// reference to `s`. This closure can then spawn asynchronous tasks
/// into `s`. Those tasks may run asynchronously with respect to the
/// closure; they may themselves spawn additional tasks into `s`. When
/// the closure returns, it will block until all tasks that have been
/// spawned into `s` complete.
///
/// `scope()` is a more flexible building block compared to `join()`,
/// since a loop can be used to spawn any number of tasks without
/// recursing. However, that flexibility comes at a performance price:
/// tasks spawned using `scope()` must be allocated onto the heap,
/// whereas `join()` can make exclusive use of the stack. **Prefer
/// `join()` (or, even better, parallel iterators) where possible.**
///
/// # Example
///
/// The Rayon `join()` function launches two closures and waits for them
/// to stop. One could implement `join()` using a scope like so, although
/// it would be less efficient than the real implementation:
///
/// ```rust
/// # use rayon_core as rayon;
/// pub fn join<A,B,RA,RB>(oper_a: A, oper_b: B) -> (RA, RB)
///     where A: FnOnce() -> RA + Send,
///           B: FnOnce() -> RB + Send,
///           RA: Send,
///           RB: Send,
/// {
///     let mut result_a: Option<RA> = None;
///     let mut result_b: Option<RB> = None;
///     rayon::scope(|s| {
///         s.spawn(|_| result_a = Some(oper_a()));
///         s.spawn(|_| result_b = Some(oper_b()));
///     });
///     (result_a.unwrap(), result_b.unwrap())
/// }
/// ```
///
/// # A note on threading
///
/// The closure given to `scope()` executes in the Rayon thread-pool,
/// as do those given to `spawn()`. This means that you can't access
/// thread-local variables (well, you can, but they may have
/// unexpected values).
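///
/// For example, one way to observe this (sketched here with the crate's
/// `current_thread_index()` helper) is:
///
/// ```rust
/// # use rayon_core as rayon;
/// // Outside the pool, we are not on a Rayon worker thread.
/// assert_eq!(rayon::current_thread_index(), None);
/// rayon::scope(|_| {
///     // Inside the scope, the closure runs on a worker thread of the pool.
///     assert!(rayon::current_thread_index().is_some());
/// });
/// ```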
///
/// # Task execution
///
/// Task execution potentially starts as soon as `spawn()` is called.
/// The task will end sometime before `scope()` returns. Note that the
/// *closure* given to scope may return much earlier. In general
/// the lifetime of a scope created like `scope(body)` goes something like this:
///
/// - Scope begins when `scope(body)` is called
/// - Scope body `body()` is invoked
///     - Scope tasks may be spawned
/// - Scope body returns
/// - Scope tasks execute, possibly spawning more tasks
/// - Once all tasks are done, scope ends and `scope()` returns
///
/// To see how and when tasks are joined, consider this example:
///
/// ```rust
/// # use rayon_core as rayon;
/// // point start
/// rayon::scope(|s| {
///     s.spawn(|s| { // task s.1
///         s.spawn(|s| { // task s.1.1
///             rayon::scope(|t| {
///                 t.spawn(|_| ()); // task t.1
///                 t.spawn(|_| ()); // task t.2
///             });
///         });
///     });
///     s.spawn(|s| { // task s.2
///     });
///     // point mid
/// });
/// // point end
/// ```
///
/// The various tasks that are run will execute roughly like so:
///
/// ```notrust
/// | (start)
/// |
/// | (scope `s` created)
/// +-----------------------------------------------+ (task s.2)
/// +-------+ (task s.1)                            |
/// |       |                                       |
/// |       +---+ (task s.1.1)                      |
/// |       |   |                                   |
/// |       |   | (scope `t` created)               |
/// |       |   +----------------+ (task t.2)       |
/// |       |   +---+ (task t.1) |                  |
/// | (mid) |   |   |            |                  |
/// :       |   + <-+------------+ (scope `t` ends) |
/// :       |   |                                   |
/// |<------+---+-----------------------------------+ (scope `s` ends)
/// |
/// | (end)
/// ```
///
/// The point here is that everything spawned into scope `s` will
/// terminate (at latest) at the same point -- right before the
/// original call to `rayon::scope` returns. This includes new
/// subtasks created by other subtasks (e.g., task `s.1.1`). If a new
/// scope is created (such as `t`), the things spawned into that scope
/// will be joined before that scope returns, which in turn occurs
/// before the creating task (task `s.1.1` in this case) finishes.
///
/// There is no guaranteed order of execution for spawns in a scope,
/// given that other threads may steal tasks at any time. However, they
/// are generally prioritized in a LIFO order on the thread from which
/// they were spawned. So in this example, absent any stealing, we can
/// expect `s.2` to execute before `s.1`, and `t.2` before `t.1`. Other
/// threads always steal from the other end of the deque, like FIFO
/// order.  The idea is that "recent" tasks are most likely to be fresh
/// in the local CPU's cache, while other threads can steal older
/// "stale" tasks.  For an alternate approach, consider
/// [`scope_fifo()`] instead.
///
/// [`scope_fifo()`]: fn.scope_fifo.html
///
/// # Accessing stack data
///
/// In general, spawned tasks may access stack data in place that
/// outlives the scope itself. Other data must be fully owned by the
/// spawned task.
///
/// ```rust
/// # use rayon_core as rayon;
/// let ok: Vec<i32> = vec![1, 2, 3];
/// rayon::scope(|s| {
///     let bad: Vec<i32> = vec![4, 5, 6];
///     s.spawn(|_| {
///         // We can access `ok` because it outlives the scope `s`.
///         println!("ok: {:?}", ok);
///
///         // If we just try to use `bad` here, the closure will borrow `bad`
///         // (because we are just printing it out, and that only requires a
///         // borrow), which will result in a compilation error. Read on
///         // for options.
///         // println!("bad: {:?}", bad);
///    });
/// });
/// ```
///
/// As the comments in the example above suggest, to reference `bad` we must
/// take ownership of it. One way to do this is to detach the closure
/// from the surrounding stack frame, using the `move` keyword. This
/// will cause it to take ownership of *all* the variables it touches,
/// in this case including both `ok` *and* `bad`:
///
/// ```rust
/// # use rayon_core as rayon;
/// let ok: Vec<i32> = vec![1, 2, 3];
/// rayon::scope(|s| {
///     let bad: Vec<i32> = vec![4, 5, 6];
///     s.spawn(move |_| {
///         println!("ok: {:?}", ok);
///         println!("bad: {:?}", bad);
///     });
///
///     // That closure is fine, but now we can't use `ok` anywhere else,
///     // since it is owned by the previous task:
///     // s.spawn(|_| println!("ok: {:?}", ok));
/// });
/// ```
///
/// While this works, it could be a problem if we want to use `ok` elsewhere.
/// There are two choices. We can keep the closure as a `move` closure, but
/// instead of referencing the variable `ok`, we create a shadowed variable that
/// is a borrow of `ok` and capture *that*:
///
/// ```rust
/// # use rayon_core as rayon;
/// let ok: Vec<i32> = vec![1, 2, 3];
/// rayon::scope(|s| {
///     let bad: Vec<i32> = vec![4, 5, 6];
///     let ok: &Vec<i32> = &ok; // shadow the original `ok`
///     s.spawn(move |_| {
///         println!("ok: {:?}", ok); // captures the shadowed version
///         println!("bad: {:?}", bad);
///     });
///
///     // Now we too can use the shadowed `ok`, since `&Vec<i32>` references
///     // can be shared freely. Note that we need a `move` closure here though,
///     // because otherwise we'd be trying to borrow the shadowed `ok`,
///     // and that doesn't outlive `scope`.
///     s.spawn(move |_| println!("ok: {:?}", ok));
/// });
/// ```
///
/// Another option is not to use the `move` keyword but instead to take ownership
/// of individual variables:
///
/// ```rust
/// # use rayon_core as rayon;
/// let ok: Vec<i32> = vec![1, 2, 3];
/// rayon::scope(|s| {
///     let bad: Vec<i32> = vec![4, 5, 6];
///     s.spawn(|_| {
///         // Transfer ownership of `bad` into a local variable (also named `bad`).
///         // This will force the closure to take ownership of `bad` from the environment.
///         let bad = bad;
///         println!("ok: {:?}", ok); // `ok` is only borrowed.
///         println!("bad: {:?}", bad); // refers to our local variable, above.
///     });
///
///     s.spawn(|_| println!("ok: {:?}", ok)); // we too can borrow `ok`
/// });
/// ```
///
/// # Panics
///
/// If a panic occurs, either in the closure given to `scope()` or in
/// any of the spawned jobs, that panic will be propagated and the
/// call to `scope()` will panic. If multiple panics occur, it is
/// non-deterministic which of their panic values will propagate.
/// Regardless, once a task is spawned using `scope.spawn()`, it will
/// execute, even if the spawning task should later panic. `scope()`
/// returns once all spawned jobs have completed, and any panics are
/// propagated at that point.
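///
/// A minimal sketch of that propagation (the panic message here is just
/// illustrative):
///
/// ```should_panic
/// # use rayon_core as rayon;
/// rayon::scope(|s| {
///     s.spawn(|_| panic!("boom")); // panics on some worker thread...
/// }); // ...and the panic propagates out of this call to `scope()`
/// ```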
pub fn scope<'scope, OP, R>(op: OP) -> R
where
    OP: FnOnce(&Scope<'scope>) -> R + Send,
    R: Send,
{
    in_worker(|owner_thread, _| {
        let scope = Scope::<'scope>::new(owner_thread);
        unsafe { scope.base.complete(owner_thread, || op(&scope)) }
    })
}

/// Creates a "fork-join" scope `s` with FIFO order, and invokes the
/// closure with a reference to `s`. This closure can then spawn
/// asynchronous tasks into `s`. Those tasks may run asynchronously with
/// respect to the closure; they may themselves spawn additional tasks
/// into `s`. When the closure returns, it will block until all tasks
/// that have been spawned into `s` complete.
///
/// # Task execution
///
/// Tasks in a `scope_fifo()` run similarly to [`scope()`], but there's a
/// difference in the order of execution. Consider a similar example:
///
/// [`scope()`]: fn.scope.html
///
/// ```rust
/// # use rayon_core as rayon;
/// // point start
/// rayon::scope_fifo(|s| {
///     s.spawn_fifo(|s| { // task s.1
///         s.spawn_fifo(|s| { // task s.1.1
///             rayon::scope_fifo(|t| {
///                 t.spawn_fifo(|_| ()); // task t.1
///                 t.spawn_fifo(|_| ()); // task t.2
///             });
///         });
///     });
///     s.spawn_fifo(|s| { // task s.2
///     });
///     // point mid
/// });
/// // point end
/// ```
///
/// The various tasks that are run will execute roughly like so:
///
/// ```notrust
/// | (start)
/// |
/// | (FIFO scope `s` created)
/// +--------------------+ (task s.1)
/// +-------+ (task s.2) |
/// |       |            +---+ (task s.1.1)
/// |       |            |   |
/// |       |            |   | (FIFO scope `t` created)
/// |       |            |   +----------------+ (task t.1)
/// |       |            |   +---+ (task t.2) |
/// | (mid) |            |   |   |            |
/// :       |            |   + <-+------------+ (scope `t` ends)
/// :       |            |   |
/// |<------+------------+---+ (scope `s` ends)
/// |
/// | (end)
/// ```
///
/// Under `scope_fifo()`, the spawns are prioritized in a FIFO order on
/// the thread from which they were spawned, as opposed to `scope()`'s
/// LIFO.  So in this example, we can expect `s.1` to execute before
/// `s.2`, and `t.1` before `t.2`. Other threads also steal tasks in
/// FIFO order, as usual. Overall, this has roughly the same order as
/// the now-deprecated [`breadth_first`] option, except the effect is
/// isolated to a particular scope. If spawns are intermingled from any
/// combination of `scope()` and `scope_fifo()`, or from different
/// threads, their order is only specified with respect to spawns in the
/// same scope and thread.
///
/// For more details on this design, see Rayon [RFC #1].
///
/// [`breadth_first`]: struct.ThreadPoolBuilder.html#method.breadth_first
/// [RFC #1]: https://github.com/rayon-rs/rfcs/blob/master/accepted/rfc0001-scope-scheduling.md
///
/// # Panics
///
/// If a panic occurs, either in the closure given to `scope_fifo()` or
/// in any of the spawned jobs, that panic will be propagated and the
/// call to `scope_fifo()` will panic. If multiple panics occur, it is
/// non-deterministic which of their panic values will propagate.
/// Regardless, once a task is spawned using `scope.spawn_fifo()`, it
/// will execute, even if the spawning task should later panic.
/// `scope_fifo()` returns once all spawned jobs have completed, and any
/// panics are propagated at that point.
pub fn scope_fifo<'scope, OP, R>(op: OP) -> R
where
    OP: FnOnce(&ScopeFifo<'scope>) -> R + Send,
    R: Send,
{
    in_worker(|owner_thread, _| {
        let scope = ScopeFifo::<'scope>::new(owner_thread);
        unsafe { scope.base.complete(owner_thread, || op(&scope)) }
    })
}

impl<'scope> Scope<'scope> {
    fn new(owner_thread: &WorkerThread) -> Self {
        Scope {
            base: ScopeBase::new(owner_thread),
        }
    }

    /// Spawns a job into the fork-join scope `self`. This job will
    /// execute sometime before the fork-join scope completes.  The
    /// job is specified as a closure, and this closure receives its
    /// own reference to the scope `self` as argument. This can be
    /// used to inject new jobs into `self`.
    ///
    /// # Returns
    ///
    /// Nothing. The spawned closures cannot pass back values to the
    /// caller directly, though they can write to local variables on
    /// the stack (if those variables outlive the scope) or
    /// communicate through shared channels.
    ///
    /// (The intention is to eventually integrate with Rust futures to
    /// support spawns of functions that compute a value.)
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use rayon_core as rayon;
    /// let mut value_a = None;
    /// let mut value_b = None;
    /// let mut value_c = None;
    /// rayon::scope(|s| {
    ///     s.spawn(|s1| {
    ///           // ^ this is the same scope as `s`; this handle `s1`
    ///           //   is intended for use by the spawned task,
    ///           //   since scope handles cannot cross thread boundaries.
    ///
    ///         value_a = Some(22);
    ///
    ///         // the scope `s` will not end until all these tasks are done
    ///         s1.spawn(|_| {
    ///             value_b = Some(44);
    ///         });
    ///     });
    ///
    ///     s.spawn(|_| {
    ///         value_c = Some(66);
    ///     });
    /// });
    /// assert_eq!(value_a, Some(22));
    /// assert_eq!(value_b, Some(44));
    /// assert_eq!(value_c, Some(66));
    /// ```
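    ///
    /// As noted above, spawned closures can also hand results back through a
    /// channel; a minimal sketch using a standard `mpsc` channel:
    ///
    /// ```rust
    /// # use rayon_core as rayon;
    /// use std::sync::mpsc::channel;
    ///
    /// let (tx, rx) = channel();
    /// rayon::scope(|s| {
    ///     for i in 0..3 {
    ///         let tx = tx.clone();
    ///         s.spawn(move |_| tx.send(i * 10).unwrap());
    ///     }
    /// });
    /// drop(tx); // drop the original sender so `rx.iter()` terminates
    /// let mut results: Vec<i32> = rx.iter().collect();
    /// results.sort(); // spawn order is not execution order
    /// assert_eq!(results, vec![0, 10, 20]);
    /// ```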
    ///
    /// # See also
    ///
    /// The [`scope` function] has more extensive documentation about
    /// task spawning.
    ///
    /// [`scope` function]: fn.scope.html
    pub fn spawn<BODY>(&self, body: BODY)
    where
        BODY: FnOnce(&Scope<'scope>) + Send + 'scope,
    {
        self.base.increment();
        unsafe {
            let job_ref = Box::new(HeapJob::new(move || {
                self.base.execute_job(move || body(self))
            }))
            .as_job_ref();

            // Since `Scope` implements `Sync`, we can't be sure that we're still in a
            // thread of this pool, so we can't just push to the local worker thread.
            self.base.registry.inject_or_push(job_ref);
        }
    }
}

impl<'scope> ScopeFifo<'scope> {
    fn new(owner_thread: &WorkerThread) -> Self {
        let num_threads = owner_thread.registry().num_threads();
        ScopeFifo {
            base: ScopeBase::new(owner_thread),
            fifos: (0..num_threads).map(|_| JobFifo::new()).collect(),
        }
    }

    /// Spawns a job into the fork-join scope `self`. This job will
    /// execute sometime before the fork-join scope completes.  The
    /// job is specified as a closure, and this closure receives its
    /// own reference to the scope `self` as argument. This can be
    /// used to inject new jobs into `self`.
    ///
    /// # See also
    ///
    /// This method is akin to [`Scope::spawn()`], but with a FIFO
    /// priority.  The [`scope_fifo` function] has more details about
    /// this distinction.
    ///
    /// [`Scope::spawn()`]: struct.Scope.html#method.spawn
    /// [`scope_fifo` function]: fn.scope_fifo.html
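    ///
    /// # Examples
    ///
    /// A minimal sketch of spawning FIFO-prioritized tasks that write to
    /// stack variables outliving the scope:
    ///
    /// ```rust
    /// # use rayon_core as rayon;
    /// let mut value_a = None;
    /// let mut value_b = None;
    /// rayon::scope_fifo(|s| {
    ///     s.spawn_fifo(|_| {
    ///         value_a = Some(22);
    ///     });
    ///     s.spawn_fifo(|_| {
    ///         value_b = Some(44);
    ///     });
    /// });
    /// assert_eq!(value_a, Some(22));
    /// assert_eq!(value_b, Some(44));
    /// ```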
    pub fn spawn_fifo<BODY>(&self, body: BODY)
    where
        BODY: FnOnce(&ScopeFifo<'scope>) + Send + 'scope,
    {
        self.base.increment();
        unsafe {
            let job_ref = Box::new(HeapJob::new(move || {
                self.base.execute_job(move || body(self))
            }))
            .as_job_ref();

            // If we're in the pool, use our scope's private fifo for this thread to execute
            // in a locally-FIFO order.  Otherwise, just use the pool's global injector.
            match self.base.registry.current_thread() {
                Some(worker) => {
                    let fifo = &self.fifos[worker.index()];
                    worker.push(fifo.push(job_ref));
                }
                None => self.base.registry.inject(&[job_ref]),
            }
        }
    }
}

impl<'scope> ScopeBase<'scope> {
    /// Creates the base of a new scope for the given worker thread
    fn new(owner_thread: &WorkerThread) -> Self {
        ScopeBase {
            owner_thread_index: owner_thread.index(),
            registry: owner_thread.registry().clone(),
            panic: AtomicPtr::new(ptr::null_mut()),
            job_completed_latch: CountLatch::new(),
            marker: PhantomData,
        }
    }

    fn increment(&self) {
        self.job_completed_latch.increment();
    }

    /// Executes `func` as a job, either aborting or executing as
    /// appropriate.
    ///
    /// Unsafe because it must be executed on a worker thread.
    unsafe fn complete<FUNC, R>(&self, owner_thread: &WorkerThread, func: FUNC) -> R
    where
        FUNC: FnOnce() -> R,
    {
        let result = self.execute_job_closure(func);
        self.steal_till_jobs_complete(owner_thread);
        result.unwrap() // only None if `op` panicked, and that would have been propagated
    }

    /// Executes `func` as a job, either aborting or executing as
    /// appropriate.
    ///
    /// Unsafe because it must be executed on a worker thread.
    unsafe fn execute_job<FUNC>(&self, func: FUNC)
    where
        FUNC: FnOnce(),
    {
        let _: Option<()> = self.execute_job_closure(func);
    }

    /// Executes `func` as a job in scope. Adjusts the "job completed"
    /// counters and also catches any panic and stores it into
    /// `scope`.
    ///
    /// Unsafe because this must be executed on a worker thread.
    unsafe fn execute_job_closure<FUNC, R>(&self, func: FUNC) -> Option<R>
    where
        FUNC: FnOnce() -> R,
    {
        match unwind::halt_unwinding(func) {
            Ok(r) => {
                self.job_completed_ok();
                Some(r)
            }
            Err(err) => {
                self.job_panicked(err);
                None
            }
        }
    }

    unsafe fn job_panicked(&self, err: Box<dyn Any + Send + 'static>) {
        // capture the first error we see, free the rest
        let nil = ptr::null_mut();
        let mut err = Box::new(err); // box up the fat ptr
        if self
            .panic
            .compare_exchange(nil, &mut *err, Ordering::Release, Ordering::Relaxed)
            .is_ok()
        {
            mem::forget(err); // ownership now transferred into self.panic
        }

        self.job_completed_latch
            .set_and_tickle_one(&self.registry, self.owner_thread_index);
    }

    unsafe fn job_completed_ok(&self) {
        self.job_completed_latch
            .set_and_tickle_one(&self.registry, self.owner_thread_index);
    }

    unsafe fn steal_till_jobs_complete(&self, owner_thread: &WorkerThread) {
        // wait for job counter to reach 0:
        owner_thread.wait_until(&self.job_completed_latch);

        // propagate panic, if any occurred; at this point, all
        // outstanding jobs have completed, so we can use a relaxed
        // ordering:
        let panic = self.panic.swap(ptr::null_mut(), Ordering::Relaxed);
        if !panic.is_null() {
            let value: Box<Box<dyn Any + Send + 'static>> = mem::transmute(panic);
            unwind::resume_unwinding(*value);
        }
    }
}

impl<'scope> fmt::Debug for Scope<'scope> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("Scope")
            .field("pool_id", &self.base.registry.id())
            .field("owner_thread_index", &self.base.owner_thread_index)
            .field("panic", &self.base.panic)
            .field("job_completed_latch", &self.base.job_completed_latch)
            .finish()
    }
}

impl<'scope> fmt::Debug for ScopeFifo<'scope> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("ScopeFifo")
            .field("num_fifos", &self.fifos.len())
            .field("pool_id", &self.base.registry.id())
            .field("owner_thread_index", &self.base.owner_thread_index)
            .field("panic", &self.base.panic)
            .field("job_completed_latch", &self.base.job_completed_latch)
            .finish()
    }
}