// Necessary for implementing atomic methods for `AtomicUnit`
#![allow(clippy::unit_arg)]
#![allow(clippy::let_unit_value)]

use crate::primitive::sync::atomic::{self, AtomicBool};
use core::cell::UnsafeCell;
use core::fmt;
use core::mem;
use core::sync::atomic::Ordering;

#[cfg(not(crossbeam_loom))]
use core::ptr;

#[cfg(feature = "std")]
use std::panic::{RefUnwindSafe, UnwindSafe};

#[cfg(not(crossbeam_loom))]
use super::seq_lock::SeqLock;

/// A thread-safe mutable memory location.
///
/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads.
///
/// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using
/// global locks otherwise. You can call [`AtomicCell::<T>::is_lock_free()`] to check whether
/// atomic instructions or locks will be used.
///
/// Atomic loads use the [`Acquire`] ordering and atomic stores use the [`Release`] ordering.
///
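/// # Examples
///
/// A minimal sketch of sharing an `AtomicCell` between threads, using this crate's
/// `thread::scope` (available with the default `std` feature):
///
/// ```
/// use crossbeam_utils::atomic::AtomicCell;
/// use crossbeam_utils::thread;
///
/// let a = AtomicCell::new(0usize);
///
/// thread::scope(|s| {
///     s.spawn(|_| a.fetch_add(1));
///     s.spawn(|_| a.fetch_add(1));
/// })
/// .unwrap();
///
/// assert_eq!(a.load(), 2);
/// ```
///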
/// [`Cell`]: std::cell::Cell
/// [`AtomicCell::<T>::is_lock_free()`]: AtomicCell::is_lock_free
/// [`Acquire`]: std::sync::atomic::Ordering::Acquire
/// [`Release`]: std::sync::atomic::Ordering::Release
#[repr(transparent)]
pub struct AtomicCell<T: ?Sized> {
    /// The inner value.
    ///
    /// If this value can be transmuted into a primitive atomic type, it will be treated as such.
    /// Otherwise, all potentially concurrent operations on this data will be protected by a global
    /// lock.
    value: UnsafeCell<T>,
}

unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}

#[cfg(feature = "std")]
impl<T> UnwindSafe for AtomicCell<T> {}
#[cfg(feature = "std")]
impl<T> RefUnwindSafe for AtomicCell<T> {}

impl<T> AtomicCell<T> {
    /// Creates a new atomic cell initialized with `val`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// ```
    pub const fn new(val: T) -> AtomicCell<T> {
        AtomicCell {
            value: UnsafeCell::new(val),
        }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// let v = a.into_inner();
    ///
    /// assert_eq!(v, 7);
    /// ```
    pub fn into_inner(self) -> T {
        self.value.into_inner()
    }

    /// Returns `true` if operations on values of this type are lock-free.
    ///
    /// If the compiler or the platform doesn't support the necessary atomic instructions,
    /// `AtomicCell<T>` will use global locks for every potentially concurrent atomic operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// // This type is internally represented as `AtomicUsize` so we can just use atomic
    /// // operations provided by it.
    /// assert_eq!(AtomicCell::<usize>::is_lock_free(), true);
    ///
    /// // A wrapper struct around `isize`.
    /// struct Foo {
    ///     bar: isize,
    /// }
    /// // `AtomicCell<Foo>` will be internally represented as `AtomicUsize`.
    /// assert_eq!(AtomicCell::<Foo>::is_lock_free(), true);
    ///
    /// // Operations on zero-sized types are always lock-free.
    /// assert_eq!(AtomicCell::<()>::is_lock_free(), true);
    ///
    /// // Very large types cannot be represented as any of the standard atomic types, so atomic
    /// // operations on them will have to use global locks for synchronization.
    /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false);
    /// ```
    pub const fn is_lock_free() -> bool {
        atomic_is_lock_free::<T>()
    }

    /// Stores `val` into the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// a.store(8);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn store(&self, val: T) {
        if mem::needs_drop::<T>() {
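            // The previous value may have a destructor, so read it out via `swap`
            // and drop it, rather than overwriting it in place.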
            drop(self.swap(val));
        } else {
            unsafe {
                atomic_store(self.value.get(), val);
            }
        }
    }

    /// Stores `val` into the atomic cell and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// assert_eq!(a.swap(8), 7);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn swap(&self, val: T) -> T {
        unsafe { atomic_swap(self.value.get(), val) }
    }
}

impl<T: ?Sized> AtomicCell<T> {
    /// Returns a raw pointer to the underlying data in this atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    ///
    /// let ptr = a.as_ptr();
    /// ```
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        self.value.get()
    }
}

impl<T: Default> AtomicCell<T> {
    /// Takes the value of the atomic cell, leaving `Default::default()` in its place.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    /// let five = a.take();
    ///
    /// assert_eq!(five, 5);
    /// assert_eq!(a.into_inner(), 0);
    /// ```
    pub fn take(&self) -> T {
        self.swap(Default::default())
    }
}

impl<T: Copy> AtomicCell<T> {
    /// Loads a value from the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// ```
    pub fn load(&self) -> T {
        unsafe { atomic_load(self.value.get()) }
    }
}

impl<T: Copy + Eq> AtomicCell<T> {
    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_and_swap(2, 3), 1);
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_and_swap(1, 2), 1);
    /// assert_eq!(a.load(), 2);
    /// ```
    // TODO: remove in the next major version.
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_swap(&self, current: T, new: T) -> T {
        match self.compare_exchange(current, new) {
            Ok(v) => v,
            Err(v) => v,
        }
    }

    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_exchange(2, 3), Err(1));
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_exchange(1, 2), Ok(1));
    /// assert_eq!(a.load(), 2);
    /// ```
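    ///
    /// A compare-exchange loop can implement an arbitrary read-modify-write step for
    /// `Copy + Eq` types (a minimal sketch, doubling the stored value):
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// let mut current = a.load();
    /// loop {
    ///     match a.compare_exchange(current, current * 2) {
    ///         Ok(_) => break,
    ///         Err(actual) => current = actual,
    ///     }
    /// }
    ///
    /// assert_eq!(a.load(), 14);
    /// ```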
    pub fn compare_exchange(&self, current: T, new: T) -> Result<T, T> {
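        // `atomic_compare_exchange_weak` retries spurious failures internally, so
        // this method provides strong compare-exchange semantics.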
        unsafe { atomic_compare_exchange_weak(self.value.get(), current, new) }
    }
}

macro_rules! impl_arithmetic {
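    // Fallback arm for integer types without a dedicated atomic counterpart on the
    // target: use `AtomicUsize` when the type can be transmuted into it, and take
    // the global lock otherwise.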
    ($t:ty, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_add(val as usize, Ordering::AcqRel) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value = value.wrapping_add(val);
                    old
                }
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_sub(val as usize, Ordering::AcqRel) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value = value.wrapping_sub(val);
                    old
                }
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_and(val as usize, Ordering::AcqRel) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value &= val;
                    old
                }
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_or(val as usize, Ordering::AcqRel) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value |= val;
                    old
                }
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_xor(val as usize, Ordering::AcqRel) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value ^= val;
                    old
                }
            }
        }
    };
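    // Arm for integer types with a dedicated atomic counterpart `$atomic`: atomic
    // instructions are always available.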
    ($t:ty, $atomic:ty, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_add(val, Ordering::AcqRel)
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_sub(val, Ordering::AcqRel)
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_and(val, Ordering::AcqRel)
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_or(val, Ordering::AcqRel)
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_xor(val, Ordering::AcqRel)
            }
        }
    };
}

#[cfg(has_atomic_u8)]
impl_arithmetic!(u8, atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
#[cfg(all(has_atomic_u8, not(crossbeam_loom)))]
impl_arithmetic!(i8, atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
#[cfg(has_atomic_u16)]
impl_arithmetic!(u16, atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
#[cfg(all(has_atomic_u16, not(crossbeam_loom)))]
impl_arithmetic!(i16, atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
#[cfg(has_atomic_u32)]
impl_arithmetic!(u32, atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
#[cfg(all(has_atomic_u32, not(crossbeam_loom)))]
impl_arithmetic!(i32, atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
#[cfg(has_atomic_u64)]
impl_arithmetic!(u64, atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
#[cfg(all(has_atomic_u64, not(crossbeam_loom)))]
impl_arithmetic!(i64, atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
#[cfg(all(has_atomic_u128, not(crossbeam_loom)))]
impl_arithmetic!(u128, atomic::AtomicU128, "let a = AtomicCell::new(7u128);");
#[cfg(all(has_atomic_u128, not(crossbeam_loom)))]
impl_arithmetic!(i128, atomic::AtomicI128, "let a = AtomicCell::new(7i128);");

impl_arithmetic!(
    usize,
    atomic::AtomicUsize,
    "let a = AtomicCell::new(7usize);"
);
#[cfg(not(crossbeam_loom))]
impl_arithmetic!(
    isize,
    atomic::AtomicIsize,
    "let a = AtomicCell::new(7isize);"
);
impl AtomicCell<bool> {
    /// Applies logical "and" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_and(true), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_and(false), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: bool) -> bool {
        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
        a.fetch_and(val, Ordering::AcqRel)
    }

    /// Applies logical "or" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(false);
    ///
    /// assert_eq!(a.fetch_or(false), false);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_or(true), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: bool) -> bool {
        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
        a.fetch_or(val, Ordering::AcqRel)
    }

    /// Applies logical "xor" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_xor(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_xor(true), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: bool) -> bool {
        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
        a.fetch_xor(val, Ordering::AcqRel)
    }
}

impl<T: Default> Default for AtomicCell<T> {
    fn default() -> AtomicCell<T> {
        AtomicCell::new(T::default())
    }
}

impl<T> From<T> for AtomicCell<T> {
    #[inline]
    fn from(val: T) -> AtomicCell<T> {
        AtomicCell::new(val)
    }
}

impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AtomicCell")
            .field("value", &self.load())
            .finish()
    }
}

/// Returns `true` if values of type `A` can be transmuted into values of type `B`.
const fn can_transmute<A, B>() -> bool {
    // Sizes must be equal, and the alignment of `A` must be greater than or equal to that of `B`.
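    //
    // For example, a thin wrapper such as `struct Foo(usize)` can be transmuted into
    // `AtomicUsize` (same size and alignment), while `[u8; 2]` cannot be transmuted
    // into `AtomicU16` (same size, but alignment 1 < 2).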
    (mem::size_of::<A>() == mem::size_of::<B>()) & (mem::align_of::<A>() >= mem::align_of::<B>())
}

/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`.
///
/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic
/// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock.
///
/// However, there is not only one global lock but an array of many locks, and one of them is
/// picked based on the given address. Having many locks reduces contention and improves
/// scalability.
#[inline]
#[must_use]
#[cfg(not(crossbeam_loom))]
fn lock(addr: usize) -> &'static SeqLock {
    // The number of locks is a prime number because we want to make sure `addr % LEN` gets
    // dispersed across all locks.
    //
    // Note that addresses are always aligned to some power of 2, depending on type `T` in
    // `AtomicCell<T>`. If `LEN` was an even number, then `addr % LEN` would be an even number,
    // too, which means only half of the locks would get utilized!
    //
    // It is also possible for addresses to accidentally get aligned to a number that is not a
    // power of 2. Consider this example:
    //
    // ```
    // #[repr(C)]
    // struct Foo {
    //     a: AtomicCell<u8>,
    //     b: u8,
    //     c: u8,
    // }
    // ```
    //
    // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets
    // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3.
    // In order to protect from such cases, we simply choose a large prime number for `LEN`.
    const LEN: usize = 97;

    static LOCKS: [SeqLock; LEN] = [
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
        SeqLock::new(),
    ];

    // If the modulus is a constant number, the compiler will use crazy math to transform this into
    // a sequence of cheap arithmetic operations rather than using the slow modulo instruction.
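    //
    // As a hypothetical illustration of the dispersal: cells at addresses 0x1000 and
    // 0x1008 map to `LOCKS[4096 % 97]` = `LOCKS[22]` and `LOCKS[4104 % 97]` =
    // `LOCKS[30]`, so neighboring cells usually hit different locks.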
    &LOCKS[addr % LEN]
}

/// An atomic `()`.
///
/// All operations are no-ops.
struct AtomicUnit;

impl AtomicUnit {
    #[inline]
    fn load(&self, _order: Ordering) {}

    #[inline]
    fn store(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn swap(&self, _val: (), _order: Ordering) {}

    #[allow(clippy::unnecessary_wraps)] // This is intentional.
    #[inline]
    fn compare_exchange_weak(
        &self,
        _current: (),
        _new: (),
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<(), ()> {
        Ok(())
    }
}

macro_rules! atomic {
    // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`,
    // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop.
    (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => {
        if can_transmute::<$t, $atomic>() {
            let $a: &$atomic;
            break $atomic_op;
        }
    };

    // If values of type `$t` can be transmuted into values of a primitive atomic type, declares
    // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes
    // `$fallback_op`.
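    //
    // For example, `atomic_load` below uses this macro and expands to roughly the
    // following shape (a sketch, not the literal expansion):
    //
    //     loop {
    //         if can_transmute::<T, atomic::AtomicUsize>() {
    //             let a: &atomic::AtomicUsize;
    //             break /* atomic op reading through `a` */;
    //         }
    //         // ...the same check for every other supported atomic type...
    //         break /* fallback op that takes the global lock */;
    //     }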
    ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => {
        loop {
            atomic!(@check, $t, AtomicUnit, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicUsize, $a, $atomic_op);

            #[cfg(has_atomic_u8)]
            atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op);
            #[cfg(has_atomic_u16)]
            atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op);
            #[cfg(has_atomic_u32)]
            atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op);
            #[cfg(has_atomic_u64)]
            atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
            #[cfg(has_atomic_u128)]
            atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op);

            #[cfg(crossbeam_loom)]
            unimplemented!("loom does not support non-atomic atomic ops");
            #[cfg(not(crossbeam_loom))]
            break $fallback_op;
        }
    };
}

/// Returns `true` if operations on `AtomicCell<T>` are lock-free.
const fn atomic_is_lock_free<T>() -> bool {
    // HACK(taiki-e): This is equivalent to `atomic! { T, _a, true, false }`, but can be used in
    // const fn even in Rust 1.36.
    let is_lock_free = can_transmute::<T, AtomicUnit>() | can_transmute::<T, atomic::AtomicUsize>();
    #[cfg(has_atomic_u8)]
    let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU8>();
    #[cfg(has_atomic_u16)]
    let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU16>();
    #[cfg(has_atomic_u32)]
    let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU32>();
    #[cfg(has_atomic_u64)]
    let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU64>();
    #[cfg(has_atomic_u128)]
    let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU128>();
    is_lock_free
}

/// Atomically reads data from `src`.
///
/// This operation uses the `Acquire` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_load<T>(src: *mut T) -> T
where
    T: Copy,
{
    atomic! {
        T, a,
        {
            a = &*(src as *const _ as *const _);
            mem::transmute_copy(&a.load(Ordering::Acquire))
        },
        {
            let lock = lock(src as usize);

            // Try doing an optimistic read first.
            if let Some(stamp) = lock.optimistic_read() {
                // We need a volatile read here because other threads might concurrently modify the
                // value. In theory, data races are *always* UB, even if we use volatile reads and
                // discard the data when a data race is detected. The proper solution would be to
                // do atomic reads and atomic writes, but we can't atomically read and write all
                // kinds of data since `AtomicU8` is not available on stable Rust yet.
                let val = ptr::read_volatile(src);

                if lock.validate_read(stamp) {
                    return val;
                }
            }

            // Grab a regular write lock so that writers don't starve this load.
            let guard = lock.write();
            let val = ptr::read(src);
            // The value hasn't been changed. Drop the guard without incrementing the stamp.
            guard.abort();
            val
        }
    }
}

/// Atomically writes `val` to `dst`.
///
/// This operation uses the `Release` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_store<T>(dst: *mut T, val: T) {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            a.store(mem::transmute_copy(&val), Ordering::Release);
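            // `val`'s bits have been copied into the atomic above, so forget the
            // original to avoid dropping it here.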
            mem::forget(val);
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::write(dst, val);
        }
    }
}

/// Atomically swaps data at `dst` with `val`.
///
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel));
            mem::forget(val);
            res
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::replace(dst, val)
        }
    }
}

/// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at
/// `dst` with `new`.
///
/// Returns the old value on success, or the current value at `dst` on failure.
///
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, mut current: T, new: T) -> Result<T, T>
where
    T: Copy + Eq,
{
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let mut current_raw = mem::transmute_copy(&current);
            let new_raw = mem::transmute_copy(&new);

            loop {
                match a.compare_exchange_weak(
                    current_raw,
                    new_raw,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                ) {
                    Ok(_) => break Ok(current),
                    Err(previous_raw) => {
                        let previous = mem::transmute_copy(&previous_raw);

                        if !T::eq(&previous, &current) {
                            break Err(previous);
                        }

                        // The compare-exchange operation has failed and didn't store `new`. The
                        // failure is either spurious, or `previous` was semantically equal to
                        // `current` but not byte-equal. Let's retry with `previous` as the new
                        // `current`.
                        current = previous;
                        current_raw = previous_raw;
                    }
                }
            }
        },
        {
            let guard = lock(dst as usize).write();

            if T::eq(&*dst, &current) {
                Ok(ptr::replace(dst, new))
            } else {
                let val = ptr::read(dst);
                // The value hasn't been changed. Drop the guard without incrementing the stamp.
                guard.abort();
                Err(val)
            }
        }
    }
}