1 // Copyright 2016 Amanieu d'Antras
2 //
3 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5 // http://opensource.org/licenses/MIT>, at your option. This file may not be
6 // copied, modified, or distributed except according to those terms.
7 
8 use core::cell::UnsafeCell;
9 use core::fmt;
10 use core::marker::PhantomData;
11 use core::mem;
12 use core::ops::{Deref, DerefMut};
13 
14 #[cfg(feature = "owning_ref")]
15 use owning_ref::StableAddress;
16 
17 #[cfg(feature = "serde")]
18 use serde::{Deserialize, Deserializer, Serialize, Serializer};
19 
/// Basic operations for a reader-writer lock.
///
/// Types implementing this trait can be used by `RwLock` to form a safe and
/// fully-functioning `RwLock` type.
///
/// # Safety
///
/// Implementations of this trait must ensure that the `RwLock` is actually
/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
/// lock exists, and a shared lock can't be acquired while an exclusive lock
/// exists.
pub unsafe trait RawRwLock {
    /// Initial value for an unlocked `RwLock`.
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Marker type which determines whether a lock guard should be `Send`. Use
    /// one of the `GuardSend` or `GuardNoSend` helper types here.
    type GuardMarker;

    /// Acquires a shared lock, blocking the current thread until it is able to do so.
    fn lock_shared(&self);

    /// Attempts to acquire a shared lock without blocking.
    ///
    /// Returns `true` if the lock was successfully acquired.
    fn try_lock_shared(&self) -> bool;

    /// Releases a shared lock.
    ///
    /// # Safety
    ///
    /// This method may only be called if a shared lock is held in the current context.
    unsafe fn unlock_shared(&self);

    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
    fn lock_exclusive(&self);

    /// Attempts to acquire an exclusive lock without blocking.
    ///
    /// Returns `true` if the lock was successfully acquired.
    fn try_lock_exclusive(&self) -> bool;

    /// Releases an exclusive lock.
    ///
    /// # Safety
    ///
    /// This method may only be called if an exclusive lock is held in the current context.
    unsafe fn unlock_exclusive(&self);

    /// Checks if this `RwLock` is currently locked in any way.
    #[inline]
    fn is_locked(&self) -> bool {
        // Default probe: the lock is unlocked exactly when an exclusive lock
        // can be acquired without blocking; release it immediately if so.
        let acquired_lock = self.try_lock_exclusive();
        if acquired_lock {
            // Safety: A lock was successfully acquired above.
            unsafe {
                self.unlock_exclusive();
            }
        }
        !acquired_lock
    }
}
81 
/// Additional methods for RwLocks which support fair unlocking.
///
/// Fair unlocking means that a lock is handed directly over to the next waiting
/// thread if there is one, without giving other threads the opportunity to
/// "steal" the lock in the meantime. This is typically slower than unfair
/// unlocking, but may be necessary in certain circumstances.
pub unsafe trait RawRwLockFair: RawRwLock {
    /// Releases a shared lock using a fair unlock protocol.
    ///
    /// # Safety
    ///
    /// This method may only be called if a shared lock is held in the current context.
    unsafe fn unlock_shared_fair(&self);

    /// Releases an exclusive lock using a fair unlock protocol.
    ///
    /// # Safety
    ///
    /// This method may only be called if an exclusive lock is held in the current context.
    unsafe fn unlock_exclusive_fair(&self);

    /// Temporarily yields a shared lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_shared_fair` followed
    /// by `lock_shared`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    ///
    /// # Safety
    ///
    /// This method may only be called if a shared lock is held in the current context.
    unsafe fn bump_shared(&self) {
        // Default implementation: a full fair unlock followed by a fresh
        // blocking acquire. Implementors can override with a fast path.
        self.unlock_shared_fair();
        self.lock_shared();
    }

    /// Temporarily yields an exclusive lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
    /// by `lock_exclusive`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    ///
    /// # Safety
    ///
    /// This method may only be called if an exclusive lock is held in the current context.
    unsafe fn bump_exclusive(&self) {
        // Default implementation: fair unlock then blocking re-acquire.
        self.unlock_exclusive_fair();
        self.lock_exclusive();
    }
}
131 
/// Additional methods for RwLocks which support atomically downgrading an
/// exclusive lock to a shared lock.
pub unsafe trait RawRwLockDowngrade: RawRwLock {
    /// Atomically downgrades an exclusive lock into a shared lock without
    /// allowing any thread to take an exclusive lock in the meantime.
    ///
    /// After this call the caller holds a shared lock and must release it with
    /// `unlock_shared`, not `unlock_exclusive`.
    ///
    /// # Safety
    ///
    /// This method may only be called if an exclusive lock is held in the current context.
    unsafe fn downgrade(&self);
}
143 
/// Additional methods for RwLocks which support locking with timeouts.
///
/// The `Duration` and `Instant` types are specified as associated types so that
/// this trait is usable even in `no_std` environments.
pub unsafe trait RawRwLockTimed: RawRwLock {
    /// Duration type used for `try_lock_for`.
    type Duration;

    /// Instant type used for `try_lock_until`.
    type Instant;

    /// Attempts to acquire a shared lock until a timeout is reached.
    ///
    /// Returns `true` if the lock was acquired before the timeout expired.
    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire a shared lock until a deadline (instant) is reached.
    ///
    /// Returns `true` if the lock was acquired before the deadline passed.
    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;

    /// Attempts to acquire an exclusive lock until a timeout is reached.
    ///
    /// Returns `true` if the lock was acquired before the timeout expired.
    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire an exclusive lock until a deadline (instant) is reached.
    ///
    /// Returns `true` if the lock was acquired before the deadline passed.
    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
}
167 
/// Additional methods for RwLocks which support recursive read locks.
///
/// These are guaranteed to succeed without blocking if
/// another read lock is held at the time of the call. This allows a thread
/// to recursively lock a `RwLock`. However using this method can cause
/// writers to starve since readers no longer block if a writer is waiting
/// for the lock.
pub unsafe trait RawRwLockRecursive: RawRwLock {
    /// Acquires a shared lock without deadlocking in case of a recursive lock.
    fn lock_shared_recursive(&self);

    /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock.
    ///
    /// Returns `true` if the lock was successfully acquired.
    fn try_lock_shared_recursive(&self) -> bool;
}
182 
/// Additional methods for RwLocks which support recursive read locks and timeouts.
pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
    /// Attempts to acquire a shared lock until a timeout is reached, without
    /// deadlocking in case of a recursive lock.
    ///
    /// Returns `true` if the lock was acquired before the timeout expired.
    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire a shared lock until a deadline (instant) is reached,
    /// without deadlocking in case of a recursive lock.
    ///
    /// Returns `true` if the lock was acquired before the deadline passed.
    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
}
193 
/// Additional methods for RwLocks which support atomically upgrading a shared
/// lock to an exclusive lock.
///
/// This requires acquiring a special "upgradable read lock" instead of a
/// normal shared lock. There may only be one upgradable lock at any time,
/// otherwise deadlocks could occur when upgrading.
pub unsafe trait RawRwLockUpgrade: RawRwLock {
    /// Acquires an upgradable lock, blocking the current thread until it is able to do so.
    fn lock_upgradable(&self);

    /// Attempts to acquire an upgradable lock without blocking.
    ///
    /// Returns `true` if the lock was successfully acquired.
    fn try_lock_upgradable(&self) -> bool;

    /// Releases an upgradable lock.
    ///
    /// # Safety
    ///
    /// This method may only be called if an upgradable lock is held in the current context.
    unsafe fn unlock_upgradable(&self);

    /// Upgrades an upgradable lock to an exclusive lock.
    ///
    /// After this call the caller holds an exclusive lock and must release it
    /// with `unlock_exclusive`.
    ///
    /// # Safety
    ///
    /// This method may only be called if an upgradable lock is held in the current context.
    unsafe fn upgrade(&self);

    /// Attempts to upgrade an upgradable lock to an exclusive lock without
    /// blocking.
    ///
    /// Returns `true` if the upgrade succeeded; on failure the upgradable lock
    /// is still held.
    ///
    /// # Safety
    ///
    /// This method may only be called if an upgradable lock is held in the current context.
    unsafe fn try_upgrade(&self) -> bool;
}
229 
/// Additional methods for RwLocks which support upgradable locks and fair
/// unlocking.
pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
    /// Releases an upgradable lock using a fair unlock protocol.
    ///
    /// # Safety
    ///
    /// This method may only be called if an upgradable lock is held in the current context.
    unsafe fn unlock_upgradable_fair(&self);

    /// Temporarily yields an upgradable lock to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
    /// by `lock_upgradable`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    ///
    /// # Safety
    ///
    /// This method may only be called if an upgradable lock is held in the current context.
    unsafe fn bump_upgradable(&self) {
        // Default implementation: fair unlock then blocking re-acquire.
        // Implementors can override with a fast path for the uncontended case.
        self.unlock_upgradable_fair();
        self.lock_upgradable();
    }
}
254 
/// Additional methods for RwLocks which support upgradable locks and lock
/// downgrading.
pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
    /// Downgrades an upgradable lock to a shared lock.
    ///
    /// After this call the caller holds a shared lock and must release it with
    /// `unlock_shared`.
    ///
    /// # Safety
    ///
    /// This method may only be called if an upgradable lock is held in the current context.
    unsafe fn downgrade_upgradable(&self);

    /// Downgrades an exclusive lock to an upgradable lock.
    ///
    /// After this call the caller holds an upgradable lock and must release it
    /// with `unlock_upgradable`.
    ///
    /// # Safety
    ///
    /// This method may only be called if an exclusive lock is held in the current context.
    unsafe fn downgrade_to_upgradable(&self);
}
272 
/// Additional methods for RwLocks which support upgradable locks and locking
/// with timeouts.
pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
    /// Attempts to acquire an upgradable lock until a timeout is reached.
    ///
    /// Returns `true` if the lock was acquired before the timeout expired.
    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to acquire an upgradable lock until a deadline (instant) is reached.
    ///
    /// Returns `true` if the lock was acquired before the deadline passed.
    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;

    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
    /// timeout is reached.
    ///
    /// Returns `true` if the upgrade succeeded; on failure the upgradable lock
    /// is still held.
    ///
    /// # Safety
    ///
    /// This method may only be called if an upgradable lock is held in the current context.
    unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;

    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
    /// deadline (instant) is reached.
    ///
    /// Returns `true` if the upgrade succeeded; on failure the upgradable lock
    /// is still held.
    ///
    /// # Safety
    ///
    /// This method may only be called if an upgradable lock is held in the current context.
    unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
}
298 
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
pub struct RwLock<R, T: ?Sized> {
    // The raw lock providing the actual synchronization.
    raw: R,
    // The protected data; `UnsafeCell` permits mutation through `&self`
    // once the appropriate lock is held.
    data: UnsafeCell<T>,
}
315 
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, T> Serialize for RwLock<R, T>
where
    R: RawRwLock,
    T: Serialize + ?Sized,
{
    /// Serializes the protected data while holding a shared read lock, so the
    /// value cannot be mutated concurrently during serialization.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let guard = self.read();
        guard.serialize(serializer)
    }
}
330 
#[cfg(feature = "serde")]
impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
where
    R: RawRwLock,
    T: Deserialize<'de> + ?Sized,
{
    /// Deserializes a value and wraps it in a fresh, unlocked `RwLock`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = Deserialize::deserialize(deserializer)?;
        Ok(RwLock::new(value))
    }
}
344 
// SAFETY: An `RwLock` can be transferred to another thread when both the raw
// lock and the protected data can be transferred.
unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
// SAFETY: Sharing an `RwLock` hands out `&T` to concurrent readers (hence
// `T: Sync`) and allows moving values out of write guards (hence `T: Send`);
// the raw lock itself is accessed concurrently and so must be `Sync`.
unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}
347 
348 impl<R: RawRwLock, T> RwLock<R, T> {
349     /// Creates a new instance of an `RwLock<T>` which is unlocked.
350     #[cfg(feature = "nightly")]
351     #[inline]
new(val: T) -> RwLock<R, T>352     pub const fn new(val: T) -> RwLock<R, T> {
353         RwLock {
354             data: UnsafeCell::new(val),
355             raw: R::INIT,
356         }
357     }
358 
359     /// Creates a new instance of an `RwLock<T>` which is unlocked.
360     #[cfg(not(feature = "nightly"))]
361     #[inline]
new(val: T) -> RwLock<R, T>362     pub fn new(val: T) -> RwLock<R, T> {
363         RwLock {
364             data: UnsafeCell::new(val),
365             raw: R::INIT,
366         }
367     }
368 
369     /// Consumes this `RwLock`, returning the underlying data.
370     #[inline]
371     #[allow(unused_unsafe)]
into_inner(self) -> T372     pub fn into_inner(self) -> T {
373         unsafe { self.data.into_inner() }
374     }
375 }
376 
377 impl<R, T> RwLock<R, T> {
378     /// Creates a new new instance of an `RwLock<T>` based on a pre-existing
379     /// `RawRwLock<T>`.
380     ///
381     /// This allows creating a `RwLock<T>` in a constant context on stable
382     /// Rust.
383     #[inline]
const_new(raw_rwlock: R, val: T) -> RwLock<R, T>384     pub const fn const_new(raw_rwlock: R, val: T) -> RwLock<R, T> {
385         RwLock {
386             data: UnsafeCell::new(val),
387             raw: raw_rwlock,
388         }
389     }
390 }
391 
392 impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
393     /// # Safety
394     ///
395     /// The lock must be held when calling this method.
396     #[inline]
read_guard(&self) -> RwLockReadGuard<'_, R, T>397     unsafe fn read_guard(&self) -> RwLockReadGuard<'_, R, T> {
398         RwLockReadGuard {
399             rwlock: self,
400             marker: PhantomData,
401         }
402     }
403 
404     /// # Safety
405     ///
406     /// The lock must be held when calling this method.
407     #[inline]
write_guard(&self) -> RwLockWriteGuard<'_, R, T>408     unsafe fn write_guard(&self) -> RwLockWriteGuard<'_, R, T> {
409         RwLockWriteGuard {
410             rwlock: self,
411             marker: PhantomData,
412         }
413     }
414 
415     /// Locks this `RwLock` with shared read access, blocking the current thread
416     /// until it can be acquired.
417     ///
418     /// The calling thread will be blocked until there are no more writers which
419     /// hold the lock. There may be other readers currently inside the lock when
420     /// this method returns.
421     ///
422     /// Note that attempts to recursively acquire a read lock on a `RwLock` when
423     /// the current thread already holds one may result in a deadlock.
424     ///
425     /// Returns an RAII guard which will release this thread's shared access
426     /// once it is dropped.
427     #[inline]
read(&self) -> RwLockReadGuard<'_, R, T>428     pub fn read(&self) -> RwLockReadGuard<'_, R, T> {
429         self.raw.lock_shared();
430         // SAFETY: The lock is held, as required.
431         unsafe { self.read_guard() }
432     }
433 
434     /// Attempts to acquire this `RwLock` with shared read access.
435     ///
436     /// If the access could not be granted at this time, then `None` is returned.
437     /// Otherwise, an RAII guard is returned which will release the shared access
438     /// when it is dropped.
439     ///
440     /// This function does not block.
441     #[inline]
try_read(&self) -> Option<RwLockReadGuard<'_, R, T>>442     pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> {
443         if self.raw.try_lock_shared() {
444             // SAFETY: The lock is held, as required.
445             Some(unsafe { self.read_guard() })
446         } else {
447             None
448         }
449     }
450 
451     /// Locks this `RwLock` with exclusive write access, blocking the current
452     /// thread until it can be acquired.
453     ///
454     /// This function will not return while other writers or other readers
455     /// currently have access to the lock.
456     ///
457     /// Returns an RAII guard which will drop the write access of this `RwLock`
458     /// when dropped.
459     #[inline]
write(&self) -> RwLockWriteGuard<'_, R, T>460     pub fn write(&self) -> RwLockWriteGuard<'_, R, T> {
461         self.raw.lock_exclusive();
462         // SAFETY: The lock is held, as required.
463         unsafe { self.write_guard() }
464     }
465 
466     /// Attempts to lock this `RwLock` with exclusive write access.
467     ///
468     /// If the lock could not be acquired at this time, then `None` is returned.
469     /// Otherwise, an RAII guard is returned which will release the lock when
470     /// it is dropped.
471     ///
472     /// This function does not block.
473     #[inline]
try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>>474     pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> {
475         if self.raw.try_lock_exclusive() {
476             // SAFETY: The lock is held, as required.
477             Some(unsafe { self.write_guard() })
478         } else {
479             None
480         }
481     }
482 
483     /// Returns a mutable reference to the underlying data.
484     ///
485     /// Since this call borrows the `RwLock` mutably, no actual locking needs to
486     /// take place---the mutable borrow statically guarantees no locks exist.
487     #[inline]
get_mut(&mut self) -> &mut T488     pub fn get_mut(&mut self) -> &mut T {
489         unsafe { &mut *self.data.get() }
490     }
491 
492     /// Checks whether this `RwLock` is currently locked in any way.
493     #[inline]
is_locked(&self) -> bool494     pub fn is_locked(&self) -> bool {
495         self.raw.is_locked()
496     }
497 
498     /// Forcibly unlocks a read lock.
499     ///
500     /// This is useful when combined with `mem::forget` to hold a lock without
501     /// the need to maintain a `RwLockReadGuard` object alive, for example when
502     /// dealing with FFI.
503     ///
504     /// # Safety
505     ///
506     /// This method must only be called if the current thread logically owns a
507     /// `RwLockReadGuard` but that guard has be discarded using `mem::forget`.
508     /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
509     #[inline]
force_unlock_read(&self)510     pub unsafe fn force_unlock_read(&self) {
511         self.raw.unlock_shared();
512     }
513 
514     /// Forcibly unlocks a write lock.
515     ///
516     /// This is useful when combined with `mem::forget` to hold a lock without
517     /// the need to maintain a `RwLockWriteGuard` object alive, for example when
518     /// dealing with FFI.
519     ///
520     /// # Safety
521     ///
522     /// This method must only be called if the current thread logically owns a
523     /// `RwLockWriteGuard` but that guard has be discarded using `mem::forget`.
524     /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
525     #[inline]
force_unlock_write(&self)526     pub unsafe fn force_unlock_write(&self) {
527         self.raw.unlock_exclusive();
528     }
529 
530     /// Returns the underlying raw reader-writer lock object.
531     ///
532     /// Note that you will most likely need to import the `RawRwLock` trait from
533     /// `lock_api` to be able to call functions on the raw
534     /// reader-writer lock.
535     ///
536     /// # Safety
537     ///
538     /// This method is unsafe because it allows unlocking a mutex while
539     /// still holding a reference to a lock guard.
raw(&self) -> &R540     pub unsafe fn raw(&self) -> &R {
541         &self.raw
542     }
543 
544     /// Returns a raw pointer to the underlying data.
545     ///
546     /// This is useful when combined with `mem::forget` to hold a lock without
547     /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object
548     /// alive, for example when dealing with FFI.
549     ///
550     /// # Safety
551     ///
552     /// You must ensure that there are no data races when dereferencing the
553     /// returned pointer, for example if the current thread logically owns a
554     /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded
555     /// using `mem::forget`.
556     #[inline]
data_ptr(&self) -> *mut T557     pub fn data_ptr(&self) -> *mut T {
558         self.data.get()
559     }
560 }
561 
562 impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
563     /// Forcibly unlocks a read lock using a fair unlock procotol.
564     ///
565     /// This is useful when combined with `mem::forget` to hold a lock without
566     /// the need to maintain a `RwLockReadGuard` object alive, for example when
567     /// dealing with FFI.
568     ///
569     /// # Safety
570     ///
571     /// This method must only be called if the current thread logically owns a
572     /// `RwLockReadGuard` but that guard has be discarded using `mem::forget`.
573     /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
574     #[inline]
force_unlock_read_fair(&self)575     pub unsafe fn force_unlock_read_fair(&self) {
576         self.raw.unlock_shared_fair();
577     }
578 
579     /// Forcibly unlocks a write lock using a fair unlock procotol.
580     ///
581     /// This is useful when combined with `mem::forget` to hold a lock without
582     /// the need to maintain a `RwLockWriteGuard` object alive, for example when
583     /// dealing with FFI.
584     ///
585     /// # Safety
586     ///
587     /// This method must only be called if the current thread logically owns a
588     /// `RwLockWriteGuard` but that guard has be discarded using `mem::forget`.
589     /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
590     #[inline]
force_unlock_write_fair(&self)591     pub unsafe fn force_unlock_write_fair(&self) {
592         self.raw.unlock_exclusive_fair();
593     }
594 }
595 
596 impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
597     /// Attempts to acquire this `RwLock` with shared read access until a timeout
598     /// is reached.
599     ///
600     /// If the access could not be granted before the timeout expires, then
601     /// `None` is returned. Otherwise, an RAII guard is returned which will
602     /// release the shared access when it is dropped.
603     #[inline]
try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>>604     pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> {
605         if self.raw.try_lock_shared_for(timeout) {
606             // SAFETY: The lock is held, as required.
607             Some(unsafe { self.read_guard() })
608         } else {
609             None
610         }
611     }
612 
613     /// Attempts to acquire this `RwLock` with shared read access until a timeout
614     /// is reached.
615     ///
616     /// If the access could not be granted before the timeout expires, then
617     /// `None` is returned. Otherwise, an RAII guard is returned which will
618     /// release the shared access when it is dropped.
619     #[inline]
try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>>620     pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> {
621         if self.raw.try_lock_shared_until(timeout) {
622             // SAFETY: The lock is held, as required.
623             Some(unsafe { self.read_guard() })
624         } else {
625             None
626         }
627     }
628 
629     /// Attempts to acquire this `RwLock` with exclusive write access until a
630     /// timeout is reached.
631     ///
632     /// If the access could not be granted before the timeout expires, then
633     /// `None` is returned. Otherwise, an RAII guard is returned which will
634     /// release the exclusive access when it is dropped.
635     #[inline]
try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>>636     pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> {
637         if self.raw.try_lock_exclusive_for(timeout) {
638             // SAFETY: The lock is held, as required.
639             Some(unsafe { self.write_guard() })
640         } else {
641             None
642         }
643     }
644 
645     /// Attempts to acquire this `RwLock` with exclusive write access until a
646     /// timeout is reached.
647     ///
648     /// If the access could not be granted before the timeout expires, then
649     /// `None` is returned. Otherwise, an RAII guard is returned which will
650     /// release the exclusive access when it is dropped.
651     #[inline]
try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>>652     pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> {
653         if self.raw.try_lock_exclusive_until(timeout) {
654             // SAFETY: The lock is held, as required.
655             Some(unsafe { self.write_guard() })
656         } else {
657             None
658         }
659     }
660 }
661 
662 impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
663     /// Locks this `RwLock` with shared read access, blocking the current thread
664     /// until it can be acquired.
665     ///
666     /// The calling thread will be blocked until there are no more writers which
667     /// hold the lock. There may be other readers currently inside the lock when
668     /// this method returns.
669     ///
670     /// Unlike `read`, this method is guaranteed to succeed without blocking if
671     /// another read lock is held at the time of the call. This allows a thread
672     /// to recursively lock a `RwLock`. However using this method can cause
673     /// writers to starve since readers no longer block if a writer is waiting
674     /// for the lock.
675     ///
676     /// Returns an RAII guard which will release this thread's shared access
677     /// once it is dropped.
678     #[inline]
read_recursive(&self) -> RwLockReadGuard<'_, R, T>679     pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> {
680         self.raw.lock_shared_recursive();
681         // SAFETY: The lock is held, as required.
682         unsafe { self.read_guard() }
683     }
684 
685     /// Attempts to acquire this `RwLock` with shared read access.
686     ///
687     /// If the access could not be granted at this time, then `None` is returned.
688     /// Otherwise, an RAII guard is returned which will release the shared access
689     /// when it is dropped.
690     ///
691     /// This method is guaranteed to succeed if another read lock is held at the
692     /// time of the call. See the documentation for `read_recursive` for details.
693     ///
694     /// This function does not block.
695     #[inline]
try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>>696     pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> {
697         if self.raw.try_lock_shared_recursive() {
698             // SAFETY: The lock is held, as required.
699             Some(unsafe { self.read_guard() })
700         } else {
701             None
702         }
703     }
704 }
705 
706 impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
707     /// Attempts to acquire this `RwLock` with shared read access until a timeout
708     /// is reached.
709     ///
710     /// If the access could not be granted before the timeout expires, then
711     /// `None` is returned. Otherwise, an RAII guard is returned which will
712     /// release the shared access when it is dropped.
713     ///
714     /// This method is guaranteed to succeed without blocking if another read
715     /// lock is held at the time of the call. See the documentation for
716     /// `read_recursive` for details.
717     #[inline]
try_read_recursive_for( &self, timeout: R::Duration, ) -> Option<RwLockReadGuard<'_, R, T>>718     pub fn try_read_recursive_for(
719         &self,
720         timeout: R::Duration,
721     ) -> Option<RwLockReadGuard<'_, R, T>> {
722         if self.raw.try_lock_shared_recursive_for(timeout) {
723             // SAFETY: The lock is held, as required.
724             Some(unsafe { self.read_guard() })
725         } else {
726             None
727         }
728     }
729 
730     /// Attempts to acquire this `RwLock` with shared read access until a timeout
731     /// is reached.
732     ///
733     /// If the access could not be granted before the timeout expires, then
734     /// `None` is returned. Otherwise, an RAII guard is returned which will
735     /// release the shared access when it is dropped.
736     #[inline]
try_read_recursive_until( &self, timeout: R::Instant, ) -> Option<RwLockReadGuard<'_, R, T>>737     pub fn try_read_recursive_until(
738         &self,
739         timeout: R::Instant,
740     ) -> Option<RwLockReadGuard<'_, R, T>> {
741         if self.raw.try_lock_shared_recursive_until(timeout) {
742             // SAFETY: The lock is held, as required.
743             Some(unsafe { self.read_guard() })
744         } else {
745             None
746         }
747     }
748 }
749 
750 impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
751     /// # Safety
752     ///
753     /// The lock must be held when calling this method.
754     #[inline]
upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T>755     unsafe fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
756         RwLockUpgradableReadGuard {
757             rwlock: self,
758             marker: PhantomData,
759         }
760     }
761 
762     /// Locks this `RwLock` with upgradable read access, blocking the current thread
763     /// until it can be acquired.
764     ///
765     /// The calling thread will be blocked until there are no more writers or other
766     /// upgradable reads which hold the lock. There may be other readers currently
767     /// inside the lock when this method returns.
768     ///
769     /// Returns an RAII guard which will release this thread's shared access
770     /// once it is dropped.
771     #[inline]
upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T>772     pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
773         self.raw.lock_upgradable();
774         // SAFETY: The lock is held, as required.
775         unsafe { self.upgradable_guard() }
776     }
777 
778     /// Attempts to acquire this `RwLock` with upgradable read access.
779     ///
780     /// If the access could not be granted at this time, then `None` is returned.
781     /// Otherwise, an RAII guard is returned which will release the shared access
782     /// when it is dropped.
783     ///
784     /// This function does not block.
785     #[inline]
try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>>786     pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
787         if self.raw.try_lock_upgradable() {
788             // SAFETY: The lock is held, as required.
789             Some(unsafe { self.upgradable_guard() })
790         } else {
791             None
792         }
793     }
794 }
795 
796 impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
797     /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
798     /// is reached.
799     ///
800     /// If the access could not be granted before the timeout expires, then
801     /// `None` is returned. Otherwise, an RAII guard is returned which will
802     /// release the shared access when it is dropped.
803     #[inline]
try_upgradable_read_for( &self, timeout: R::Duration, ) -> Option<RwLockUpgradableReadGuard<'_, R, T>>804     pub fn try_upgradable_read_for(
805         &self,
806         timeout: R::Duration,
807     ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
808         if self.raw.try_lock_upgradable_for(timeout) {
809             // SAFETY: The lock is held, as required.
810             Some(unsafe { self.upgradable_guard() })
811         } else {
812             None
813         }
814     }
815 
816     /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
817     /// is reached.
818     ///
819     /// If the access could not be granted before the timeout expires, then
820     /// `None` is returned. Otherwise, an RAII guard is returned which will
821     /// release the shared access when it is dropped.
822     #[inline]
try_upgradable_read_until( &self, timeout: R::Instant, ) -> Option<RwLockUpgradableReadGuard<'_, R, T>>823     pub fn try_upgradable_read_until(
824         &self,
825         timeout: R::Instant,
826     ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
827         if self.raw.try_lock_upgradable_until(timeout) {
828             // SAFETY: The lock is held, as required.
829             Some(unsafe { self.upgradable_guard() })
830         } else {
831             None
832         }
833     }
834 }
835 
836 impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> {
837     #[inline]
default() -> RwLock<R, T>838     fn default() -> RwLock<R, T> {
839         RwLock::new(Default::default())
840     }
841 }
842 
843 impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
844     #[inline]
from(t: T) -> RwLock<R, T>845     fn from(t: T) -> RwLock<R, T> {
846         RwLock::new(t)
847     }
848 }
849 
850 impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result851     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
852         match self.try_read() {
853             Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
854             None => {
855                 struct LockedPlaceholder;
856                 impl fmt::Debug for LockedPlaceholder {
857                     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
858                         f.write_str("<locked>")
859                     }
860                 }
861 
862                 f.debug_struct("RwLock")
863                     .field("data", &LockedPlaceholder)
864                     .finish()
865             }
866         }
867     }
868 }
869 
/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
    // The lock this guard releases (via `unlock_shared`) when dropped.
    rwlock: &'a RwLock<R, T>,
    // `R::GuardMarker` determines whether the guard is `Send`; `&'a T` ties
    // the guard's auto-traits and variance to the protected data.
    marker: PhantomData<(&'a T, R::GuardMarker)>,
}
877 
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.rwlock.raw;
        // SAFETY: this guard holds a shared lock, so a shared borrow of the
        // protected data is valid for the guard's lifetime.
        let data = f(unsafe { &*s.rwlock.data.get() });
        // Skip `drop` so the shared lock stays held; the mapped guard takes
        // over responsibility for releasing it.
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.rwlock.raw;
        // SAFETY: this guard holds a shared lock, so a shared borrow of the
        // protected data is valid for the guard's lifetime.
        let data = match f(unsafe { &*s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        // Skip `drop` so the shared lock stays held; the mapped guard takes
        // over responsibility for releasing it.
        mem::forget(s);
        Ok(MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using the default (unfair) unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockReadGuard always holds a shared lock.
        unsafe {
            s.rwlock.raw.unlock_shared();
        }
        // Re-acquire the shared lock even if `f` panics.
        defer!(s.rwlock.raw.lock_shared());
        f()
    }
}
953 
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: An RwLockReadGuard always holds a shared lock.
        unsafe {
            s.rwlock.raw.unlock_shared_fair();
        }
        // The lock has already been released; skip the guard's `Drop` so it
        // is not released twice.
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockReadGuard always holds a shared lock.
        unsafe {
            s.rwlock.raw.unlock_shared_fair();
        }
        // Re-acquire the shared lock even if `f` panics.
        defer!(s.rwlock.raw.lock_shared());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `read`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: An RwLockReadGuard always holds a shared lock.
        unsafe {
            s.rwlock.raw.bump_shared();
        }
    }
}
1008 
1009 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> {
1010     type Target = T;
1011     #[inline]
deref(&self) -> &T1012     fn deref(&self) -> &T {
1013         unsafe { &*self.rwlock.data.get() }
1014     }
1015 }
1016 
1017 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
1018     #[inline]
drop(&mut self)1019     fn drop(&mut self) {
1020         // Safety: An RwLockReadGuard always holds a shared lock.
1021         unsafe {
1022             self.rwlock.raw.unlock_shared();
1023         }
1024     }
1025 }
1026 
1027 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1028     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1029         fmt::Debug::fmt(&**self, f)
1030     }
1031 }
1032 
1033 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1034     for RwLockReadGuard<'a, R, T>
1035 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1036     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1037         (**self).fmt(f)
1038     }
1039 }
1040 
// SAFETY: the guard's `Deref` returns a reference derived from
// `rwlock.data.get()`, i.e. into the `RwLock` itself, so the target address
// does not change when the guard is moved.
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}
1043 
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
    // The lock this guard releases (via `unlock_exclusive`) when dropped.
    rwlock: &'a RwLock<R, T>,
    // `R::GuardMarker` determines whether the guard is `Send`; `&'a mut T`
    // reflects the exclusive access this guard grants to the data.
    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
}
1051 
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = &s.rwlock.raw;
        // SAFETY: this guard holds the exclusive lock, so a mutable borrow of
        // the protected data is valid for the guard's lifetime.
        let data = f(unsafe { &mut *s.rwlock.data.get() });
        // Skip `drop` so the exclusive lock stays held; the mapped guard takes
        // over responsibility for releasing it.
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = &s.rwlock.raw;
        // SAFETY: this guard holds the exclusive lock, so a mutable borrow of
        // the protected data is valid for the guard's lifetime.
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        // Skip `drop` so the exclusive lock stays held; the mapped guard takes
        // over responsibility for releasing it.
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive();
        }
        // Re-acquire the exclusive lock even if `f` panics.
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }
}
1125 
impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.downgrade();
        }
        let rwlock = s.rwlock;
        // Skip `drop` (which would release the lock): ownership of the now
        // shared lock is transferred to the read guard below.
        mem::forget(s);
        RwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}
1146 
impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.downgrade_to_upgradable();
        }
        let rwlock = s.rwlock;
        // Skip `drop` (which would release the lock): ownership of the now
        // upgradable lock is transferred to the guard below.
        mem::forget(s);
        RwLockUpgradableReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}
1167 
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockWriteGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive_fair();
        }
        // The lock has already been released; skip the guard's `Drop` so it
        // is not released twice.
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive_fair();
        }
        // Re-acquire the exclusive lock even if `f` panics.
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `write`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.bump_exclusive();
        }
    }
}
1222 
1223 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
1224     type Target = T;
1225     #[inline]
deref(&self) -> &T1226     fn deref(&self) -> &T {
1227         unsafe { &*self.rwlock.data.get() }
1228     }
1229 }
1230 
1231 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
1232     #[inline]
deref_mut(&mut self) -> &mut T1233     fn deref_mut(&mut self) -> &mut T {
1234         unsafe { &mut *self.rwlock.data.get() }
1235     }
1236 }
1237 
1238 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
1239     #[inline]
drop(&mut self)1240     fn drop(&mut self) {
1241         // Safety: An RwLockWriteGuard always holds an exclusive lock.
1242         unsafe {
1243             self.rwlock.raw.unlock_exclusive();
1244         }
1245     }
1246 }
1247 
1248 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1249     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1250         fmt::Debug::fmt(&**self, f)
1251     }
1252 }
1253 
1254 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1255     for RwLockWriteGuard<'a, R, T>
1256 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1257     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1258         (**self).fmt(f)
1259     }
1260 }
1261 
// SAFETY: the guard's `Deref`/`DerefMut` return references derived from
// `rwlock.data.get()`, i.e. into the `RwLock` itself, so the target address
// does not change when the guard is moved.
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
1264 
/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> {
    // The lock this guard releases (via `unlock_upgradable`) when dropped.
    rwlock: &'a RwLock<R, T>,
    // `R::GuardMarker` determines whether the guard is `Send`; `&'a T` ties
    // the guard's auto-traits and variance to the protected data.
    marker: PhantomData<(&'a T, R::GuardMarker)>,
}
1272 
// SAFETY: NOTE(review): sharing the guard between threads only exposes `&T`
// (via `Deref`), which is why `T: Sync` is required here — confirm this
// matches the reasoning used for the other guard types.
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
    for RwLockUpgradableReadGuard<'a, R, T>
{
}
1277 
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable();
        }
        // Re-acquire the upgradable lock even if `f` panics.
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
    /// blocking the current thread until it can be acquired.
    pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.upgrade();
        }
        let rwlock = s.rwlock;
        // Skip `drop`: the now exclusive lock is owned by the write guard below.
        mem::forget(s);
        RwLockWriteGuard {
            rwlock,
            marker: PhantomData,
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
    ///
    /// If the access could not be granted at this time, then the current guard is returned.
    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        if unsafe { s.rwlock.raw.try_upgrade() } {
            let rwlock = s.rwlock;
            // Skip `drop`: the now exclusive lock is owned by the write guard below.
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}
1333 
impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable_fair();
        }
        // The lock has already been released; skip the guard's `Drop` so it
        // is not released twice.
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable_fair();
        }
        // Re-acquire the upgradable lock even if `f` panics.
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `upgradable_read`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.bump_upgradable();
        }
    }
}
1388 
impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Atomically downgrades an upgradable read lock into a shared read lock
    /// without allowing any writers to take exclusive access of the lock in the
    /// meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.downgrade_upgradable();
        }
        let rwlock = s.rwlock;
        // Skip `drop` (which would release the lock): ownership of the now
        // shared lock is transferred to the read guard below.
        mem::forget(s);
        RwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}
1410 
1411 impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
1412     /// Tries to atomically upgrade an upgradable read lock into a exclusive
1413     /// write lock, until a timeout is reached.
1414     ///
1415     /// If the access could not be granted before the timeout expires, then
1416     /// the current guard is returned.
try_upgrade_for( s: Self, timeout: R::Duration, ) -> Result<RwLockWriteGuard<'a, R, T>, Self>1417     pub fn try_upgrade_for(
1418         s: Self,
1419         timeout: R::Duration,
1420     ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
1421         // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
1422         if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
1423             let rwlock = s.rwlock;
1424             mem::forget(s);
1425             Ok(RwLockWriteGuard {
1426                 rwlock,
1427                 marker: PhantomData,
1428             })
1429         } else {
1430             Err(s)
1431         }
1432     }
1433 
1434     /// Tries to atomically upgrade an upgradable read lock into a exclusive
1435     /// write lock, until a timeout is reached.
1436     ///
1437     /// If the access could not be granted before the timeout expires, then
1438     /// the current guard is returned.
1439     #[inline]
try_upgrade_until( s: Self, timeout: R::Instant, ) -> Result<RwLockWriteGuard<'a, R, T>, Self>1440     pub fn try_upgrade_until(
1441         s: Self,
1442         timeout: R::Instant,
1443     ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
1444         // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
1445         if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
1446             let rwlock = s.rwlock;
1447             mem::forget(s);
1448             Ok(RwLockWriteGuard {
1449                 rwlock,
1450                 marker: PhantomData,
1451             })
1452         } else {
1453             Err(s)
1454         }
1455     }
1456 }
1457 
1458 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
1459     type Target = T;
1460     #[inline]
deref(&self) -> &T1461     fn deref(&self) -> &T {
1462         unsafe { &*self.rwlock.data.get() }
1463     }
1464 }
1465 
1466 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
1467     #[inline]
drop(&mut self)1468     fn drop(&mut self) {
1469         // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
1470         unsafe {
1471             self.rwlock.raw.unlock_upgradable();
1472         }
1473     }
1474 }
1475 
1476 impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
1477     for RwLockUpgradableReadGuard<'a, R, T>
1478 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1479     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1480         fmt::Debug::fmt(&**self, f)
1481     }
1482 }
1483 
1484 impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1485     for RwLockUpgradableReadGuard<'a, R, T>
1486 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1487     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1488         (**self).fmt(f)
1489     }
1490 }
1491 
#[cfg(feature = "owning_ref")]
// SAFETY: the guard dereferences to data stored inside the `RwLock` it
// borrows; that address does not change for the lifetime of the guard.
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
    for RwLockUpgradableReadGuard<'a, R, T>
{
}
1497 
/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
    /// The raw lock; this guard owns one shared-lock acquisition on it and
    /// releases it on drop.
    raw: &'a R,
    /// Pointer to the mapped component inside the lock-protected data.
    data: *const T,
    /// Ties the guard's lifetime to the borrowed data.
    marker: PhantomData<&'a T>,
}
1511 
// SAFETY: sharing the guard between threads only ever hands out `&T`
// (via Deref), which is sound exactly when `T: Sync`.
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
// SAFETY: sending the guard moves ownership of the shared-lock acquisition to
// another thread; the raw lock opts into this via `R::GuardMarker: Send`.
// NOTE(review): `T: Sync` (not `Send`) is the right bound here because the
// guard only grants `&T` access — the `T` itself never moves with the guard.
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
    R::GuardMarker: Send
{
}
1517 
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        // Safety: A MappedRwLockReadGuard always holds a shared lock, so the
        // closure may be handed a shared reference to the data.
        let data = f(unsafe { &*s.data });
        // Defuse `s`'s destructor: ownership of the shared lock transfers to
        // the new guard returned below, which will release it on drop.
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// The `RwLock` remains locked in either case, since the
    /// `MappedRwLockReadGuard` passed in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        // Safety: A MappedRwLockReadGuard always holds a shared lock.
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            // Closure declined; hand the untouched guard back to the caller.
            None => return Err(s),
        };
        // Ownership of the shared lock transfers to the new guard.
        mem::forget(s);
        Ok(MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}
1569 
1570 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
1571     /// Unlocks the `RwLock` using a fair unlock protocol.
1572     ///
1573     /// By default, `RwLock` is unfair and allow the current thread to re-lock
1574     /// the `RwLock` before another has the chance to acquire the lock, even if
1575     /// that thread has been blocked on the `RwLock` for a long time. This is
1576     /// the default because it allows much higher throughput as it avoids
1577     /// forcing a context switch on every `RwLock` unlock. This can result in one
1578     /// thread acquiring a `RwLock` many more times than other threads.
1579     ///
1580     /// However in some cases it can be beneficial to ensure fairness by forcing
1581     /// the lock to pass on to a waiting thread if there is one. This is done by
1582     /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
1583     #[inline]
unlock_fair(s: Self)1584     pub fn unlock_fair(s: Self) {
1585         // Safety: A MappedRwLockReadGuard always holds a shared lock.
1586         unsafe {
1587             s.raw.unlock_shared_fair();
1588         }
1589         mem::forget(s);
1590     }
1591 }
1592 
1593 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> {
1594     type Target = T;
1595     #[inline]
deref(&self) -> &T1596     fn deref(&self) -> &T {
1597         unsafe { &*self.data }
1598     }
1599 }
1600 
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: A MappedRwLockReadGuard always holds a shared lock.
        // Guards consumed by `map`/`try_map`/`unlock_fair` were
        // `mem::forget`-ed and never reach this Drop impl.
        unsafe {
            self.raw.unlock_shared();
        }
    }
}
1610 
1611 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
1612     for MappedRwLockReadGuard<'a, R, T>
1613 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1614     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1615         fmt::Debug::fmt(&**self, f)
1616     }
1617 }
1618 
1619 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1620     for MappedRwLockReadGuard<'a, R, T>
1621 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1622     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1623         (**self).fmt(f)
1624     }
1625 }
1626 
#[cfg(feature = "owning_ref")]
// SAFETY: the guard dereferences through a fixed `*const T` captured when the
// guard was mapped; that address does not change while the guard is alive.
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockReadGuard<'a, R, T>
{
}
1632 
/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
    /// The raw lock; this guard owns one exclusive-lock acquisition on it and
    /// releases it on drop.
    raw: &'a R,
    /// Pointer to the mapped component inside the lock-protected data.
    data: *mut T,
    /// Ties the guard's lifetime to the (mutably) borrowed data.
    marker: PhantomData<&'a mut T>,
}
1646 
// SAFETY: sharing the guard between threads only ever hands out `&T`
// (via Deref; `&mut T` requires `&mut` access to the guard itself), which is
// sound exactly when `T: Sync`.
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedRwLockWriteGuard<'a, R, T>
{
}
// SAFETY: sending the guard moves exclusive access to `T` to another thread,
// hence `T: Send`; the raw lock opts into cross-thread guard movement via
// `R::GuardMarker: Send`.
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Send + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
    R::GuardMarker: Send
{
}
1655 
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = s.raw;
        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock, so
        // the closure may be handed a unique reference to the data.
        let data = f(unsafe { &mut *s.data });
        // Defuse `s`'s destructor: ownership of the exclusive lock transfers
        // to the new guard returned below, which will release it on drop.
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// The `RwLock` remains locked in either case, since the
    /// `MappedRwLockWriteGuard` passed in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = s.raw;
        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
        let data = match f(unsafe { &mut *s.data }) {
            Some(data) => data,
            // Closure declined; hand the untouched guard back to the caller.
            None => return Err(s),
        };
        // Ownership of the exclusive lock transfers to the new guard.
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}
1707 
1708 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
1709     /// Unlocks the `RwLock` using a fair unlock protocol.
1710     ///
1711     /// By default, `RwLock` is unfair and allow the current thread to re-lock
1712     /// the `RwLock` before another has the chance to acquire the lock, even if
1713     /// that thread has been blocked on the `RwLock` for a long time. This is
1714     /// the default because it allows much higher throughput as it avoids
1715     /// forcing a context switch on every `RwLock` unlock. This can result in one
1716     /// thread acquiring a `RwLock` many more times than other threads.
1717     ///
1718     /// However in some cases it can be beneficial to ensure fairness by forcing
1719     /// the lock to pass on to a waiting thread if there is one. This is done by
1720     /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
1721     #[inline]
unlock_fair(s: Self)1722     pub fn unlock_fair(s: Self) {
1723         // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
1724         unsafe {
1725             s.raw.unlock_exclusive_fair();
1726         }
1727         mem::forget(s);
1728     }
1729 }
1730 
1731 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
1732     type Target = T;
1733     #[inline]
deref(&self) -> &T1734     fn deref(&self) -> &T {
1735         unsafe { &*self.data }
1736     }
1737 }
1738 
1739 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
1740     #[inline]
deref_mut(&mut self) -> &mut T1741     fn deref_mut(&mut self) -> &mut T {
1742         unsafe { &mut *self.data }
1743     }
1744 }
1745 
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
        // Guards consumed by `map`/`try_map`/`unlock_fair` were
        // `mem::forget`-ed and never reach this Drop impl.
        unsafe {
            self.raw.unlock_exclusive();
        }
    }
}
1755 
1756 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
1757     for MappedRwLockWriteGuard<'a, R, T>
1758 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1759     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1760         fmt::Debug::fmt(&**self, f)
1761     }
1762 }
1763 
1764 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1765     for MappedRwLockWriteGuard<'a, R, T>
1766 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1767     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1768         (**self).fmt(f)
1769     }
1770 }
1771 
#[cfg(feature = "owning_ref")]
// SAFETY: the guard dereferences through a fixed `*mut T` captured when the
// guard was mapped; that address does not change while the guard is alive.
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockWriteGuard<'a, R, T>
{
}
1777