//! A spinning mutex with a fairer unlock algorithm.
//!
//! This mutex is similar to the `SpinMutex` in that it uses spinning to avoid
//! context switches. However, it uses a fairer unlock algorithm that avoids
//! starvation of threads that are waiting for the lock.
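//!
//! # Example
//!
//! A minimal sketch of the fairness API (see [`FairMutex::starve`] for the full
//! contract):
//!
//! ```
//! let lock = spin::mutex::FairMutex::<_>::new(0);
//!
//! // A thread that has been waiting for a while can declare itself starved...
//! let starve = lock.starve();
//!
//! // ...which gives it priority over newly arriving threads once the lock is free.
//! *starve.lock() += 1;
//! # assert_eq!(*lock.lock(), 1);
//! ```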

use crate::{
    atomic::{AtomicUsize, Ordering},
    RelaxStrategy, Spin,
};
use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    mem::ManuallyDrop,
    ops::{Deref, DerefMut},
};

// The lowest bit of `lock` is used to indicate whether the mutex is locked or not. The rest of the bits are used to
// store the number of starving threads.
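//
// For example, a `lock` value of `0b101` (i.e. `LOCKED + 2 * STARVED`) means the
// mutex is currently held and two threads have registered themselves as starving.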
const LOCKED: usize = 1;
const STARVED: usize = 2;

/// Number chosen by fair roll of the dice, adjust as needed.
const STARVATION_SPINS: usize = 1024;

/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data, but with a fairer
/// algorithm.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::FairMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::FairMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// for _ in 0..thread_count {
///     let my_barrier = barrier.clone();
///     let my_lock = spin_mutex.clone();
///     std::thread::spawn(move || {
///         let mut guard = my_lock.lock();
///         *guard += 1;
///
///         // Release the lock to prevent a deadlock
///         drop(guard);
///         my_barrier.wait();
///     });
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
/// ```
pub struct FairMutex<T: ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    pub(crate) lock: AtomicUsize,
    data: UnsafeCell<T>,
}

/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct FairMutexGuard<'a, T: ?Sized + 'a> {
    lock: &'a AtomicUsize,
    data: *mut T,
}

/// A handle that indicates that we have been trying to acquire the lock for a while.
///
/// This handle is used to prevent starvation.
pub struct Starvation<'a, T: ?Sized + 'a, R> {
    lock: &'a FairMutex<T, R>,
}

/// Indicates whether a lock was rejected due to the lock being held by another thread or due to starvation.
#[derive(Debug)]
pub enum LockRejectReason {
    /// The lock was rejected due to the lock being held by another thread.
    Locked,

    /// The lock was rejected due to starvation.
    Starved,
}

// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send, R> Sync for FairMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for FairMutex<T, R> {}

unsafe impl<T: ?Sized + Sync> Sync for FairMutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for FairMutexGuard<'_, T> {}

impl<T, R> FairMutex<T, R> {
    /// Creates a new [`FairMutex`] wrapping the supplied data.
    ///
    /// # Example
    ///
    /// ```
    /// use spin::mutex::FairMutex;
    ///
    /// static MUTEX: FairMutex<()> = FairMutex::<_>::new(());
    ///
    /// fn demo() {
    ///     let lock = MUTEX.lock();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline(always)]
    pub const fn new(data: T) -> Self {
        FairMutex {
            lock: AtomicUsize::new(0),
            data: UnsafeCell::new(data),
            phantom: PhantomData,
        }
    }

    /// Consumes this [`FairMutex`] and unwraps the underlying data.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(42);
    /// assert_eq!(42, lock.into_inner());
    /// ```
    #[inline(always)]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let FairMutex { data, .. } = self;
        data.into_inner()
    }

    /// Returns a mutable pointer to the underlying data.
    ///
    /// This is mostly meant to be used for applications which require manual unlocking, but where
    /// storing both the lock and the pointer to the inner data gets inefficient.
    ///
    /// # Example
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(42);
    ///
    /// unsafe {
    ///     core::mem::forget(lock.lock());
    ///
    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
    ///     lock.as_mut_ptr().write(58);
    ///
    ///     lock.force_unlock();
    /// }
    ///
    /// assert_eq!(*lock.lock(), 58);
    /// ```
    #[inline(always)]
    pub fn as_mut_ptr(&self) -> *mut T {
        self.data.get()
    }
}

impl<T: ?Sized, R: RelaxStrategy> FairMutex<T, R> {
    /// Locks the [`FairMutex`] and returns a guard that permits access to the inner data.
    ///
    /// The returned value may be dereferenced for data access
    /// and the lock will be dropped when the guard falls out of scope.
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(0);
    /// {
    ///     let mut data = lock.lock();
    ///     // The lock is now locked and the data can be accessed
    ///     *data += 1;
    ///     // The lock is implicitly dropped at the end of the scope
    /// }
    /// ```
    #[inline(always)]
    pub fn lock(&self) -> FairMutexGuard<T> {
        // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
        // when called in a loop.
        let mut spins = 0;
        while self
            .lock
            .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // Wait until the lock looks unlocked before retrying
            while self.is_locked() {
                R::relax();

                // If we've been spinning for a while, switch to a fairer strategy that will prevent
                // newer users from stealing our lock from us.
                if spins > STARVATION_SPINS {
                    return self.starve().lock();
                }
                spins += 1;
            }
        }

        FairMutexGuard {
            lock: &self.lock,
            data: unsafe { &mut *self.data.get() },
        }
    }
}

impl<T: ?Sized, R> FairMutex<T, R> {
    /// Returns `true` if the lock is currently held.
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
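    ///
    /// # Example
    ///
    /// A small sketch of using this as a heuristic only:
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(42);
    /// assert!(!lock.is_locked());
    ///
    /// let guard = lock.lock();
    /// // The result reflects the state at the moment of the load only.
    /// assert!(lock.is_locked());
    /// # drop(guard);
    /// ```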
    #[inline(always)]
    pub fn is_locked(&self) -> bool {
        self.lock.load(Ordering::Relaxed) & LOCKED != 0
    }

    /// Force unlock this [`FairMutex`].
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the lock is not held by the current
    /// thread. However, this can be useful in some instances for exposing the
    /// lock to FFI that doesn't know how to deal with RAII.
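    ///
    /// # Example
    ///
    /// A minimal sketch of manual unlocking after deliberately leaking a guard
    /// (mirroring what FFI-style usage might do):
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(());
    ///
    /// // Forget the guard so that its destructor never releases the lock.
    /// core::mem::forget(lock.lock());
    ///
    /// // We still logically hold the lock, so it is sound to release it by hand.
    /// unsafe { lock.force_unlock(); }
    /// assert!(lock.try_lock().is_some());
    /// ```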
    #[inline(always)]
    pub unsafe fn force_unlock(&self) {
        self.lock.fetch_and(!LOCKED, Ordering::Release);
    }

    /// Try to lock this [`FairMutex`], returning a lock guard if successful.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(42);
    ///
    /// let maybe_guard = lock.try_lock();
    /// assert!(maybe_guard.is_some());
    ///
    /// // `maybe_guard` is still held, so the second call fails
    /// let maybe_guard2 = lock.try_lock();
    /// assert!(maybe_guard2.is_none());
    /// ```
    #[inline(always)]
    pub fn try_lock(&self) -> Option<FairMutexGuard<T>> {
        self.try_lock_starver().ok()
    }

    /// Tries to lock this [`FairMutex`] and returns a result that indicates whether the lock was
    /// rejected due to a starver or not.
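    ///
    /// # Example
    ///
    /// A minimal sketch of the two rejection reasons:
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(42);
    ///
    /// // While the lock is held, attempts are rejected with `LockRejectReason::Locked`.
    /// let guard = lock.lock();
    /// assert!(lock.try_lock_starver().is_err());
    ///
    /// // With a starver registered and the lock free, new arrivals are rejected
    /// // with `LockRejectReason::Starved`.
    /// let starve = lock.starve();
    /// drop(guard);
    /// assert!(lock.try_lock_starver().is_err());
    /// # drop(starve);
    /// ```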
    #[inline(always)]
    pub fn try_lock_starver(&self) -> Result<FairMutexGuard<T>, LockRejectReason> {
        match self
            .lock
            .compare_exchange(0, LOCKED, Ordering::Acquire, Ordering::Relaxed)
            .unwrap_or_else(|x| x)
        {
            0 => Ok(FairMutexGuard {
                lock: &self.lock,
                data: unsafe { &mut *self.data.get() },
            }),
            LOCKED => Err(LockRejectReason::Locked),
            _ => Err(LockRejectReason::Starved),
        }
    }

    /// Indicates that the current user has been waiting for the lock for a while
    /// and that the lock should yield to this thread over a newly arriving thread.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(42);
    ///
    /// // Lock the mutex to simulate it being used by another user.
    /// let guard1 = lock.lock();
    ///
    /// // Try to lock the mutex.
    /// let guard2 = lock.try_lock();
    /// assert!(guard2.is_none());
    ///
    /// // Wait for a while.
    /// wait_for_a_while();
    ///
    /// // We are now starved, indicate as such.
    /// let starve = lock.starve();
    ///
    /// // Once the lock is released, another user trying to lock it will
    /// // fail.
    /// drop(guard1);
    /// let guard3 = lock.try_lock();
    /// assert!(guard3.is_none());
    ///
    /// // However, we will be able to lock it.
    /// let guard4 = starve.try_lock();
    /// assert!(guard4.is_ok());
    ///
    /// # fn wait_for_a_while() {}
    /// ```
    pub fn starve(&self) -> Starvation<'_, T, R> {
        // Add a new starver to the state.
        if self.lock.fetch_add(STARVED, Ordering::Relaxed) > (core::isize::MAX - 1) as usize {
            // In the event of a potential lock overflow, abort.
            crate::abort();
        }

        Starvation { lock: self }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the [`FairMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
    /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
    /// such, this is a 'zero-cost' operation.
    ///
    /// # Example
    ///
    /// ```
    /// let mut lock = spin::mutex::FairMutex::<_>::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner mutex.
        unsafe { &mut *self.data.get() }
    }
}

impl<T: ?Sized + fmt::Debug, R> fmt::Debug for FairMutex<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        struct LockWrapper<'a, T: ?Sized + fmt::Debug>(Option<FairMutexGuard<'a, T>>);

        impl<T: ?Sized + fmt::Debug> fmt::Debug for LockWrapper<'_, T> {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                match &self.0 {
                    Some(guard) => fmt::Debug::fmt(guard, f),
                    None => f.write_str("<locked>"),
                }
            }
        }

        f.debug_struct("FairMutex")
            .field("data", &LockWrapper(self.try_lock()))
            .finish()
    }
}

impl<T: ?Sized + Default, R> Default for FairMutex<T, R> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T, R> From<T> for FairMutex<T, R> {
    fn from(data: T) -> Self {
        Self::new(data)
    }
}

impl<'a, T: ?Sized> FairMutexGuard<'a, T> {
    /// Leak the lock guard, yielding a mutable reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original [`FairMutex`].
    ///
    /// ```
    /// let mylock = spin::mutex::FairMutex::<_>::new(0);
    ///
    /// let data: &mut i32 = spin::mutex::FairMutexGuard::leak(mylock.lock());
    ///
    /// *data = 1;
    /// assert_eq!(*data, 1);
    /// ```
    #[inline(always)]
    pub fn leak(this: Self) -> &'a mut T {
        // Use ManuallyDrop to avoid stacked-borrow invalidation
        let mut this = ManuallyDrop::new(this);
        // We know statically that only we are referencing data
        unsafe { &mut *this.data }
    }
}

impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for FairMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized + fmt::Display> fmt::Display for FairMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized> Deref for FairMutexGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'a, T: ?Sized> DerefMut for FairMutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        // We know statically that only we are referencing data
        unsafe { &mut *self.data }
    }
}

impl<'a, T: ?Sized> Drop for FairMutexGuard<'a, T> {
    /// The dropping of the [`FairMutexGuard`] will release the lock it was created from.
    fn drop(&mut self) {
        self.lock.fetch_and(!LOCKED, Ordering::Release);
    }
}

impl<'a, T: ?Sized, R> Starvation<'a, T, R> {
    /// Attempts to lock the mutex if we are the only starving user.
    ///
    /// This allows another user to lock the mutex if they are starving as well.
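    ///
    /// # Example
    ///
    /// A minimal sketch, assuming no other thread is contending for the lock:
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(42);
    ///
    /// // We are the only starving user and the lock is free, so this succeeds.
    /// let starve = lock.starve();
    /// assert!(starve.try_lock_fair().is_ok());
    /// ```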
    pub fn try_lock_fair(self) -> Result<FairMutexGuard<'a, T>, Self> {
        // Try to lock the mutex.
        if self
            .lock
            .lock
            .compare_exchange(
                STARVED,
                STARVED | LOCKED,
                Ordering::Acquire,
                Ordering::Relaxed,
            )
            .is_ok()
        {
            // We are the only starving user, lock the mutex.
            Ok(FairMutexGuard {
                lock: &self.lock.lock,
                data: self.lock.data.get(),
            })
        } else {
            // Another user is starving, fail.
            Err(self)
        }
    }

    /// Attempts to lock the mutex.
    ///
    /// If the lock is currently held by another thread, this fails and hands the
    /// starvation handle back in the `Err` variant so that it can be retried.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(42);
    ///
    /// // Lock the mutex to simulate it being used by another user.
    /// let guard1 = lock.lock();
    ///
    /// // Try to lock the mutex.
    /// let guard2 = lock.try_lock();
    /// assert!(guard2.is_none());
    ///
    /// // Wait for a while.
    /// wait_for_a_while();
    ///
    /// // We are now starved, indicate as such.
    /// let starve = lock.starve();
    ///
    /// // Once the lock is released, another user trying to lock it will
    /// // fail.
    /// drop(guard1);
    /// let guard3 = lock.try_lock();
    /// assert!(guard3.is_none());
    ///
    /// // However, we will be able to lock it.
    /// let guard4 = starve.try_lock();
    /// assert!(guard4.is_ok());
    ///
    /// # fn wait_for_a_while() {}
    /// ```
    pub fn try_lock(self) -> Result<FairMutexGuard<'a, T>, Self> {
        // Try to lock the mutex.
        if self.lock.lock.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 {
            // We have successfully locked the mutex.
            // By dropping `self` here, we decrement the starvation count.
            Ok(FairMutexGuard {
                lock: &self.lock.lock,
                data: self.lock.data.get(),
            })
        } else {
            Err(self)
        }
    }
}

impl<'a, T: ?Sized, R: RelaxStrategy> Starvation<'a, T, R> {
    /// Locks the mutex.
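    ///
    /// # Example
    ///
    /// A small sketch of the starvation slow path:
    ///
    /// ```
    /// let lock = spin::mutex::FairMutex::<_>::new(0);
    ///
    /// // Register as starved, then spin until the lock is acquired.
    /// let starve = lock.starve();
    /// let mut guard = starve.lock();
    /// *guard += 1;
    /// # drop(guard);
    /// # assert_eq!(*lock.lock(), 1);
    /// ```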
    pub fn lock(mut self) -> FairMutexGuard<'a, T> {
        // Try to lock the mutex.
        loop {
            match self.try_lock() {
                Ok(lock) => return lock,
                Err(starve) => self = starve,
            }

            // Relax until the lock is released.
            while self.lock.is_locked() {
                R::relax();
            }
        }
    }
}

impl<'a, T: ?Sized, R> Drop for Starvation<'a, T, R> {
    fn drop(&mut self) {
        // As there is no longer a user being starved, we decrement the starver count.
        self.lock.lock.fetch_sub(STARVED, Ordering::Release);
    }
}

impl fmt::Display for LockRejectReason {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            LockRejectReason::Locked => write!(f, "locked"),
            LockRejectReason::Starved => write!(f, "starved"),
        }
    }
}

#[cfg(feature = "std")]
impl std::error::Error for LockRejectReason {}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for FairMutex<(), R> {
    type GuardMarker = lock_api_crate::GuardSend;

    const INIT: Self = Self::new(());

    fn lock(&self) {
        // Prevent guard destructor running
        core::mem::forget(Self::lock(self));
    }

    fn try_lock(&self) -> bool {
        // Prevent guard destructor running
        Self::try_lock(self).map(core::mem::forget).is_some()
    }

    unsafe fn unlock(&self) {
        self.force_unlock();
    }

    fn is_locked(&self) -> bool {
        Self::is_locked(self)
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    type FairMutex<T> = super::FairMutex<T>;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let m = FairMutex::<_>::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        static M: FairMutex<()> = FairMutex::<_>::new(());
        static mut CNT: u32 = 0;
        const J: u32 = 1000;
        const K: u32 = 3;

        fn inc() {
            for _ in 0..J {
                unsafe {
                    let _g = M.lock();
                    CNT += 1;
                }
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(unsafe { CNT }, J * K * 2);
    }

    #[test]
    fn try_lock() {
        let mutex = FairMutex::<_>::new(42);

        // First lock succeeds
        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));

        // Additional lock fails
        let b = mutex.try_lock();
        assert!(b.is_none());

        // After dropping lock, it succeeds again
        ::core::mem::drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

    #[test]
    fn test_into_inner() {
        let m = FairMutex::<_>::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = FairMutex::<_>::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(FairMutex::<_>::new(1));
        let arc2 = Arc::new(FairMutex::<_>::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(FairMutex::<_>::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<FairMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &FairMutex<[i32]> = &FairMutex::<_>::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutex_force_lock() {
        let lock = FairMutex::<_>::new(());
        ::std::mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }
}