use std::cell::UnsafeCell;
use std::fmt;
use std::ops;
use std::panic;

/// `AtomicUsize` providing an additional `unsync_load` function.
pub(crate) struct AtomicUsize {
    inner: UnsafeCell<std::sync::atomic::AtomicUsize>,
}

// Safety: the inner `std::sync::atomic::AtomicUsize` is itself `Send + Sync`.
// The `UnsafeCell` wrapper exists only so `unsync_load` can document its
// relaxed contract; all shared-reference access goes through the atomic's own
// synchronized API via `Deref`.
unsafe impl Send for AtomicUsize {}
unsafe impl Sync for AtomicUsize {}
impl panic::RefUnwindSafe for AtomicUsize {}
impl panic::UnwindSafe for AtomicUsize {}

impl AtomicUsize {
    /// Creates a new atomic integer with the given initial value.
    pub(crate) const fn new(val: usize) -> AtomicUsize {
        let inner = UnsafeCell::new(std::sync::atomic::AtomicUsize::new(val));
        AtomicUsize { inner }
    }

    /// Performs an unsynchronized load.
    ///
    /// # Safety
    ///
    /// All mutations must have happened before the unsynchronized load.
    /// Additionally, there must be no concurrent mutations.
    pub(crate) unsafe fn unsync_load(&self) -> usize {
        // See <https://github.com/tokio-rs/tokio/issues/6155>
        self.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Runs `f` with mutable access to the wrapped `usize`.
    pub(crate) fn with_mut<R>(&mut self, f: impl FnOnce(&mut usize) -> R) -> R {
        // `&mut self` guarantees exclusive access, so the safe
        // `UnsafeCell::get_mut` + `AtomicUsize::get_mut` accessors
        // apply — no `unsafe` block required.
        f(self.inner.get_mut().get_mut())
    }
}

impl ops::Deref for AtomicUsize {
    type Target = std::sync::atomic::AtomicUsize;

    fn deref(&self) -> &Self::Target {
        // SAFETY: it is always safe to access `&self` fns on the inner value
        // as we never perform unsafe mutations through a shared reference.
        unsafe { &*self.inner.get() }
    }
}

impl ops::DerefMut for AtomicUsize {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // `&mut self` gives exclusive access; `UnsafeCell::get_mut` is the
        // safe way to reach the interior value.
        self.inner.get_mut()
    }
}

impl fmt::Debug for AtomicUsize {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the inner atomic's `Debug` via `Deref`.
        (**self).fmt(fmt)
    }
}