// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

//! Generic `Atomic<T>` wrapper type
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent types.
//!
//! This library defines a generic atomic wrapper type `Atomic<T>` for all
//! `T: NoUninit` types.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! The `NoUninit` bound comes from the [bytemuck] crate and indicates that a
//! type contains no uninitialized bytes (such as padding). You will need to
//! derive or implement this trait for every type used with `Atomic<T>`.
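//!
//! For example, a minimal sketch of a custom type usable with `Atomic<T>`
//! (the `Point` type is illustrative; its two `u32` fields leave no padding,
//! so `NoUninit` can be derived):
//!
//! ```rust
//! use atomic::{Atomic, Ordering};
//! use bytemuck::NoUninit;
//!
//! // A padding-free type: two u32 fields with #[repr(C)] layout.
//! #[derive(Clone, Copy, NoUninit)]
//! #[repr(C)]
//! struct Point {
//!     x: u32,
//!     y: u32,
//! }
//!
//! let p = Atomic::new(Point { x: 1, y: 2 });
//! assert_eq!(p.load(Ordering::SeqCst).x, 1);
//! ```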
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
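//!
//! For example, a minimal sketch of a shared counter (the thread count and
//! values are arbitrary):
//!
//! ```rust
//! use atomic::{Atomic, Ordering};
//! use std::sync::Arc;
//! use std::thread;
//!
//! // Each thread gets a clone of the Arc and bumps the shared counter.
//! let counter = Arc::new(Atomic::new(0u32));
//! let handles: Vec<_> = (0..4)
//!     .map(|_| {
//!         let counter = Arc::clone(&counter);
//!         thread::spawn(move || {
//!             counter.fetch_add(1, Ordering::SeqCst);
//!         })
//!     })
//!     .collect();
//! for handle in handles {
//!     handle.join().unwrap();
//! }
//! assert_eq!(counter.load(Ordering::SeqCst), 4);
//! ```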
//!
//! Most atomic types may be stored in static variables, initialized using
//! the `const fn` constructors. Atomic statics are often used for lazy global
//! initialization.
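//!
//! For example, a minimal sketch of an atomic static (the `COUNTER` name is
//! illustrative):
//!
//! ```rust
//! use atomic::{Atomic, Ordering};
//!
//! // `Atomic::new` is a `const fn`, so it can initialize a static directly.
//! static COUNTER: Atomic<usize> = Atomic::new(0);
//!
//! COUNTER.fetch_add(1, Ordering::Relaxed);
//! assert_eq!(COUNTER.load(Ordering::Relaxed), 1);
//! ```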
//!
//! [bytemuck]: https://docs.rs/bytemuck

#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#![no_std]
#![cfg_attr(feature = "nightly", feature(integer_atomics))]

#[cfg(any(test, feature = "std"))]
#[macro_use]
extern crate std;

use core::mem::MaybeUninit;
// Re-export some useful definitions from libcore
pub use core::sync::atomic::{fence, Ordering};

use core::cell::UnsafeCell;
use core::fmt;

#[cfg(feature = "std")]
use std::panic::RefUnwindSafe;

use bytemuck::NoUninit;

#[cfg(feature = "fallback")]
mod fallback;
mod ops;

/// A generic atomic wrapper type which allows an object to be safely shared
/// between threads.
#[repr(transparent)]
pub struct Atomic<T> {
    // The MaybeUninit is here to work around rust-lang/rust#87341.
    v: UnsafeCell<MaybeUninit<T>>,
}

// Atomic<T> is only Sync if T is Send
unsafe impl<T: Copy + Send> Sync for Atomic<T> {}

// Given that atomicity is guaranteed, Atomic<T> is RefUnwindSafe if T is
// RefUnwindSafe.
//
// This is trivially correct for native lock-free atomic types. For those whose
// atomicity is emulated using a spinlock, it is still correct because the
// `Atomic` API does not allow doing any panic-inducing operation after writing
// to the target object.
#[cfg(feature = "std")]
impl<T: RefUnwindSafe> RefUnwindSafe for Atomic<T> {}

impl<T: Default> Default for Atomic<T> {
    #[inline]
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T: NoUninit + fmt::Debug> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Atomic")
            .field(&self.load(Ordering::SeqCst))
            .finish()
    }
}

impl<T> Atomic<T> {
    /// Creates a new `Atomic`.
    #[inline]
    pub const fn new(v: T) -> Atomic<T> {
        Atomic {
            v: UnsafeCell::new(MaybeUninit::new(v)),
        }
    }

    /// Checks if `Atomic` objects of this type are lock-free.
    ///
    /// If an `Atomic` is not lock-free then it may be implemented using locks
    /// internally, which makes it unsuitable for some situations (such as
    /// communicating with a signal handler).
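    ///
    /// # Examples
    ///
    /// A brief sketch; the result depends on `T` and on the target platform:
    ///
    /// ```rust
    /// use atomic::Atomic;
    ///
    /// if Atomic::<usize>::is_lock_free() {
    ///     // No locks are involved, so the type is usable in contexts
    ///     // such as signal handlers.
    /// }
    /// ```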
    #[inline]
    pub const fn is_lock_free() -> bool {
        ops::atomic_is_lock_free::<T>()
    }
}

impl<T: NoUninit> Atomic<T> {
    #[inline]
    fn inner_ptr(&self) -> *mut T {
        self.v.get() as *mut T
    }

    /// Returns a mutable reference to the underlying type.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
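    ///
    /// # Examples
    ///
    /// A brief sketch (values are arbitrary):
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let mut a = Atomic::new(7);
    /// *a.get_mut() = 5;
    /// assert_eq!(a.load(Ordering::SeqCst), 5);
    /// ```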
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.inner_ptr() }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
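    ///
    /// # Examples
    ///
    /// A brief sketch:
    ///
    /// ```rust
    /// use atomic::Atomic;
    ///
    /// let a = Atomic::new(5);
    /// assert_eq!(a.into_inner(), 5);
    /// ```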
    #[inline]
    pub fn into_inner(self) -> T {
        unsafe { self.v.into_inner().assume_init() }
    }

    /// Loads a value from the `Atomic`.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
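    ///
    /// # Examples
    ///
    /// A brief sketch:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let a = Atomic::new(5);
    /// assert_eq!(a.load(Ordering::SeqCst), 5);
    /// ```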
    #[inline]
    pub fn load(&self, order: Ordering) -> T {
        unsafe { ops::atomic_load(self.inner_ptr(), order) }
    }

    /// Stores a value into the `Atomic`.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
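    ///
    /// # Examples
    ///
    /// A brief sketch:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let a = Atomic::new(5);
    /// a.store(10, Ordering::SeqCst);
    /// assert_eq!(a.load(Ordering::SeqCst), 10);
    /// ```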
    #[inline]
    pub fn store(&self, val: T, order: Ordering) {
        unsafe {
            ops::atomic_store(self.inner_ptr(), val, order);
        }
    }

    /// Stores a value into the `Atomic`, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
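    ///
    /// # Examples
    ///
    /// A brief sketch:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let a = Atomic::new(5);
    /// assert_eq!(a.swap(10, Ordering::SeqCst), 5);
    /// assert_eq!(a.load(Ordering::SeqCst), 10);
    /// ```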
    #[inline]
    pub fn swap(&self, val: T, order: Ordering) -> T {
        unsafe { ops::atomic_swap(self.inner_ptr(), val, order) }
    }

    /// Stores a value into the `Atomic` if the current value is the same as the
    /// `current` value.
    ///
    /// The return value is a result indicating whether the new value was
    /// written and containing the previous value. On success this value is
    /// guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if
    /// the operation succeeds while the second describes the required ordering
    /// when the operation fails. The failure ordering can't be `Release` or
    /// `AcqRel` and must be equivalent to or weaker than the success ordering.
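    ///
    /// # Examples
    ///
    /// A brief sketch (values are arbitrary):
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let a = Atomic::new(5);
    /// // The current value matches `current`, so the exchange succeeds.
    /// assert_eq!(a.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst), Ok(5));
    /// // The current value is now 10, so comparing against 5 fails.
    /// assert_eq!(a.compare_exchange(5, 20, Ordering::SeqCst, Ordering::SeqCst), Err(10));
    /// ```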
    #[inline]
    pub fn compare_exchange(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        unsafe { ops::atomic_compare_exchange(self.inner_ptr(), current, new, success, failure) }
    }

    /// Stores a value into the `Atomic` if the current value is the same as the
    /// `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail
    /// even when the comparison succeeds, which can result in more efficient
    /// code on some platforms. The return value is a result indicating whether
    /// the new value was written and containing the previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the
    /// memory ordering of this operation. The first describes the required
    /// ordering if the operation succeeds while the second describes the
    /// required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent to or weaker than the
    /// success ordering.
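    ///
    /// # Examples
    ///
    /// A brief sketch of the usual retry loop (the doubling operation is
    /// arbitrary):
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let val = Atomic::new(4);
    ///
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     let new = old * 2;
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         // On failure (including spurious failure) retry with the
    ///         // freshly observed value.
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// assert_eq!(val.load(Ordering::Relaxed), 8);
    /// ```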
    #[inline]
    pub fn compare_exchange_weak(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        unsafe {
            ops::atomic_compare_exchange_weak(self.inner_ptr(), current, new, success, failure)
        }
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
    /// `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been changed from other threads in
    /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
    /// only once to the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
    /// The first describes the required ordering for when the operation finally succeeds while the second
    /// describes the required ordering for loads. These correspond to the success and failure orderings of
    /// [`compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
    /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
    ///
    /// # Examples
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let x = Atomic::new(7);
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
    /// assert_eq!(x.load(Ordering::SeqCst), 9);
    /// ```
    #[inline]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
}

impl Atomic<bool> {
    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument
    /// `val`, and sets the new value to the result.
    ///
    /// Returns the previous value.
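    ///
    /// # Examples
    ///
    /// A brief sketch:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let a = Atomic::new(true);
    /// assert_eq!(a.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(a.load(Ordering::SeqCst), false);
    /// ```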
    #[inline]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_and(self.inner_ptr(), val, order) }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument
    /// `val`, and sets the new value to the result.
    ///
    /// Returns the previous value.
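    ///
    /// # Examples
    ///
    /// A brief sketch:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let a = Atomic::new(false);
    /// assert_eq!(a.fetch_or(true, Ordering::SeqCst), false);
    /// assert_eq!(a.load(Ordering::SeqCst), true);
    /// ```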
    #[inline]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_or(self.inner_ptr(), val, order) }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument
    /// `val`, and sets the new value to the result.
    ///
    /// Returns the previous value.
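    ///
    /// # Examples
    ///
    /// A brief sketch:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let a = Atomic::new(true);
    /// assert_eq!(a.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(a.load(Ordering::SeqCst), false);
    /// ```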
    #[inline]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }
    }
}

macro_rules! atomic_ops_common {
    ($($t:ty)*) => ($(
        impl Atomic<$t> {
            /// Add to the current value, returning the previous value.
            #[inline]
            pub fn fetch_add(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_add(self.inner_ptr(), val, order) }
            }

            /// Subtract from the current value, returning the previous value.
            #[inline]
            pub fn fetch_sub(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_sub(self.inner_ptr(), val, order) }
            }

            /// Bitwise and with the current value, returning the previous value.
            #[inline]
            pub fn fetch_and(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_and(self.inner_ptr(), val, order) }
            }

            /// Bitwise or with the current value, returning the previous value.
            #[inline]
            pub fn fetch_or(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_or(self.inner_ptr(), val, order) }
            }

            /// Bitwise xor with the current value, returning the previous value.
            #[inline]
            pub fn fetch_xor(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }
            }
        }
    )*);
}
macro_rules! atomic_ops_signed {
    ($($t:ty)*) => (
        atomic_ops_common!{ $($t)* }
        $(
            impl Atomic<$t> {
                /// Minimum with the current value.
                #[inline]
                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_min(self.inner_ptr(), val, order) }
                }

                /// Maximum with the current value.
                #[inline]
                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_max(self.inner_ptr(), val, order) }
                }
            }
        )*
    );
}
macro_rules! atomic_ops_unsigned {
    ($($t:ty)*) => (
        atomic_ops_common!{ $($t)* }
        $(
            impl Atomic<$t> {
                /// Minimum with the current value.
                #[inline]
                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_umin(self.inner_ptr(), val, order) }
                }

                /// Maximum with the current value.
                #[inline]
                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_umax(self.inner_ptr(), val, order) }
                }
            }
        )*
    );
}
atomic_ops_signed! { i8 i16 i32 i64 isize i128 }
atomic_ops_unsigned! { u8 u16 u32 u64 usize u128 }

#[cfg(test)]
mod tests {
    use super::{Atomic, Ordering::*};
    use bytemuck::NoUninit;
    use core::mem;

    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]
    #[repr(C)]
    struct Foo(u8, u8);
    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]
    #[repr(C)]
    struct Bar(u64, u64);
    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]
    #[repr(C)]
    struct Quux(u32);

    #[test]
    fn atomic_bool() {
        let a = Atomic::new(false);
        assert_eq!(
            Atomic::<bool>::is_lock_free(),
            cfg!(target_has_atomic = "8")
        );
        assert_eq!(format!("{:?}", a), "Atomic(false)");
        assert_eq!(a.load(SeqCst), false);
        a.store(true, SeqCst);
        assert_eq!(a.swap(false, SeqCst), true);
        assert_eq!(a.compare_exchange(true, false, SeqCst, SeqCst), Err(false));
        assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
        assert_eq!(a.fetch_and(false, SeqCst), true);
        assert_eq!(a.fetch_or(true, SeqCst), false);
        assert_eq!(a.fetch_xor(false, SeqCst), true);
        assert_eq!(a.load(SeqCst), true);
    }

    #[test]
    fn atomic_i8() {
        let a = Atomic::new(0i8);
        assert_eq!(Atomic::<i8>::is_lock_free(), cfg!(target_has_atomic = "8"));
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        // Make sure overflows are handled correctly
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), -74);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_i16() {
        let a = Atomic::new(0i16);
        assert_eq!(
            Atomic::<i16>::is_lock_free(),
            cfg!(target_has_atomic = "16")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_i32() {
        let a = Atomic::new(0i32);
        assert_eq!(
            Atomic::<i32>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_i64() {
        let a = Atomic::new(0i64);
        assert_eq!(
            Atomic::<i64>::is_lock_free(),
            cfg!(target_has_atomic = "64") && mem::align_of::<i64>() == 8
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_i128() {
        let a = Atomic::new(0i128);
        assert_eq!(
            Atomic::<i128>::is_lock_free(),
            cfg!(feature = "nightly") && cfg!(target_has_atomic = "128")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_isize() {
        let a = Atomic::new(0isize);
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u8() {
        let a = Atomic::new(0u8);
        assert_eq!(Atomic::<u8>::is_lock_free(), cfg!(target_has_atomic = "8"));
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u16() {
        let a = Atomic::new(0u16);
        assert_eq!(
            Atomic::<u16>::is_lock_free(),
            cfg!(target_has_atomic = "16")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u32() {
        let a = Atomic::new(0u32);
        assert_eq!(
            Atomic::<u32>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u64() {
        let a = Atomic::new(0u64);
        assert_eq!(
            Atomic::<u64>::is_lock_free(),
            cfg!(target_has_atomic = "64") && mem::align_of::<u64>() == 8
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_u128() {
        let a = Atomic::new(0u128);
        assert_eq!(
            Atomic::<u128>::is_lock_free(),
            cfg!(feature = "nightly") && cfg!(target_has_atomic = "128")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_usize() {
        let a = Atomic::new(0usize);
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);
    }

    #[test]
    fn atomic_foo() {
        let a = Atomic::default();
        assert_eq!(Atomic::<Foo>::is_lock_free(), false);
        assert_eq!(format!("{:?}", a), "Atomic(Foo(0, 0))");
        assert_eq!(a.load(SeqCst), Foo(0, 0));
        a.store(Foo(1, 1), SeqCst);
        assert_eq!(a.swap(Foo(2, 2), SeqCst), Foo(1, 1));
        assert_eq!(
            a.compare_exchange(Foo(5, 5), Foo(45, 45), SeqCst, SeqCst),
            Err(Foo(2, 2))
        );
        assert_eq!(
            a.compare_exchange(Foo(2, 2), Foo(3, 3), SeqCst, SeqCst),
            Ok(Foo(2, 2))
        );
        assert_eq!(a.load(SeqCst), Foo(3, 3));
    }

    #[test]
    fn atomic_bar() {
        let a = Atomic::default();
        assert_eq!(Atomic::<Bar>::is_lock_free(), false);
        assert_eq!(format!("{:?}", a), "Atomic(Bar(0, 0))");
        assert_eq!(a.load(SeqCst), Bar(0, 0));
        a.store(Bar(1, 1), SeqCst);
        assert_eq!(a.swap(Bar(2, 2), SeqCst), Bar(1, 1));
        assert_eq!(
            a.compare_exchange(Bar(5, 5), Bar(45, 45), SeqCst, SeqCst),
            Err(Bar(2, 2))
        );
        assert_eq!(
            a.compare_exchange(Bar(2, 2), Bar(3, 3), SeqCst, SeqCst),
            Ok(Bar(2, 2))
        );
        assert_eq!(a.load(SeqCst), Bar(3, 3));
    }

    #[test]
    fn atomic_quux() {
        let a = Atomic::default();
        assert_eq!(
            Atomic::<Quux>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(Quux(0))");
        assert_eq!(a.load(SeqCst), Quux(0));
        a.store(Quux(1), SeqCst);
        assert_eq!(a.swap(Quux(2), SeqCst), Quux(1));
        assert_eq!(
            a.compare_exchange(Quux(5), Quux(45), SeqCst, SeqCst),
            Err(Quux(2))
        );
        assert_eq!(
            a.compare_exchange(Quux(2), Quux(3), SeqCst, SeqCst),
            Ok(Quux(2))
        );
        assert_eq!(a.load(SeqCst), Quux(3));
    }
}