// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// IMPORTANT NOTE: deprecated. Use std::atomic instead.
//
// Rationale:
// - Uniformity: most of the code uses std::atomic, and the underlying
//   implementation is the same. Use the STL one.
// - Clearer code: return values from some operations (e.g. CompareAndSwap)
//   differ from the equivalent ones in std::atomic, leading to confusion.
// - Richer semantics: can use actual types, rather than e.g. Atomic32 for a
//   boolean flag, or AtomicWord for T*. Bitwise operations (e.g. fetch_or())
//   are only in std::atomic.
// - Harder to misuse: base::subtle::Atomic32 is just an int, making it possible
//   to accidentally manipulate, not realizing that there are no atomic
//   semantics attached to it. For instance, "Atomic32 a; a++;" is almost
//   certainly incorrect.

// For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//

#ifndef BASE_ATOMICOPS_H_
#define BASE_ATOMICOPS_H_

#include <stdint.h>

// Small C++ header which defines implementation specific macros used to
// identify the STL implementation.
// - libc++: captures __config for _LIBCPP_VERSION
// - libstdc++: captures bits/c++config.h for __GLIBCXX__
#include <cstddef>

#include "build/build_config.h"

namespace base {
namespace subtle {

// 32-bit atomic integer type. Note: this is a plain int typedef; atomicity
// comes only from using the routines below, never from direct manipulation.
typedef int32_t Atomic32;
#ifdef ARCH_CPU_64_BITS
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__) || BUILDFLAG(IS_NACL)
// NaCl's intptr_t is not actually 64-bits on 64-bit!
// http://code.google.com/p/nativeclient/issues/detail?id=1162
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr" (whether or not the swap happened).
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

// Same as NoBarrier_AtomicIncrement, but with "Barrier" (i.e. both Acquire
// and Release, see below) memory-ordering semantics.
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);

// These following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);

// 64-bit atomic operations (only available on 64-bit processors).
// These overloads mirror the Atomic32 routines above; see the comments there
// for the semantics of each operation.
#ifdef ARCH_CPU_64_BITS
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atom64 new_value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
#endif  // ARCH_CPU_64_BITS

}  // namespace subtle
}  // namespace base

// The portable implementation (in terms of std::atomic) of the routines
// declared above.
#include "base/atomicops_internals_portable.h"

// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_OPENBSD)
#include "base/atomicops_internals_atomicword_compat.h"
#endif

#endif  // BASE_ATOMICOPS_H_