1 /*
2 * Distributed under the Boost Software License, Version 1.0.
3 * (See accompanying file LICENSE_1_0.txt or copy at
4 * http://www.boost.org/LICENSE_1_0.txt)
5 *
6 * Copyright (c) 2011 Helge Bahmann
7 * Copyright (c) 2013-2014, 2020 Andrey Semashev
8 */
9 /*!
10 * \file lock_pool.cpp
11 *
12 * This file contains implementation of the lock pool used to emulate atomic ops.
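 *
 * A rough usage sketch, as seen from the rest of the library (simplified; the real callers derive
 * the hash value from the address of the atomic object):
 *
 *   void* ls = lock_pool::short_lock(h); // h is a hash of the object address
 *   // ... perform the non-lock-free operation on the object ...
 *   lock_pool::unlock(ls);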
13 */
14
15 #include <boost/predef/os/windows.h>
16 #if BOOST_OS_WINDOWS
17 // Include boost/winapi/config.hpp first to make sure target Windows version is selected by Boost.WinAPI
18 #include <boost/winapi/config.hpp>
19 #include <boost/predef/platform.h>
20 #endif
21 #include <boost/predef/architecture/x86.h>
22 #include <boost/predef/hardware/simd/x86.h>
23
24 #include <cstddef>
25 #include <cstring>
26 #include <cstdlib>
27 #include <new>
28 #include <limits>
29 #include <boost/config.hpp>
30 #include <boost/assert.hpp>
31 #include <boost/static_assert.hpp>
32 #include <boost/memory_order.hpp>
33 #include <boost/atomic/capabilities.hpp>
34 #include <boost/atomic/detail/config.hpp>
35 #include <boost/atomic/detail/intptr.hpp>
36 #include <boost/atomic/detail/int_sizes.hpp>
37 #include <boost/atomic/detail/aligned_variable.hpp>
38 #include <boost/atomic/detail/core_operations.hpp>
39 #include <boost/atomic/detail/extra_operations.hpp>
40 #include <boost/atomic/detail/fence_operations.hpp>
41 #include <boost/atomic/detail/lock_pool.hpp>
42 #include <boost/atomic/detail/pause.hpp>
43 #include <boost/atomic/detail/once_flag.hpp>
44 #include <boost/atomic/detail/type_traits/alignment_of.hpp>
45
46 #include <boost/align/aligned_alloc.hpp>
47
48 #include <boost/preprocessor/config/limits.hpp>
49 #include <boost/preprocessor/iteration/iterate.hpp>
50
51 #if BOOST_OS_WINDOWS
52 #include <boost/winapi/basic_types.hpp>
53 #include <boost/winapi/thread.hpp>
54 #include <boost/winapi/wait_constants.hpp>
55 #if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
56 #include <boost/winapi/srw_lock.hpp>
57 #include <boost/winapi/condition_variable.hpp>
58 #else // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
59 #include <boost/winapi/critical_section.hpp>
60 #include <boost/winapi/semaphore.hpp>
61 #include <boost/winapi/handles.hpp>
62 #include <boost/winapi/wait.hpp>
63 #endif // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
64 #define BOOST_ATOMIC_USE_WINAPI
65 #else // BOOST_OS_WINDOWS
66 #include <boost/atomic/detail/futex.hpp>
67 #if defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX) && BOOST_ATOMIC_INT32_LOCK_FREE == 2
68 #define BOOST_ATOMIC_USE_FUTEX
69 #else // defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX) && BOOST_ATOMIC_INT32_LOCK_FREE == 2
70 #include <pthread.h>
71 #define BOOST_ATOMIC_USE_PTHREAD
72 #endif // defined(BOOST_ATOMIC_DETAIL_HAS_FUTEX) && BOOST_ATOMIC_INT32_LOCK_FREE == 2
73 #include <cerrno>
74 #endif // BOOST_OS_WINDOWS
75
76 #include "find_address.hpp"
77
78 #if BOOST_ARCH_X86 && (defined(BOOST_ATOMIC_USE_SSE2) || defined(BOOST_ATOMIC_USE_SSE41)) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && \
79 (\
80 (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 && BOOST_HW_SIMD_X86 < BOOST_HW_SIMD_X86_SSE4_1_VERSION) || \
81 (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4 && BOOST_HW_SIMD_X86 < BOOST_HW_SIMD_X86_SSE2_VERSION) \
82 )
83 #include "cpuid.hpp"
84 #define BOOST_ATOMIC_DETAIL_X86_USE_RUNTIME_DISPATCH
85 #endif
86
87 #include <boost/atomic/detail/header.hpp>
88
89 // Cache line size, in bytes
90 // NOTE: This constant is defined as a macro because some compilers (e.g. gcc 4.4) don't allow enums or namespace scope constants in alignment attributes
91 #if defined(__s390__) || defined(__s390x__)
92 #define BOOST_ATOMIC_CACHE_LINE_SIZE 256
93 #elif defined(powerpc) || defined(__powerpc__) || defined(__ppc__)
94 #define BOOST_ATOMIC_CACHE_LINE_SIZE 128
95 #else
96 #define BOOST_ATOMIC_CACHE_LINE_SIZE 64
97 #endif
98
99 namespace boost {
100 namespace atomics {
101 namespace detail {
102
103 //! \c find_address generic implementation
104 std::size_t find_address_generic(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
105 {
106 for (std::size_t i = 0u; i < size; ++i)
107 {
108 if (addrs[i] == addr)
109 return i;
110 }
111
112 return size;
113 }
114
115 namespace lock_pool {
116
117 namespace {
118
119 #if BOOST_ARCH_X86 && (defined(BOOST_ATOMIC_USE_SSE2) || defined(BOOST_ATOMIC_USE_SSE41)) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
120
121 typedef atomics::detail::core_operations< sizeof(find_address_t*), false, false > func_ptr_operations;
122 BOOST_STATIC_ASSERT_MSG(func_ptr_operations::is_always_lock_free, "Boost.Atomic unsupported target platform: native atomic operations not implemented for function pointers");
123
124 #if defined(BOOST_ATOMIC_DETAIL_X86_USE_RUNTIME_DISPATCH)
125 std::size_t find_address_dispatch(const volatile void* addr, const volatile void* const* addrs, std::size_t size);
126 #endif
127
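// The address lookup goes through a global function pointer kept in atomic storage. It is
// statically initialized to the best implementation permitted by the compile-time SIMD baseline,
// or to find_address_dispatch, which probes CPU features on the first call and caches the
// selected implementation back into g_find_address.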
128 union find_address_ptr
129 {
130 find_address_t* as_ptr;
131 func_ptr_operations::storage_type as_storage;
132 }
133 g_find_address =
134 {
135 #if defined(BOOST_ATOMIC_USE_SSE41) && BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 && BOOST_HW_SIMD_X86 >= BOOST_HW_SIMD_X86_SSE4_1_VERSION
136 &find_address_sse41
137 #elif defined(BOOST_ATOMIC_USE_SSE2) && BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4 && BOOST_HW_SIMD_X86 >= BOOST_HW_SIMD_X86_SSE2_VERSION
138 &find_address_sse2
139 #else
140 &find_address_dispatch
141 #endif
142 };
143
144 #if defined(BOOST_ATOMIC_DETAIL_X86_USE_RUNTIME_DISPATCH)
145
146 std::size_t find_address_dispatch(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
147 {
148 find_address_t* find_addr = &find_address_generic;
149
150 #if defined(BOOST_ATOMIC_USE_SSE2)
151 // First, check the max available cpuid function
152 uint32_t eax = 0u, ebx = 0u, ecx = 0u, edx = 0u;
153 atomics::detail::cpuid(eax, ebx, ecx, edx);
154
155 const uint32_t max_cpuid_function = eax;
156 if (max_cpuid_function >= 1u)
157 {
158 // Obtain CPU features
159 eax = 1u;
160 ebx = ecx = edx = 0u;
161 atomics::detail::cpuid(eax, ebx, ecx, edx);
162
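// CPUID function 1: EDX bit 26 indicates SSE2 support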
163 if ((edx & (1u << 26)) != 0u)
164 find_addr = &find_address_sse2;
165
166 #if defined(BOOST_ATOMIC_USE_SSE41) && BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8
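// CPUID function 1: ECX bit 19 indicates SSE4.1 support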
167 if ((ecx & (1u << 19)) != 0u)
168 find_addr = &find_address_sse41;
169 #endif
170 }
171 #endif // defined(BOOST_ATOMIC_USE_SSE2)
172
173 find_address_ptr ptr = {};
174 ptr.as_ptr = find_addr;
175 func_ptr_operations::store(g_find_address.as_storage, ptr.as_storage, boost::memory_order_relaxed);
176
177 return find_addr(addr, addrs, size);
178 }
179
180 #endif // defined(BOOST_ATOMIC_DETAIL_X86_USE_RUNTIME_DISPATCH)
181
182 inline std::size_t find_address(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
183 {
184 find_address_ptr ptr;
185 ptr.as_storage = func_ptr_operations::load(g_find_address.as_storage, boost::memory_order_relaxed);
186 return ptr.as_ptr(addr, addrs, size);
187 }
188
189 #else // BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
190
191 inline std::size_t find_address(const volatile void* addr, const volatile void* const* addrs, std::size_t size)
192 {
193 return atomics::detail::find_address_generic(addr, addrs, size);
194 }
195
196 #endif // BOOST_ARCH_X86 && defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 8 || BOOST_ATOMIC_DETAIL_SIZEOF_POINTER == 4)
197
198 struct wait_state;
199 struct lock_state;
200
201 //! Base class for a wait state
202 struct wait_state_base
203 {
204 //! Number of waiters referencing this state
205 std::size_t m_ref_count;
206 //! Index of this wait state in the list
207 std::size_t m_index;
208
209 explicit wait_state_base(std::size_t index) BOOST_NOEXCEPT :
210 m_ref_count(0u),
211 m_index(index)
212 {
213 }
214
215 BOOST_DELETED_FUNCTION(wait_state_base(wait_state_base const&))
216 BOOST_DELETED_FUNCTION(wait_state_base& operator= (wait_state_base const&))
217 };
218
219 //! List of wait states. Must be a POD structure.
220 struct wait_state_list
221 {
222 //! List header
223 struct header
224 {
225 //! List size
226 std::size_t size;
227 //! List capacity
228 std::size_t capacity;
229 };
230
231 /*!
232 * \brief Pointer to the list header
233 *
234 * The list buffer consists of three adjacent areas: header object, array of atomic pointers and array of pointers to the wait_state structures.
235 * Each of the arrays has header.capacity elements, of which the first header.size elements correspond to the currently ongoing wait operations
236 * and the rest are spare elements. Spare wait_state structures may still be allocated (in which case the wait_state pointer is not null) and
237 * can be reused on future requests. Spare atomic pointers are null and unused.
238 *
239 * This memory layout was designed to optimize wait state lookup by atomic address and also support memory pooling to reduce dynamic memory allocations.
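 *
 * Schematically, for a buffer of capacity N (alignment padding not shown):
 *
 *   [ header | atomic pointer 0 .. atomic pointer N-1 | wait_state* 0 .. wait_state* N-1 ]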
240 */
241 header* m_header;
242 //! The flag indicates that memory pooling is disabled. Set on process cleanup.
243 bool m_free_memory;
244
245 //! Buffer alignment, in bytes
246 static BOOST_CONSTEXPR_OR_CONST std::size_t buffer_alignment = 16u;
247 //! Alignment of pointer arrays in the buffer, in bytes. This should align atomic pointers to the vector size used in the \c find_address implementation.
248 static BOOST_CONSTEXPR_OR_CONST std::size_t entries_alignment = atomics::detail::alignment_of< void* >::value < 16u ? 16u : atomics::detail::alignment_of< void* >::value;
249 //! Offset from the list header to the beginning of the array of atomic pointers in the buffer, in bytes
250 static BOOST_CONSTEXPR_OR_CONST std::size_t entries_offset = (sizeof(header) + entries_alignment - 1u) & ~static_cast< std::size_t >(entries_alignment - 1u);
251 //! Initial buffer capacity, in elements. This should be at least as large as the vector size used in the \c find_address implementation.
252 static BOOST_CONSTEXPR_OR_CONST std::size_t initial_capacity = (16u / sizeof(void*)) < 2u ? 2u : (16u / sizeof(void*));
253
254 //! Returns a pointer to the array of atomic pointers
255 static const volatile void** get_atomic_pointers(header* p) BOOST_NOEXCEPT
256 {
257 BOOST_ASSERT(p != NULL);
258 return reinterpret_cast< const volatile void** >(reinterpret_cast< unsigned char* >(p) + entries_offset);
259 }
260
261 //! Returns a pointer to the array of atomic pointers
262 const volatile void** get_atomic_pointers() const BOOST_NOEXCEPT
263 {
264 return get_atomic_pointers(m_header);
265 }
266
267 //! Returns a pointer to the array of pointers to the wait states
268 static wait_state** get_wait_states(const volatile void** ptrs, std::size_t capacity) BOOST_NOEXCEPT
269 {
270 return reinterpret_cast< wait_state** >(const_cast< void** >(ptrs + capacity));
271 }
272
273 //! Returns a pointer to the array of pointers to the wait states
274 static wait_state** get_wait_states(header* p) BOOST_NOEXCEPT
275 {
276 return get_wait_states(get_atomic_pointers(p), p->capacity);
277 }
278
279 //! Returns a pointer to the array of pointers to the wait states
280 wait_state** get_wait_states() const BOOST_NOEXCEPT
281 {
282 return get_wait_states(m_header);
283 }
284
285 //! Finds an element with the given pointer to the atomic object
286 wait_state* find(const volatile void* addr) const BOOST_NOEXCEPT
287 {
288 wait_state* ws = NULL;
289 if (BOOST_LIKELY(m_header != NULL))
290 {
291 const volatile void* const* addrs = get_atomic_pointers();
292 const std::size_t size = m_header->size;
293 std::size_t pos = find_address(addr, addrs, size);
294 if (pos < size)
295 ws = get_wait_states()[pos];
296 }
297
298 return ws;
299 }
300
301 //! Finds an existing element with the given pointer to the atomic object or allocates a new one. Returns NULL in case of failure.
302 wait_state* find_or_create(const volatile void* addr) BOOST_NOEXCEPT;
303 //! Releases the previously created wait state
304 void erase(wait_state* w) BOOST_NOEXCEPT;
305
306 //! Deallocates spare entries and the list buffer if no allocated entries are left
307 void free_spare() BOOST_NOEXCEPT;
308 //! Allocates new buffer for the list entries. Returns NULL in case of failure.
309 static header* allocate_buffer(std::size_t new_capacity, header* old_header = NULL) BOOST_NOEXCEPT;
310 };
311
312 #define BOOST_ATOMIC_WAIT_STATE_LIST_INIT { NULL, false }
313
314 // In the platform-specific definitions below, lock_state must be a POD structure and wait_state must derive from wait_state_base.
315
316 #if defined(BOOST_ATOMIC_USE_PTHREAD)
317
318 //! State of a wait operation associated with an atomic object
319 struct wait_state :
320 public wait_state_base
321 {
322 //! Condition variable
323 pthread_cond_t m_cond;
324
325 explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
326 wait_state_base(index)
327 {
328 BOOST_VERIFY(pthread_cond_init(&m_cond, NULL) == 0);
329 }
330
331 ~wait_state() BOOST_NOEXCEPT
332 {
333 pthread_cond_destroy(&m_cond);
334 }
335
336 //! Blocks in the wait operation until notified
337 void wait(lock_state& state) BOOST_NOEXCEPT;
338
339 //! Wakes up one thread blocked in the wait operation
340 void notify_one(lock_state&) BOOST_NOEXCEPT
341 {
342 BOOST_VERIFY(pthread_cond_signal(&m_cond) == 0);
343 }
344 //! Wakes up all threads blocked in the wait operation
345 void notify_all(lock_state&) BOOST_NOEXCEPT
346 {
347 BOOST_VERIFY(pthread_cond_broadcast(&m_cond) == 0);
348 }
349 };
350
351 //! Lock pool entry
352 struct lock_state
353 {
354 //! Mutex
355 pthread_mutex_t m_mutex;
356 //! Wait states
357 wait_state_list m_wait_states;
358
359 //! Locks the mutex for a short duration
360 void short_lock() BOOST_NOEXCEPT
361 {
362 long_lock();
363 }
364
365 //! Locks the mutex for a long duration
366 void long_lock() BOOST_NOEXCEPT
367 {
368 for (unsigned int i = 0u; i < 5u; ++i)
369 {
370 if (BOOST_LIKELY(pthread_mutex_trylock(&m_mutex) == 0))
371 return;
372
373 atomics::detail::pause();
374 }
375
376 BOOST_VERIFY(pthread_mutex_lock(&m_mutex) == 0);
377 }
378
379 //! Unlocks the mutex
380 void unlock() BOOST_NOEXCEPT
381 {
382 BOOST_VERIFY(pthread_mutex_unlock(&m_mutex) == 0);
383 }
384 };
385
386 #define BOOST_ATOMIC_LOCK_STATE_INIT { PTHREAD_MUTEX_INITIALIZER, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
387
388 //! Blocks in the wait operation until notified
389 inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
390 {
391 BOOST_VERIFY(pthread_cond_wait(&m_cond, &state.m_mutex) == 0);
392 }
393
394 #elif defined(BOOST_ATOMIC_USE_FUTEX)
395
396 typedef atomics::detail::core_operations< 4u, false, false > futex_operations;
397 // The storage type must be a 32-bit object, as required by futex API
398 BOOST_STATIC_ASSERT_MSG(futex_operations::is_always_lock_free && sizeof(futex_operations::storage_type) == 4u, "Boost.Atomic unsupported target platform: native atomic operations not implemented for 32-bit integers");
399 typedef atomics::detail::extra_operations< futex_operations, futex_operations::storage_size, futex_operations::is_signed > futex_extra_operations;
400
401 namespace mutex_bits {
402
403 //! The bit indicates a locked mutex
404 BOOST_CONSTEXPR_OR_CONST futex_operations::storage_type locked = 1u;
405 //! The bit indicates that there is at least one thread blocked waiting for the mutex to be released
406 BOOST_CONSTEXPR_OR_CONST futex_operations::storage_type contended = 1u << 1;
407 //! The lowest bit of the counter bits used to mitigate ABA problem. This and any higher bits in the mutex state constitute the counter.
408 BOOST_CONSTEXPR_OR_CONST futex_operations::storage_type counter_one = 1u << 2;
409
410 } // namespace mutex_bits
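
// How the bits are used: a locker sets the "locked" bit; a thread that finds the mutex locked sets
// the "contended" bit before blocking in futex_wait, and unlock() issues a futex wakeup only when
// that bit was set. Every unlock also advances the counter bits, so the futex word changes on each
// unlock; this is the ABA mitigation referred to in the counter_one description above.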
411
412 //! State of a wait operation associated with an atomic object
413 struct wait_state :
414 public wait_state_base
415 {
416 //! Condition variable futex. Used as the counter of notify calls.
417 BOOST_ATOMIC_DETAIL_ALIGNED_VAR(futex_operations::storage_alignment, futex_operations::storage_type, m_cond);
418 //! Number of currently blocked waiters
419 futex_operations::storage_type m_waiter_count;
420
421 explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
422 wait_state_base(index),
423 m_cond(0u),
424 m_waiter_count(0u)
425 {
426 }
427
428 //! Blocks in the wait operation until notified
429 void wait(lock_state& state) BOOST_NOEXCEPT;
430
431 //! Wakes up one thread blocked in the wait operation
432 void notify_one(lock_state& state) BOOST_NOEXCEPT;
433 //! Wakes up all threads blocked in the wait operation
434 void notify_all(lock_state& state) BOOST_NOEXCEPT;
435 };
436
437 //! Lock pool entry
438 struct lock_state
439 {
440 //! Mutex futex
441 BOOST_ATOMIC_DETAIL_ALIGNED_VAR(futex_operations::storage_alignment, futex_operations::storage_type, m_mutex);
442 //! Wait states
443 wait_state_list m_wait_states;
444
445 //! Locks the mutex for a short duration
446 void short_lock() BOOST_NOEXCEPT
447 {
448 long_lock();
449 }
450
451 //! Locks the mutex for a long duration
452 void long_lock() BOOST_NOEXCEPT
453 {
454 for (unsigned int i = 0u; i < 10u; ++i)
455 {
456 futex_operations::storage_type prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
457 if (BOOST_LIKELY((prev_state & mutex_bits::locked) == 0u))
458 {
459 futex_operations::storage_type new_state = prev_state | mutex_bits::locked;
460 if (BOOST_LIKELY(futex_operations::compare_exchange_strong(m_mutex, prev_state, new_state, boost::memory_order_acquire, boost::memory_order_relaxed)))
461 return;
462 }
463
464 atomics::detail::pause();
465 }
466
467 lock_slow_path();
468 }
469
470 //! Locks the mutex for a long duration
471 void lock_slow_path() BOOST_NOEXCEPT
472 {
473 futex_operations::storage_type prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
474 while (true)
475 {
476 if (BOOST_LIKELY((prev_state & mutex_bits::locked) == 0u))
477 {
478 futex_operations::storage_type new_state = prev_state | mutex_bits::locked;
479 if (BOOST_LIKELY(futex_operations::compare_exchange_weak(m_mutex, prev_state, new_state, boost::memory_order_acquire, boost::memory_order_relaxed)))
480 return;
481 }
482 else
483 {
484 futex_operations::storage_type new_state = prev_state | mutex_bits::contended;
485 if (BOOST_LIKELY(futex_operations::compare_exchange_weak(m_mutex, prev_state, new_state, boost::memory_order_relaxed, boost::memory_order_relaxed)))
486 {
487 atomics::detail::futex_wait_private(&m_mutex, new_state);
488 prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
489 }
490 }
491 }
492 }
493
494 //! Unlocks the mutex
495 void unlock() BOOST_NOEXCEPT
496 {
497 futex_operations::storage_type prev_state = futex_operations::load(m_mutex, boost::memory_order_relaxed);
498 futex_operations::storage_type new_state;
499 while (true)
500 {
501 new_state = (prev_state & (~mutex_bits::locked)) + mutex_bits::counter_one;
502 if (BOOST_LIKELY(futex_operations::compare_exchange_weak(m_mutex, prev_state, new_state, boost::memory_order_release, boost::memory_order_relaxed)))
503 break;
504 }
505
506 if ((prev_state & mutex_bits::contended) != 0u)
507 {
508 int woken_count = atomics::detail::futex_signal_private(&m_mutex);
509 if (woken_count == 0)
510 {
511 prev_state = new_state;
512 new_state &= ~mutex_bits::contended;
513 futex_operations::compare_exchange_strong(m_mutex, prev_state, new_state, boost::memory_order_relaxed, boost::memory_order_relaxed);
514 }
515 }
516 }
517 };
518
519 #if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
520 #define BOOST_ATOMIC_LOCK_STATE_INIT { 0u, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
521 #else
522 #define BOOST_ATOMIC_LOCK_STATE_INIT { { 0u }, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
523 #endif
524
525 //! Blocks in the wait operation until notified
526 inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
527 {
528 const futex_operations::storage_type prev_cond = m_cond;
529 ++m_waiter_count;
530
531 state.unlock();
532
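// futex_wait_private only blocks if m_cond still equals prev_cond, so a notification that
// increments m_cond between the unlock above and the futex wait below is not lost.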
533 while (true)
534 {
535 int err = atomics::detail::futex_wait_private(&m_cond, prev_cond);
536 if (BOOST_LIKELY(err != EINTR))
537 break;
538 }
539
540 state.long_lock();
541
542 --m_waiter_count;
543 }
544
545 //! Wakes up one thread blocked in the wait operation
546 inline void wait_state::notify_one(lock_state& state) BOOST_NOEXCEPT
547 {
548 ++m_cond;
549
550 if (BOOST_LIKELY(m_waiter_count > 0u))
551 {
552 // Move one blocked thread to the mutex futex and mark the mutex contended so that the thread is unblocked on unlock()
553 atomics::detail::futex_requeue_private(&m_cond, &state.m_mutex, 0u, 1u);
554 futex_extra_operations::opaque_or(state.m_mutex, mutex_bits::contended, boost::memory_order_relaxed);
555 }
556 }
557
558 //! Wakes up all threads blocked in the wait operation
559 inline void wait_state::notify_all(lock_state& state) BOOST_NOEXCEPT
560 {
561 ++m_cond;
562
563 if (BOOST_LIKELY(m_waiter_count > 0u))
564 {
565 // Move blocked threads to the mutex futex and mark the mutex contended so that a thread is unblocked on unlock()
566 atomics::detail::futex_requeue_private(&m_cond, &state.m_mutex, 0u);
567 futex_extra_operations::opaque_or(state.m_mutex, mutex_bits::contended, boost::memory_order_relaxed);
568 }
569 }
570
571 #else
572
573 #if BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
574
575 //! State of a wait operation associated with an atomic object
576 struct wait_state :
577 public wait_state_base
578 {
579 //! Condition variable
580 boost::winapi::CONDITION_VARIABLE_ m_cond;
581
582 explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
583 wait_state_base(index)
584 {
585 boost::winapi::InitializeConditionVariable(&m_cond);
586 }
587
588 //! Blocks in the wait operation until notified
589 void wait(lock_state& state) BOOST_NOEXCEPT;
590
591 //! Wakes up one thread blocked in the wait operation
592 void notify_one(lock_state&) BOOST_NOEXCEPT
593 {
594 boost::winapi::WakeConditionVariable(&m_cond);
595 }
596 //! Wakes up all threads blocked in the wait operation
597 void notify_all(lock_state&) BOOST_NOEXCEPT
598 {
599 boost::winapi::WakeAllConditionVariable(&m_cond);
600 }
601 };
602
603 //! Lock pool entry
604 struct lock_state
605 {
606 //! Mutex
607 boost::winapi::SRWLOCK_ m_mutex;
608 //! Wait states
609 wait_state_list m_wait_states;
610
611 //! Locks the mutex for a short duration
612 void short_lock() BOOST_NOEXCEPT
613 {
614 long_lock();
615 }
616
617 //! Locks the mutex for a long duration
618 void long_lock() BOOST_NOEXCEPT
619 {
620 // Presumably, AcquireSRWLockExclusive already implements spinning internally, so there's no point in doing this ourselves.
621 boost::winapi::AcquireSRWLockExclusive(&m_mutex);
622 }
623
624 //! Unlocks the mutex
625 void unlock() BOOST_NOEXCEPT
626 {
627 boost::winapi::ReleaseSRWLockExclusive(&m_mutex);
628 }
629 };
630
631 #define BOOST_ATOMIC_LOCK_STATE_INIT { BOOST_WINAPI_SRWLOCK_INIT, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
632
633 //! Blocks in the wait operation until notified
634 inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
635 {
636 boost::winapi::SleepConditionVariableSRW(&m_cond, &state.m_mutex, boost::winapi::infinite, 0u);
637 }
638
639 #else // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
640
641 typedef atomics::detail::core_operations< 4u, false, false > mutex_operations;
642 BOOST_STATIC_ASSERT_MSG(mutex_operations::is_always_lock_free, "Boost.Atomic unsupported target platform: native atomic operations not implemented for 32-bit integers");
643
644 namespace fallback_mutex_bits {
645
646 //! The bit indicates a locked mutex
647 BOOST_CONSTEXPR_OR_CONST mutex_operations::storage_type locked = 1u;
648 //! The bit indicates that the critical section is initialized and should be used instead of the fallback mutex
649 BOOST_CONSTEXPR_OR_CONST mutex_operations::storage_type critical_section_initialized = 1u << 1;
650
651 } // namespace fallback_mutex_bits
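
// The fallback mutex word has three states: 0 (unlocked, critical section not initialized),
// "locked" (the fallback spin mutex is held, either while the critical section is being initialized
// or, if initialization failed, for the duration of the protected region), and
// "critical_section_initialized" (all further locking goes through the critical section).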
652
653 //! State of a wait operation associated with an atomic object
654 struct wait_state :
655 public wait_state_base
656 {
657 /*!
658 * \brief A semaphore used to block one or more threads
659 *
660 * A thread may only block on a semaphore that has no pending notifications (i.e. \c m_notify_count is 0).
661 * If there is no such semaphore, the thread has to allocate a new one to block on. This guarantees
662 * that a thread which blocks after a notification has been issued does not consume that notification
663 * while previously blocked threads are still waiting to be released.
664 *
665 * Semaphores are organized in a circular doubly linked list. A single semaphore object represents a list
666 * of one semaphore and is said to be "singular".
667 */
668 struct semaphore
669 {
670 //! Pointer to the next semaphore in the list
671 semaphore* m_next;
672 //! Pointer to the previous semaphore in the list
673 semaphore* m_prev;
674
675 //! Semaphore handle
676 boost::winapi::HANDLE_ m_semaphore;
677 //! Number of threads blocked on the semaphore
678 boost::winapi::ULONG_ m_waiter_count;
679 //! Number of threads released by notifications
680 boost::winapi::ULONG_ m_notify_count;
681
682 semaphore() BOOST_NOEXCEPT :
683 m_semaphore(boost::winapi::create_anonymous_semaphore(NULL, 0, (std::numeric_limits< boost::winapi::LONG_ >::max)())),
684 m_waiter_count(0u),
685 m_notify_count(0u)
686 {
687 m_next = m_prev = this;
688 }
689
690 ~semaphore() BOOST_NOEXCEPT
691 {
692 BOOST_ASSERT(is_singular());
693
694 if (BOOST_LIKELY(m_semaphore != boost::winapi::invalid_handle_value))
695 boost::winapi::CloseHandle(m_semaphore);
696 }
697
698 //! Creates a new semaphore or returns null in case of failure
699 static semaphore* create() BOOST_NOEXCEPT
700 {
701 semaphore* p = new (std::nothrow) semaphore();
702 if (BOOST_UNLIKELY(p != NULL && p->m_semaphore == boost::winapi::invalid_handle_value))
703 {
704 delete p;
705 p = NULL;
706 }
707 return p;
708 }
709
710 //! Returns \c true if the semaphore is the single element of the list
711 bool is_singular() const BOOST_NOEXCEPT
712 {
713 return m_next == this /* && m_prev == this */;
714 }
715
716 //! Inserts the semaphore list after the specified other semaphore
717 void link_after(semaphore* that) BOOST_NOEXCEPT
718 {
719 link_before(that->m_next);
720 }
721
722 //! Inserts the semaphore list before the specified other semaphore
723 void link_before(semaphore* that) BOOST_NOEXCEPT
724 {
725 semaphore* prev = that->m_prev;
726 that->m_prev = m_prev;
727 m_prev->m_next = that;
728 m_prev = prev;
729 prev->m_next = this;
730 }
731
732 //! Removes the semaphore from the list
733 void unlink() BOOST_NOEXCEPT
734 {
735 // Load pointers beforehand, in case we are the only element in the list
736 semaphore* next = m_next;
737 semaphore* prev = m_prev;
738 prev->m_next = next;
739 next->m_prev = prev;
740 m_next = m_prev = this;
741 }
742
743 BOOST_DELETED_FUNCTION(semaphore(semaphore const&))
744 BOOST_DELETED_FUNCTION(semaphore& operator= (semaphore const&))
745 };
746
747 //! Doubly linked circular list of semaphores
748 class semaphore_list
749 {
750 private:
751 semaphore* m_head;
752
753 public:
754 semaphore_list() BOOST_NOEXCEPT :
755 m_head(NULL)
756 {
757 }
758
759 //! Returns \c true if the list is empty
760 bool empty() const BOOST_NOEXCEPT
761 {
762 return m_head == NULL;
763 }
764
765 //! Returns the first semaphore in the list
766 semaphore* front() const BOOST_NOEXCEPT
767 {
768 return m_head;
769 }
770
771 //! Returns the first semaphore in the list and leaves the list empty
772 semaphore* eject() BOOST_NOEXCEPT
773 {
774 semaphore* sem = m_head;
775 m_head = NULL;
776 return sem;
777 }
778
779 //! Inserts the semaphore at the beginning of the list
780 void push_front(semaphore* sem) BOOST_NOEXCEPT
781 {
782 if (m_head)
783 sem->link_before(m_head);
784
785 m_head = sem;
786 }
787
788 //! Removes the first semaphore from the beginning of the list
789 semaphore* pop_front() BOOST_NOEXCEPT
790 {
791 BOOST_ASSERT(!empty());
792 semaphore* sem = m_head;
793 erase(sem);
794 return sem;
795 }
796
797 //! Removes the semaphore from the list
798 void erase(semaphore* sem) BOOST_NOEXCEPT
799 {
800 if (sem->is_singular())
801 {
802 BOOST_ASSERT(m_head == sem);
803 m_head = NULL;
804 }
805 else
806 {
807 if (m_head == sem)
808 m_head = sem->m_next;
809 sem->unlink();
810 }
811 }
812
813 BOOST_DELETED_FUNCTION(semaphore_list(semaphore_list const&))
814 BOOST_DELETED_FUNCTION(semaphore_list& operator= (semaphore_list const&))
815 };
816
817 //! List of semaphores used for notifying. Here, every semaphore has m_notify_count > 0 && m_waiter_count > 0.
818 semaphore_list m_notify_semaphores;
819 //! List of semaphores used for waiting. Here, every semaphore has m_notify_count == 0 && m_waiter_count > 0.
820 semaphore_list m_wait_semaphores;
821 //! List of free semaphores. Here, every semaphore has m_notify_count == 0 && m_waiter_count == 0.
822 semaphore_list m_free_semaphores;
823
824 explicit wait_state(std::size_t index) BOOST_NOEXCEPT :
825 wait_state_base(index)
826 {
827 }
828
829 ~wait_state() BOOST_NOEXCEPT
830 {
831 // All wait and notification operations must have been completed
832 BOOST_ASSERT(m_notify_semaphores.empty());
833 BOOST_ASSERT(m_wait_semaphores.empty());
834
835 semaphore* sem = m_free_semaphores.eject();
836 if (sem)
837 {
838 while (true)
839 {
840 bool was_last = sem->is_singular();
841 semaphore* next = sem->m_next;
842 sem->unlink();
843
844 delete sem;
845
846 if (was_last)
847 break;
848
849 sem = next;
850 }
851 }
852 }
853
854 //! Blocks in the wait operation until notified
855 void wait(lock_state& state) BOOST_NOEXCEPT;
856 //! Fallback implementation of wait
857 void wait_fallback(lock_state& state) BOOST_NOEXCEPT;
858
859 //! Wakes up one thread blocked in the wait operation
860 void notify_one(lock_state&) BOOST_NOEXCEPT
861 {
862 if (m_notify_semaphores.empty())
863 {
864 if (m_wait_semaphores.empty())
865 return;
866
867 // Move the semaphore with waiters to the notify list
868 m_notify_semaphores.push_front(m_wait_semaphores.pop_front());
869 }
870
871 semaphore* sem = m_notify_semaphores.front();
872 ++sem->m_notify_count;
873
874 if (sem->m_notify_count == sem->m_waiter_count)
875 {
876 // Remove this semaphore from the list. The waiter will re-insert it into the waiter or free list once there are no more pending notifications in it.
877 m_notify_semaphores.erase(sem);
878 }
879
880 boost::winapi::ReleaseSemaphore(sem->m_semaphore, 1, NULL);
881 }
882
883 //! Wakes up all threads blocked in the wait operation
884 void notify_all(lock_state&) BOOST_NOEXCEPT
885 {
886 // Combine all notify and waiter semaphores in one list
887 semaphore* sem = m_notify_semaphores.eject();
888 if (sem)
889 {
890 if (!m_wait_semaphores.empty())
891 {
892 m_wait_semaphores.eject()->link_before(sem);
893 }
894 }
895 else
896 {
897 sem = m_wait_semaphores.eject();
898 }
899
900 if (sem)
901 {
902 while (true)
903 {
904 bool was_last = sem->is_singular();
905 semaphore* next = sem->m_next;
906 sem->unlink();
907
908 boost::winapi::ULONG_ count = sem->m_waiter_count - sem->m_notify_count;
909 sem->m_notify_count += count;
910
911 boost::winapi::ReleaseSemaphore(sem->m_semaphore, count, NULL);
912
913 if (was_last)
914 break;
915
916 sem = next;
917 }
918 }
919 }
920 };
921
922 //! Lock pool entry
923 struct lock_state
924 {
925 //! Mutex
926 boost::winapi::CRITICAL_SECTION_ m_mutex;
927 //! Fallback mutex. Indicates the critical section initialization state and serves as a fallback mutex if the critical section cannot be initialized.
928 BOOST_ATOMIC_DETAIL_ALIGNED_VAR(mutex_operations::storage_alignment, mutex_operations::storage_type, m_mutex_fallback);
929 //! Wait states
930 wait_state_list m_wait_states;
931
932 //! Locks the mutex for a short duration
933 void short_lock() BOOST_NOEXCEPT
934 {
935 long_lock();
936 }
937
938 //! Locks the mutex for a long duration
939 void long_lock() BOOST_NOEXCEPT
940 {
941 mutex_operations::storage_type fallback_state = mutex_operations::load(m_mutex_fallback, boost::memory_order_relaxed);
942 while (true)
943 {
944 if (BOOST_LIKELY(fallback_state == fallback_mutex_bits::critical_section_initialized))
945 {
946 lock_cs:
947 boost::winapi::EnterCriticalSection(&m_mutex);
948 return;
949 }
950
951 while (fallback_state == 0u)
952 {
953 if (!mutex_operations::compare_exchange_weak(m_mutex_fallback, fallback_state, fallback_mutex_bits::locked, boost::memory_order_acquire, boost::memory_order_relaxed))
954 continue;
955
956 if (BOOST_LIKELY(!!boost::winapi::InitializeCriticalSectionAndSpinCount(&m_mutex, 100u)))
957 {
958 mutex_operations::store(m_mutex_fallback, fallback_mutex_bits::critical_section_initialized, boost::memory_order_release);
959 goto lock_cs;
960 }
961
962 // We failed to init the critical section, leave the fallback mutex locked and return
963 return;
964 }
965
966 if (fallback_state == fallback_mutex_bits::locked)
967 {
968 // Wait until the fallback mutex is unlocked
969 boost::winapi::SwitchToThread();
970 fallback_state = mutex_operations::load(m_mutex_fallback, boost::memory_order_relaxed);
971 }
972 }
973 }
974
975 //! Unlocks the mutex
976 void unlock() BOOST_NOEXCEPT
977 {
978 mutex_operations::storage_type fallback_state = mutex_operations::load(m_mutex_fallback, boost::memory_order_relaxed);
979 if (BOOST_LIKELY(fallback_state == fallback_mutex_bits::critical_section_initialized))
980 {
981 boost::winapi::LeaveCriticalSection(&m_mutex);
982 return;
983 }
984
985 mutex_operations::store(m_mutex_fallback, 0u, boost::memory_order_release);
986 }
987 };
988
989 #if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
990 #define BOOST_ATOMIC_LOCK_STATE_INIT { {}, 0u, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
991 #else
992 #define BOOST_ATOMIC_LOCK_STATE_INIT { {}, { 0u }, BOOST_ATOMIC_WAIT_STATE_LIST_INIT }
993 #endif
994
995 //! Blocks in the wait operation until notified
996 inline void wait_state::wait(lock_state& state) BOOST_NOEXCEPT
997 {
998 // Find a semaphore to block on
999 semaphore* sem = m_wait_semaphores.front();
1000 if (sem)
1001 {
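// A semaphore accommodates at most LONG_MAX waiters (its creation-time maximum count),
// so walk the circular list looking for one that still has room.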
1002 while (sem->m_waiter_count >= static_cast< boost::winapi::ULONG_ >((std::numeric_limits< boost::winapi::LONG_ >::max)()))
1003 {
1004 if (sem->m_next == m_wait_semaphores.front())
1005 {
1006 sem = NULL;
1007 break;
1008 }
1009
1010 sem = sem->m_next;
1011 }
1012 }
1013
1014 if (!sem)
1015 {
1016 if (BOOST_LIKELY(!m_free_semaphores.empty()))
1017 {
1018 sem = m_free_semaphores.pop_front();
1019 }
1020 else
1021 {
1022 sem = semaphore::create();
1023 if (BOOST_UNLIKELY(!sem))
1024 {
1025 wait_fallback(state);
1026 return;
1027 }
1028 }
1029
1030 m_wait_semaphores.push_front(sem);
1031 }
1032
1033 ++sem->m_waiter_count;
1034
1035 state.unlock();
1036
1037 boost::winapi::WaitForSingleObject(sem->m_semaphore, boost::winapi::infinite);
1038
1039 state.long_lock();
1040
1041 --sem->m_waiter_count;
1042
1043 if (sem->m_notify_count > 0u)
1044 {
1045 // This semaphore is either in the notify list or not in a list at all
1046 if (--sem->m_notify_count == 0u)
1047 {
1048 if (!sem->is_singular() || sem == m_notify_semaphores.front())
1049 m_notify_semaphores.erase(sem);
1050
1051 semaphore_list* list = sem->m_waiter_count == 0u ? &m_free_semaphores : &m_wait_semaphores;
1052 list->push_front(sem);
1053 }
1054 }
1055 else if (sem->m_waiter_count == 0u)
1056 {
1057 // Move the semaphore to the free list
1058 m_wait_semaphores.erase(sem);
1059 m_free_semaphores.push_front(sem);
1060 }
1061 }
1062
1063 //! Fallback implementation of wait
1064 inline void wait_state::wait_fallback(lock_state& state) BOOST_NOEXCEPT
1065 {
1066 state.unlock();
1067
1068 boost::winapi::Sleep(0);
1069
1070 state.long_lock();
1071 }
1072
1073 #endif // BOOST_USE_WINAPI_VERSION >= BOOST_WINAPI_VERSION_WIN6
1074
1075 #endif
1076
1077 enum
1078 {
1079 tail_size = sizeof(lock_state) % BOOST_ATOMIC_CACHE_LINE_SIZE,
1080 padding_size = tail_size > 0 ? BOOST_ATOMIC_CACHE_LINE_SIZE - tail_size : 0u
1081 };
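// For example, with a 64-byte cache line a 24-byte lock_state gives tail_size == 24 and
// padding_size == 40, while a lock_state of exactly 64 or 128 bytes needs no padding.
// (These sizes are only illustrative; the real sizeof(lock_state) depends on the backend selected above.)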
1082
1083 template< unsigned int PaddingSize >
1084 struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock_state
1085 {
1086 lock_state state;
1087 // The additional padding is needed to avoid false sharing between locks
1088 char padding[PaddingSize];
1089 };
1090
1091 template< >
1092 struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock_state< 0u >
1093 {
1094 lock_state state;
1095 };
1096
1097 typedef padded_lock_state< padding_size > padded_lock_state_t;
1098
1099 #if !defined(BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2)
1100 #define BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 8
1101 #endif
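// With the default BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 of 8 the pool below holds 2^8 = 256 lock states.
// The macro can be overridden when building the library to trade memory for less contention between
// unrelated atomic objects that happen to hash to the same lock.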
1102 #if (BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2) < 0
1103 #error "Boost.Atomic: BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 macro value is negative"
1104 #endif
1105 #define BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE (1ull << (BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2))
1106
1107 //! Lock pool size. Must be a power of two.
1108 BOOST_CONSTEXPR_OR_CONST std::size_t lock_pool_size = static_cast< std::size_t >(1u) << (BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2);
1109
1110 static padded_lock_state_t g_lock_pool[lock_pool_size] =
1111 {
1112 #if BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE > 256u
1113 #if (BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE / 256u) > BOOST_PP_LIMIT_ITERATION
1114 #error "Boost.Atomic: BOOST_ATOMIC_LOCK_POOL_SIZE_LOG2 macro value is too large"
1115 #endif
1116 #define BOOST_PP_ITERATION_PARAMS_1 (3, (1, (BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE / 256u), "lock_pool_init256.ipp"))
1117 #else // BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE > 256u
1118 #define BOOST_PP_ITERATION_PARAMS_1 (3, (1, BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE, "lock_pool_init1.ipp"))
1119 #endif // BOOST_ATOMIC_DETAIL_LOCK_POOL_SIZE > 256u
1120 #include BOOST_PP_ITERATE()
1121 #undef BOOST_PP_ITERATION_PARAMS_1
1122 };
1123
1124 //! Pool cleanup function
1125 void cleanup_lock_pool()
1126 {
1127 for (std::size_t i = 0u; i < lock_pool_size; ++i)
1128 {
1129 lock_state& state = g_lock_pool[i].state;
1130 state.long_lock();
1131 state.m_wait_states.m_free_memory = true;
1132 state.m_wait_states.free_spare();
1133 state.unlock();
1134 }
1135 }
1136
1137 BOOST_STATIC_ASSERT_MSG(once_flag_operations::is_always_lock_free, "Boost.Atomic unsupported target platform: native atomic operations not implemented for bytes");
1138 static once_flag g_pool_cleanup_registered = {};
1139
1140 //! Returns index of the lock pool entry for the given pointer value
1141 BOOST_FORCEINLINE std::size_t get_lock_index(atomics::detail::uintptr_t h) BOOST_NOEXCEPT
1142 {
1143 return h & (lock_pool_size - 1u);
1144 }
1145
1146 //! Finds an existing element with the given pointer to the atomic object or allocates a new one
1147 inline wait_state* wait_state_list::find_or_create(const volatile void* addr) BOOST_NOEXCEPT
1148 {
1149 if (BOOST_UNLIKELY(m_header == NULL))
1150 {
1151 m_header = allocate_buffer(initial_capacity);
1152 if (BOOST_UNLIKELY(m_header == NULL))
1153 return NULL;
1154 }
1155 else
1156 {
1157 wait_state* ws = this->find(addr);
1158 if (BOOST_LIKELY(ws != NULL))
1159 return ws;
1160
1161 if (BOOST_UNLIKELY(m_header->size == m_header->capacity))
1162 {
1163 header* new_header = allocate_buffer(m_header->capacity * 2u, m_header);
1164 if (BOOST_UNLIKELY(new_header == NULL))
1165 return NULL;
1166 boost::alignment::aligned_free(static_cast< void* >(m_header));
1167 m_header = new_header;
1168 }
1169 }
1170
1171 const std::size_t index = m_header->size;
1172 BOOST_ASSERT(index < m_header->capacity);
1173
1174 wait_state** pw = get_wait_states() + index;
1175 wait_state* w = *pw;
1176 if (BOOST_UNLIKELY(w == NULL))
1177 {
1178 w = new (std::nothrow) wait_state(index);
1179 if (BOOST_UNLIKELY(w == NULL))
1180 return NULL;
1181 *pw = w;
1182 }
1183
1184 get_atomic_pointers()[index] = addr;
1185
1186 ++m_header->size;
1187
1188 return w;
1189 }
1190
1191 //! Releases the previously created wait state
1192 inline void wait_state_list::erase(wait_state* w) BOOST_NOEXCEPT
1193 {
1194 BOOST_ASSERT(m_header != NULL);
1195
1196 const volatile void** pa = get_atomic_pointers();
1197 wait_state** pw = get_wait_states();
1198
1199 std::size_t index = w->m_index;
1200
1201 BOOST_ASSERT(index < m_header->size);
1202 BOOST_ASSERT(pw[index] == w);
1203
1204 std::size_t last_index = m_header->size - 1u;
1205
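// Keep both arrays dense: move the last entry into the erased slot and park the erased wait_state
// at the former last position, where it stays allocated as a spare for future reuse.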
1206 if (index != last_index)
1207 {
1208 pa[index] = pa[last_index];
1209 pa[last_index] = NULL;
1210
1211 wait_state* last_w = pw[last_index];
1212 pw[index] = last_w;
1213 pw[last_index] = w;
1214
1215 last_w->m_index = index;
1216 w->m_index = last_index;
1217 }
1218 else
1219 {
1220 pa[index] = NULL;
1221 }
1222
1223 --m_header->size;
1224
1225 if (BOOST_UNLIKELY(m_free_memory))
1226 free_spare();
1227 }
1228
1229 //! Allocates new buffer for the list entries
1230 wait_state_list::header* wait_state_list::allocate_buffer(std::size_t new_capacity, header* old_header) BOOST_NOEXCEPT
1231 {
1232 if (BOOST_UNLIKELY(once_flag_operations::load(g_pool_cleanup_registered.m_flag, boost::memory_order_relaxed) == 0u))
1233 {
1234 if (once_flag_operations::exchange(g_pool_cleanup_registered.m_flag, 1u, boost::memory_order_relaxed) == 0u)
1235 std::atexit(&cleanup_lock_pool);
1236 }
1237
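// The buffer holds the header followed by two parallel arrays of new_capacity pointers each:
// the atomic object addresses and the corresponding wait_state pointers (hence the factor of 2).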
1238 const std::size_t new_buffer_size = entries_offset + new_capacity * sizeof(void*) * 2u;
1239
1240 void* p = boost::alignment::aligned_alloc(buffer_alignment, new_buffer_size);
1241 if (BOOST_UNLIKELY(p == NULL))
1242 return NULL;
1243
1244 header* h = new (p) header;
1245 const volatile void** a = new (get_atomic_pointers(h)) const volatile void*[new_capacity];
1246 wait_state** w = new (get_wait_states(a, new_capacity)) wait_state*[new_capacity];
1247
1248 if (BOOST_LIKELY(old_header != NULL))
1249 {
1250 BOOST_ASSERT(new_capacity >= old_header->capacity);
1251
1252 h->size = old_header->size;
1253
1254 const volatile void** old_a = get_atomic_pointers(old_header);
1255 std::memcpy(a, old_a, old_header->size * sizeof(const volatile void*));
1256 std::memset(a + old_header->size, 0, (new_capacity - old_header->size) * sizeof(const volatile void*));
1257
1258 wait_state** old_w = get_wait_states(old_a, old_header->capacity);
1259 std::memcpy(w, old_w, old_header->capacity * sizeof(wait_state*)); // copy spare wait state pointers
1260 std::memset(w + old_header->capacity, 0, (new_capacity - old_header->capacity) * sizeof(wait_state*));
1261 }
1262 else
1263 {
1264 std::memset(p, 0, new_buffer_size);
1265 }
1266
1267 h->capacity = new_capacity;
1268
1269 return h;
1270 }
1271
1272 //! Deallocates spare entries and the list buffer if no allocated entries are left
1273 void wait_state_list::free_spare() BOOST_NOEXCEPT
1274 {
1275 if (BOOST_LIKELY(m_header != NULL))
1276 {
1277 wait_state** ws = get_wait_states();
1278 for (std::size_t i = m_header->size, n = m_header->capacity; i < n; ++i)
1279 {
1280 wait_state* w = ws[i];
1281 if (!w)
1282 break;
1283
1284 delete w;
1285 ws[i] = NULL;
1286 }
1287
1288 if (m_header->size == 0u)
1289 {
1290 boost::alignment::aligned_free(static_cast< void* >(m_header));
1291 m_header = NULL;
1292 }
1293 }
1294 }
1295
1296 } // namespace
1297
1298
1299 BOOST_ATOMIC_DECL void* short_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT
1300 {
1301 lock_state& ls = g_lock_pool[get_lock_index(h)].state;
1302 ls.short_lock();
1303 return &ls;
1304 }
1305
1306 BOOST_ATOMIC_DECL void* long_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT
1307 {
1308 lock_state& ls = g_lock_pool[get_lock_index(h)].state;
1309 ls.long_lock();
1310 return &ls;
1311 }
1312
1313 BOOST_ATOMIC_DECL void unlock(void* vls) BOOST_NOEXCEPT
1314 {
1315 static_cast< lock_state* >(vls)->unlock();
1316 }
1317
1318
1319 BOOST_ATOMIC_DECL void* allocate_wait_state(void* vls, const volatile void* addr) BOOST_NOEXCEPT
1320 {
1321 BOOST_ASSERT(vls != NULL);
1322
1323 lock_state* ls = static_cast< lock_state* >(vls);
1324
1325 // Note: find_or_create may fail to allocate memory. However, C++20 specifies that wait/notify operations
1326 // are noexcept, so allocate_wait_state must not fail by throwing. To handle allocation failure we return NULL
1327 // here and test for NULL in the other wait/notify functions so that they all become no-ops (which is conforming, though inefficient, behavior).
1328 wait_state* ws = ls->m_wait_states.find_or_create(addr);
1329
1330 if (BOOST_LIKELY(ws != NULL))
1331 ++ws->m_ref_count;
1332
1333 return ws;
1334 }
1335
1336 BOOST_ATOMIC_DECL void free_wait_state(void* vls, void* vws) BOOST_NOEXCEPT
1337 {
1338 BOOST_ASSERT(vls != NULL);
1339
1340 wait_state* ws = static_cast< wait_state* >(vws);
1341 if (BOOST_LIKELY(ws != NULL))
1342 {
1343 if (--ws->m_ref_count == 0u)
1344 {
1345 lock_state* ls = static_cast< lock_state* >(vls);
1346 ls->m_wait_states.erase(ws);
1347 }
1348 }
1349 }
1350
1351 BOOST_ATOMIC_DECL void wait(void* vls, void* vws) BOOST_NOEXCEPT
1352 {
1353 BOOST_ASSERT(vls != NULL);
1354
1355 lock_state* ls = static_cast< lock_state* >(vls);
1356 wait_state* ws = static_cast< wait_state* >(vws);
1357 if (BOOST_LIKELY(ws != NULL))
1358 {
1359 ws->wait(*ls);
1360 }
1361 else
1362 {
1363 // A conforming wait operation must unlock and lock the mutex to allow a notify to complete
1364 ls->unlock();
1365 atomics::detail::wait_some();
1366 ls->long_lock();
1367 }
1368 }
1369
1370 BOOST_ATOMIC_DECL void notify_one(void* vls, const volatile void* addr) BOOST_NOEXCEPT
1371 {
1372 BOOST_ASSERT(vls != NULL);
1373
1374 lock_state* ls = static_cast< lock_state* >(vls);
1375 wait_state* ws = ls->m_wait_states.find(addr);
1376 if (BOOST_LIKELY(ws != NULL))
1377 ws->notify_one(*ls);
1378 }
1379
1380 BOOST_ATOMIC_DECL void notify_all(void* vls, const volatile void* addr) BOOST_NOEXCEPT
1381 {
1382 BOOST_ASSERT(vls != NULL);
1383
1384 lock_state* ls = static_cast< lock_state* >(vls);
1385 wait_state* ws = ls->m_wait_states.find(addr);
1386 if (BOOST_LIKELY(ws != NULL))
1387 ws->notify_all(*ls);
1388 }
1389
1390
1391 BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT
1392 {
1393 #if BOOST_ATOMIC_THREAD_FENCE == 2
1394 atomics::detail::fence_operations::thread_fence(memory_order_seq_cst);
1395 #else
1396 // Emulate full fence by locking/unlocking a mutex
1397 lock_pool::unlock(lock_pool::short_lock(0u));
1398 #endif
1399 }
1400
1401 BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT
1402 {
1403 // This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
1404 #if BOOST_ATOMIC_SIGNAL_FENCE == 2
1405 atomics::detail::fence_operations::signal_fence(memory_order_seq_cst);
1406 #endif
1407 }
1408
1409 } // namespace lock_pool
1410 } // namespace detail
1411 } // namespace atomics
1412 } // namespace boost
1413
1414 #include <boost/atomic/detail/footer.hpp>
1415