/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013-2014, 2020 Andrey Semashev
 */
/*!
 * \file   atomic/detail/lock_pool.hpp
 *
 * This header contains the declaration of the lock pool used to emulate atomic ops.
 */

#ifndef BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_

#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/link.hpp>
#include <boost/atomic/detail/intptr.hpp>
#if defined(BOOST_WINDOWS)
#include <boost/winapi/thread.hpp>
#elif defined(BOOST_HAS_NANOSLEEP)
#include <time.h>
#else
#include <unistd.h>
#endif
#include <boost/atomic/detail/header.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

BOOST_FORCEINLINE void wait_some() BOOST_NOEXCEPT
{
#if defined(BOOST_WINDOWS)
    boost::winapi::SwitchToThread();
#elif defined(BOOST_HAS_NANOSLEEP)
    // Do not use sched_yield or pthread_yield as at least on Linux it doesn't block the thread if there are no other
    // pending threads on the current CPU. Proper sleeping is guaranteed to block the thread, which allows other threads
    // to potentially migrate to this CPU and complete the tasks we're waiting for.
    struct ::timespec ts = {};
    ts.tv_sec = 0;
    ts.tv_nsec = 1000;
    ::nanosleep(&ts, NULL);
#else
    ::usleep(1);
#endif
}
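
// Typical use of wait_some() is as a backoff step in a spin loop, e.g. (a
// sketch only; try_acquire_spinlock is hypothetical, not part of this header):
//
//   while (!try_acquire_spinlock())
//       atomics::detail::wait_some();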

namespace lock_pool {

// Acquires the pool lock corresponding to the given hash value. short_lock is
// intended for short critical sections, long_lock for sections that may block
// (e.g. waiting operations). Both return an opaque pointer to the internal
// lock state, which must be passed to unlock and the functions below.
BOOST_ATOMIC_DECL void* short_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void* long_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void unlock(void* ls) BOOST_NOEXCEPT;

// Operations used to emulate atomic waiting and notifying operations. A wait
// state is allocated per waited-on address; wait blocks the calling thread
// until notified, and notify_one/notify_all wake threads waiting on the address.
BOOST_ATOMIC_DECL void* allocate_wait_state(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void free_wait_state(void* ls, void* ws) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void wait(void* ls, void* ws) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void notify_one(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void notify_all(void* ls, const volatile void* addr) BOOST_NOEXCEPT;

// Fences used to emulate atomic thread and signal fences.
BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
template< std::size_t Alignment >
BOOST_FORCEINLINE atomics::detail::uintptr_t hash_ptr(const volatile void* addr) BOOST_NOEXCEPT
{
    atomics::detail::uintptr_t ptr = (atomics::detail::uintptr_t)addr;
    atomics::detail::uintptr_t h = ptr / Alignment;

    // Since many malloc/new implementations return pointers with higher alignment
    // than indicated by Alignment, it makes sense to mix higher bits
    // into the lower ones. On 64-bit platforms, malloc typically aligns to 16 bytes,
    // on 32-bit platforms - to 8 bytes.
    BOOST_CONSTEXPR_OR_CONST std::size_t malloc_alignment = sizeof(void*) >= 8u ? 16u : 8u;
    BOOST_IF_CONSTEXPR (Alignment != malloc_alignment)
        h ^= ptr / malloc_alignment;

    return h;
}
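
// Note that all addresses within the same Alignment-sized block hash to the
// same value, so all accesses to one emulated atomic object contend for the
// same pool lock. For example, assuming a 4-byte, 4-aligned object:
//
//   const volatile int x = 0;
//   atomics::detail::uintptr_t h1 = lock_pool::hash_ptr< 4u >(&x);
//   atomics::detail::uintptr_t h2 =
//       lock_pool::hash_ptr< 4u >(reinterpret_cast< const volatile char* >(&x) + 3);
//   // h1 == h2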

// RAII guard that locks the lock pool entry corresponding to the given address
// for the duration of the scope. LongLock selects the blocking lock variant,
// intended for potentially long critical sections.
template< std::size_t Alignment, bool LongLock = false >
class scoped_lock
{
private:
    void* m_lock;

public:
    explicit scoped_lock(const volatile void* addr) BOOST_NOEXCEPT
    {
        atomics::detail::uintptr_t h = lock_pool::hash_ptr< Alignment >(addr);
        BOOST_IF_CONSTEXPR (!LongLock)
            m_lock = lock_pool::short_lock(h);
        else
            m_lock = lock_pool::long_lock(h);
    }
    ~scoped_lock() BOOST_NOEXCEPT
    {
        lock_pool::unlock(m_lock);
    }

    void* get_lock_state() const BOOST_NOEXCEPT
    {
        return m_lock;
    }

    BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
    BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
};
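
// A sketch of how an emulated atomic operation would use scoped_lock
// (locked_load is a hypothetical helper, not part of this header): lock the
// pool entry covering the object, perform the plain access, and let the
// destructor release the lock.
//
//   template< typename T >
//   T locked_load(const volatile T* p) BOOST_NOEXCEPT
//   {
//       // The object's alignment determines which pool lock is taken
//       lock_pool::scoped_lock< alignof(T) > guard(p);  // alignof requires C++11
//       return *p;
//   }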

// RAII guard that, in addition to locking the pool entry for the address
// (using the blocking long_lock variant), allocates a wait state for the
// address so that the thread can block in wait() until notified.
template< std::size_t Alignment >
class scoped_wait_state :
    public scoped_lock< Alignment, true >
{
private:
    void* m_wait_state;

public:
    explicit scoped_wait_state(const volatile void* addr) BOOST_NOEXCEPT :
        scoped_lock< Alignment, true >(addr)
    {
        m_wait_state = lock_pool::allocate_wait_state(this->get_lock_state(), addr);
    }
    ~scoped_wait_state() BOOST_NOEXCEPT
    {
        lock_pool::free_wait_state(this->get_lock_state(), m_wait_state);
    }

    void wait() BOOST_NOEXCEPT
    {
        lock_pool::wait(this->get_lock_state(), m_wait_state);
    }

    BOOST_DELETED_FUNCTION(scoped_wait_state(scoped_wait_state const&))
    BOOST_DELETED_FUNCTION(scoped_wait_state& operator=(scoped_wait_state const&))
};
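
// A sketch of an emulated waiting operation built on scoped_wait_state
// (locked_wait is a hypothetical helper; the actual waiting operations in
// Boost.Atomic are more elaborate):
//
//   template< typename T >
//   T locked_wait(const volatile T* p, T old_val) BOOST_NOEXCEPT
//   {
//       lock_pool::scoped_wait_state< alignof(T) > ws(p);
//       T val = *p;
//       while (val == old_val)
//       {
//           ws.wait();  // presumably releases the pool lock while blocked
//           val = *p;
//       }
//       return val;
//   }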
143 
144 } // namespace lock_pool
145 } // namespace detail
146 } // namespace atomics
147 } // namespace boost
148 
149 #include <boost/atomic/detail/footer.hpp>
150 
151 #endif // BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
152