Lines Matching +full:use +full:- +full:rtm

1 // SPDX-License-Identifier: GPL-2.0-only
4 * RT-specific reader/writer semaphores and reader/writer locks
14 * 2) Set the reader BIAS, so readers can use the fast path again
36 * for one reader after the other. We can't use multi-reader inheritance
41 * The risk of writer starvation is there, but the pathological use cases
44 * Fast-path orderings:
58 * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is in rwbase_read_trylock()
61 for (r = atomic_read(&rwb->readers); r < 0;) { in rwbase_read_trylock()
62 if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1))) in rwbase_read_trylock()
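These matches all appear to come from the kernel's kernel/locking/rwbase_rt.c, the common base shared by RT-mode rw_semaphores and rwlocks. For context, a sketch of the reader fast path that the matches at lines 58-62 belong to is given below; everything not shown in the matched lines is filled in from memory of that file and should be treated as an approximation, not an authoritative copy:

/*
 * Reader fast path (sketch): rwb->readers stays negative (READER_BIAS set)
 * while no writer is acquiring the lock, so a reader only needs to bump the
 * count with an acquire cmpxchg. Once a writer subtracts READER_BIAS the
 * count becomes >= 0 and new readers fall into the slow path.
 */
static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
	int r;

	/*
	 * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
	 * set.
	 */
	for (r = atomic_read(&rwb->readers); r < 0;) {
		if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
			return 1;
	}
	return 0;
}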
71 struct rt_mutex_base *rtm = &rwb->rtmutex; in __rwbase_read_lock() local
76 raw_spin_lock_irq(&rtm->wait_lock); in __rwbase_read_lock()
79 * Call into the slow lock path with the rtmutex->wait_lock in __rwbase_read_lock()
88 * unlock(m->wait_lock) in __rwbase_read_lock()
91 * lock(m->wait_lock) in __rwbase_read_lock()
92 * sem->writelocked=true in __rwbase_read_lock()
93 * unlock(m->wait_lock) in __rwbase_read_lock()
96 * sem->writelocked=false in __rwbase_read_lock()
114 ret = rwbase_rtmutex_slowlock_locked(rtm, state, &wake_q); in __rwbase_read_lock()
121 * rtmutex->wait_lock has to be unlocked in any case of course. in __rwbase_read_lock()
124 atomic_inc(&rwb->readers); in __rwbase_read_lock()
127 raw_spin_unlock_irq(&rtm->wait_lock); in __rwbase_read_lock()
132 rwbase_rtmutex_unlock(rtm); in __rwbase_read_lock()
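The matches at lines 71-132 above are scattered through the reader slow path, __rwbase_read_lock(). A condensed sketch of how they fit together follows; the full race diagram (file lines 88-96), the tracing and scheduling hooks, and the flush of the wake queue are elided, and the unmatched lines are reconstructed from memory rather than from the listing:

static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
				      unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	DEFINE_RT_WAKE_Q(wake_q);	/* wake queue handed through to the rtmutex slowlock */
	int ret;

	raw_spin_lock_irq(&rtm->wait_lock);

	/*
	 * If the writer has not yet completely acquired the lock for write
	 * (readers != WRITER_BIAS), the reader can still be admitted here,
	 * under wait_lock.
	 */
	if (atomic_read(&rwb->readers) != WRITER_BIAS) {
		atomic_inc(&rwb->readers);
		raw_spin_unlock_irq(&rtm->wait_lock);
		return 0;
	}

	/*
	 * Call into the slow lock path with the rtmutex->wait_lock held, so
	 * the writer cannot flip sem->writelocked between this reader's
	 * failed fast path and its enqueue on the rtmutex; the diagram at
	 * file lines 88-96 spells out the race this prevents.
	 */
	ret = rwbase_rtmutex_slowlock_locked(rtm, state, &wake_q);

	/*
	 * On success the rtmutex is held only to block the writer; account
	 * the reader and drop the rtmutex again. rtmutex->wait_lock has to
	 * be unlocked in any case of course.
	 */
	if (!ret)
		atomic_inc(&rwb->readers);
	raw_spin_unlock_irq(&rtm->wait_lock);
	if (!ret)
		rwbase_rtmutex_unlock(rtm);

	/* tracing, scheduling hooks and the wake_q flush are elided here */
	return ret;
}

The single match at line 142 below is from the rwbase_read_lock() wrapper, which asserts the caller is not already pi-blocked, tries the fast path, and only then falls back to this slow path.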
142 lockdep_assert(!current->pi_blocked_on); in rwbase_read_lock()
153 struct rt_mutex_base *rtm = &rwb->rtmutex; in __rwbase_read_unlock() local
157 raw_spin_lock_irq(&rtm->wait_lock); in __rwbase_read_unlock()
161 * clean up rwb->readers it needs to acquire rtm->wait_lock. The in __rwbase_read_unlock()
164 owner = rt_mutex_owner(rtm); in __rwbase_read_unlock()
170 raw_spin_unlock_irq(&rtm->wait_lock); in __rwbase_read_unlock()
178 * rwb->readers can only hit 0 when a writer is waiting for the in rwbase_read_unlock()
183 if (unlikely(atomic_dec_and_test(&rwb->readers))) in rwbase_read_unlock()
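Lines 153-183 cover the read-side unlock. The fast path is a plain dec_and_test(); only the last reader leaving while a writer waits takes the slow path, which wakes the rtmutex owner under wait_lock. A sketch, again with the unmatched lines filled in from memory (the exact wakeup mechanism varies between kernel versions and is simplified here):

static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
					 unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	struct task_struct *owner;

	raw_spin_lock_irq(&rtm->wait_lock);
	/*
	 * Wake the writer, i.e. the rtmutex owner. It might release the
	 * rtmutex concurrently in the fast path (due to a signal), but to
	 * clean up rwb->readers it needs to acquire rtm->wait_lock. The
	 * worst case is therefore a spurious wakeup.
	 */
	owner = rt_mutex_owner(rtm);
	if (owner)
		wake_up_state(owner, state);	/* newer kernels batch this through a wake queue */

	raw_spin_unlock_irq(&rtm->wait_lock);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
					       unsigned int state)
{
	/*
	 * rwb->readers can only hit 0 when a writer is waiting for the
	 * active readers to leave the critical section.
	 *
	 * dec_and_test() is fully ordered, provides RELEASE.
	 */
	if (unlikely(atomic_dec_and_test(&rwb->readers)))
		__rwbase_read_unlock(rwb, state);
}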
190 struct rt_mutex_base *rtm = &rwb->rtmutex; in __rwbase_write_unlock() local
196 (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers); in __rwbase_write_unlock()
197 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); in __rwbase_write_unlock()
198 rwbase_rtmutex_unlock(rtm); in __rwbase_write_unlock()
203 struct rt_mutex_base *rtm = &rwb->rtmutex; in rwbase_write_unlock() local
206 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_unlock()
212 struct rt_mutex_base *rtm = &rwb->rtmutex; in rwbase_write_downgrade() local
215 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_downgrade()
217 __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags); in rwbase_write_downgrade()
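Lines 190-217 show the write-side release paths. Both funnel through __rwbase_write_unlock(), which restores READER_BIAS relative to the bias the caller wants to leave behind: a full unlock passes WRITER_BIAS, while downgrade passes WRITER_BIAS - 1 so the calling task stays accounted as one active reader. A sketch reconstructed around the matched lines:

static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
					 unsigned long flags)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;

	/*
	 * The _release() pairs with the acquire cmpxchg in
	 * rwbase_read_trylock(), so a fast-path reader observes the
	 * writer's critical section.
	 */
	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_rtmutex_unlock(rtm);
}

static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	__rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/* Release it and account current as reader */
	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}

The arithmetic works out regardless of the actual constant values: starting from readers == WRITER_BIAS while write-locked, adding READER_BIAS - WRITER_BIAS leaves exactly READER_BIAS (bias restored, zero readers), whereas adding READER_BIAS - (WRITER_BIAS - 1) leaves READER_BIAS + 1, i.e. the bias plus the downgrading task counted as one reader.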
223 lockdep_assert_held(&rwb->rtmutex.wait_lock); in __rwbase_write_trylock()
229 if (!atomic_read_acquire(&rwb->readers)) { in __rwbase_write_trylock()
230 atomic_set(&rwb->readers, WRITER_BIAS); in __rwbase_write_trylock()
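Lines 223-230 are the heart of write acquisition: with rtm->wait_lock held no CAS is needed, an acquire read of readers == 0 means all readers have left, and the counter can simply be set to WRITER_BIAS. Sketch (unmatched lines are approximate):

static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
	/* Can do without CAS because we're serialized by wait_lock. */
	lockdep_assert_held(&rwb->rtmutex.wait_lock);

	/*
	 * The _acquire pairs with the fully ordered dec_and_test() in
	 * rwbase_read_unlock(), so the writer observes the readers'
	 * critical sections.
	 */
	if (!atomic_read_acquire(&rwb->readers)) {
		atomic_set(&rwb->readers, WRITER_BIAS);
		return true;
	}

	return false;
}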
240 struct rt_mutex_base *rtm = &rwb->rtmutex; in rwbase_write_lock() local
244 if (rwbase_rtmutex_lock_state(rtm, state)) in rwbase_write_lock()
245 return -EINTR; in rwbase_write_lock()
248 atomic_sub(READER_BIAS, &rwb->readers); in rwbase_write_lock()
252 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_lock()
264 trace_contention_end(rwb, -EINTR); in rwbase_write_lock()
265 return -EINTR; in rwbase_write_lock()
271 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); in rwbase_write_lock()
273 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_lock()
281 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); in rwbase_write_lock()
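Lines 240-281 span the write-lock slow path: take the rtmutex first (so the writer participates in priority inheritance and can be interrupted), subtract READER_BIAS to push new readers into the slow path, then wait under wait_lock for the active readers to drain. A condensed sketch; tracing, the task-state handling around the wait loop, and the RT scheduling hooks of the real function are mostly elided, and the rwbase_* helpers are the template hooks the including file is expected to provide:

static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
				     unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	/* Take the rtmutex as a first step */
	if (rwbase_rtmutex_lock_state(rtm, state))
		return -EINTR;

	/* Force readers into the slow path */
	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb))
		goto out_unlock;

	for (;;) {
		/* Optimized out for rwlocks */
		if (rwbase_signal_pending_state(state, current)) {
			/* Undo the reader-bias removal and bail out */
			__rwbase_write_unlock(rwb, 0, flags);
			trace_contention_end(rwb, -EINTR);
			return -EINTR;
		}

		if (__rwbase_write_trylock(rwb))
			break;

		/* Wait for the last reader's wakeup, then re-check */
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		rwbase_schedule();
		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	}

out_unlock:
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	return 0;
}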
288 struct rt_mutex_base *rtm = &rwb->rtmutex; in rwbase_write_trylock() local
291 if (!rwbase_rtmutex_trylock(rtm)) in rwbase_write_trylock()
294 atomic_sub(READER_BIAS, &rwb->readers); in rwbase_write_trylock()
296 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_trylock()
298 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); in rwbase_write_trylock()
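The last group of matches, lines 288-298, is the trylock variant of the writer path: the same sequence as rwbase_write_lock(), but every step must succeed immediately. A sketch of how the matched lines likely fit together, with the unmatched parts filled in from memory:

static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	if (!rwbase_rtmutex_trylock(rtm))
		return 0;

	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb)) {
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		return 1;
	}

	/* Readers are still active: restore the bias and drop the rtmutex */
	__rwbase_write_unlock(rwb, 0, flags);
	return 0;
}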