Lines matching full:owner
68 * lock->owner state tracking:
70 * lock->owner holds the task_struct pointer of the owner. Bit 0
73 * owner bit0
81 * possible when bit 0 of lock->owner is 0.
85 * we need to set bit 0 before looking at the lock, and the owner may be
90 * To prevent a cmpxchg of the owner releasing the lock, we need to
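The fragments above come from the file-header comment that explains how rtmutex packs the owner task_struct pointer and a "has waiters" flag into the single lock->owner word: bit 0 is free because task_struct pointers are word-aligned, and the fast cmpxchg-based acquire/release only works while that bit is clear. A minimal user-space sketch of the encoding, with invented names (toy_lock, toy_encode, toy_owner are not kernel API):

    #include <stdatomic.h>
    #include <stdint.h>

    struct task;                        /* stand-in for struct task_struct */

    #define HAS_WAITERS 1UL             /* bit 0 of the owner word */

    struct toy_lock {
        _Atomic uintptr_t owner;        /* task pointer | HAS_WAITERS */
    };

    /* Pack an owner pointer and the waiters flag into one word. */
    static uintptr_t toy_encode(struct task *owner, int waiters)
    {
        return (uintptr_t)owner | (waiters ? HAS_WAITERS : 0);
    }

    /* Recover the owner pointer by masking off the flag bit. */
    static struct task *toy_owner(struct toy_lock *l)
    {
        uintptr_t v = atomic_load_explicit(&l->owner, memory_order_relaxed);
        return (struct task *)(v & ~HAS_WAITERS);
    }

Once bit 0 is set, the owner word no longer equals any bare task pointer, so a fast-path compare-exchange against it must fail and fall into the slow path; that is what makes the flag a serialization point.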
95 rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_owner_encode() argument
97 unsigned long val = (unsigned long)owner; in rt_mutex_owner_encode()
106 rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_set_owner() argument
110 * for a new lock owner, so WRITE_ONCE is insufficient. in rt_mutex_set_owner()
112 xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner)); in rt_mutex_set_owner()
118 WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL)); in rt_mutex_clear_owner()
123 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
124 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
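Per the matched comment, rt_mutex_set_owner() needs explicit acquire semantics for the incoming owner even though lock->wait_lock is held, which is why it uses xchg_acquire() rather than WRITE_ONCE(); the clear side can stay a plain store because the wait_lock serializes it. A rough C11 analogue on the toy types above (a sketch, not the kernel implementation):

    /* Install a new owner, preserving the current waiters bit the way
     * rt_mutex_owner_encode() does. The exchange gives the new owner
     * acquire ordering; a plain store would not. */
    static void toy_set_owner(struct toy_lock *l, struct task *owner)
    {
        uintptr_t cur = atomic_load_explicit(&l->owner, memory_order_relaxed);

        atomic_exchange_explicit(&l->owner,
                                 toy_encode(owner, cur & HAS_WAITERS),
                                 memory_order_acquire);
    }

    /* Slow-path clear: serialized by the wait_lock, so a relaxed
     * store is enough, mirroring the WRITE_ONCE() in the listing. */
    static void toy_clear_owner(struct toy_lock *l)
    {
        uintptr_t cur = atomic_load_explicit(&l->owner, memory_order_relaxed);

        atomic_store_explicit(&l->owner,
                              toy_encode(NULL, cur & HAS_WAITERS),
                              memory_order_relaxed);
    }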
130 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters() local
137 * lock->owner still has the waiters bit set, otherwise the in fixup_rt_mutex_waiters()
141 * l->owner=T1 in fixup_rt_mutex_waiters()
144 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
152 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
170 * l->owner = owner in fixup_rt_mutex_waiters()
171 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
172 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
177 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
178 * cmpxchg(l->owner, T1, NULL) in fixup_rt_mutex_waiters()
179 * ===> Success (l->owner = NULL) in fixup_rt_mutex_waiters()
181 * l->owner = owner in fixup_rt_mutex_waiters()
182 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
188 * bit. If the bit is set then nothing can change l->owner either in fixup_rt_mutex_waiters()
193 owner = READ_ONCE(*p); in fixup_rt_mutex_waiters()
194 if (owner & RT_MUTEX_HAS_WAITERS) { in fixup_rt_mutex_waiters()
197 * why xchg_acquire() is used for updating owner for in fixup_rt_mutex_waiters()
205 xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS); in fixup_rt_mutex_waiters()
207 WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); in fixup_rt_mutex_waiters()
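The long fixup_rt_mutex_waiters() comment quoted above walks through interleavings in which carelessly rewriting l->owner could resurrect a stale owner that a concurrent fast-path cmpxchg has already released. The function therefore only clears the bit after re-reading the word, and only uses an acquiring exchange when a new owner is being established. Condensed, with the toy types (the kernel version also checks the waiter tree and runs under lock->wait_lock):

    /* Clear a stale waiters bit; caller holds the wait_lock and has
     * verified that no waiters are queued. */
    static void toy_fixup_waiters(struct toy_lock *l, int acquire_lock)
    {
        uintptr_t owner = atomic_load_explicit(&l->owner,
                                               memory_order_relaxed);

        if (!(owner & HAS_WAITERS))
            return;

        if (acquire_lock)
            /* A new owner is being established and needs acquire
             * ordering, hence the xchg_acquire() in the listing. */
            atomic_exchange_explicit(&l->owner, owner & ~HAS_WAITERS,
                                     memory_order_acquire);
        else
            /* The existing owner keeps the lock, so a plain store
             * is safe here, matching the WRITE_ONCE() branch. */
            atomic_store_explicit(&l->owner, owner & ~HAS_WAITERS,
                                  memory_order_relaxed);
    }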
220 return try_cmpxchg_acquire(&lock->owner, &old, new); in rt_mutex_cmpxchg_acquire()
232 return try_cmpxchg_release(&lock->owner, &old, new); in rt_mutex_cmpxchg_release()
242 unsigned long *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
243 unsigned long owner, new; in mark_rt_mutex_waiters() local
245 owner = READ_ONCE(*p); in mark_rt_mutex_waiters()
247 new = owner | RT_MUTEX_HAS_WAITERS; in mark_rt_mutex_waiters()
248 } while (!try_cmpxchg_relaxed(p, &owner, new)); in mark_rt_mutex_waiters()
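mark_rt_mutex_waiters() (the cmpxchg build) sets the waiters bit in a retry loop so that a concurrently changing owner pointer is never clobbered: if the owner word moved under us, the CAS fails, reloads it, and we try again. The same shape in C11 atomics (sketch only):

    /* Set HAS_WAITERS without disturbing a racing owner update. */
    static void toy_mark_waiters(struct toy_lock *l)
    {
        uintptr_t owner = atomic_load_explicit(&l->owner,
                                               memory_order_relaxed);

        /* On failure the CAS reloads 'owner', just as the
         * try_cmpxchg_relaxed() loop in the listing does. */
        while (!atomic_compare_exchange_weak_explicit(
                        &l->owner, &owner, owner | HAS_WAITERS,
                        memory_order_relaxed, memory_order_relaxed))
            ;
    }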
268 struct task_struct *owner = rt_mutex_owner(lock); in unlock_rt_mutex_safe() local
278 * cmpxchg(p, owner, 0) == owner in unlock_rt_mutex_safe()
287 * cmpxchg(p, owner, 0) != owner in unlock_rt_mutex_safe()
296 return rt_mutex_cmpxchg_release(lock, owner, NULL); in unlock_rt_mutex_safe()
331 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
332 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
336 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
342 lock->owner = NULL; in unlock_rt_mutex_safe()
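unlock_rt_mutex_safe() clears the waiters bit while still holding lock->wait_lock, drops the lock, and only then attempts the release cmpxchg from the old owner to NULL; a failed cmpxchg means a new waiter re-set the bit in the window and the slow path must be retried. The simple variant matched just above (slow path only) needs none of this, since lock->owner is then serialized by lock->wait_lock alone. A sketch of the cmpxchg flavour, spinlock handling reduced to comments:

    #include <stdbool.h>

    /* Called with the wait_lock held and an empty waiter queue.
     * Returns true if the lock was released. */
    static bool toy_unlock_safe(struct toy_lock *l, struct task *owner)
    {
        /* Clear the waiters bit while still serialized by wait_lock. */
        atomic_store_explicit(&l->owner, (uintptr_t)owner,
                              memory_order_relaxed);

        /* ... raw_spin_unlock_irqrestore(&lock->wait_lock, ...) ... */

        /* Release only if no waiter re-set the bit in the meantime;
         * on failure the caller must retry the slow path. */
        uintptr_t expected = (uintptr_t)owner;
        return atomic_compare_exchange_strong_explicit(
                        &l->owner, &expected, 0,
                        memory_order_release, memory_order_relaxed);
    }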
608 * @task: the task owning the mutex (owner) for which a chain walk is
614 * @next_lock: the mutex on which the owner of @orig_lock was blocked before
618 * its priority to the mutex owner (can be NULL in the case
620 * actually deboosting the owner)
663 * [10] task = owner(lock); [L]
672 * Where P1 is the blocking task and P2 is the lock owner; going up one step
673 * the owner becomes the next blocked task etc. in rt_mutex_adjust_prio_chain()
752 * the previous owner of the lock might have released the lock. in rt_mutex_adjust_prio_chain()
893 * If there is no owner of the lock, end of chain. in rt_mutex_adjust_prio_chain()
900 /* [10] Grab the next task, i.e. owner of @lock */ in rt_mutex_adjust_prio_chain()
907 * [12] Store whether owner is blocked in rt_mutex_adjust_prio_chain()
920 /* If owner is not blocked, end of chain. */ in rt_mutex_adjust_prio_chain()
953 * taking the owner task in [10]. in rt_mutex_adjust_prio_chain()
964 * We must abort the chain walk if there is no lock owner even in rt_mutex_adjust_prio_chain()
982 * [10] Grab the next task, i.e. the owner of @lock in rt_mutex_adjust_prio_chain()
984 * Since we hold lock->wait_lock and checked for !owner above, there in rt_mutex_adjust_prio_chain()
985 * must be an owner and it cannot go away. in rt_mutex_adjust_prio_chain()
995 * in the owner task's pi waiters tree with this waiter in rt_mutex_adjust_prio_chain()
996 * and adjust the priority of the owner. in rt_mutex_adjust_prio_chain()
1007 * the owner task's pi waiters tree with the new top in rt_mutex_adjust_prio_chain()
1009 * of the owner. in rt_mutex_adjust_prio_chain()
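The rt_mutex_adjust_prio_chain() kernel-doc and the numbered steps matched above ([10] "task = owner(lock)", the P1/P2 picture) describe the priority-inheritance walk: boost the owner of the lock the waiter is blocked on, and if that owner is itself blocked, step to the next lock and repeat until an owner is runnable, there is no owner, or no boost is needed. Stripped of all pi_lock/wait_lock juggling, requeueing and deadlock detection, the walk has this shape (toy_task and toy_mutex are invented; this is a schematic, not the full algorithm):

    struct toy_mutex;

    struct toy_task {
        int prio;                       /* lower value = higher priority */
        struct toy_mutex *blocked_on;   /* lock this task waits for, or NULL */
    };

    struct toy_mutex {
        struct toy_task *owner;         /* current owner, or NULL */
    };

    /* Propagate a waiter's priority down the owner chain. */
    static void toy_prio_chain_walk(struct toy_task *task, int new_prio)
    {
        while (task) {                  /* no owner: end of chain */
            if (task->prio <= new_prio)
                break;                  /* no boost needed: end of chain */
            task->prio = new_prio;      /* boost this owner */

            if (!task->blocked_on)
                break;                  /* owner is runnable: end of chain */
            /* Step [10]: grab the owner of the next lock. */
            task = task->blocked_on->owner;
        }
    }

The real walk re-validates everything after every lock drop, because, as the match at line 752 notes, the previous owner may have released the lock in the meantime.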
1093 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all in try_to_take_rt_mutex()
1100 * - There is a lock owner. The caller must fix up the in try_to_take_rt_mutex()
1111 * If @lock has an owner, give up. in try_to_take_rt_mutex()
1177 * Finish the lock acquisition. @task is the new owner. If in try_to_take_rt_mutex()
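try_to_take_rt_mutex() first sets RT_MUTEX_HAS_WAITERS to force all concurrent acquirers off the fast path, then gives up if the lock has an owner; otherwise it installs @task as the new owner and leaves any now-stale waiters bit to fixup_rt_mutex_waiters(). Very roughly, with the helpers sketched earlier (the real function also arbitrates between @task and the queued top waiter by priority, which is omitted here):

    /* Called with the wait_lock held. True if @task got the lock. */
    static bool toy_try_take(struct toy_lock *l, struct task *task)
    {
        /* Force contenders into the slow path; see the
         * mark_rt_mutex_waiters() matches above. */
        toy_mark_waiters(l);

        if (toy_owner(l))
            return false;       /* lock has an owner: give up */

        /* Finish the acquisition: @task is the new owner. A stale
         * waiters bit is cleaned up by fixup_rt_mutex_waiters(). */
        toy_set_owner(l, task);
        return true;
    }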
1209 struct task_struct *owner = rt_mutex_owner(lock); in task_blocks_on_rt_mutex() local
1228 if (owner == task && !(build_ww_mutex() && ww_ctx)) in task_blocks_on_rt_mutex()
1261 if (!owner) in task_blocks_on_rt_mutex()
1264 raw_spin_lock(&owner->pi_lock); in task_blocks_on_rt_mutex()
1266 rt_mutex_dequeue_pi(owner, top_waiter); in task_blocks_on_rt_mutex()
1267 rt_mutex_enqueue_pi(owner, waiter); in task_blocks_on_rt_mutex()
1269 rt_mutex_adjust_prio(lock, owner); in task_blocks_on_rt_mutex()
1270 if (owner->pi_blocked_on) in task_blocks_on_rt_mutex()
1276 /* Store the lock on which the owner is blocked, or NULL */ in task_blocks_on_rt_mutex()
1277 next_lock = task_blocked_on_lock(owner); in task_blocks_on_rt_mutex()
1279 raw_spin_unlock(&owner->pi_lock); in task_blocks_on_rt_mutex()
1281 * Even if full deadlock detection is on, if the owner is not in task_blocks_on_rt_mutex()
1289 * The owner can't disappear while holding a lock, in task_blocks_on_rt_mutex()
1290 * so the owner struct is protected by wait_lock. in task_blocks_on_rt_mutex()
1293 get_task_struct(owner); in task_blocks_on_rt_mutex()
1297 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, in task_blocks_on_rt_mutex()
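task_blocks_on_rt_mutex() is the enqueue side: it catches the trivial self-deadlock (owner == task), and when the new waiter becomes the lock's top waiter it requeues the owner's pi-waiters tree under owner->pi_lock, adjusts the owner's priority and, if the owner is itself blocked, takes a reference on it and runs the chain walk. A schematic on the toy_task/toy_mutex types from the chain-walk sketch (waiter trees, pi_lock and the ww_mutex special case elided):

    #include <errno.h>

    static int toy_block_on(struct toy_mutex *lock, struct toy_task *task,
                            int became_top_waiter)
    {
        struct toy_task *owner = lock->owner;

        if (owner == task)
            return -EDEADLK;    /* blocking on a lock we already own */

        if (!owner)
            return 0;           /* nobody to boost */

        if (became_top_waiter)
            /* Re-account in the owner's pi-waiters tree (elided) and
             * propagate our priority starting at the owner; the real
             * code walks further only if owner->pi_blocked_on. */
            toy_prio_chain_walk(owner, task->prio);

        return 0;
    }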
1340 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
1379 * If the lock already has an owner, we fail to get the lock. in rt_mutex_slowtrylock()
1387 * The mutex currently has no owner. Lock the wait_lock and try to in rt_mutex_slowtrylock()
1422 * have no waiters queued, we cannot set owner to NULL here in rt_mutex_slowunlock()
1425 * foo->lock->owner = NULL; in rt_mutex_slowunlock()
1438 * owner = rt_mutex_owner(lock); in rt_mutex_slowunlock()
1441 * if (cmpxchg(&lock->owner, owner, 0) == owner) in rt_mutex_slowunlock()
1446 * lock->owner is serialized by lock->wait_lock: in rt_mutex_slowunlock()
1448 * lock->owner = NULL; in rt_mutex_slowunlock()
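The rt_mutex_slowunlock() comment quoted above explains why, with the fast path enabled and no waiters queued, the owner cannot simply be set to NULL: between that store and the wakeup another task can enqueue itself, and a bare cmpxchg could then release a lock that has a waiter. The unlock therefore loops over unlock_rt_mutex_safe() until the release cmpxchg wins or a waiter shows up to hand the lock to. Schematically (toy_waitq_empty(), toy_wait_lock() and toy_wake_top_waiter() are hypothetical stand-ins for the waiter-tree and wait_lock operations):

    extern bool toy_waitq_empty(struct toy_lock *l);
    extern void toy_wait_lock(struct toy_lock *l);
    extern void toy_wake_top_waiter(struct toy_lock *l);

    static void toy_slow_unlock(struct toy_lock *l, struct task *owner)
    {
        toy_wait_lock(l);

        while (toy_waitq_empty(l)) {
            /* toy_unlock_safe() drops the wait_lock; it fails when a
             * waiter slipped in after the emptiness check above. */
            if (toy_unlock_safe(l, owner))
                return;
            toy_wait_lock(l);   /* lost the race: retake wait_lock, retry */
        }

        /* Waiters exist: wake the top waiter. As in the
         * mark_wakeup_next_waiter() match, the owner word is left as
         * just RT_MUTEX_HAS_WAITERS for the waiter to claim. */
        toy_wake_top_waiter(l);
        /* ... drop the wait_lock ... */
    }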
1482 struct task_struct *owner) in rtmutex_spin_on_owner() argument
1488 /* If owner changed, trylock again. */ in rtmutex_spin_on_owner()
1489 if (owner != rt_mutex_owner(lock)) in rtmutex_spin_on_owner()
1492 * Ensure that @owner is dereferenced after checking that in rtmutex_spin_on_owner()
1493 * the lock owner still matches @owner. If that fails, in rtmutex_spin_on_owner()
1494 * @owner might point to freed memory. If it still matches, in rtmutex_spin_on_owner()
1500 * - the lock owner has been scheduled out in rtmutex_spin_on_owner()
1504 * - the VCPU on which owner runs is preempted in rtmutex_spin_on_owner()
1506 if (!owner_on_cpu(owner) || need_resched() || in rtmutex_spin_on_owner()
1519 struct task_struct *owner) in rtmutex_spin_on_owner() argument
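rtmutex_spin_on_owner() (SMP build; the second match is the stub for the non-spinning build) implements adaptive spinning: keep spinning while the owner is running on a CPU, stop and retry the trylock when the owner changes, and give up and block when the owner is off-CPU or its VCPU is preempted, when rescheduling is due, or when we are no longer the top waiter. The matched comment also insists that @owner be dereferenced only after re-checking that it still owns the lock, which the kernel does under RCU. In outline (toy_* predicates are hypothetical stand-ins for owner_on_cpu(), need_resched() and the top-waiter check):

    extern bool toy_owner_on_cpu(struct task *owner);
    extern bool toy_need_resched(void);
    extern bool toy_still_top_waiter(struct toy_lock *l);

    /* Returns true to retry the trylock, false to go to sleep. */
    static bool toy_spin_on_owner(struct toy_lock *l, struct task *owner)
    {
        for (;;) {
            if (toy_owner(l) != owner)
                return true;    /* owner changed: trylock again */

            /* Dereference 'owner' only after the check above; if it
             * no longer owns the lock it might be freed memory. */
            if (!toy_owner_on_cpu(owner) || toy_need_resched() ||
                !toy_still_top_waiter(l))
                return false;   /* stop spinning and block */

            /* a cpu_relax() equivalent would go here */
        }
    }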
1542 struct task_struct *owner = rt_mutex_owner(lock); in remove_waiter() local
1554 * waiter of the lock and there is an owner to update. in remove_waiter()
1556 if (!owner || !is_top_waiter) in remove_waiter()
1559 raw_spin_lock(&owner->pi_lock); in remove_waiter()
1561 rt_mutex_dequeue_pi(owner, waiter); in remove_waiter()
1564 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); in remove_waiter()
1566 rt_mutex_adjust_prio(lock, owner); in remove_waiter()
1568 /* Store the lock on which the owner is blocked, or NULL */ in remove_waiter()
1569 next_lock = task_blocked_on_lock(owner); in remove_waiter()
1571 raw_spin_unlock(&owner->pi_lock); in remove_waiter()
1574 * Don't walk the chain if the owner task is not blocked in remove_waiter()
1581 get_task_struct(owner); in remove_waiter()
1585 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, in remove_waiter()
1612 struct task_struct *owner; in rt_mutex_slowlock_block() local
1636 owner = rt_mutex_owner(lock); in rt_mutex_slowlock_block()
1638 owner = NULL; in rt_mutex_slowlock_block()
1641 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) in rt_mutex_slowlock_block()
1823 struct task_struct *owner; in rtlock_slowlock_locked() local
1845 owner = rt_mutex_owner(lock); in rtlock_slowlock_locked()
1847 owner = NULL; in rtlock_slowlock_locked()
1850 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) in rtlock_slowlock_locked()
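rt_mutex_slowlock_block() and rtlock_slowlock_locked() share the wait-loop shape visible in the final matches: try to take the lock; on failure, read the owner only if we are the top waiter (otherwise owner stays NULL), then either spin on that owner or schedule away, and retry after wakeup. Schematically, tying the earlier sketches together (toy_schedule() is a hypothetical stand-in; wait_lock drop/reacquire reduced to comments):

    extern void toy_schedule(void);

    /* Called with the wait_lock held, after the waiter is queued. */
    static void toy_slowlock_block(struct toy_lock *l, struct task *task,
                                   int is_top_waiter)
    {
        for (;;) {
            if (toy_try_take(l, task))
                return;         /* got the lock */

            /* Only the top waiter may spin on the current owner. */
            struct task *owner = is_top_waiter ? toy_owner(l) : NULL;

            /* ... drop the wait_lock ... */
            if (!owner || !toy_spin_on_owner(l, owner))
                toy_schedule(); /* block until the unlocker wakes us */
            /* ... reacquire the wait_lock and retry ... */
        }
    }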