Lines Matching full:owner
36 * The least significant 2 bits of the owner value have the following
45 * into the owner field. It is cleared after an unlock.
48 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
49 * On unlock, the owner field will largely be left untouched. So
50 * for a free or reader-owned rwsem, the owner value may contain
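The two flag bits described above live in the low-order bits of the task_struct pointer stored in sem->owner; pointer alignment keeps those bits free. For context, the flag definitions this comment refers to look roughly like the sketch below in recent mainline kernels. They are not part of the match listing, so treat the exact names and bit positions as assumptions that are only corroborated by the masking code further down:

#define RWSEM_READER_OWNED	(1UL << 0)	/* owner word was last set by a reader */
#define RWSEM_NONSPINNABLE	(1UL << 1)	/* optimistic spinning disabled */
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)

With that layout, (owner & ~RWSEM_OWNER_FLAGS_MASK) recovers the task pointer and (owner & RWSEM_OWNER_FLAGS_MASK) recovers the flags, which is exactly what the rwsem_owner() and rwsem_owner_flags() fragments later in this listing do.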
70 …WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, l…
73 atomic_long_read(&(sem)->owner), (long)current, \
131 * All writes to owner are protected by WRITE_ONCE() to make sure that
133 * the owner value concurrently without lock. Read from owner, however,
143 atomic_long_set(&sem->owner, (long)current); in rwsem_set_owner()
149 atomic_long_set(&sem->owner, 0); in rwsem_clear_owner()
153 * Test the flags in the owner field.
157 return atomic_long_read(&sem->owner) & flags; in rwsem_test_oflags()
162 * the owner field.
164 * Note that the owner value just indicates the task has owned the rwsem
165 * previously, it may not be the real owner or one of the real owners
171 struct task_struct *owner) in __rwsem_set_reader_owned() argument
173 unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED | in __rwsem_set_reader_owned()
174 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE); in __rwsem_set_reader_owned()
176 atomic_long_set(&sem->owner, val); in __rwsem_set_reader_owned()
186 * Return just the real task structure pointer of the owner
191 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK); in rwsem_owner()
211 * is a task pointer in owner of a reader-owned rwsem, it will be the
212 * real owner or one of the real owners. The only exception is when the
217 unsigned long val = atomic_long_read(&sem->owner); in rwsem_clear_reader_owned()
220 if (atomic_long_try_cmpxchg(&sem->owner, &val, in rwsem_clear_reader_owned()
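Only two lines of rwsem_clear_reader_owned() match the search, so the loop structure around them is lost. A hedged reconstruction of the whole helper, assuming the mainline shape (it only does real work when CONFIG_DEBUG_RWSEMS is enabled):

static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	/* Retry only while the stored task pointer is still current. */
	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		/* Keep the flag bits, drop the task pointer. */
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}

On failure, atomic_long_try_cmpxchg() refreshes val with the current owner word, so the while condition is re-evaluated against fresh data each time around.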
237 unsigned long owner = atomic_long_read(&sem->owner); in rwsem_set_nonspinnable() local
240 if (!(owner & RWSEM_READER_OWNED)) in rwsem_set_nonspinnable()
242 if (owner & RWSEM_NONSPINNABLE) in rwsem_set_nonspinnable()
244 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner, in rwsem_set_nonspinnable()
245 owner | RWSEM_NONSPINNABLE)); in rwsem_set_nonspinnable()
276 * Return the real task structure pointer of the owner and the embedded
277 * flags in the owner. pflags must be non-NULL.
282 unsigned long owner = atomic_long_read(&sem->owner); in rwsem_owner_flags() local
284 *pflags = owner & RWSEM_OWNER_FLAGS_MASK; in rwsem_owner_flags()
285 return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK); in rwsem_owner_flags()
297 * (3) the owner field has the RWSEM_READER_OWNED bit set.
324 atomic_long_set(&sem->owner, 0L); in __init_rwsem()
453 struct task_struct *owner; in rwsem_mark_wake() local
480 owner = waiter->task; in rwsem_mark_wake()
481 __rwsem_set_reader_owned(sem, owner); in rwsem_mark_wake()
668 * depending on the lock owner state.
669 * OWNER_NULL : owner is currently NULL
670 * OWNER_WRITER: when owner changes and is a writer
671 * OWNER_READER: when owner changes and the new owner may be a reader.
674 * owner stops running, is unknown, or its timeslice has
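The four states named in this comment are the values of an enum owner_state returned by rwsem_spin_on_owner(). The definition itself does not appear in the listing, so the sketch below is an assumption about its shape: one bit per state, so that "still worth spinning" can be tested with a simple mask.

enum owner_state {
	OWNER_NULL		= 1 << 0,	/* no owner recorded */
	OWNER_WRITER		= 1 << 1,	/* owned by a writer */
	OWNER_READER		= 1 << 2,	/* may be owned by readers */
	OWNER_NONSPINNABLE	= 1 << 3,	/* stop optimistic spinning */
};
#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)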
705 struct task_struct *owner; in rwsem_can_spin_on_owner() local
718 owner = rwsem_owner_flags(sem, &flags); in rwsem_can_spin_on_owner()
720 * Don't check the read-owner as the entry may be stale. in rwsem_can_spin_on_owner()
723 (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner))) in rwsem_can_spin_on_owner()
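Line 723 is only the tail of the condition in rwsem_can_spin_on_owner(); the leading RWSEM_NONSPINNABLE test does not contain the word "owner" and so is missing from the listing. A hedged reconstruction of the helper follows: the overall shape matches mainline, but lockevent counters are omitted, and whether the owner read is protected by preempt_disable() or rcu_read_lock() depends on kernel version.

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	unsigned long flags;
	bool ret = true;

	if (need_resched())
		return false;

	preempt_disable();	/* keep the owner task_struct from going away */
	owner = rwsem_owner_flags(sem, &flags);
	/*
	 * Don't check the read-owner as the entry may be stale.
	 */
	if ((flags & RWSEM_NONSPINNABLE) ||
	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
		ret = false;
	preempt_enable();

	return ret;
}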
733 rwsem_owner_state(struct task_struct *owner, unsigned long flags) in rwsem_owner_state() argument
741 return owner ? OWNER_WRITER : OWNER_NULL; in rwsem_owner_state()
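rwsem_owner_state() maps the (owner pointer, flag bits) pair onto the enum sketched above. Only its signature and final return survive in the listing; the two flag checks below are reconstructed and should be read as an assumption about the ordering of the tests:

static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags)
{
	if (flags & RWSEM_NONSPINNABLE)
		return OWNER_NONSPINNABLE;

	if (flags & RWSEM_READER_OWNED)
		return OWNER_READER;

	return owner ? OWNER_WRITER : OWNER_NULL;
}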
747 struct task_struct *new, *owner; in rwsem_spin_on_owner() local
753 owner = rwsem_owner_flags(sem, &flags); in rwsem_spin_on_owner()
754 state = rwsem_owner_state(owner, flags); in rwsem_spin_on_owner()
761 * on the owner as well. Once that writer acquires the lock, in rwsem_spin_on_owner()
766 if ((new != owner) || (new_flags != flags)) { in rwsem_spin_on_owner()
772 * Ensure the owner->on_cpu dereference is emitted _after_ in rwsem_spin_on_owner()
773 * checking that sem->owner still matches owner; if that fails, in rwsem_spin_on_owner()
774 * owner might point to free()d memory. If it still matches, in rwsem_spin_on_owner()
781 if (need_resched() || !owner_on_cpu(owner)) { in rwsem_spin_on_owner()
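Pieced together, the rwsem_spin_on_owner() fragments above form a loop that keeps spinning only while the same writer remains the owner and stays on a CPU. A hedged reconstruction is sketched below; the early return for non-writer owners, the barrier(), and the final return are assumptions, and older kernels wrap the loop in rcu_read_lock()/rcu_read_unlock() instead of relying on preemption being disabled.

static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *new, *owner;
	unsigned long flags, new_flags;
	enum owner_state state;

	owner = rwsem_owner_flags(sem, &flags);
	state = rwsem_owner_state(owner, flags);
	if (state != OWNER_WRITER)
		return state;	/* only a writer owner is worth watching */

	for (;;) {
		/* Re-sample; any change of owner or flags ends this round. */
		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags);
			break;
		}

		/*
		 * Order the owner->on_cpu read after re-checking that
		 * sem->owner still matches, so a stale owner pointer is
		 * never dereferenced.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}

	return state;
}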
829 * Optimistically spin on the owner field and attempt to acquire the in rwsem_optimistic_spin()
830 * lock whenever the owner changes. Spinning will be stopped when: in rwsem_optimistic_spin()
855 * the owner state changes from non-reader to reader. in rwsem_optimistic_spin()
886 * spinning while a NULL owner is detected may miss some in rwsem_optimistic_spin()
893 * 1) The lock owner is in the process of releasing the in rwsem_optimistic_spin()
894 * lock, sem->owner is cleared but the lock has not in rwsem_optimistic_spin()
896 * 2) The lock was free and owner cleared, but another in rwsem_optimistic_spin()
898 * we try to get it. The new owner may be a spinnable in rwsem_optimistic_spin()
905 * new owner is not a writer or spinnable, the RT task will in rwsem_optimistic_spin()
908 * If the owner is a writer, the need_resched() check is in rwsem_optimistic_spin()
909 * done inside rwsem_spin_on_owner(). If the owner is not in rwsem_optimistic_spin()
936 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
942 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner); in clear_nonspinnable()
967 * given wake_q if the rwsem lock owner isn't a writer. If rwsem is likely
1006 if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) && in rwsem_down_read_slowpath()
1162 * the lock, attempt to spin on owner to accelerate lock in rwsem_down_write_slowpath()
1163 * transfer. If the previous owner is an on-cpu writer and it in rwsem_down_write_slowpath()
1364 * sem->owner may differ from current if the ownership is transferred in __up_write()
1505 struct task_struct *owner) in __rwsem_set_reader_owned() argument
1681 * The owner value for a reader-owned lock is mostly for debugging in down_read_non_owner()