Lines matching "mutex" in kernel/locking/mutex.c

3  * kernel/locking/mutex.c
19 * Also see Documentation/locking/mutex-design.rst.
21 #include <linux/mutex.h>
37 #include "mutex.h"
46 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) in __mutex_init()
64 bool mutex_is_locked(struct mutex *lock) in mutex_is_locked()
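
As a usage sketch (not part of this file): a mutex is either defined statically with DEFINE_MUTEX() or initialized at runtime with mutex_init(), which expands to __mutex_init() with a lockdep class key; mutex_is_locked() is mostly useful for assertions. The structure and function names below are hypothetical.

#include <linux/mutex.h>

static DEFINE_MUTEX(static_lock);		/* statically initialized */

struct my_dev {					/* hypothetical structure */
	struct mutex lock;
};

static void my_dev_setup(struct my_dev *dev)
{
	mutex_init(&dev->lock);			/* wraps __mutex_init() */
	WARN_ON(mutex_is_locked(&dev->lock));	/* sanity check only */
}
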
78 static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff) in __mutex_trylock_common()
117 static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff) in __mutex_trylock_or_handoff()
125 static inline bool __mutex_trylock(struct mutex *lock) in __mutex_trylock()
141 static __always_inline bool __mutex_trylock_fast(struct mutex *lock) in __mutex_trylock_fast()
152 static __always_inline bool __mutex_unlock_fast(struct mutex *lock) in __mutex_unlock_fast()
160 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag) in __mutex_set_flag()
165 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag) in __mutex_clear_flag()
170 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) in __mutex_waiter_is_first()
180 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, in __mutex_add_waiter()
191 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter) in __mutex_remove_waiter()
206 static void __mutex_handoff(struct mutex *lock, struct task_struct *task) in __mutex_handoff()
228 * We split the mutex lock/unlock logic into separate fastpath and
233 static void __sched __mutex_lock_slowpath(struct mutex *lock);
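
The fastpath mentioned above is a single atomic compare-and-exchange on lock->owner: if the owner word is zero, the current task pointer is installed and the lock is taken without ever touching the wait list. A simplified sketch of that shape (the real __mutex_trylock_fast() stores flag bits in the low bits of the owner word as well; the function name here is made up):

#include <linux/mutex.h>
#include <linux/sched.h>

/* Illustrative only: the acquire fastpath is one cmpxchg on lock->owner. */
static __always_inline bool fastpath_trylock_sketch(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	/* 0 means unlocked; install current as owner with acquire semantics. */
	return atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr);
}
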
236 * mutex_lock - acquire the mutex
237 * @lock: the mutex to be acquired
239 * Lock the mutex exclusively for this task. If the mutex is not
242 * The mutex must later on be released by the same task that
244 * may not exit without first unlocking the mutex. Also, kernel
245 * memory where the mutex resides must not be freed with
246 * the mutex still locked. The mutex must first be initialized
248 * the mutex to 0 is not allowed.
256 void __sched mutex_lock(struct mutex *lock) in mutex_lock()
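
Typical usage, following the rules above (same task locks and unlocks, no recursion, no exiting while holding the lock); the lock and list names are illustrative:

#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(io_lock);			/* hypothetical lock */
static LIST_HEAD(io_list);			/* hypothetical shared state */

static void io_add(struct list_head *entry)
{
	mutex_lock(&io_lock);			/* may sleep; never from IRQ context */
	list_add_tail(entry, &io_list);
	mutex_unlock(&io_lock);			/* released by the same task */
}
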
273 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) in __mutex_trylock_or_owner()
279 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, in ww_mutex_spin_on_owner()
327 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, in mutex_spin_on_owner()
365 * Initial check for entering the mutex spinning loop
367 static inline int mutex_can_spin_on_owner(struct mutex *lock) in mutex_can_spin_on_owner()
387 * If lock->owner is not set, the mutex has been released. Return true in mutex_can_spin_on_owner()
402 * The mutex spinners are queued up using MCS lock so that only one
403 * spinner can compete for the mutex. However, if mutex spinning isn't
416 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, in mutex_optimistic_spin()
431 * In order to avoid a stampede of mutex spinners trying to in mutex_optimistic_spin()
432 * acquire the mutex all at once, the spinners need to take a in mutex_optimistic_spin()
442 /* Try to acquire the mutex... */ in mutex_optimistic_spin()
476 * reschedule now, before we try-lock the mutex. This avoids getting in mutex_optimistic_spin()
477 * scheduled out right after we obtained the mutex. in mutex_optimistic_spin()
492 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, in mutex_optimistic_spin()
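
The shape of the midpath is roughly the following; this is a simplified paraphrase of mutex_optimistic_spin(), not the exact code (waiter handoff and ww-mutex cancellation are omitted). The MCS-style queue taken via osq_lock() admits one spinner at a time, and spinning stops as soon as the owner is no longer running on a CPU:

/* Paraphrased sketch of the optimistic-spin midpath in this file. */
static bool optimistic_spin_sketch(struct mutex *lock)
{
	if (!mutex_can_spin_on_owner(lock))	/* owner off-CPU? don't spin */
		return false;
	if (!osq_lock(&lock->osq))		/* one spinner competes at a time */
		return false;

	for (;;) {
		struct task_struct *owner;

		owner = __mutex_trylock_or_owner(lock);
		if (!owner)			/* acquired the mutex */
			break;

		/* Spin only while the owner stays on a CPU; bail otherwise. */
		if (!mutex_spin_on_owner(lock, owner, NULL, NULL))
			goto fail;

		cpu_relax();
	}

	osq_unlock(&lock->osq);
	return true;

fail:
	osq_unlock(&lock->osq);
	return false;
}
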
499 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
502 * mutex_unlock - release the mutex
503 * @lock: the mutex to be released
505 * Unlock a mutex that has been locked by this task previously.
508 * of an unlocked mutex is not allowed.
510 * The caller must ensure that the mutex stays alive until this function has
517 void __sched mutex_unlock(struct mutex *lock) in mutex_unlock()
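
The lifetime rule above matters when the mutex is embedded in an object that another task may free: a waiter can acquire the lock and release the object while mutex_unlock() is still operating on it. One common way to avoid that (a sketch, all names hypothetical) is to pin the object with a reference across the unlock:

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {				/* hypothetical refcounted object */
	struct mutex lock;
	struct kref ref;
	bool dead;
};

static void obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct obj, ref));
}

static void obj_kill(struct obj *obj)
{
	kref_get(&obj->ref);		/* pin obj across the unlock */
	mutex_lock(&obj->lock);
	obj->dead = true;
	mutex_unlock(&obj->lock);	/* obj still valid: we hold a ref */
	kref_put(&obj->ref, obj_release);
}
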
528 * ww_mutex_unlock - release the w/w mutex
529 * @lock: the mutex to be released
531 * Unlock a mutex that has been locked by this task previously with any of the
536 * of an unlocked mutex is not allowed.
546 * Lock a mutex (possibly interruptible), slowpath:
549 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, in __mutex_lock_common()
727 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, in __mutex_lock()
734 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, in __ww_mutex_lock()
741 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
742 * @ww: mutex to lock
745 * Trylocks a mutex with the optional acquire context; no deadlock detection is
746 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
751 * A mutex acquired with this function must be released with ww_mutex_unlock.
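
Together these form the wound/wait entry points. A minimal two-lock sketch, adapted from the pattern in Documentation/locking/ww-mutex-design.rst (the class and function names here are made up): back off on -EDEADLK, sleep on the contended lock with ww_mutex_lock_slow(), and release every lock with ww_mutex_unlock() before ww_acquire_fini().

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);		/* hypothetical class */

static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);

	ret = ww_mutex_lock(a, &ctx);		/* no locks held yet: cannot be wounded */
	ret = ww_mutex_lock(b, &ctx);
	while (ret == -EDEADLK) {
		/* Wounded: drop what we hold, sleep on the contended
		 * lock, then retry the other one. */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);			/* we now hold the contended lock */
		ret = ww_mutex_lock(b, &ctx);
	}
	ww_acquire_done(&ctx);

	/* ... critical section over both objects ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
}
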
780 mutex_lock_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_nested()
788 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) in _mutex_lock_nest_lock()
795 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_killable_nested()
802 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_interruptible_nested()
809 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_io_nested()
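
The _nested variants exist for lockdep: when two mutexes of the same lock class are legitimately held at once, the inner acquisition must be annotated with a distinct subclass so lockdep does not report a false self-deadlock. A sketch with a hypothetical parent/child structure:

#include <linux/mutex.h>

struct node {				/* hypothetical structure */
	struct mutex lock;
};

/* Both locks share one lock class; annotate the inner one. */
static void lock_parent_child(struct node *parent, struct node *child)
{
	mutex_lock(&parent->lock);
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
	/* ... */
	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
}
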
884 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip) in __mutex_unlock_slowpath()
942 __mutex_lock_killable_slowpath(struct mutex *lock);
945 __mutex_lock_interruptible_slowpath(struct mutex *lock);
948 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
949 * @lock: The mutex to be acquired.
951 * Lock the mutex like mutex_lock(). If a signal is delivered while the
953 * mutex.
959 int __sched mutex_lock_interruptible(struct mutex *lock) in mutex_lock_interruptible()
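
mutex_lock_interruptible() returns 0 on success and -EINTR if a signal arrived while sleeping, so the caller must be prepared to bail out without the lock. A sketch (lock and variable names hypothetical):

#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_lock);		/* hypothetical */
static int cfg_value;

static int cfg_update(int val)
{
	int ret;

	ret = mutex_lock_interruptible(&cfg_lock);
	if (ret)
		return ret;		/* -EINTR: a signal arrived while sleeping */
	cfg_value = val;
	mutex_unlock(&cfg_lock);
	return 0;
}
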
972 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
973 * @lock: The mutex to be acquired.
975 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
977 * function will return without acquiring the mutex.
983 int __sched mutex_lock_killable(struct mutex *lock) in mutex_lock_killable()
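
Unlike the interruptible variant, mutex_lock_killable() only aborts the wait for signals that are fatal to the task, which suits long operations where ordinary signal restarts would be a nuisance but a dying task must not block. A sketch with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(scan_lock);		/* hypothetical */

static int scan_table(void)
{
	int ret;

	ret = mutex_lock_killable(&scan_lock);
	if (ret)
		return ret;		/* -EINTR: the task is being killed */
	/* ... long operation; ordinary signals do not abort the wait ... */
	mutex_unlock(&scan_lock);
	return 0;
}
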
995 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
996 * @lock: The mutex to be acquired.
998 * Lock the mutex like mutex_lock(). While the task is waiting for this
999 * mutex, it will be accounted as being in the IO wait state by the
1004 void __sched mutex_lock_io(struct mutex *lock) in mutex_lock_io()
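
mutex_lock_io() is for the case where waiting on the lock is, in effect, waiting for I/O issued by the lock holder, so the sleep should be charged as iowait. A sketch (names hypothetical):

#include <linux/mutex.h>

static DEFINE_MUTEX(flush_lock);	/* hypothetical, serializes device flushes */

static void flush_device(void)
{
	/* Blocking here means waiting on the holder's I/O, so the
	 * scheduler accounts this sleep as iowait. */
	mutex_lock_io(&flush_lock);
	/* ... issue the flush ... */
	mutex_unlock(&flush_lock);
}
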
1015 __mutex_lock_slowpath(struct mutex *lock) in __mutex_lock_slowpath()
1021 __mutex_lock_killable_slowpath(struct mutex *lock) in __mutex_lock_killable_slowpath()
1027 __mutex_lock_interruptible_slowpath(struct mutex *lock) in __mutex_lock_interruptible_slowpath()
1050 * mutex_trylock - try to acquire the mutex, without waiting
1051 * @lock: the mutex to be acquired
1053 * Try to acquire the mutex atomically. Returns 1 if the mutex
1061 * mutex must be released by the same task that acquired it.
1063 int __sched mutex_trylock(struct mutex *lock) in mutex_trylock()
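
Because mutex_trylock() returns 1 on success and 0 on contention without ever sleeping, the usual pattern is to skip the work when someone else already holds the lock. A sketch with a hypothetical lock:

#include <linux/mutex.h>

static DEFINE_MUTEX(gc_lock);		/* hypothetical */

static void gc_maybe_run(void)
{
	if (!mutex_trylock(&gc_lock))
		return;			/* someone else is already collecting */
	/* ... do the work ... */
	mutex_unlock(&gc_lock);
}
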
1115 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1117 * @lock: the mutex to return holding if we dec to 0
1121 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) in atomic_dec_and_mutex_lock()
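
atomic_dec_and_mutex_lock() decrements @cnt and, only when it reaches zero, acquires @lock and returns true with the mutex held; otherwise it returns false without the lock. That makes final-reference teardown atomic with respect to the lock. A sketch (names hypothetical):

#include <linux/atomic.h>
#include <linux/mutex.h>

static atomic_t users = ATOMIC_INIT(1);	/* hypothetical refcount */
static DEFINE_MUTEX(teardown_lock);

static void put_user_ref(void)
{
	/* Last reference: we return holding teardown_lock, so cleanup
	 * cannot race with a concurrent final put. */
	if (atomic_dec_and_mutex_lock(&users, &teardown_lock)) {
		/* ... free shared resources ... */
		mutex_unlock(&teardown_lock);
	}
}
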