// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <[email protected]>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */
11 
/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
26 
27 #include <linux/compiler.h>
28 #include <linux/kernel.h>
29 #include <linux/export.h>
30 #include <linux/sched.h>
31 #include <linux/sched/debug.h>
32 #include <linux/sched/wake_q.h>
33 #include <linux/semaphore.h>
34 #include <linux/spinlock.h>
35 #include <linux/ftrace.h>
36 #include <trace/events/lock.h>
37 
38 static noinline void __down(struct semaphore *sem);
39 static noinline int __down_interruptible(struct semaphore *sem);
40 static noinline int __down_killable(struct semaphore *sem);
41 static noinline int __down_timeout(struct semaphore *sem, long timeout);
42 static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q);
43 
44 /**
45  * down - acquire the semaphore
46  * @sem: the semaphore to be acquired
47  *
48  * Acquires the semaphore.  If no more tasks are allowed to acquire the
49  * semaphore, calling this function will put the task to sleep until the
50  * semaphore is released.
51  *
52  * Use of this function is deprecated, please use down_interruptible() or
53  * down_killable() instead.
54  */
down(struct semaphore * sem)55 void __sched down(struct semaphore *sem)
56 {
57 	unsigned long flags;
58 
59 	might_sleep();
60 	raw_spin_lock_irqsave(&sem->lock, flags);
61 	if (likely(sem->count > 0))
62 		sem->count--;
63 	else
64 		__down(sem);
65 	raw_spin_unlock_irqrestore(&sem->lock, flags);
66 }
67 EXPORT_SYMBOL(down);
68 
69 /**
70  * down_interruptible - acquire the semaphore unless interrupted
71  * @sem: the semaphore to be acquired
72  *
73  * Attempts to acquire the semaphore.  If no more tasks are allowed to
74  * acquire the semaphore, calling this function will put the task to sleep.
75  * If the sleep is interrupted by a signal, this function will return -EINTR.
76  * If the semaphore is successfully acquired, this function returns 0.
77  */
down_interruptible(struct semaphore * sem)78 int __sched down_interruptible(struct semaphore *sem)
79 {
80 	unsigned long flags;
81 	int result = 0;
82 
83 	might_sleep();
84 	raw_spin_lock_irqsave(&sem->lock, flags);
85 	if (likely(sem->count > 0))
86 		sem->count--;
87 	else
88 		result = __down_interruptible(sem);
89 	raw_spin_unlock_irqrestore(&sem->lock, flags);
90 
91 	return result;
92 }
93 EXPORT_SYMBOL(down_interruptible);
94 
95 /**
96  * down_killable - acquire the semaphore unless killed
97  * @sem: the semaphore to be acquired
98  *
99  * Attempts to acquire the semaphore.  If no more tasks are allowed to
100  * acquire the semaphore, calling this function will put the task to sleep.
101  * If the sleep is interrupted by a fatal signal, this function will return
102  * -EINTR.  If the semaphore is successfully acquired, this function returns
103  * 0.
104  */
down_killable(struct semaphore * sem)105 int __sched down_killable(struct semaphore *sem)
106 {
107 	unsigned long flags;
108 	int result = 0;
109 
110 	might_sleep();
111 	raw_spin_lock_irqsave(&sem->lock, flags);
112 	if (likely(sem->count > 0))
113 		sem->count--;
114 	else
115 		result = __down_killable(sem);
116 	raw_spin_unlock_irqrestore(&sem->lock, flags);
117 
118 	return result;
119 }
120 EXPORT_SYMBOL(down_killable);
121 
122 /**
123  * down_trylock - try to acquire the semaphore, without waiting
124  * @sem: the semaphore to be acquired
125  *
126  * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
127  * been acquired successfully or 1 if it cannot be acquired.
128  *
129  * NOTE: This return value is inverted from both spin_trylock and
130  * mutex_trylock!  Be careful about this when converting code.
131  *
132  * Unlike mutex_trylock, this function can be used from interrupt context,
133  * and the semaphore can be released by any task or interrupt.
134  */
down_trylock(struct semaphore * sem)135 int __sched down_trylock(struct semaphore *sem)
136 {
137 	unsigned long flags;
138 	int count;
139 
140 	raw_spin_lock_irqsave(&sem->lock, flags);
141 	count = sem->count - 1;
142 	if (likely(count >= 0))
143 		sem->count = count;
144 	raw_spin_unlock_irqrestore(&sem->lock, flags);
145 
146 	return (count < 0);
147 }
148 EXPORT_SYMBOL(down_trylock);
149 
150 /**
151  * down_timeout - acquire the semaphore within a specified time
152  * @sem: the semaphore to be acquired
153  * @timeout: how long to wait before failing
154  *
155  * Attempts to acquire the semaphore.  If no more tasks are allowed to
156  * acquire the semaphore, calling this function will put the task to sleep.
157  * If the semaphore is not released within the specified number of jiffies,
158  * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
159  */
down_timeout(struct semaphore * sem,long timeout)160 int __sched down_timeout(struct semaphore *sem, long timeout)
161 {
162 	unsigned long flags;
163 	int result = 0;
164 
165 	might_sleep();
166 	raw_spin_lock_irqsave(&sem->lock, flags);
167 	if (likely(sem->count > 0))
168 		sem->count--;
169 	else
170 		result = __down_timeout(sem, timeout);
171 	raw_spin_unlock_irqrestore(&sem->lock, flags);
172 
173 	return result;
174 }
175 EXPORT_SYMBOL(down_timeout);
176 
177 /**
178  * up - release the semaphore
179  * @sem: the semaphore to release
180  *
181  * Release the semaphore.  Unlike mutexes, up() may be called from any
182  * context and even by tasks which have never called down().
183  */
up(struct semaphore * sem)184 void __sched up(struct semaphore *sem)
185 {
186 	unsigned long flags;
187 	DEFINE_WAKE_Q(wake_q);
188 
189 	raw_spin_lock_irqsave(&sem->lock, flags);
190 	if (likely(list_empty(&sem->wait_list)))
191 		sem->count++;
192 	else
193 		__up(sem, &wake_q);
194 	raw_spin_unlock_irqrestore(&sem->lock, flags);
195 	if (!wake_q_empty(&wake_q))
196 		wake_up_q(&wake_q);
197 }
198 EXPORT_SYMBOL(up);
199 
200 /* Functions for the contended case */
201 
202 struct semaphore_waiter {
203 	struct list_head list;
204 	struct task_struct *task;
205 	bool up;
206 };
207 
208 /*
209  * Because this function is inlined, the 'state' parameter will be
210  * constant, and thus optimised away by the compiler.  Likewise the
211  * 'timeout' parameter for the cases without timeouts.
212  */
___down_common(struct semaphore * sem,long state,long timeout)213 static inline int __sched ___down_common(struct semaphore *sem, long state,
214 								long timeout)
215 {
216 	struct semaphore_waiter waiter;
217 
218 	list_add_tail(&waiter.list, &sem->wait_list);
219 	waiter.task = current;
220 	waiter.up = false;
221 
222 	for (;;) {
223 		if (signal_pending_state(state, current))
224 			goto interrupted;
225 		if (unlikely(timeout <= 0))
226 			goto timed_out;
227 		__set_current_state(state);
228 		raw_spin_unlock_irq(&sem->lock);
229 		timeout = schedule_timeout(timeout);
230 		raw_spin_lock_irq(&sem->lock);
231 		if (waiter.up)
232 			return 0;
233 	}
234 
235  timed_out:
236 	list_del(&waiter.list);
237 	return -ETIME;
238 
239  interrupted:
240 	list_del(&waiter.list);
241 	return -EINTR;
242 }
243 
__down_common(struct semaphore * sem,long state,long timeout)244 static inline int __sched __down_common(struct semaphore *sem, long state,
245 					long timeout)
246 {
247 	int ret;
248 
249 	trace_contention_begin(sem, 0);
250 	ret = ___down_common(sem, state, timeout);
251 	trace_contention_end(sem, ret);
252 
253 	return ret;
254 }
255 
__down(struct semaphore * sem)256 static noinline void __sched __down(struct semaphore *sem)
257 {
258 	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
259 }
260 
__down_interruptible(struct semaphore * sem)261 static noinline int __sched __down_interruptible(struct semaphore *sem)
262 {
263 	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
264 }
265 
__down_killable(struct semaphore * sem)266 static noinline int __sched __down_killable(struct semaphore *sem)
267 {
268 	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
269 }
270 
__down_timeout(struct semaphore * sem,long timeout)271 static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
272 {
273 	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
274 }
275 
__up(struct semaphore * sem,struct wake_q_head * wake_q)276 static noinline void __sched __up(struct semaphore *sem,
277 				  struct wake_q_head *wake_q)
278 {
279 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
280 						struct semaphore_waiter, list);
281 	list_del(&waiter->list);
282 	waiter->up = true;
283 	wake_q_add(wake_q, waiter->task);
284 }
285