xref: /linux/kernel/locking/semaphore.c (revision b9bdd4b6840454ef87f61b6506c9635c57a81650)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2008 Intel Corporation
4  * Author: Matthew Wilcox <willy@linux.intel.com>
5  *
6  * This file implements counting semaphores.
7  * A counting semaphore may be acquired 'n' times before sleeping.
8  * See mutex.c for single-acquisition sleeping locks which enforce
9  * rules which allow code to be debugged more easily.
10  */
11 
12 /*
13  * Some notes on the implementation:
14  *
15  * The spinlock controls access to the other members of the semaphore.
16  * down_trylock() and up() can be called from interrupt context, so we
17  * have to disable interrupts when taking the lock.  It turns out various
18  * parts of the kernel expect to be able to use down() on a semaphore in
19  * interrupt context when they know it will succeed, so we have to use
20  * irqsave variants for down(), down_interruptible() and down_killable()
21  * too.
22  *
23  * The ->count variable represents how many more tasks can acquire this
24  * semaphore.  If it's zero, there may be waiters.
25  */
26 
27 #include <linux/compiler.h>
28 #include <linux/kernel.h>
29 #include <linux/export.h>
30 #include <linux/sched.h>
31 #include <linux/sched/debug.h>
32 #include <linux/sched/wake_q.h>
33 #include <linux/semaphore.h>
34 #include <linux/spinlock.h>
35 #include <linux/ftrace.h>
36 #include <trace/events/lock.h>
37 #include <linux/hung_task.h>
38 
/* Slow-path implementations; defined below with the contended-case code. */
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q);
44 
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
/* Record current as the last task to acquire @sem, for hung-task reports. */
static inline void hung_task_sem_set_holder(struct semaphore *sem)
{
	WRITE_ONCE((sem)->last_holder, (unsigned long)current);
}

/*
 * Clear ->last_holder, but only if it still names the current task:
 * up() may legitimately be called by a task that never called down().
 */
static inline void hung_task_sem_clear_if_holder(struct semaphore *sem)
{
	if (READ_ONCE((sem)->last_holder) == (unsigned long)current)
		WRITE_ONCE((sem)->last_holder, 0UL);
}

/* Report the last recorded holder (0 if none); lockless read. */
unsigned long sem_last_holder(struct semaphore *sem)
{
	return READ_ONCE(sem->last_holder);
}
#else
/* Stubs when hung-task blocker tracking is not configured. */
static inline void hung_task_sem_set_holder(struct semaphore *sem)
{
}
static inline void hung_task_sem_clear_if_holder(struct semaphore *sem)
{
}
unsigned long sem_last_holder(struct semaphore *sem)
{
	return 0UL;
}
#endif
73 
/* Consume one count and record the holder.  Caller must hold sem->lock. */
static inline void __sem_acquire(struct semaphore *sem)
{
	sem->count--;
	hung_task_sem_set_holder(sem);
}
79 
/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void __sched down(struct semaphore *sem)
{
	unsigned long flags;

	might_sleep();
	/* irqsave: see the notes at the top of this file. */
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		__sem_acquire(sem);	/* fast path: a count is available */
	else
		__down(sem);		/* slow path: sleep until woken by up() */
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
103 EXPORT_SYMBOL(down);
104 
/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int __sched down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		__sem_acquire(sem);	/* fast path: a count is available */
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);
130 
/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int __sched down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		__sem_acquire(sem);	/* fast path: a count is available */
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);
157 
158 /**
159  * down_trylock - try to acquire the semaphore, without waiting
160  * @sem: the semaphore to be acquired
161  *
162  * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
163  * been acquired successfully or 1 if it cannot be acquired.
164  *
165  * NOTE: This return value is inverted from both spin_trylock and
166  * mutex_trylock!  Be careful about this when converting code.
167  *
168  * Unlike mutex_trylock, this function can be used from interrupt context,
169  * and the semaphore can be released by any task or interrupt.
170  */
171 int __sched down_trylock(struct semaphore *sem)
172 {
173 	unsigned long flags;
174 	int count;
175 
176 	raw_spin_lock_irqsave(&sem->lock, flags);
177 	count = sem->count - 1;
178 	if (likely(count >= 0))
179 		__sem_acquire(sem);
180 	raw_spin_unlock_irqrestore(&sem->lock, flags);
181 
182 	return (count < 0);
183 }
184 EXPORT_SYMBOL(down_trylock);
185 
/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing, in jiffies
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int __sched down_timeout(struct semaphore *sem, long timeout)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		__sem_acquire(sem);	/* fast path: a count is available */
	else
		result = __down_timeout(sem, timeout);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);
212 
/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void __sched up(struct semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->lock, flags);

	/* Drop the hung-task holder record if we are the recorded holder. */
	hung_task_sem_clear_if_holder(sem);

	if (likely(!sem->first_waiter))
		sem->count++;		/* nobody waiting: just return the count */
	else
		__up(sem, &wake_q);	/* hand the count to the oldest waiter */
	raw_spin_unlock_irqrestore(&sem->lock, flags);
	/* Wake the handed-off waiter only after dropping sem->lock. */
	if (!wake_q_empty(&wake_q))
		wake_up_q(&wake_q);
}
EXPORT_SYMBOL(up);
237 EXPORT_SYMBOL(up);
238 
/* Functions for the contended case */

/*
 * Per-waiter record, allocated on the sleeping task's stack in
 * ___down_common().  Waiters are linked into a circular list with no
 * separate head node; sem->first_waiter points at the oldest waiter.
 */
struct semaphore_waiter {
	struct list_head list;		/* link in the ring of waiters */
	struct task_struct *task;	/* the task sleeping on this waiter */
	bool up;			/* set by __up() when the semaphore is granted */
};
246 
/*
 * Unlink @waiter from the wait list and fix up sem->first_waiter.
 * The list is circular with no head node, so an empty ->list means
 * @waiter is the only waiter left.  Caller must hold sem->lock.
 */
static inline
void sem_del_waiter(struct semaphore *sem, struct semaphore_waiter *waiter)
{
	if (list_empty(&waiter->list)) {
		/* @waiter was the sole entry: the wait list is now empty. */
		sem->first_waiter = NULL;
		return;
	}

	if (sem->first_waiter == waiter) {
		/* Pass first_waiter on to the next waiter in the ring. */
		sem->first_waiter = list_first_entry(&waiter->list,
						     struct semaphore_waiter, list);
	}
	list_del(&waiter->list);
}
261 
/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 *
 * Called with sem->lock held and interrupts disabled; the lock is
 * dropped (and interrupts enabled) across the sleep and reacquired
 * before returning.
 */
static inline int __sched ___down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter, *first;

	/* Queue ourselves at the tail of the circular waiter list. */
	first = sem->first_waiter;
	if (first) {
		list_add_tail(&waiter.list, &first->list);
	} else {
		INIT_LIST_HEAD(&waiter.list);
		sem->first_waiter = &waiter;
	}
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up) {
			/* __up() dequeued us and granted the semaphore. */
			hung_task_sem_set_holder(sem);
			return 0;
		}
	}

 timed_out:
	sem_del_waiter(sem, &waiter);
	return -ETIME;

 interrupted:
	sem_del_waiter(sem, &waiter);
	return -EINTR;
}
305 
/*
 * Common slow path: record @sem as this task's blocker for the hung-task
 * detector and emit contention tracepoints around the actual sleep in
 * ___down_common().
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
					long timeout)
{
	int ret;

	hung_task_set_blocker(sem, BLOCKER_TYPE_SEM);

	trace_contention_begin(sem, 0);
	ret = ___down_common(sem, state, timeout);
	trace_contention_end(sem, ret);

	hung_task_clear_blocker();

	return ret;
}
321 
/* Slow path for down(): uninterruptible sleep, no timeout. */
static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
326 
/* Slow path for down_interruptible(): any signal aborts the sleep. */
static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
331 
/* Slow path for down_killable(): only fatal signals abort the sleep. */
static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
336 
/* Slow path for down_timeout(): uninterruptible sleep bounded by @timeout. */
static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}
341 
/*
 * Hand the released count to the oldest waiter.  Called with sem->lock
 * held.  The waiter is not woken here: setting ->up marks the handoff,
 * and the task is queued on @wake_q so that up() can wake it after
 * dropping the lock.
 */
static noinline void __sched __up(struct semaphore *sem,
				  struct wake_q_head *wake_q)
{
	struct semaphore_waiter *waiter = sem->first_waiter;

	sem_del_waiter(sem, waiter);
	waiter->up = true;
	wake_q_add(wake_q, waiter->task);
}
351