/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but enforces
 * an alternate policy, namely:
 *   1) Supports lock wait timeouts
 *   2) Write waiter has priority
 *   3) Downgrading is not supported
 *
 * Implementation notes:
 *   1) Upper half of semaphore count is a wait count (differs from rwsem
 *	in that rwsem normalizes the upper half to the wait bias)
 *   2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 *
 * This file may be redistributed under the terms of the GNU General Public
 * License v2.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>


#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __acq(l, s, t, r, c, n, i)		\
				lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i)				\
				lock_release(&(l)->dep_map, n, i)
# define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
# define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
# define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
# define lockdep_release(l, n, i)		__rel(l, n, i)
#else
# define lockdep_acquire(l, s, t, i)		do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i)	do { } while (0)
# define lockdep_acquire_read(l, s, t, i)	do { } while (0)
# define lockdep_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_LOCK_STAT
# define lock_stat(_lock, stat)		lock_##stat(&(_lock)->dep_map, _RET_IP_)
#else
# define lock_stat(_lock, stat)		do { } while (0)
#endif


#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)

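/*
 * Illustrative count values (a sketch, assuming the 64-bit layout above;
 * the 32-bit case is analogous with 16-bit halves):
 *
 *	LDSEM_UNLOCKED				no holders, no waiters
 *	n * LDSEM_READ_BIAS			n active readers, no waiters
 *	LDSEM_WRITE_BIAS			one active writer (it counts
 *						itself in the wait half too)
 *	LDSEM_READ_BIAS + LDSEM_WAIT_BIAS	one active reader, one
 *						blocked writer
 *
 * So count > 0 means "held by readers only, nobody waiting", and
 * count & LDSEM_ACTIVE_MASK is the number of active holders.
 */
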
struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};

static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

/*
 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
 * Returns 1 if count was successfully changed; @*old will have @new value.
 * Returns 0 if count was not changed; @*old will have the most recent
 * sem->count value.
 */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
	long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
	if (tmp == *old) {
		*old = new;
		return 1;
	} else {
		*old = tmp;
		return 0;
	}
}
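
/*
 * Illustrative use of ldsem_cmpxchg() (a hypothetical sketch, not called
 * anywhere in this file): atomically take one reader slot only if the
 * semaphore is exactly in the unlocked state:
 *
 *	long old = LDSEM_UNLOCKED;
 *
 *	if (ldsem_cmpxchg(&old, LDSEM_READ_BIAS, sem))
 *		...success: old == LDSEM_READ_BIAS...
 *	else
 *		...failed: old holds the current sem->count...
 *
 * The helpers below wrap this in retry loops that recompute the new value
 * from the refreshed @old after each failure.
 */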

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = LDSEM_UNLOCKED;
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/* Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
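	/*
	 * Worked example (illustrative): with sem->wait_readers == 2,
	 * adjust == 2 * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS), so the
	 * update below removes two wait biases from the upper half of
	 * the count and adds two active biases to the lower half; both
	 * waiting readers are accounted as holders before being woken.
	 */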
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = ldsem_atomic_update(adjust, sem);
	do {
		if (count > 0)
			break;
		if (ldsem_cmpxchg(&count, count - adjust, sem))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		/*
		 * Order our reads of the waiter against the NULL store
		 * below: once waiter->task is cleared, the woken reader
		 * may return and its on-stack waiter can disappear.
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}

static inline int writer_trylock(struct ld_semaphore *sem)
{
	/* only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
			return 0;
	} while (1);
}

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (& LDSEM_ACTIVE_MASK) reached 0 (but may
 *     have changed)
 *   - the 'waiting part' of count (& ~LDSEM_ACTIVE_MASK) is -ve (and will
 *     still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if this task has instead been
	 * granted the lock (count > 0 means there are no writers active
	 * or waiting), and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = current;
	get_task_struct(current);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!waiter.task)
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_current_state(TASK_RUNNING);

	if (!timeout) {
		/* lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, clean up the lock wait */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if this task now owns the lock
	 * (write lock stealing succeeded), and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = current;

	set_current_state(TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		locked = writer_trylock(sem);
		if (locked)
			break;
	}

	if (!locked)
		ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	__set_current_state(TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}


static int __ldsem_down_read_nested(struct ld_semaphore *sem,
					   int subclass, long timeout)
{
	long count;

	lockdep_acquire_read(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
	if (count <= 0) {
		lock_stat(sem, contended);
		if (!down_read_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}

static int __ldsem_down_write_nested(struct ld_semaphore *sem,
					    int subclass, long timeout)
{
	long count;

	lockdep_acquire(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_stat(sem, contended);
		if (!down_write_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}
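
/*
 * Usage sketch (hypothetical caller; the real users are the line-discipline
 * reference helpers in drivers/tty/tty_ldisc.c, taking tty->ldisc_sem):
 *
 *	if (!ldsem_down_read(&tty->ldisc_sem, 5 * HZ))
 *		return -EBUSY;		(wait timed out)
 *	...use the ldisc...
 *	ldsem_up_read(&tty->ldisc_sem);
 */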

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while (count >= 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
			lockdep_acquire_read(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
			lockdep_acquire(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}
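
/*
 * Worked example (illustrative): if the last active reader releases while
 * one writer waits, count goes from LDSEM_READ_BIAS + LDSEM_WAIT_BIAS to
 * LDSEM_WAIT_BIAS: negative, with a zero active part, so the wake-check
 * above fires and ldsem_wake() grants the lock to the waiting writer.
 */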

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
	if (count < 0)
		ldsem_wake(sem);
}


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif