/* xref: /linux/kernel/locking/qrwlock.c (revision c79c3c34f75d72a066e292b10aa50fc758c97c89) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>

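/*
 * For reference, the qrwlock lock word (lock->cnts) packs the writer state
 * into its low bits and the reader count above it; see the definitions in
 * include/asm-generic/qrwlock.h for the authoritative values.  In broad
 * terms:
 *
 *	_QW_LOCKED  - a writer holds the lock
 *	_QW_WAITING - a writer is queued and waiting for readers to drain
 *	_QR_BIAS    - the per-reader increment of the reader count
 *
 * Readers add/subtract _QR_BIAS; writers move the low bits between
 * unlocked, _QW_WAITING and _QW_LOCKED.
 */
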
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
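	/*
	 * The fastpath (queued_read_lock()) has already added _QR_BIAS
	 * before calling into this slowpath, so back that increment out
	 * here; it is re-applied below once this reader holds wait_lock,
	 * i.e. once it has reached the head of the wait queue.
	 */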
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);

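/*
 * For context, a sketch of the read-lock fastpath that falls back to the
 * slowpath above.  It mirrors the asm-generic qrwlock header; exact details
 * may differ in the real tree, so treat it as illustrative:
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		int cnts;
 *
 *		cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;
 *
 *		// The slowpath will back out the reader count if necessary.
 *		queued_read_lock_slowpath(lock);
 *	}
 */
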
/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_add(_QW_WAITING, &lock->cnts);

	/* When no more readers or writers, set the locked flag */
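	/*
	 * The cmpxchg below can be relaxed because the ACQUIRE ordering
	 * needed for the writer's critical section is already provided by
	 * atomic_cond_read_acquire(); the cmpxchg only has to atomically
	 * switch the lock word from _QW_WAITING to _QW_LOCKED once no
	 * readers remain.
	 */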
	do {
		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
					_QW_LOCKED) != _QW_WAITING);
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
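
/*
 * For context, a sketch of the write-lock fastpath that falls back to the
 * slowpath above.  Again this mirrors the asm-generic qrwlock header and is
 * illustrative rather than authoritative:
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		int cnts = 0;
 *
 *		// Acquire the lock directly when it is completely free.
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
 *						      _QW_LOCKED)))
 *			return;
 *
 *		queued_write_lock_slowpath(lock);
 *	}
 *
 * Callers normally reach these paths through read_lock()/write_lock() on a
 * rwlock_t when the architecture selects queued rwlocks.
 */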