xref: /linux/kernel/locking/percpu-rwsem.c (revision 86287543715ac2a6d92d561cc105d79306511457)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	rcu_sync_init(&sem->rss);
	rcuwait_init(&sem->writer);
	init_waitqueue_head(&sem->waiters);
	atomic_set(&sem->block, 0);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
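
/*
 * Illustrative usage sketch, not part of this file: most callers embed the
 * semaphore in a larger object (struct foo here is hypothetical) and use
 * the percpu_init_rwsem() wrapper from <linux/percpu-rwsem.h>, which
 * supplies the static lock_class_key for them:
 *
 *	struct foo {
 *		struct percpu_rw_semaphore rwsem;
 *	};
 *
 *	if (percpu_init_rwsem(&foo->rwsem))
 *		return -ENOMEM;
 *	...
 *	percpu_free_rwsem(&foo->rwsem);
 */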

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	__this_cpu_inc(*sem->read_count);

	/*
	 * Because preemption is disabled here, the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of sem->block, then the
	 * writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the sem->block value, which
	 * in turn means that they are guaranteed to immediately decrement
	 * their sem->read_count, so that it doesn't matter that the writer
	 * missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !sem->block, the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!atomic_read_acquire(&sem->block)))
		return true;

	__this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);

	return false;
}
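
/*
 * The ordering argument above as a two-CPU sketch (illustrative only, not
 * from the source):
 *
 *	reader (this function)		writer (percpu_down_write())
 *	----------------------		----------------------------
 *	__this_cpu_inc(read_count)	atomic_xchg(&sem->block, 1)  // D
 *	smp_mb()  // A			(xchg implies a full barrier)
 *	atomic_read(&sem->block)	per_cpu_sum(read_count)
 *
 * The barriers guarantee that at least one side observes the other's
 * store: either the reader sees sem->block set and backs out, or the
 * writer sees the incremented read_count and waits for the reader in
 * readers_active_check().
 */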

static inline bool __percpu_down_write_trylock(struct percpu_rw_semaphore *sem)
{
	if (atomic_read(&sem->block))
		return false;

	return atomic_xchg(&sem->block, 1) == 0;
}

static bool __percpu_rwsem_trylock(struct percpu_rw_semaphore *sem, bool reader)
{
	if (reader) {
		bool ret;

		preempt_disable();
		ret = __percpu_down_read_trylock(sem);
		preempt_enable();

		return ret;
	}
	return __percpu_down_write_trylock(sem);
}

/*
 * The return value of wait_queue_entry::func means:
 *
 *  <0 - error, wakeup is terminated and the error is returned
 *   0 - no wakeup, a next waiter is tried
 *  >0 - woken, if EXCLUSIVE, counted towards @nr_exclusive.
 *
 * We use EXCLUSIVE for both readers and writers to preserve FIFO order,
 * and play games with the return value to allow waking multiple readers.
 *
 * Specifically, we wake readers until we've woken a single writer, or until a
 * trylock fails.
 */
static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry,
				      unsigned int mode, int wake_flags,
				      void *key)
{
	struct task_struct *p = get_task_struct(wq_entry->private);
	bool reader = wq_entry->flags & WQ_FLAG_CUSTOM;
	struct percpu_rw_semaphore *sem = key;

	/* concurrent against percpu_down_write(), can get stolen */
	if (!__percpu_rwsem_trylock(sem, reader))
		return 1;

	list_del_init(&wq_entry->entry);
	smp_store_release(&wq_entry->private, NULL);

	wake_up_process(p);
	put_task_struct(p);

	return !reader; /* wake (readers until) 1 writer */
}
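
/*
 * Illustrative walk-through, not from the source: with the wait queue
 * ordered [R1, R2, W1, R3], the single __wake_up(..., nr_exclusive = 1, ...)
 * issued by percpu_up_write() proceeds as:
 *
 *	R1: trylock succeeds, woken, returns 0 -> try the next waiter
 *	R2: trylock succeeds, woken, returns 0 -> try the next waiter
 *	W1: trylock succeeds, woken, returns 1 -> counted as the one
 *	    exclusive wakeup, the scan stops
 *
 * R3 stays queued behind the new writer, preserving FIFO order. A failed
 * trylock (the lock got stolen concurrently) also returns 1 and stops the
 * scan, leaving the remaining waiters on the list for the next release.
 */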

static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
{
	DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function);
	bool wait;

	spin_lock_irq(&sem->waiters.lock);
	/*
	 * Serialize against the wakeup in percpu_up_write(); if we fail
	 * the trylock, the wakeup must see us on the list.
	 */
	wait = !__percpu_rwsem_trylock(sem, reader);
	if (wait) {
		wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
		__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
	}
	spin_unlock_irq(&sem->waiters.lock);

	while (wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!smp_load_acquire(&wq_entry.private))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
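
/*
 * Note, added for clarity: wq_entry.private doubles as the wakeup flag.
 * percpu_rwsem_wake_function() hands over the lock first, then clears
 * ->private with smp_store_release(); the smp_load_acquire() above pairs
 * with it, so once the waiter sees NULL it also sees itself removed from
 * the list and owns the lock.
 */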

bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
	if (__percpu_down_read_trylock(sem))
		return true;

	if (try)
		return false;

	preempt_enable();
	percpu_rwsem_wait(sem, /* .reader = */ true);
	preempt_disable();

	return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);
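
/*
 * For reference, condensed from <linux/percpu-rwsem.h> at this revision
 * (illustrative; may drift from the header): the reader fast path only
 * drops into __percpu_down_read() while the rcu_sync machinery says a
 * writer may be around; otherwise taking the lock is a bare per-CPU
 * increment under preempt_disable():
 *
 *	static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 *	{
 *		might_sleep();
 *		rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 *
 *		preempt_disable();
 *		if (likely(rcu_sync_is_idle(&sem->rss)))
 *			__this_cpu_inc(*sem->read_count);
 *		else
 *			__percpu_down_read(sem, false);
 *		preempt_enable();
 *	}
 */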

#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})
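
/*
 * Illustrative example, not from the source: a fast-path reader that takes
 * the lock on CPU0, migrates, and releases it on CPU1 leaves read_count at
 * +1 on CPU0 and -1 on CPU1. The individual counters are meaningless; only
 * the modular sum that per_cpu_sum() computes is, which is why
 * readers_active_check() below compares that sum against zero.
 */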

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero.  If this sum is zero, then it is stable due to the fact that if any
 * newly arriving readers increment a given counter, they will immediately
 * decrement that same counter.
 *
 * Assumes sem->block is set.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}

void percpu_down_write(struct percpu_rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	/*
	 * Try to set sem->block; this provides writer-writer exclusion.
	 * Having sem->block set makes new readers block.
	 */
	if (!__percpu_down_write_trylock(sem))
		percpu_rwsem_wait(sem, /* .reader = */ false);

	/* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */

	/*
	 * If the readers don't see our store of sem->block, then we are
	 * guaranteed to see their sem->read_count increment, and therefore
	 * will wait for them.
	 */

	/* Wait for all active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	/*
	 * Signal that the writer is done; there is no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path, which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	atomic_set_release(&sem->block, 0);

	/*
	 * Prod any pending reader/writer to make progress.
	 */
	__wake_up(&sem->waiters, TASK_NORMAL, 1, sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it's counting.
	 */
	rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
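
/*
 * Illustrative usage sketch, not part of this file; foo is the hypothetical
 * container from the init example above, and the reader entry points live
 * in <linux/percpu-rwsem.h>:
 *
 *	// reader: a per-CPU increment in the common (no writer) case
 *	percpu_down_read(&foo->rwsem);
 *	... read-side critical section ...
 *	percpu_up_read(&foo->rwsem);
 *
 *	// writer: pays an RCU grace period via rcu_sync_enter(), excludes
 *	// other writers via sem->block, then waits out all active readers
 *	percpu_down_write(&foo->rwsem);
 *	... exclusive critical section ...
 *	percpu_up_write(&foo->rwsem);
 */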