/* linux/kernel/locking/percpu-rwsem.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe) */
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *rwsem_key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
	__init_rwsem(&sem->rw_sem, name, rwsem_key);
	rcuwait_init(&sem->writer);
	sem->readers_block = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);
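
/*
 * Illustrative sketch, not part of this file: typical init/teardown of an
 * embedded percpu_rw_semaphore via the percpu_init_rwsem() wrapper from
 * <linux/percpu-rwsem.h>, which supplies the lock_class_key for lockdep.
 * The my_object structure and function names are hypothetical.
 */
#if 0
struct my_object {
	struct percpu_rw_semaphore sem;
};

static int my_object_init(struct my_object *obj)
{
	/* Allocates the per-CPU read_count; returns -ENOMEM on failure. */
	return percpu_init_rwsem(&obj->sem);
}

static void my_object_destroy(struct my_object *obj)
{
	/*
	 * Safe even if init was never reached on zeroed memory, per the
	 * alloc_super() kludge above.
	 */
	percpu_free_rwsem(&obj->sem);
}
#endif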

int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
{
	/*
	 * Due to having preemption disabled the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of readers_block, then
	 * the writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the readers_block value,
	 * which in turn means that they are guaranteed to immediately
	 * decrement their sem->read_count, so that it doesn't matter that the
	 * writer missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !readers_block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!smp_load_acquire(&sem->readers_block)))
		return 1;

	/*
	 * Per the above comment: we still have preemption disabled and
	 * will thus decrement on the same CPU as we incremented.
	 */
	__percpu_up_read(sem);

	if (try)
		return 0;

	/*
	 * We either call schedule() in the wait, or we'll fall through
	 * and reschedule on the preempt_enable() in percpu_down_read().
	 */
	preempt_enable_no_resched();

	/*
	 * Avoid lockdep for the down/up_read(); we already have the lockdep
	 * annotation from the outer percpu_down_read().
	 */
	__down_read(&sem->rw_sem);
	this_cpu_inc(*sem->read_count);
	__up_read(&sem->rw_sem);

	preempt_disable();
	return 1;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);
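
/*
 * For context, a simplified sketch (assumed, not from this file) of the
 * reader fast path in <linux/percpu-rwsem.h> that calls the slow path above;
 * lockdep annotations and the preempt_disable/_enable API variants are
 * omitted.
 */
#if 0
static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	preempt_disable();
	/*
	 * While rcu_sync is idle this per-CPU increment is all a reader
	 * pays; the rcu_sync grace period guarantees the writer cannot
	 * start checking read_count until this increment is visible.
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		__percpu_down_read(sem, false); /* unconditional barrier A */
	preempt_enable();
}
#endif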

void __percpu_up_read(struct percpu_rw_semaphore *sem)
{
	smp_mb(); /* B matches C */
	/*
	 * In other words, if they see our decrement (presumably to aggregate
	 * zero, as that is the only time it matters) they will also see our
	 * critical section.
	 */
	__this_cpu_dec(*sem->read_count);

	/* Prod writer to recheck readers_active */
	rcuwait_wake_up(&sem->writer);
}
EXPORT_SYMBOL_GPL(__percpu_up_read);
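
/*
 * The matching unlock fast path, again a simplified sketch of the header
 * inline (lockdep omitted): a plain per-CPU decrement unless a writer has
 * pushed readers onto the slow path.
 */
#if 0
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	preempt_disable();
	if (likely(rcu_sync_is_idle(&sem->rss)))
		__this_cpu_dec(*sem->read_count);
	else
		__percpu_up_read(sem); /* barrier B; wakes the writer */
	preempt_enable();
}
#endif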

#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})
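
/*
 * Illustrative use of per_cpu_sum() with a hypothetical counter. Note that
 * an individual CPU's counter may go negative: a reader can increment
 * read_count on one CPU and, after migrating, decrement it on another
 * (e.g. CPU0: +1, CPU1: -1), so only the modular sum is meaningful.
 */
#if 0
static DEFINE_PER_CPU(int, my_count);

static int my_count_total(void)
{
	return per_cpu_sum(my_count);	/* sums across all possible CPUs */
}
#endif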

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero.  If this sum is zero, then it is stable due to the fact that if any
 * newly arriving readers increment a given counter, they will immediately
 * decrement that same counter.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}

void percpu_down_write(struct percpu_rw_semaphore *sem)
{
	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	down_write(&sem->rw_sem);

	/*
	 * Notify new readers to block; up until now, and thus throughout the
	 * longish rcu_sync_enter() above, new readers could still come in.
	 */
	WRITE_ONCE(sem->readers_block, 1);

	smp_mb(); /* D matches A */

	/*
	 * If they don't see our write to readers_block, then we are
	 * guaranteed to see their sem->read_count increment, and therefore
	 * will wait for them.
	 */

	/* Wait for all now active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	/*
	 * Signal the writer is done; no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	smp_store_release(&sem->readers_block, 0);

	/*
	 * Release the write lock; this will allow readers back in the game.
	 */
	up_write(&sem->rw_sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it's counting.
	 */
	rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
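
/*
 * Putting it together, an illustrative reader/writer pairing (hypothetical
 * names; DEFINE_STATIC_PERCPU_RWSEM comes from <linux/percpu-rwsem.h>):
 * readers normally pay only a per-CPU increment/decrement, while the writer
 * pays an rcu_sync grace period plus a wait for all active readers.
 */
#if 0
static DEFINE_STATIC_PERCPU_RWSEM(my_rwsem);

static void my_read_side(void)
{
	percpu_down_read(&my_rwsem);	/* fast: per-CPU inc, no atomic RMW */
	/* ... read shared state ... */
	percpu_up_read(&my_rwsem);
}

static void my_write_side(void)
{
	percpu_down_write(&my_rwsem);	/* slow: excludes all readers */
	/* ... modify shared state ... */
	percpu_up_write(&my_rwsem);
}
#endif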