1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Sleepable Read-Copy Update mechanism for mutual exclusion.
4 *
5 * Copyright (C) IBM Corporation, 2006
6 * Copyright (C) Fujitsu, 2012
7 *
8 * Authors: Paul McKenney <paulmck@linux.ibm.com>
9 * Lai Jiangshan <laijs@cn.fujitsu.com>
10 *
11 * For detailed explanation of Read-Copy Update mechanism see -
12 * Documentation/RCU/ *.txt
13 *
14 */
15
16 #define pr_fmt(fmt) "rcu: " fmt
17
18 #include <linux/export.h>
19 #include <linux/mutex.h>
20 #include <linux/percpu.h>
21 #include <linux/preempt.h>
22 #include <linux/rcupdate_wait.h>
23 #include <linux/sched.h>
24 #include <linux/smp.h>
25 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/srcu.h>
29
30 #include "rcu.h"
31 #include "rcu_segcblist.h"
32
33 /* Holdoff in nanoseconds for auto-expediting. */
34 #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
35 static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
36 module_param(exp_holdoff, ulong, 0444);
37
38 /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */
39 static ulong counter_wrap_check = (ULONG_MAX >> 2);
40 module_param(counter_wrap_check, ulong, 0444);
41
42 /*
43 * Control conversion to SRCU_SIZE_BIG:
44 * 0: Don't convert at all.
45 * 1: Convert at init_srcu_struct() time.
46 * 2: Convert when rcutorture invokes srcu_torture_stats_print().
47 * 3: Decide at boot time based on system shape (default).
48 * 0x1x: Convert when excessive contention encountered.
49 */
50 #define SRCU_SIZING_NONE 0
51 #define SRCU_SIZING_INIT 1
52 #define SRCU_SIZING_TORTURE 2
53 #define SRCU_SIZING_AUTO 3
54 #define SRCU_SIZING_CONTEND 0x10
55 #define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
56 #define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
57 #define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
58 #define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
59 #define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
60 static int convert_to_big = SRCU_SIZING_AUTO;
61 module_param(convert_to_big, int, 0444);
62
63 /* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
64 static int big_cpu_lim __read_mostly = 128;
65 module_param(big_cpu_lim, int, 0444);
66
67 /* Contention events per jiffy to initiate transition to big. */
68 static int small_contention_lim __read_mostly = 100;
69 module_param(small_contention_lim, int, 0444);
70
71 /* Early-boot callback-management, so early that no lock is required! */
72 static LIST_HEAD(srcu_boot_list);
73 static bool __read_mostly srcu_init_done;
74
75 static void srcu_invoke_callbacks(struct work_struct *work);
76 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
77 static void process_srcu(struct work_struct *work);
78 static void srcu_delay_timer(struct timer_list *t);
79
80 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
81 #define spin_lock_rcu_node(p) \
82 do { \
83 spin_lock(&ACCESS_PRIVATE(p, lock)); \
84 smp_mb__after_unlock_lock(); \
85 } while (0)
86
87 #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
88
89 #define spin_lock_irq_rcu_node(p) \
90 do { \
91 spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
92 smp_mb__after_unlock_lock(); \
93 } while (0)
94
95 #define spin_unlock_irq_rcu_node(p) \
96 spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
97
98 #define spin_lock_irqsave_rcu_node(p, flags) \
99 do { \
100 spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
101 smp_mb__after_unlock_lock(); \
102 } while (0)
103
104 #define spin_trylock_irqsave_rcu_node(p, flags) \
105 ({ \
106 bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
107 \
108 if (___locked) \
109 smp_mb__after_unlock_lock(); \
110 ___locked; \
111 })
112
113 #define spin_unlock_irqrestore_rcu_node(p, flags) \
114 spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \
115
116 /*
117 * Initialize SRCU per-CPU data. Note that statically allocated
118 * srcu_struct structures might already have srcu_read_lock() and
119 * srcu_read_unlock() running against them. So if the is_static parameter
120 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
121 */
init_srcu_struct_data(struct srcu_struct * ssp)122 static void init_srcu_struct_data(struct srcu_struct *ssp)
123 {
124 int cpu;
125 struct srcu_data *sdp;
126
127 /*
128 * Initialize the per-CPU srcu_data array, which feeds into the
129 * leaves of the srcu_node tree.
130 */
131 WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
132 ARRAY_SIZE(sdp->srcu_unlock_count));
133 for_each_possible_cpu(cpu) {
134 sdp = per_cpu_ptr(ssp->sda, cpu);
135 spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
136 rcu_segcblist_init(&sdp->srcu_cblist);
137 sdp->srcu_cblist_invoking = false;
138 sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq;
139 sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq;
140 sdp->srcu_barrier_head.next = &sdp->srcu_barrier_head;
141 sdp->mynode = NULL;
142 sdp->cpu = cpu;
143 INIT_WORK(&sdp->work, srcu_invoke_callbacks);
144 timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
145 sdp->ssp = ssp;
146 }
147 }
148
149 /* Invalid seq state, used during snp node initialization */
150 #define SRCU_SNP_INIT_SEQ 0x2
151
152 /*
153 * Check whether sequence number corresponding to snp node,
154 * is invalid.
155 */
srcu_invl_snp_seq(unsigned long s)156 static inline bool srcu_invl_snp_seq(unsigned long s)
157 {
158 return s == SRCU_SNP_INIT_SEQ;
159 }
160
161 /*
162 * Allocated and initialize SRCU combining tree. Returns @true if
163 * allocation succeeded and @false otherwise.
164 */
init_srcu_struct_nodes(struct srcu_struct * ssp,gfp_t gfp_flags)165 static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
166 {
167 int cpu;
168 int i;
169 int level = 0;
170 int levelspread[RCU_NUM_LVLS];
171 struct srcu_data *sdp;
172 struct srcu_node *snp;
173 struct srcu_node *snp_first;
174
175 /* Initialize geometry if it has not already been initialized. */
176 rcu_init_geometry();
177 ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags);
178 if (!ssp->srcu_sup->node)
179 return false;
180
181 /* Work out the overall tree geometry. */
182 ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0];
183 for (i = 1; i < rcu_num_lvls; i++)
184 ssp->srcu_sup->level[i] = ssp->srcu_sup->level[i - 1] + num_rcu_lvl[i - 1];
185 rcu_init_levelspread(levelspread, num_rcu_lvl);
186
187 /* Each pass through this loop initializes one srcu_node structure. */
188 srcu_for_each_node_breadth_first(ssp, snp) {
189 spin_lock_init(&ACCESS_PRIVATE(snp, lock));
190 WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
191 ARRAY_SIZE(snp->srcu_data_have_cbs));
192 for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
193 snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
194 snp->srcu_data_have_cbs[i] = 0;
195 }
196 snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
197 snp->grplo = -1;
198 snp->grphi = -1;
199 if (snp == &ssp->srcu_sup->node[0]) {
200 /* Root node, special case. */
201 snp->srcu_parent = NULL;
202 continue;
203 }
204
205 /* Non-root node. */
206 if (snp == ssp->srcu_sup->level[level + 1])
207 level++;
208 snp->srcu_parent = ssp->srcu_sup->level[level - 1] +
209 (snp - ssp->srcu_sup->level[level]) /
210 levelspread[level - 1];
211 }
212
213 /*
214 * Initialize the per-CPU srcu_data array, which feeds into the
215 * leaves of the srcu_node tree.
216 */
217 level = rcu_num_lvls - 1;
218 snp_first = ssp->srcu_sup->level[level];
219 for_each_possible_cpu(cpu) {
220 sdp = per_cpu_ptr(ssp->sda, cpu);
221 sdp->mynode = &snp_first[cpu / levelspread[level]];
222 for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
223 if (snp->grplo < 0)
224 snp->grplo = cpu;
225 snp->grphi = cpu;
226 }
227 sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
228 }
229 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
230 return true;
231 }
232
233 /*
234 * Initialize non-compile-time initialized fields, including the
235 * associated srcu_node and srcu_data structures. The is_static parameter
236 * tells us that ->sda has already been wired up to srcu_data.
237 */
init_srcu_struct_fields(struct srcu_struct * ssp,bool is_static)238 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
239 {
240 if (!is_static)
241 ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL);
242 if (!ssp->srcu_sup)
243 return -ENOMEM;
244 if (!is_static)
245 spin_lock_init(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
246 ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL;
247 ssp->srcu_sup->node = NULL;
248 mutex_init(&ssp->srcu_sup->srcu_cb_mutex);
249 mutex_init(&ssp->srcu_sup->srcu_gp_mutex);
250 ssp->srcu_idx = 0;
251 ssp->srcu_sup->srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL;
252 ssp->srcu_sup->srcu_barrier_seq = 0;
253 mutex_init(&ssp->srcu_sup->srcu_barrier_mutex);
254 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0);
255 INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu);
256 ssp->srcu_sup->sda_is_static = is_static;
257 if (!is_static)
258 ssp->sda = alloc_percpu(struct srcu_data);
259 if (!ssp->sda)
260 goto err_free_sup;
261 init_srcu_struct_data(ssp);
262 ssp->srcu_sup->srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL;
263 ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns();
264 if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
265 if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC))
266 goto err_free_sda;
267 WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
268 }
269 ssp->srcu_sup->srcu_ssp = ssp;
270 smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed,
271 SRCU_GP_SEQ_INITIAL_VAL); /* Init done. */
272 return 0;
273
274 err_free_sda:
275 if (!is_static) {
276 free_percpu(ssp->sda);
277 ssp->sda = NULL;
278 }
279 err_free_sup:
280 if (!is_static) {
281 kfree(ssp->srcu_sup);
282 ssp->srcu_sup = NULL;
283 }
284 return -ENOMEM;
285 }
286
287 #ifdef CONFIG_DEBUG_LOCK_ALLOC
288
__init_srcu_struct(struct srcu_struct * ssp,const char * name,struct lock_class_key * key)289 int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
290 struct lock_class_key *key)
291 {
292 /* Don't re-initialize a lock while it is held. */
293 debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
294 lockdep_init_map(&ssp->dep_map, name, key, 0);
295 return init_srcu_struct_fields(ssp, false);
296 }
297 EXPORT_SYMBOL_GPL(__init_srcu_struct);
298
299 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
300
301 /**
302 * init_srcu_struct - initialize a sleep-RCU structure
303 * @ssp: structure to initialize.
304 *
305 * Must invoke this on a given srcu_struct before passing that srcu_struct
306 * to any other function. Each srcu_struct represents a separate domain
307 * of SRCU protection.
308 */
init_srcu_struct(struct srcu_struct * ssp)309 int init_srcu_struct(struct srcu_struct *ssp)
310 {
311 return init_srcu_struct_fields(ssp, false);
312 }
313 EXPORT_SYMBOL_GPL(init_srcu_struct);
314
315 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
316
317 /*
318 * Initiate a transition to SRCU_SIZE_BIG with lock held.
319 */
__srcu_transition_to_big(struct srcu_struct * ssp)320 static void __srcu_transition_to_big(struct srcu_struct *ssp)
321 {
322 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
323 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_ALLOC);
324 }
325
326 /*
327 * Initiate an idempotent transition to SRCU_SIZE_BIG.
328 */
srcu_transition_to_big(struct srcu_struct * ssp)329 static void srcu_transition_to_big(struct srcu_struct *ssp)
330 {
331 unsigned long flags;
332
333 /* Double-checked locking on ->srcu_size-state. */
334 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL)
335 return;
336 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
337 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) {
338 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
339 return;
340 }
341 __srcu_transition_to_big(ssp);
342 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
343 }
344
345 /*
346 * Check to see if the just-encountered contention event justifies
347 * a transition to SRCU_SIZE_BIG.
348 */
spin_lock_irqsave_check_contention(struct srcu_struct * ssp)349 static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
350 {
351 unsigned long j;
352
353 if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_sup->srcu_size_state)
354 return;
355 j = jiffies;
356 if (ssp->srcu_sup->srcu_size_jiffies != j) {
357 ssp->srcu_sup->srcu_size_jiffies = j;
358 ssp->srcu_sup->srcu_n_lock_retries = 0;
359 }
360 if (++ssp->srcu_sup->srcu_n_lock_retries <= small_contention_lim)
361 return;
362 __srcu_transition_to_big(ssp);
363 }
364
365 /*
366 * Acquire the specified srcu_data structure's ->lock, but check for
367 * excessive contention, which results in initiation of a transition
368 * to SRCU_SIZE_BIG. But only if the srcutree.convert_to_big module
369 * parameter permits this.
370 */
spin_lock_irqsave_sdp_contention(struct srcu_data * sdp,unsigned long * flags)371 static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
372 {
373 struct srcu_struct *ssp = sdp->ssp;
374
375 if (spin_trylock_irqsave_rcu_node(sdp, *flags))
376 return;
377 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
378 spin_lock_irqsave_check_contention(ssp);
379 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, *flags);
380 spin_lock_irqsave_rcu_node(sdp, *flags);
381 }
382
383 /*
384 * Acquire the specified srcu_struct structure's ->lock, but check for
385 * excessive contention, which results in initiation of a transition
386 * to SRCU_SIZE_BIG. But only if the srcutree.convert_to_big module
387 * parameter permits this.
388 */
spin_lock_irqsave_ssp_contention(struct srcu_struct * ssp,unsigned long * flags)389 static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
390 {
391 if (spin_trylock_irqsave_rcu_node(ssp->srcu_sup, *flags))
392 return;
393 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
394 spin_lock_irqsave_check_contention(ssp);
395 }
396
397 /*
398 * First-use initialization of statically allocated srcu_struct
399 * structure. Wiring up the combining tree is more than can be
400 * done with compile-time initialization, so this check is added
401 * to each update-side SRCU primitive. Use ssp->lock, which -is-
402 * compile-time initialized, to resolve races involving multiple
403 * CPUs trying to garner first-use privileges.
404 */
check_init_srcu_struct(struct srcu_struct * ssp)405 static void check_init_srcu_struct(struct srcu_struct *ssp)
406 {
407 unsigned long flags;
408
409 /* The smp_load_acquire() pairs with the smp_store_release(). */
410 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed))) /*^^^*/
411 return; /* Already initialized. */
412 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
413 if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq_needed)) {
414 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
415 return;
416 }
417 init_srcu_struct_fields(ssp, true);
418 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
419 }
420
421 /*
422 * Returns approximate total of the readers' ->srcu_lock_count[] values
423 * for the rank of per-CPU counters specified by idx.
424 */
srcu_readers_lock_idx(struct srcu_struct * ssp,int idx)425 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
426 {
427 int cpu;
428 unsigned long sum = 0;
429
430 for_each_possible_cpu(cpu) {
431 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
432
433 sum += atomic_long_read(&cpuc->srcu_lock_count[idx]);
434 }
435 return sum;
436 }
437
438 /*
439 * Returns approximate total of the readers' ->srcu_unlock_count[] values
440 * for the rank of per-CPU counters specified by idx.
441 */
srcu_readers_unlock_idx(struct srcu_struct * ssp,int idx)442 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
443 {
444 int cpu;
445 unsigned long mask = 0;
446 unsigned long sum = 0;
447
448 for_each_possible_cpu(cpu) {
449 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
450
451 sum += atomic_long_read(&cpuc->srcu_unlock_count[idx]);
452 if (IS_ENABLED(CONFIG_PROVE_RCU))
453 mask = mask | READ_ONCE(cpuc->srcu_nmi_safety);
454 }
455 WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask >> 1)),
456 "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp);
457 return sum;
458 }
459
460 /*
461 * Return true if the number of pre-existing readers is determined to
462 * be zero.
463 */
srcu_readers_active_idx_check(struct srcu_struct * ssp,int idx)464 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
465 {
466 unsigned long unlocks;
467
468 unlocks = srcu_readers_unlock_idx(ssp, idx);
469
470 /*
471 * Make sure that a lock is always counted if the corresponding
472 * unlock is counted. Needs to be a smp_mb() as the read side may
473 * contain a read from a variable that is written to before the
474 * synchronize_srcu() in the write side. In this case smp_mb()s
475 * A and B act like the store buffering pattern.
476 *
477 * This smp_mb() also pairs with smp_mb() C to prevent accesses
478 * after the synchronize_srcu() from being executed before the
479 * grace period ends.
480 */
481 smp_mb(); /* A */
482
483 /*
484 * If the locks are the same as the unlocks, then there must have
485 * been no readers on this index at some point in this function.
486 * But there might be more readers, as a task might have read
487 * the current ->srcu_idx but not yet have incremented its CPU's
488 * ->srcu_lock_count[idx] counter. In fact, it is possible
489 * that most of the tasks have been preempted between fetching
490 * ->srcu_idx and incrementing ->srcu_lock_count[idx]. And there
491 * could be almost (ULONG_MAX / sizeof(struct task_struct)) tasks
492 * in a system whose address space was fully populated with memory.
493 * Call this quantity Nt.
494 *
495 * So suppose that the updater is preempted at this point in the
496 * code for a long time. That now-preempted updater has already
497 * flipped ->srcu_idx (possibly during the preceding grace period),
498 * done an smp_mb() (again, possibly during the preceding grace
499 * period), and summed up the ->srcu_unlock_count[idx] counters.
500 * How many times can a given one of the aforementioned Nt tasks
501 * increment the old ->srcu_idx value's ->srcu_lock_count[idx]
502 * counter, in the absence of nesting?
503 *
504 * It can clearly do so once, given that it has already fetched
505 * the old value of ->srcu_idx and is just about to use that value
506 * to index its increment of ->srcu_lock_count[idx]. But as soon as
507 * it leaves that SRCU read-side critical section, it will increment
508 * ->srcu_unlock_count[idx], which must follow the updater's above
509 * read from that same value. Thus, as soon the reading task does
510 * an smp_mb() and a later fetch from ->srcu_idx, that task will be
511 * guaranteed to get the new index. Except that the increment of
512 * ->srcu_unlock_count[idx] in __srcu_read_unlock() is after the
513 * smp_mb(), and the fetch from ->srcu_idx in __srcu_read_lock()
514 * is before the smp_mb(). Thus, that task might not see the new
515 * value of ->srcu_idx until the -second- __srcu_read_lock(),
516 * which in turn means that this task might well increment
517 * ->srcu_lock_count[idx] for the old value of ->srcu_idx twice,
518 * not just once.
519 *
520 * However, it is important to note that a given smp_mb() takes
521 * effect not just for the task executing it, but also for any
522 * later task running on that same CPU.
523 *
524 * That is, there can be almost Nt + Nc further increments of
525 * ->srcu_lock_count[idx] for the old index, where Nc is the number
526 * of CPUs. But this is OK because the size of the task_struct
527 * structure limits the value of Nt and current systems limit Nc
528 * to a few thousand.
529 *
530 * OK, but what about nesting? This does impose a limit on
531 * nesting of half of the size of the task_struct structure
532 * (measured in bytes), which should be sufficient. A late 2022
533 * TREE01 rcutorture run reported this size to be no less than
534 * 9408 bytes, allowing up to 4704 levels of nesting, which is
535 * comfortably beyond excessive. Especially on 64-bit systems,
536 * which are unlikely to be configured with an address space fully
537 * populated with memory, at least not anytime soon.
538 */
539 return srcu_readers_lock_idx(ssp, idx) == unlocks;
540 }
541
542 /**
543 * srcu_readers_active - returns true if there are readers. and false
544 * otherwise
545 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
546 *
547 * Note that this is not an atomic primitive, and can therefore suffer
548 * severe errors when invoked on an active srcu_struct. That said, it
549 * can be useful as an error check at cleanup time.
550 */
srcu_readers_active(struct srcu_struct * ssp)551 static bool srcu_readers_active(struct srcu_struct *ssp)
552 {
553 int cpu;
554 unsigned long sum = 0;
555
556 for_each_possible_cpu(cpu) {
557 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
558
559 sum += atomic_long_read(&cpuc->srcu_lock_count[0]);
560 sum += atomic_long_read(&cpuc->srcu_lock_count[1]);
561 sum -= atomic_long_read(&cpuc->srcu_unlock_count[0]);
562 sum -= atomic_long_read(&cpuc->srcu_unlock_count[1]);
563 }
564 return sum;
565 }
566
567 /*
568 * We use an adaptive strategy for synchronize_srcu() and especially for
569 * synchronize_srcu_expedited(). We spin for a fixed time period
570 * (defined below, boot time configurable) to allow SRCU readers to exit
571 * their read-side critical sections. If there are still some readers
572 * after one jiffy, we repeatedly block for one jiffy time periods.
573 * The blocking time is increased as the grace-period age increases,
574 * with max blocking time capped at 10 jiffies.
575 */
576 #define SRCU_DEFAULT_RETRY_CHECK_DELAY 5
577
578 static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
579 module_param(srcu_retry_check_delay, ulong, 0444);
580
581 #define SRCU_INTERVAL 1 // Base delay if no expedited GPs pending.
582 #define SRCU_MAX_INTERVAL 10 // Maximum incremental delay from slow readers.
583
584 #define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO 3UL // Lowmark on default per-GP-phase
585 // no-delay instances.
586 #define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI 1000UL // Highmark on default per-GP-phase
587 // no-delay instances.
588
589 #define SRCU_UL_CLAMP_LO(val, low) ((val) > (low) ? (val) : (low))
590 #define SRCU_UL_CLAMP_HI(val, high) ((val) < (high) ? (val) : (high))
591 #define SRCU_UL_CLAMP(val, low, high) SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
592 // per-GP-phase no-delay instances adjusted to allow non-sleeping poll upto
593 // one jiffies time duration. Mult by 2 is done to factor in the srcu_get_delay()
594 // called from process_srcu().
595 #define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED \
596 (2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)
597
598 // Maximum per-GP-phase consecutive no-delay instances.
599 #define SRCU_DEFAULT_MAX_NODELAY_PHASE \
600 SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED, \
601 SRCU_DEFAULT_MAX_NODELAY_PHASE_LO, \
602 SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
603
604 static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
605 module_param(srcu_max_nodelay_phase, ulong, 0444);
606
607 // Maximum consecutive no-delay instances.
608 #define SRCU_DEFAULT_MAX_NODELAY (SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ? \
609 SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)
610
611 static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
612 module_param(srcu_max_nodelay, ulong, 0444);
613
614 /*
615 * Return grace-period delay, zero if there are expedited grace
616 * periods pending, SRCU_INTERVAL otherwise.
617 */
srcu_get_delay(struct srcu_struct * ssp)618 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
619 {
620 unsigned long gpstart;
621 unsigned long j;
622 unsigned long jbase = SRCU_INTERVAL;
623 struct srcu_usage *sup = ssp->srcu_sup;
624
625 if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
626 jbase = 0;
627 if (rcu_seq_state(READ_ONCE(sup->srcu_gp_seq))) {
628 j = jiffies - 1;
629 gpstart = READ_ONCE(sup->srcu_gp_start);
630 if (time_after(j, gpstart))
631 jbase += j - gpstart;
632 if (!jbase) {
633 ASSERT_EXCLUSIVE_WRITER(sup->srcu_n_exp_nodelay);
634 WRITE_ONCE(sup->srcu_n_exp_nodelay, READ_ONCE(sup->srcu_n_exp_nodelay) + 1);
635 if (READ_ONCE(sup->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
636 jbase = 1;
637 }
638 }
639 return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
640 }
641
642 /**
643 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
644 * @ssp: structure to clean up.
645 *
646 * Must invoke this after you are finished using a given srcu_struct that
647 * was initialized via init_srcu_struct(), else you leak memory.
648 */
cleanup_srcu_struct(struct srcu_struct * ssp)649 void cleanup_srcu_struct(struct srcu_struct *ssp)
650 {
651 int cpu;
652 struct srcu_usage *sup = ssp->srcu_sup;
653
654 if (WARN_ON(!srcu_get_delay(ssp)))
655 return; /* Just leak it! */
656 if (WARN_ON(srcu_readers_active(ssp)))
657 return; /* Just leak it! */
658 flush_delayed_work(&sup->work);
659 for_each_possible_cpu(cpu) {
660 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
661
662 del_timer_sync(&sdp->delay_work);
663 flush_work(&sdp->work);
664 if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
665 return; /* Forgot srcu_barrier(), so just leak it! */
666 }
667 if (WARN_ON(rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
668 WARN_ON(rcu_seq_current(&sup->srcu_gp_seq) != sup->srcu_gp_seq_needed) ||
669 WARN_ON(srcu_readers_active(ssp))) {
670 pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
671 __func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)),
672 rcu_seq_current(&sup->srcu_gp_seq), sup->srcu_gp_seq_needed);
673 return; // Caller forgot to stop doing call_srcu()?
674 // Or caller invoked start_poll_synchronize_srcu()
675 // and then cleanup_srcu_struct() before that grace
676 // period ended?
677 }
678 kfree(sup->node);
679 sup->node = NULL;
680 sup->srcu_size_state = SRCU_SIZE_SMALL;
681 if (!sup->sda_is_static) {
682 free_percpu(ssp->sda);
683 ssp->sda = NULL;
684 kfree(sup);
685 ssp->srcu_sup = NULL;
686 }
687 }
688 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
689
690 #ifdef CONFIG_PROVE_RCU
691 /*
692 * Check for consistent NMI safety.
693 */
srcu_check_nmi_safety(struct srcu_struct * ssp,bool nmi_safe)694 void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
695 {
696 int nmi_safe_mask = 1 << nmi_safe;
697 int old_nmi_safe_mask;
698 struct srcu_data *sdp;
699
700 /* NMI-unsafe use in NMI is a bad sign */
701 WARN_ON_ONCE(!nmi_safe && in_nmi());
702 sdp = raw_cpu_ptr(ssp->sda);
703 old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
704 if (!old_nmi_safe_mask) {
705 WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
706 return;
707 }
708 WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
709 }
710 EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
711 #endif /* CONFIG_PROVE_RCU */
712
713 /*
714 * Counts the new reader in the appropriate per-CPU element of the
715 * srcu_struct.
716 * Returns an index that must be passed to the matching srcu_read_unlock().
717 */
__srcu_read_lock(struct srcu_struct * ssp)718 int __srcu_read_lock(struct srcu_struct *ssp)
719 {
720 int idx;
721
722 idx = READ_ONCE(ssp->srcu_idx) & 0x1;
723 this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
724 smp_mb(); /* B */ /* Avoid leaking the critical section. */
725 return idx;
726 }
727 EXPORT_SYMBOL_GPL(__srcu_read_lock);
728
729 /*
730 * Removes the count for the old reader from the appropriate per-CPU
731 * element of the srcu_struct. Note that this may well be a different
732 * CPU than that which was incremented by the corresponding srcu_read_lock().
733 */
__srcu_read_unlock(struct srcu_struct * ssp,int idx)734 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
735 {
736 smp_mb(); /* C */ /* Avoid leaking the critical section. */
737 this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
738 }
739 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
740
741 #ifdef CONFIG_NEED_SRCU_NMI_SAFE
742
743 /*
744 * Counts the new reader in the appropriate per-CPU element of the
745 * srcu_struct, but in an NMI-safe manner using RMW atomics.
746 * Returns an index that must be passed to the matching srcu_read_unlock().
747 */
__srcu_read_lock_nmisafe(struct srcu_struct * ssp)748 int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
749 {
750 int idx;
751 struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
752
753 idx = READ_ONCE(ssp->srcu_idx) & 0x1;
754 atomic_long_inc(&sdp->srcu_lock_count[idx]);
755 smp_mb__after_atomic(); /* B */ /* Avoid leaking the critical section. */
756 return idx;
757 }
758 EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
759
760 /*
761 * Removes the count for the old reader from the appropriate per-CPU
762 * element of the srcu_struct. Note that this may well be a different
763 * CPU than that which was incremented by the corresponding srcu_read_lock().
764 */
__srcu_read_unlock_nmisafe(struct srcu_struct * ssp,int idx)765 void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
766 {
767 struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
768
769 smp_mb__before_atomic(); /* C */ /* Avoid leaking the critical section. */
770 atomic_long_inc(&sdp->srcu_unlock_count[idx]);
771 }
772 EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
773
774 #endif // CONFIG_NEED_SRCU_NMI_SAFE
775
776 /*
777 * Start an SRCU grace period.
778 */
srcu_gp_start(struct srcu_struct * ssp)779 static void srcu_gp_start(struct srcu_struct *ssp)
780 {
781 int state;
782
783 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
784 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed));
785 WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
786 WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
787 smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
788 rcu_seq_start(&ssp->srcu_sup->srcu_gp_seq);
789 state = rcu_seq_state(ssp->srcu_sup->srcu_gp_seq);
790 WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
791 }
792
793
srcu_delay_timer(struct timer_list * t)794 static void srcu_delay_timer(struct timer_list *t)
795 {
796 struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
797
798 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
799 }
800
srcu_queue_delayed_work_on(struct srcu_data * sdp,unsigned long delay)801 static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
802 unsigned long delay)
803 {
804 if (!delay) {
805 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
806 return;
807 }
808
809 timer_reduce(&sdp->delay_work, jiffies + delay);
810 }
811
812 /*
813 * Schedule callback invocation for the specified srcu_data structure,
814 * if possible, on the corresponding CPU.
815 */
srcu_schedule_cbs_sdp(struct srcu_data * sdp,unsigned long delay)816 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
817 {
818 srcu_queue_delayed_work_on(sdp, delay);
819 }
820
821 /*
822 * Schedule callback invocation for all srcu_data structures associated
823 * with the specified srcu_node structure that have callbacks for the
824 * just-completed grace period, the one corresponding to idx. If possible,
825 * schedule this invocation on the corresponding CPUs.
826 */
srcu_schedule_cbs_snp(struct srcu_struct * ssp,struct srcu_node * snp,unsigned long mask,unsigned long delay)827 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
828 unsigned long mask, unsigned long delay)
829 {
830 int cpu;
831
832 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
833 if (!(mask & (1UL << (cpu - snp->grplo))))
834 continue;
835 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
836 }
837 }
838
839 /*
840 * Note the end of an SRCU grace period. Initiates callback invocation
841 * and starts a new grace period if needed.
842 *
843 * The ->srcu_cb_mutex acquisition does not protect any data, but
844 * instead prevents more than one grace period from starting while we
845 * are initiating callback invocation. This allows the ->srcu_have_cbs[]
846 * array to have a finite number of elements.
847 */
srcu_gp_end(struct srcu_struct * ssp)848 static void srcu_gp_end(struct srcu_struct *ssp)
849 {
850 unsigned long cbdelay = 1;
851 bool cbs;
852 bool last_lvl;
853 int cpu;
854 unsigned long gpseq;
855 int idx;
856 unsigned long mask;
857 struct srcu_data *sdp;
858 unsigned long sgsne;
859 struct srcu_node *snp;
860 int ss_state;
861 struct srcu_usage *sup = ssp->srcu_sup;
862
863 /* Prevent more than one additional grace period. */
864 mutex_lock(&sup->srcu_cb_mutex);
865
866 /* End the current grace period. */
867 spin_lock_irq_rcu_node(sup);
868 idx = rcu_seq_state(sup->srcu_gp_seq);
869 WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
870 if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
871 cbdelay = 0;
872
873 WRITE_ONCE(sup->srcu_last_gp_end, ktime_get_mono_fast_ns());
874 rcu_seq_end(&sup->srcu_gp_seq);
875 gpseq = rcu_seq_current(&sup->srcu_gp_seq);
876 if (ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, gpseq))
877 WRITE_ONCE(sup->srcu_gp_seq_needed_exp, gpseq);
878 spin_unlock_irq_rcu_node(sup);
879 mutex_unlock(&sup->srcu_gp_mutex);
880 /* A new grace period can start at this point. But only one. */
881
882 /* Initiate callback invocation as needed. */
883 ss_state = smp_load_acquire(&sup->srcu_size_state);
884 if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
885 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
886 cbdelay);
887 } else {
888 idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
889 srcu_for_each_node_breadth_first(ssp, snp) {
890 spin_lock_irq_rcu_node(snp);
891 cbs = false;
892 last_lvl = snp >= sup->level[rcu_num_lvls - 1];
893 if (last_lvl)
894 cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
895 snp->srcu_have_cbs[idx] = gpseq;
896 rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
897 sgsne = snp->srcu_gp_seq_needed_exp;
898 if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
899 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
900 if (ss_state < SRCU_SIZE_BIG)
901 mask = ~0;
902 else
903 mask = snp->srcu_data_have_cbs[idx];
904 snp->srcu_data_have_cbs[idx] = 0;
905 spin_unlock_irq_rcu_node(snp);
906 if (cbs)
907 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
908 }
909 }
910
911 /* Occasionally prevent srcu_data counter wrap. */
912 if (!(gpseq & counter_wrap_check))
913 for_each_possible_cpu(cpu) {
914 sdp = per_cpu_ptr(ssp->sda, cpu);
915 spin_lock_irq_rcu_node(sdp);
916 if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
917 sdp->srcu_gp_seq_needed = gpseq;
918 if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
919 sdp->srcu_gp_seq_needed_exp = gpseq;
920 spin_unlock_irq_rcu_node(sdp);
921 }
922
923 /* Callback initiation done, allow grace periods after next. */
924 mutex_unlock(&sup->srcu_cb_mutex);
925
926 /* Start a new grace period if needed. */
927 spin_lock_irq_rcu_node(sup);
928 gpseq = rcu_seq_current(&sup->srcu_gp_seq);
929 if (!rcu_seq_state(gpseq) &&
930 ULONG_CMP_LT(gpseq, sup->srcu_gp_seq_needed)) {
931 srcu_gp_start(ssp);
932 spin_unlock_irq_rcu_node(sup);
933 srcu_reschedule(ssp, 0);
934 } else {
935 spin_unlock_irq_rcu_node(sup);
936 }
937
938 /* Transition to big if needed. */
939 if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
940 if (ss_state == SRCU_SIZE_ALLOC)
941 init_srcu_struct_nodes(ssp, GFP_KERNEL);
942 else
943 smp_store_release(&sup->srcu_size_state, ss_state + 1);
944 }
945 }
946
947 /*
948 * Funnel-locking scheme to scalably mediate many concurrent expedited
949 * grace-period requests. This function is invoked for the first known
950 * expedited request for a grace period that has already been requested,
951 * but without expediting. To start a completely new grace period,
952 * whether expedited or not, use srcu_funnel_gp_start() instead.
953 */
srcu_funnel_exp_start(struct srcu_struct * ssp,struct srcu_node * snp,unsigned long s)954 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
955 unsigned long s)
956 {
957 unsigned long flags;
958 unsigned long sgsne;
959
960 if (snp)
961 for (; snp != NULL; snp = snp->srcu_parent) {
962 sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
963 if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, s)) ||
964 (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
965 return;
966 spin_lock_irqsave_rcu_node(snp, flags);
967 sgsne = snp->srcu_gp_seq_needed_exp;
968 if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
969 spin_unlock_irqrestore_rcu_node(snp, flags);
970 return;
971 }
972 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
973 spin_unlock_irqrestore_rcu_node(snp, flags);
974 }
975 spin_lock_irqsave_ssp_contention(ssp, &flags);
976 if (ULONG_CMP_LT(ssp->srcu_sup->srcu_gp_seq_needed_exp, s))
977 WRITE_ONCE(ssp->srcu_sup->srcu_gp_seq_needed_exp, s);
978 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
979 }
980
981 /*
982 * Funnel-locking scheme to scalably mediate many concurrent grace-period
983 * requests. The winner has to do the work of actually starting grace
984 * period s. Losers must either ensure that their desired grace-period
985 * number is recorded on at least their leaf srcu_node structure, or they
986 * must take steps to invoke their own callbacks.
987 *
988 * Note that this function also does the work of srcu_funnel_exp_start(),
989 * in some cases by directly invoking it.
990 *
991 * The srcu read lock should be hold around this function. And s is a seq snap
992 * after holding that lock.
993 */
srcu_funnel_gp_start(struct srcu_struct * ssp,struct srcu_data * sdp,unsigned long s,bool do_norm)994 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
995 unsigned long s, bool do_norm)
996 {
997 unsigned long flags;
998 int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
999 unsigned long sgsne;
1000 struct srcu_node *snp;
1001 struct srcu_node *snp_leaf;
1002 unsigned long snp_seq;
1003 struct srcu_usage *sup = ssp->srcu_sup;
1004
1005 /* Ensure that snp node tree is fully initialized before traversing it */
1006 if (smp_load_acquire(&sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1007 snp_leaf = NULL;
1008 else
1009 snp_leaf = sdp->mynode;
1010
1011 if (snp_leaf)
1012 /* Each pass through the loop does one level of the srcu_node tree. */
1013 for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
1014 if (WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) && snp != snp_leaf)
1015 return; /* GP already done and CBs recorded. */
1016 spin_lock_irqsave_rcu_node(snp, flags);
1017 snp_seq = snp->srcu_have_cbs[idx];
1018 if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
1019 if (snp == snp_leaf && snp_seq == s)
1020 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
1021 spin_unlock_irqrestore_rcu_node(snp, flags);
1022 if (snp == snp_leaf && snp_seq != s) {
1023 srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
1024 return;
1025 }
1026 if (!do_norm)
1027 srcu_funnel_exp_start(ssp, snp, s);
1028 return;
1029 }
1030 snp->srcu_have_cbs[idx] = s;
1031 if (snp == snp_leaf)
1032 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
1033 sgsne = snp->srcu_gp_seq_needed_exp;
1034 if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
1035 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
1036 spin_unlock_irqrestore_rcu_node(snp, flags);
1037 }
1038
1039 /* Top of tree, must ensure the grace period will be started. */
1040 spin_lock_irqsave_ssp_contention(ssp, &flags);
1041 if (ULONG_CMP_LT(sup->srcu_gp_seq_needed, s)) {
1042 /*
1043 * Record need for grace period s. Pair with load
1044 * acquire setting up for initialization.
1045 */
1046 smp_store_release(&sup->srcu_gp_seq_needed, s); /*^^^*/
1047 }
1048 if (!do_norm && ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, s))
1049 WRITE_ONCE(sup->srcu_gp_seq_needed_exp, s);
1050
1051 /* If grace period not already in progress, start it. */
1052 if (!WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) &&
1053 rcu_seq_state(sup->srcu_gp_seq) == SRCU_STATE_IDLE) {
1054 WARN_ON_ONCE(ULONG_CMP_GE(sup->srcu_gp_seq, sup->srcu_gp_seq_needed));
1055 srcu_gp_start(ssp);
1056
1057 // And how can that list_add() in the "else" clause
1058 // possibly be safe for concurrent execution? Well,
1059 // it isn't. And it does not have to be. After all, it
1060 // can only be executed during early boot when there is only
1061 // the one boot CPU running with interrupts still disabled.
1062 if (likely(srcu_init_done))
1063 queue_delayed_work(rcu_gp_wq, &sup->work,
1064 !!srcu_get_delay(ssp));
1065 else if (list_empty(&sup->work.work.entry))
1066 list_add(&sup->work.work.entry, &srcu_boot_list);
1067 }
1068 spin_unlock_irqrestore_rcu_node(sup, flags);
1069 }
1070
1071 /*
1072 * Wait until all readers counted by array index idx complete, but
1073 * loop an additional time if there is an expedited grace period pending.
1074 * The caller must ensure that ->srcu_idx is not changed while checking.
1075 */
try_check_zero(struct srcu_struct * ssp,int idx,int trycount)1076 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
1077 {
1078 unsigned long curdelay;
1079
1080 curdelay = !srcu_get_delay(ssp);
1081
1082 for (;;) {
1083 if (srcu_readers_active_idx_check(ssp, idx))
1084 return true;
1085 if ((--trycount + curdelay) <= 0)
1086 return false;
1087 udelay(srcu_retry_check_delay);
1088 }
1089 }
1090
1091 /*
1092 * Increment the ->srcu_idx counter so that future SRCU readers will
1093 * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
1094 * us to wait for pre-existing readers in a starvation-free manner.
1095 */
srcu_flip(struct srcu_struct * ssp)1096 static void srcu_flip(struct srcu_struct *ssp)
1097 {
1098 /*
1099 * Because the flip of ->srcu_idx is executed only if the
1100 * preceding call to srcu_readers_active_idx_check() found that
1101 * the ->srcu_unlock_count[] and ->srcu_lock_count[] sums matched
1102 * and because that summing uses atomic_long_read(), there is
1103 * ordering due to a control dependency between that summing and
1104 * the WRITE_ONCE() in this call to srcu_flip(). This ordering
1105 * ensures that if this updater saw a given reader's increment from
1106 * __srcu_read_lock(), that reader was using a value of ->srcu_idx
1107 * from before the previous call to srcu_flip(), which should be
1108 * quite rare. This ordering thus helps forward progress because
1109 * the grace period could otherwise be delayed by additional
1110 * calls to __srcu_read_lock() using that old (soon to be new)
1111 * value of ->srcu_idx.
1112 *
1113 * This sum-equality check and ordering also ensures that if
1114 * a given call to __srcu_read_lock() uses the new value of
1115 * ->srcu_idx, this updater's earlier scans cannot have seen
1116 * that reader's increments, which is all to the good, because
1117 * this grace period need not wait on that reader. After all,
1118 * if those earlier scans had seen that reader, there would have
1119 * been a sum mismatch and this code would not be reached.
1120 *
1121 * This means that the following smp_mb() is redundant, but
1122 * it stays until either (1) Compilers learn about this sort of
1123 * control dependency or (2) Some production workload running on
1124 * a production system is unduly delayed by this slowpath smp_mb().
1125 */
1126 smp_mb(); /* E */ /* Pairs with B and C. */
1127
1128 WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); // Flip the counter.
1129
1130 /*
1131 * Ensure that if the updater misses an __srcu_read_unlock()
1132 * increment, that task's __srcu_read_lock() following its next
1133 * __srcu_read_lock() or __srcu_read_unlock() will see the above
1134 * counter update. Note that both this memory barrier and the
1135 * one in srcu_readers_active_idx_check() provide the guarantee
1136 * for __srcu_read_lock().
1137 */
1138 smp_mb(); /* D */ /* Pairs with C. */
1139 }
1140
1141 /*
1142 * If SRCU is likely idle, return true, otherwise return false.
1143 *
1144 * Note that it is OK for several current from-idle requests for a new
1145 * grace period from idle to specify expediting because they will all end
1146 * up requesting the same grace period anyhow. So no loss.
1147 *
1148 * Note also that if any CPU (including the current one) is still invoking
1149 * callbacks, this function will nevertheless say "idle". This is not
1150 * ideal, but the overhead of checking all CPUs' callback lists is even
1151 * less ideal, especially on large systems. Furthermore, the wakeup
1152 * can happen before the callback is fully removed, so we have no choice
1153 * but to accept this type of error.
1154 *
1155 * This function is also subject to counter-wrap errors, but let's face
1156 * it, if this function was preempted for enough time for the counters
1157 * to wrap, it really doesn't matter whether or not we expedite the grace
1158 * period. The extra overhead of a needlessly expedited grace period is
1159 * negligible when amortized over that time period, and the extra latency
1160 * of a needlessly non-expedited grace period is similarly negligible.
1161 */
srcu_might_be_idle(struct srcu_struct * ssp)1162 static bool srcu_might_be_idle(struct srcu_struct *ssp)
1163 {
1164 unsigned long curseq;
1165 unsigned long flags;
1166 struct srcu_data *sdp;
1167 unsigned long t;
1168 unsigned long tlast;
1169
1170 check_init_srcu_struct(ssp);
1171 /* If the local srcu_data structure has callbacks, not idle. */
1172 sdp = raw_cpu_ptr(ssp->sda);
1173 spin_lock_irqsave_rcu_node(sdp, flags);
1174 if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
1175 spin_unlock_irqrestore_rcu_node(sdp, flags);
1176 return false; /* Callbacks already present, so not idle. */
1177 }
1178 spin_unlock_irqrestore_rcu_node(sdp, flags);
1179
1180 /*
1181 * No local callbacks, so probabilistically probe global state.
1182 * Exact information would require acquiring locks, which would
1183 * kill scalability, hence the probabilistic nature of the probe.
1184 */
1185
1186 /* First, see if enough time has passed since the last GP. */
1187 t = ktime_get_mono_fast_ns();
1188 tlast = READ_ONCE(ssp->srcu_sup->srcu_last_gp_end);
1189 if (exp_holdoff == 0 ||
1190 time_in_range_open(t, tlast, tlast + exp_holdoff))
1191 return false; /* Too soon after last GP. */
1192
1193 /* Next, check for probable idleness. */
1194 curseq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
1195 smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
1196 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_sup->srcu_gp_seq_needed)))
1197 return false; /* Grace period in progress, so not idle. */
1198 smp_mb(); /* Order ->srcu_gp_seq with prior access. */
1199 if (curseq != rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq))
1200 return false; /* GP # changed, so not idle. */
1201 return true; /* With reasonable probability, idle! */
1202 }
1203
1204 /*
1205 * SRCU callback function to leak a callback.
1206 */
srcu_leak_callback(struct rcu_head * rhp)1207 static void srcu_leak_callback(struct rcu_head *rhp)
1208 {
1209 }
1210
1211 /*
1212 * Start an SRCU grace period, and also queue the callback if non-NULL.
1213 */
srcu_gp_start_if_needed(struct srcu_struct * ssp,struct rcu_head * rhp,bool do_norm)1214 static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
1215 struct rcu_head *rhp, bool do_norm)
1216 {
1217 unsigned long flags;
1218 int idx;
1219 bool needexp = false;
1220 bool needgp = false;
1221 unsigned long s;
1222 struct srcu_data *sdp;
1223 struct srcu_node *sdp_mynode;
1224 int ss_state;
1225
1226 check_init_srcu_struct(ssp);
1227 /*
1228 * While starting a new grace period, make sure we are in an
1229 * SRCU read-side critical section so that the grace-period
1230 * sequence number cannot wrap around in the meantime.
1231 */
1232 idx = __srcu_read_lock_nmisafe(ssp);
1233 ss_state = smp_load_acquire(&ssp->srcu_sup->srcu_size_state);
1234 if (ss_state < SRCU_SIZE_WAIT_CALL)
1235 sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
1236 else
1237 sdp = raw_cpu_ptr(ssp->sda);
1238 spin_lock_irqsave_sdp_contention(sdp, &flags);
1239 if (rhp)
1240 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
1241 /*
1242 * It's crucial to capture the snapshot 's' for acceleration before
1243 * reading the current gp_seq that is used for advancing. This is
1244 * essential because if the acceleration snapshot is taken after a
1245 * failed advancement attempt, there's a risk that a grace period may
1246 * conclude and a new one may start in the interim. If the snapshot is
1247 * captured after this sequence of events, the acceleration snapshot 's'
1248 * could be excessively advanced, leading to acceleration failure.
1249 * In such a scenario, an 'acceleration leak' can occur, where new
1250 * callbacks become indefinitely stuck in the RCU_NEXT_TAIL segment.
1251 * Also note that encountering advancing failures is a normal
1252 * occurrence when the grace period for RCU_WAIT_TAIL is in progress.
1253 *
1254 * To see this, consider the following events which occur if
1255 * rcu_seq_snap() were to be called after advance:
1256 *
1257 * 1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
1258 * RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
1259 *
1260 * 2) The grace period for RCU_WAIT_TAIL is seen as started but not
1261 * completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
1262 *
1263 * 3) This value is passed to rcu_segcblist_advance() which can't move
1264 * any segment forward and fails.
1265 *
1266 * 4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
1267 * But then the call to rcu_seq_snap() observes the grace period for the
1268 * RCU_WAIT_TAIL segment as completed and the subsequent one for the
1269 * RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1)
1270 * so it returns a snapshot of the next grace period, which is X + 12.
1271 *
1272 * 5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
1273 * freshly enqueued callback in RCU_NEXT_TAIL can't move to
1274 * RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
1275 * period (gp_num = X + 8). So acceleration fails.
1276 */
1277 s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
1278 if (rhp) {
1279 rcu_segcblist_advance(&sdp->srcu_cblist,
1280 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
1281 /*
1282 * Acceleration can never fail because the base current gp_seq
1283 * used for acceleration is <= the value of gp_seq used for
1284 * advancing. This means that RCU_NEXT_TAIL segment will
1285 * always be able to be emptied by the acceleration into the
1286 * RCU_NEXT_READY_TAIL or RCU_WAIT_TAIL segments.
1287 */
1288 WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s));
1289 }
1290 if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
1291 sdp->srcu_gp_seq_needed = s;
1292 needgp = true;
1293 }
1294 if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
1295 sdp->srcu_gp_seq_needed_exp = s;
1296 needexp = true;
1297 }
1298 spin_unlock_irqrestore_rcu_node(sdp, flags);
1299
1300 /* Ensure that snp node tree is fully initialized before traversing it */
1301 if (ss_state < SRCU_SIZE_WAIT_BARRIER)
1302 sdp_mynode = NULL;
1303 else
1304 sdp_mynode = sdp->mynode;
1305
1306 if (needgp)
1307 srcu_funnel_gp_start(ssp, sdp, s, do_norm);
1308 else if (needexp)
1309 srcu_funnel_exp_start(ssp, sdp_mynode, s);
1310 __srcu_read_unlock_nmisafe(ssp, idx);
1311 return s;
1312 }
1313
1314 /*
1315 * Enqueue an SRCU callback on the srcu_data structure associated with
1316 * the current CPU and the specified srcu_struct structure, initiating
1317 * grace-period processing if it is not already running.
1318 *
1319 * Note that all CPUs must agree that the grace period extended beyond
1320 * all pre-existing SRCU read-side critical section. On systems with
1321 * more than one CPU, this means that when "func()" is invoked, each CPU
1322 * is guaranteed to have executed a full memory barrier since the end of
1323 * its last corresponding SRCU read-side critical section whose beginning
1324 * preceded the call to call_srcu(). It also means that each CPU executing
1325 * an SRCU read-side critical section that continues beyond the start of
1326 * "func()" must have executed a memory barrier after the call_srcu()
1327 * but before the beginning of that SRCU read-side critical section.
1328 * Note that these guarantees include CPUs that are offline, idle, or
1329 * executing in user mode, as well as CPUs that are executing in the kernel.
1330 *
1331 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
1332 * resulting SRCU callback function "func()", then both CPU A and CPU
1333 * B are guaranteed to execute a full memory barrier during the time
1334 * interval between the call to call_srcu() and the invocation of "func()".
1335 * This guarantee applies even if CPU A and CPU B are the same CPU (but
1336 * again only if the system has more than one CPU).
1337 *
1338 * Of course, these guarantees apply only for invocations of call_srcu(),
1339 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
1340 * srcu_struct structure.
1341 */
__call_srcu(struct srcu_struct * ssp,struct rcu_head * rhp,rcu_callback_t func,bool do_norm)1342 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1343 rcu_callback_t func, bool do_norm)
1344 {
1345 if (debug_rcu_head_queue(rhp)) {
1346 /* Probable double call_srcu(), so leak the callback. */
1347 WRITE_ONCE(rhp->func, srcu_leak_callback);
1348 WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
1349 return;
1350 }
1351 rhp->func = func;
1352 (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
1353 }
1354
1355 /**
1356 * call_srcu() - Queue a callback for invocation after an SRCU grace period
1357 * @ssp: srcu_struct in queue the callback
1358 * @rhp: structure to be used for queueing the SRCU callback.
1359 * @func: function to be invoked after the SRCU grace period
1360 *
1361 * The callback function will be invoked some time after a full SRCU
1362 * grace period elapses, in other words after all pre-existing SRCU
1363 * read-side critical sections have completed. However, the callback
1364 * function might well execute concurrently with other SRCU read-side
1365 * critical sections that started after call_srcu() was invoked. SRCU
1366 * read-side critical sections are delimited by srcu_read_lock() and
1367 * srcu_read_unlock(), and may be nested.
1368 *
1369 * The callback will be invoked from process context, but must nevertheless
1370 * be fast and must not block.
1371 */
call_srcu(struct srcu_struct * ssp,struct rcu_head * rhp,rcu_callback_t func)1372 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1373 rcu_callback_t func)
1374 {
1375 __call_srcu(ssp, rhp, func, true);
1376 }
1377 EXPORT_SYMBOL_GPL(call_srcu);
1378
1379 /*
1380 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
1381 */
__synchronize_srcu(struct srcu_struct * ssp,bool do_norm)1382 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
1383 {
1384 struct rcu_synchronize rcu;
1385
1386 srcu_lock_sync(&ssp->dep_map);
1387
1388 RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
1389 lock_is_held(&rcu_bh_lock_map) ||
1390 lock_is_held(&rcu_lock_map) ||
1391 lock_is_held(&rcu_sched_lock_map),
1392 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
1393
1394 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
1395 return;
1396 might_sleep();
1397 check_init_srcu_struct(ssp);
1398 init_completion(&rcu.completion);
1399 init_rcu_head_on_stack(&rcu.head);
1400 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
1401 wait_for_completion(&rcu.completion);
1402 destroy_rcu_head_on_stack(&rcu.head);
1403
1404 /*
1405 * Make sure that later code is ordered after the SRCU grace
1406 * period. This pairs with the spin_lock_irq_rcu_node()
1407 * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
1408 * because the current CPU might have been totally uninvolved with
1409 * (and thus unordered against) that grace period.
1410 */
1411 smp_mb();
1412 }
1413
1414 /**
1415 * synchronize_srcu_expedited - Brute-force SRCU grace period
1416 * @ssp: srcu_struct with which to synchronize.
1417 *
1418 * Wait for an SRCU grace period to elapse, but be more aggressive about
1419 * spinning rather than blocking when waiting.
1420 *
1421 * Note that synchronize_srcu_expedited() has the same deadlock and
1422 * memory-ordering properties as does synchronize_srcu().
1423 */
synchronize_srcu_expedited(struct srcu_struct * ssp)1424 void synchronize_srcu_expedited(struct srcu_struct *ssp)
1425 {
1426 __synchronize_srcu(ssp, rcu_gp_is_normal());
1427 }
1428 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
1429
1430 /**
1431 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
1432 * @ssp: srcu_struct with which to synchronize.
1433 *
1434 * Wait for the count to drain to zero of both indexes. To avoid the
1435 * possible starvation of synchronize_srcu(), it waits for the count of
1436 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
1437 * and then flip the srcu_idx and wait for the count of the other index.
1438 *
1439 * Can block; must be called from process context.
1440 *
1441 * Note that it is illegal to call synchronize_srcu() from the corresponding
1442 * SRCU read-side critical section; doing so will result in deadlock.
1443 * However, it is perfectly legal to call synchronize_srcu() on one
1444 * srcu_struct from some other srcu_struct's read-side critical section,
1445 * as long as the resulting graph of srcu_structs is acyclic.
1446 *
1447 * There are memory-ordering constraints implied by synchronize_srcu().
1448 * On systems with more than one CPU, when synchronize_srcu() returns,
1449 * each CPU is guaranteed to have executed a full memory barrier since
1450 * the end of its last corresponding SRCU read-side critical section
1451 * whose beginning preceded the call to synchronize_srcu(). In addition,
1452 * each CPU having an SRCU read-side critical section that extends beyond
1453 * the return from synchronize_srcu() is guaranteed to have executed a
1454 * full memory barrier after the beginning of synchronize_srcu() and before
1455 * the beginning of that SRCU read-side critical section. Note that these
1456 * guarantees include CPUs that are offline, idle, or executing in user mode,
1457 * as well as CPUs that are executing in the kernel.
1458 *
1459 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
1460 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
1461 * to have executed a full memory barrier during the execution of
1462 * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
1463 * are the same CPU, but again only if the system has more than one CPU.
1464 *
1465 * Of course, these memory-ordering guarantees apply only when
1466 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
1467 * passed the same srcu_struct structure.
1468 *
1469 * Implementation of these memory-ordering guarantees is similar to
1470 * that of synchronize_rcu().
1471 *
1472 * If SRCU is likely idle, expedite the first request. This semantic
1473 * was provided by Classic SRCU, and is relied upon by its users, so TREE
1474 * SRCU must also provide it. Note that detecting idleness is heuristic
1475 * and subject to both false positives and negatives.
1476 */
synchronize_srcu(struct srcu_struct * ssp)1477 void synchronize_srcu(struct srcu_struct *ssp)
1478 {
1479 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1480 synchronize_srcu_expedited(ssp);
1481 else
1482 __synchronize_srcu(ssp, true);
1483 }
1484 EXPORT_SYMBOL_GPL(synchronize_srcu);
1485
1486 /**
1487 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
1488 * @ssp: srcu_struct to provide cookie for.
1489 *
1490 * This function returns a cookie that can be passed to
1491 * poll_state_synchronize_srcu(), which will return true if a full grace
1492 * period has elapsed in the meantime. It is the caller's responsibility
1493 * to make sure that grace period happens, for example, by invoking
1494 * call_srcu() after return from get_state_synchronize_srcu().
1495 */
get_state_synchronize_srcu(struct srcu_struct * ssp)1496 unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
1497 {
1498 // Any prior manipulation of SRCU-protected data must happen
1499 // before the load from ->srcu_gp_seq.
1500 smp_mb();
1501 return rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
1502 }
1503 EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
1504
1505 /**
1506 * start_poll_synchronize_srcu - Provide cookie and start grace period
1507 * @ssp: srcu_struct to provide cookie for.
1508 *
1509 * This function returns a cookie that can be passed to
1510 * poll_state_synchronize_srcu(), which will return true if a full grace
1511 * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(),
1512 * this function also ensures that any needed SRCU grace period will be
1513 * started. This convenience does come at a cost in terms of CPU overhead.
1514 */
start_poll_synchronize_srcu(struct srcu_struct * ssp)1515 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
1516 {
1517 return srcu_gp_start_if_needed(ssp, NULL, true);
1518 }
1519 EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
1520
1521 /**
1522 * poll_state_synchronize_srcu - Has cookie's grace period ended?
1523 * @ssp: srcu_struct to provide cookie for.
1524 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
1525 *
1526 * This function takes the cookie that was returned from either
1527 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
1528 * returns @true if an SRCU grace period elapsed since the time that the
1529 * cookie was created.
1530 *
1531 * Because cookies are finite in size, wrapping/overflow is possible.
1532 * This is more pronounced on 32-bit systems where cookies are 32 bits,
1533 * where in theory wrapping could happen in about 14 hours assuming
1534 * 25-microsecond expedited SRCU grace periods. However, a more likely
1535 * overflow lower bound is on the order of 24 days in the case of
1536 * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit
1537 * system requires geologic timespans, as in more than seven million years
1538 * even for expedited SRCU grace periods.
1539 *
1540 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
1541 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU. This uses
1542 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
1543 * few minutes. If this proves to be a problem, this counter will be
1544 * expanded to the same size as for Tree SRCU.
1545 */
poll_state_synchronize_srcu(struct srcu_struct * ssp,unsigned long cookie)1546 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
1547 {
1548 if (cookie != SRCU_GET_STATE_COMPLETED &&
1549 !rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
1550 return false;
1551 // Ensure that the end of the SRCU grace period happens before
1552 // any subsequent code that the caller might execute.
1553 smp_mb(); // ^^^
1554 return true;
1555 }
1556 EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
1557
1558 /*
1559 * Callback function for srcu_barrier() use.
1560 */
srcu_barrier_cb(struct rcu_head * rhp)1561 static void srcu_barrier_cb(struct rcu_head *rhp)
1562 {
1563 struct srcu_data *sdp;
1564 struct srcu_struct *ssp;
1565
1566 rhp->next = rhp; // Mark the callback as having been invoked.
1567 sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1568 ssp = sdp->ssp;
1569 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1570 complete(&ssp->srcu_sup->srcu_barrier_completion);
1571 }
1572
1573 /*
1574 * Enqueue an srcu_barrier() callback on the specified srcu_data
1575 * structure's ->cblist. but only if that ->cblist already has at least one
1576 * callback enqueued. Note that if a CPU already has callbacks enqueue,
1577 * it must have already registered the need for a future grace period,
1578 * so all we need do is enqueue a callback that will use the same grace
1579 * period as the last callback already in the queue.
1580 */
srcu_barrier_one_cpu(struct srcu_struct * ssp,struct srcu_data * sdp)1581 static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1582 {
1583 spin_lock_irq_rcu_node(sdp);
1584 atomic_inc(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1585 sdp->srcu_barrier_head.func = srcu_barrier_cb;
1586 debug_rcu_head_queue(&sdp->srcu_barrier_head);
1587 if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1588 &sdp->srcu_barrier_head)) {
1589 debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1590 atomic_dec(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1591 }
1592 spin_unlock_irq_rcu_node(sdp);
1593 }
1594
1595 /**
1596 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1597 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1598 */
srcu_barrier(struct srcu_struct * ssp)1599 void srcu_barrier(struct srcu_struct *ssp)
1600 {
1601 int cpu;
1602 int idx;
1603 unsigned long s = rcu_seq_snap(&ssp->srcu_sup->srcu_barrier_seq);
1604
1605 check_init_srcu_struct(ssp);
1606 mutex_lock(&ssp->srcu_sup->srcu_barrier_mutex);
1607 if (rcu_seq_done(&ssp->srcu_sup->srcu_barrier_seq, s)) {
1608 smp_mb(); /* Force ordering following return. */
1609 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
1610 return; /* Someone else did our work for us. */
1611 }
1612 rcu_seq_start(&ssp->srcu_sup->srcu_barrier_seq);
1613 init_completion(&ssp->srcu_sup->srcu_barrier_completion);
1614
1615 /* Initial count prevents reaching zero until all CBs are posted. */
1616 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 1);
1617
1618 idx = __srcu_read_lock_nmisafe(ssp);
1619 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1620 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
1621 else
1622 for_each_possible_cpu(cpu)
1623 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1624 __srcu_read_unlock_nmisafe(ssp, idx);
1625
1626 /* Remove the initial count, at which point reaching zero can happen. */
1627 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1628 complete(&ssp->srcu_sup->srcu_barrier_completion);
1629 wait_for_completion(&ssp->srcu_sup->srcu_barrier_completion);
1630
1631 rcu_seq_end(&ssp->srcu_sup->srcu_barrier_seq);
1632 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
1633 }
1634 EXPORT_SYMBOL_GPL(srcu_barrier);
1635
1636 /**
1637 * srcu_batches_completed - return batches completed.
1638 * @ssp: srcu_struct on which to report batch completion.
1639 *
1640 * Report the number of batches, correlated with, but not necessarily
1641 * precisely the same as, the number of grace periods that have elapsed.
1642 */
srcu_batches_completed(struct srcu_struct * ssp)1643 unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1644 {
1645 return READ_ONCE(ssp->srcu_idx);
1646 }
1647 EXPORT_SYMBOL_GPL(srcu_batches_completed);
1648
1649 /*
1650 * Core SRCU state machine. Push state bits of ->srcu_gp_seq
1651 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1652 * completed in that state.
1653 */
srcu_advance_state(struct srcu_struct * ssp)1654 static void srcu_advance_state(struct srcu_struct *ssp)
1655 {
1656 int idx;
1657
1658 mutex_lock(&ssp->srcu_sup->srcu_gp_mutex);
1659
1660 /*
1661 * Because readers might be delayed for an extended period after
1662 * fetching ->srcu_idx for their index, at any point in time there
1663 * might well be readers using both idx=0 and idx=1. We therefore
1664 * need to wait for readers to clear from both index values before
1665 * invoking a callback.
1666 *
1667 * The load-acquire ensures that we see the accesses performed
1668 * by the prior grace period.
1669 */
1670 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq)); /* ^^^ */
1671 if (idx == SRCU_STATE_IDLE) {
1672 spin_lock_irq_rcu_node(ssp->srcu_sup);
1673 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
1674 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq));
1675 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1676 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1677 return;
1678 }
1679 idx = rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq));
1680 if (idx == SRCU_STATE_IDLE)
1681 srcu_gp_start(ssp);
1682 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1683 if (idx != SRCU_STATE_IDLE) {
1684 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1685 return; /* Someone else started the grace period. */
1686 }
1687 }
1688
1689 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1690 idx = 1 ^ (ssp->srcu_idx & 1);
1691 if (!try_check_zero(ssp, idx, 1)) {
1692 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1693 return; /* readers present, retry later. */
1694 }
1695 srcu_flip(ssp);
1696 spin_lock_irq_rcu_node(ssp->srcu_sup);
1697 rcu_seq_set_state(&ssp->srcu_sup->srcu_gp_seq, SRCU_STATE_SCAN2);
1698 ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1699 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1700 }
1701
1702 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1703
1704 /*
1705 * SRCU read-side critical sections are normally short,
1706 * so check at least twice in quick succession after a flip.
1707 */
1708 idx = 1 ^ (ssp->srcu_idx & 1);
1709 if (!try_check_zero(ssp, idx, 2)) {
1710 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1711 return; /* readers present, retry later. */
1712 }
1713 ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1714 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
1715 }
1716 }
1717
1718 /*
1719 * Invoke a limited number of SRCU callbacks that have passed through
1720 * their grace period. If there are more to do, SRCU will reschedule
1721 * the workqueue. Note that needed memory barriers have been executed
1722 * in this task's context by srcu_readers_active_idx_check().
1723 */
srcu_invoke_callbacks(struct work_struct * work)1724 static void srcu_invoke_callbacks(struct work_struct *work)
1725 {
1726 long len;
1727 bool more;
1728 struct rcu_cblist ready_cbs;
1729 struct rcu_head *rhp;
1730 struct srcu_data *sdp;
1731 struct srcu_struct *ssp;
1732
1733 sdp = container_of(work, struct srcu_data, work);
1734
1735 ssp = sdp->ssp;
1736 rcu_cblist_init(&ready_cbs);
1737 spin_lock_irq_rcu_node(sdp);
1738 WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
1739 rcu_segcblist_advance(&sdp->srcu_cblist,
1740 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
1741 /*
1742 * Although this function is theoretically re-entrant, concurrent
1743 * callbacks invocation is disallowed to avoid executing an SRCU barrier
1744 * too early.
1745 */
1746 if (sdp->srcu_cblist_invoking ||
1747 !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1748 spin_unlock_irq_rcu_node(sdp);
1749 return; /* Someone else on the job or nothing to do. */
1750 }
1751
1752 /* We are on the job! Extract and invoke ready callbacks. */
1753 sdp->srcu_cblist_invoking = true;
1754 rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1755 len = ready_cbs.len;
1756 spin_unlock_irq_rcu_node(sdp);
1757 rhp = rcu_cblist_dequeue(&ready_cbs);
1758 for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1759 debug_rcu_head_unqueue(rhp);
1760 debug_rcu_head_callback(rhp);
1761 local_bh_disable();
1762 rhp->func(rhp);
1763 local_bh_enable();
1764 }
1765 WARN_ON_ONCE(ready_cbs.len);
1766
1767 /*
1768 * Update counts, accelerate new callbacks, and if needed,
1769 * schedule another round of callback invocation.
1770 */
1771 spin_lock_irq_rcu_node(sdp);
1772 rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
1773 sdp->srcu_cblist_invoking = false;
1774 more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1775 spin_unlock_irq_rcu_node(sdp);
1776 /* An SRCU barrier or callbacks from previous nesting work pending */
1777 if (more)
1778 srcu_schedule_cbs_sdp(sdp, 0);
1779 }
1780
1781 /*
1782 * Finished one round of SRCU grace period. Start another if there are
1783 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1784 */
srcu_reschedule(struct srcu_struct * ssp,unsigned long delay)1785 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1786 {
1787 bool pushgp = true;
1788
1789 spin_lock_irq_rcu_node(ssp->srcu_sup);
1790 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
1791 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq))) {
1792 /* All requests fulfilled, time to go idle. */
1793 pushgp = false;
1794 }
1795 } else if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)) {
1796 /* Outstanding request and no GP. Start one. */
1797 srcu_gp_start(ssp);
1798 }
1799 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1800
1801 if (pushgp)
1802 queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay);
1803 }
1804
1805 /*
1806 * This is the work-queue function that handles SRCU grace periods.
1807 */
process_srcu(struct work_struct * work)1808 static void process_srcu(struct work_struct *work)
1809 {
1810 unsigned long curdelay;
1811 unsigned long j;
1812 struct srcu_struct *ssp;
1813 struct srcu_usage *sup;
1814
1815 sup = container_of(work, struct srcu_usage, work.work);
1816 ssp = sup->srcu_ssp;
1817
1818 srcu_advance_state(ssp);
1819 curdelay = srcu_get_delay(ssp);
1820 if (curdelay) {
1821 WRITE_ONCE(sup->reschedule_count, 0);
1822 } else {
1823 j = jiffies;
1824 if (READ_ONCE(sup->reschedule_jiffies) == j) {
1825 ASSERT_EXCLUSIVE_WRITER(sup->reschedule_count);
1826 WRITE_ONCE(sup->reschedule_count, READ_ONCE(sup->reschedule_count) + 1);
1827 if (READ_ONCE(sup->reschedule_count) > srcu_max_nodelay)
1828 curdelay = 1;
1829 } else {
1830 WRITE_ONCE(sup->reschedule_count, 1);
1831 WRITE_ONCE(sup->reschedule_jiffies, j);
1832 }
1833 }
1834 srcu_reschedule(ssp, curdelay);
1835 }
1836
srcutorture_get_gp_data(struct srcu_struct * ssp,int * flags,unsigned long * gp_seq)1837 void srcutorture_get_gp_data(struct srcu_struct *ssp, int *flags,
1838 unsigned long *gp_seq)
1839 {
1840 *flags = 0;
1841 *gp_seq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
1842 }
1843 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1844
1845 static const char * const srcu_size_state_name[] = {
1846 "SRCU_SIZE_SMALL",
1847 "SRCU_SIZE_ALLOC",
1848 "SRCU_SIZE_WAIT_BARRIER",
1849 "SRCU_SIZE_WAIT_CALL",
1850 "SRCU_SIZE_WAIT_CBS1",
1851 "SRCU_SIZE_WAIT_CBS2",
1852 "SRCU_SIZE_WAIT_CBS3",
1853 "SRCU_SIZE_WAIT_CBS4",
1854 "SRCU_SIZE_BIG",
1855 "SRCU_SIZE_???",
1856 };
1857
srcu_torture_stats_print(struct srcu_struct * ssp,char * tt,char * tf)1858 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1859 {
1860 int cpu;
1861 int idx;
1862 unsigned long s0 = 0, s1 = 0;
1863 int ss_state = READ_ONCE(ssp->srcu_sup->srcu_size_state);
1864 int ss_state_idx = ss_state;
1865
1866 idx = ssp->srcu_idx & 0x1;
1867 if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
1868 ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
1869 pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
1870 tt, tf, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq), ss_state,
1871 srcu_size_state_name[ss_state_idx]);
1872 if (!ssp->sda) {
1873 // Called after cleanup_srcu_struct(), perhaps.
1874 pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
1875 } else {
1876 pr_cont(" per-CPU(idx=%d):", idx);
1877 for_each_possible_cpu(cpu) {
1878 unsigned long l0, l1;
1879 unsigned long u0, u1;
1880 long c0, c1;
1881 struct srcu_data *sdp;
1882
1883 sdp = per_cpu_ptr(ssp->sda, cpu);
1884 u0 = data_race(atomic_long_read(&sdp->srcu_unlock_count[!idx]));
1885 u1 = data_race(atomic_long_read(&sdp->srcu_unlock_count[idx]));
1886
1887 /*
1888 * Make sure that a lock is always counted if the corresponding
1889 * unlock is counted.
1890 */
1891 smp_rmb();
1892
1893 l0 = data_race(atomic_long_read(&sdp->srcu_lock_count[!idx]));
1894 l1 = data_race(atomic_long_read(&sdp->srcu_lock_count[idx]));
1895
1896 c0 = l0 - u0;
1897 c1 = l1 - u1;
1898 pr_cont(" %d(%ld,%ld %c)",
1899 cpu, c0, c1,
1900 "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1901 s0 += c0;
1902 s1 += c1;
1903 }
1904 pr_cont(" T(%ld,%ld)\n", s0, s1);
1905 }
1906 if (SRCU_SIZING_IS_TORTURE())
1907 srcu_transition_to_big(ssp);
1908 }
1909 EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1910
srcu_bootup_announce(void)1911 static int __init srcu_bootup_announce(void)
1912 {
1913 pr_info("Hierarchical SRCU implementation.\n");
1914 if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1915 pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1916 if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
1917 pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
1918 if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
1919 pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
1920 pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
1921 return 0;
1922 }
1923 early_initcall(srcu_bootup_announce);
1924
srcu_init(void)1925 void __init srcu_init(void)
1926 {
1927 struct srcu_usage *sup;
1928
1929 /* Decide on srcu_struct-size strategy. */
1930 if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
1931 if (nr_cpu_ids >= big_cpu_lim) {
1932 convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
1933 pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
1934 } else {
1935 convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
1936 pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
1937 }
1938 }
1939
1940 /*
1941 * Once that is set, call_srcu() can follow the normal path and
1942 * queue delayed work. This must follow RCU workqueues creation
1943 * and timers initialization.
1944 */
1945 srcu_init_done = true;
1946 while (!list_empty(&srcu_boot_list)) {
1947 sup = list_first_entry(&srcu_boot_list, struct srcu_usage,
1948 work.work.entry);
1949 list_del_init(&sup->work.work.entry);
1950 if (SRCU_SIZING_IS(SRCU_SIZING_INIT) &&
1951 sup->srcu_size_state == SRCU_SIZE_SMALL)
1952 sup->srcu_size_state = SRCU_SIZE_ALLOC;
1953 queue_work(rcu_gp_wq, &sup->work.work);
1954 }
1955 }
1956
1957 #ifdef CONFIG_MODULES
1958
1959 /* Initialize any global-scope srcu_struct structures used by this module. */
srcu_module_coming(struct module * mod)1960 static int srcu_module_coming(struct module *mod)
1961 {
1962 int i;
1963 struct srcu_struct *ssp;
1964 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1965
1966 for (i = 0; i < mod->num_srcu_structs; i++) {
1967 ssp = *(sspp++);
1968 ssp->sda = alloc_percpu(struct srcu_data);
1969 if (WARN_ON_ONCE(!ssp->sda))
1970 return -ENOMEM;
1971 }
1972 return 0;
1973 }
1974
1975 /* Clean up any global-scope srcu_struct structures used by this module. */
srcu_module_going(struct module * mod)1976 static void srcu_module_going(struct module *mod)
1977 {
1978 int i;
1979 struct srcu_struct *ssp;
1980 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1981
1982 for (i = 0; i < mod->num_srcu_structs; i++) {
1983 ssp = *(sspp++);
1984 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed)) &&
1985 !WARN_ON_ONCE(!ssp->srcu_sup->sda_is_static))
1986 cleanup_srcu_struct(ssp);
1987 if (!WARN_ON(srcu_readers_active(ssp)))
1988 free_percpu(ssp->sda);
1989 }
1990 }
1991
1992 /* Handle one module, either coming or going. */
srcu_module_notify(struct notifier_block * self,unsigned long val,void * data)1993 static int srcu_module_notify(struct notifier_block *self,
1994 unsigned long val, void *data)
1995 {
1996 struct module *mod = data;
1997 int ret = 0;
1998
1999 switch (val) {
2000 case MODULE_STATE_COMING:
2001 ret = srcu_module_coming(mod);
2002 break;
2003 case MODULE_STATE_GOING:
2004 srcu_module_going(mod);
2005 break;
2006 default:
2007 break;
2008 }
2009 return ret;
2010 }
2011
2012 static struct notifier_block srcu_module_nb = {
2013 .notifier_call = srcu_module_notify,
2014 .priority = 0,
2015 };
2016
init_srcu_module_notifier(void)2017 static __init int init_srcu_module_notifier(void)
2018 {
2019 int ret;
2020
2021 ret = register_module_notifier(&srcu_module_nb);
2022 if (ret)
2023 pr_warn("Failed to register srcu module notifier\n");
2024 return ret;
2025 }
2026 late_initcall(init_srcu_module_notifier);
2027
2028 #endif /* #ifdef CONFIG_MODULES */
2029