xref: /linux/kernel/rcu/srcutree.c (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Sleepable Read-Copy Update mechanism for mutual exclusion.
4  *
5  * Copyright (C) IBM Corporation, 2006
6  * Copyright (C) Fujitsu, 2012
7  *
8  * Authors: Paul McKenney <paulmck@linux.ibm.com>
9  *	   Lai Jiangshan <laijs@cn.fujitsu.com>
10  *
11  * For detailed explanation of Read-Copy Update mechanism see -
12  *		Documentation/RCU/ *.txt
13  *
14  */
15 
16 #define pr_fmt(fmt) "rcu: " fmt
17 
18 #include <linux/export.h>
19 #include <linux/mutex.h>
20 #include <linux/percpu.h>
21 #include <linux/preempt.h>
22 #include <linux/rcupdate_wait.h>
23 #include <linux/sched.h>
24 #include <linux/smp.h>
25 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/srcu.h>
29 
30 #include "rcu.h"
31 #include "rcu_segcblist.h"
32 
33 /* Holdoff in nanoseconds for auto-expediting. */
34 #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
35 static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
36 module_param(exp_holdoff, ulong, 0444);
37 
38 /* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
39 static ulong counter_wrap_check = (ULONG_MAX >> 2);
40 module_param(counter_wrap_check, ulong, 0444);
41 
42 /*
43  * Control conversion to SRCU_SIZE_BIG:
44  *    0: Don't convert at all.
45  *    1: Convert at init_srcu_struct() time.
46  *    2: Convert when rcutorture invokes srcu_torture_stats_print().
47  *    3: Decide at boot time based on system shape (default).
48  * 0x1x: Convert when excessive contention encountered.
49  */
50 #define SRCU_SIZING_NONE	0
51 #define SRCU_SIZING_INIT	1
52 #define SRCU_SIZING_TORTURE	2
53 #define SRCU_SIZING_AUTO	3
54 #define SRCU_SIZING_CONTEND	0x10
55 #define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
56 #define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
57 #define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
58 #define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
59 #define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
60 static int convert_to_big = SRCU_SIZING_AUTO;
61 module_param(convert_to_big, int, 0444);
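
/*
 * For example (a hypothetical but valid setting), booting with
 * "srcutree.convert_to_big=0x13" combines SRCU_SIZING_AUTO with
 * SRCU_SIZING_CONTEND, so the usual boot-time decision is made but
 * excessive ->lock contention can still force a later transition to
 * SRCU_SIZE_BIG.
 */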
62 
63 /* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
64 static int big_cpu_lim __read_mostly = 128;
65 module_param(big_cpu_lim, int, 0444);
66 
67 /* Contention events per jiffy to initiate transition to big. */
68 static int small_contention_lim __read_mostly = 100;
69 module_param(small_contention_lim, int, 0444);
70 
71 /* Early-boot callback-management, so early that no lock is required! */
72 static LIST_HEAD(srcu_boot_list);
73 static bool __read_mostly srcu_init_done;
74 
75 static void srcu_invoke_callbacks(struct work_struct *work);
76 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
77 static void process_srcu(struct work_struct *work);
78 static void srcu_delay_timer(struct timer_list *t);
79 
80 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
81 #define spin_lock_rcu_node(p)							\
82 do {										\
83 	spin_lock(&ACCESS_PRIVATE(p, lock));					\
84 	smp_mb__after_unlock_lock();						\
85 } while (0)
86 
87 #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
88 
89 #define spin_lock_irq_rcu_node(p)						\
90 do {										\
91 	spin_lock_irq(&ACCESS_PRIVATE(p, lock));				\
92 	smp_mb__after_unlock_lock();						\
93 } while (0)
94 
95 #define spin_unlock_irq_rcu_node(p)						\
96 	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
97 
98 #define spin_lock_irqsave_rcu_node(p, flags)					\
99 do {										\
100 	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);			\
101 	smp_mb__after_unlock_lock();						\
102 } while (0)
103 
104 #define spin_trylock_irqsave_rcu_node(p, flags)					\
105 ({										\
106 	bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
107 										\
108 	if (___locked)								\
109 		smp_mb__after_unlock_lock();					\
110 	___locked;								\
111 })
112 
113 #define spin_unlock_irqrestore_rcu_node(p, flags)				\
114 	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
115 
116 /*
117  * Initialize SRCU per-CPU data.  Note that statically allocated
118  * srcu_struct structures might already have srcu_read_lock() and
119  * srcu_read_unlock() running against them.  This is why this function
120  * leaves ->srcu_lock_count[] and ->srcu_unlock_count[] untouched.
121  */
122 static void init_srcu_struct_data(struct srcu_struct *ssp)
123 {
124 	int cpu;
125 	struct srcu_data *sdp;
126 
127 	/*
128 	 * Initialize the per-CPU srcu_data array, which feeds into the
129 	 * leaves of the srcu_node tree.
130 	 */
131 	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
132 		     ARRAY_SIZE(sdp->srcu_unlock_count));
133 	for_each_possible_cpu(cpu) {
134 		sdp = per_cpu_ptr(ssp->sda, cpu);
135 		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
136 		rcu_segcblist_init(&sdp->srcu_cblist);
137 		sdp->srcu_cblist_invoking = false;
138 		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
139 		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
140 		sdp->mynode = NULL;
141 		sdp->cpu = cpu;
142 		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
143 		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
144 		sdp->ssp = ssp;
145 	}
146 }
147 
148 /* Invalid seq state, used during snp node initialization */
149 #define SRCU_SNP_INIT_SEQ		0x2
150 
151 /*
152  * Check whether the sequence number corresponding to the snp node
153  * is invalid.
154  */
155 static inline bool srcu_invl_snp_seq(unsigned long s)
156 {
157 	return rcu_seq_state(s) == SRCU_SNP_INIT_SEQ;
158 }
159 
160 /*
161  * Allocate and initialize the SRCU combining tree.  Returns @true if
162  * allocation succeeded and @false otherwise.
163  */
164 static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
165 {
166 	int cpu;
167 	int i;
168 	int level = 0;
169 	int levelspread[RCU_NUM_LVLS];
170 	struct srcu_data *sdp;
171 	struct srcu_node *snp;
172 	struct srcu_node *snp_first;
173 
174 	/* Initialize geometry if it has not already been initialized. */
175 	rcu_init_geometry();
176 	ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
177 	if (!ssp->node)
178 		return false;
179 
180 	/* Work out the overall tree geometry. */
181 	ssp->level[0] = &ssp->node[0];
182 	for (i = 1; i < rcu_num_lvls; i++)
183 		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
184 	rcu_init_levelspread(levelspread, num_rcu_lvl);
185 
186 	/* Each pass through this loop initializes one srcu_node structure. */
187 	srcu_for_each_node_breadth_first(ssp, snp) {
188 		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
189 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
190 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
191 		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
192 			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
193 			snp->srcu_data_have_cbs[i] = 0;
194 		}
195 		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
196 		snp->grplo = -1;
197 		snp->grphi = -1;
198 		if (snp == &ssp->node[0]) {
199 			/* Root node, special case. */
200 			snp->srcu_parent = NULL;
201 			continue;
202 		}
203 
204 		/* Non-root node. */
205 		if (snp == ssp->level[level + 1])
206 			level++;
207 		snp->srcu_parent = ssp->level[level - 1] +
208 				   (snp - ssp->level[level]) /
209 				   levelspread[level - 1];
210 	}
211 
212 	/*
213 	 * Initialize the per-CPU srcu_data array, which feeds into the
214 	 * leaves of the srcu_node tree.
215 	 */
216 	level = rcu_num_lvls - 1;
217 	snp_first = ssp->level[level];
218 	for_each_possible_cpu(cpu) {
219 		sdp = per_cpu_ptr(ssp->sda, cpu);
220 		sdp->mynode = &snp_first[cpu / levelspread[level]];
221 		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
222 			if (snp->grplo < 0)
223 				snp->grplo = cpu;
224 			snp->grphi = cpu;
225 		}
226 		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
227 	}
228 	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
229 	return true;
230 }
231 
232 /*
233  * Initialize non-compile-time initialized fields, including the
234  * associated srcu_node and srcu_data structures.  The is_static parameter
235  * tells us that ->sda has already been wired up to srcu_data.
236  */
237 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
238 {
239 	ssp->srcu_size_state = SRCU_SIZE_SMALL;
240 	ssp->node = NULL;
241 	mutex_init(&ssp->srcu_cb_mutex);
242 	mutex_init(&ssp->srcu_gp_mutex);
243 	ssp->srcu_idx = 0;
244 	ssp->srcu_gp_seq = 0;
245 	ssp->srcu_barrier_seq = 0;
246 	mutex_init(&ssp->srcu_barrier_mutex);
247 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
248 	INIT_DELAYED_WORK(&ssp->work, process_srcu);
249 	ssp->sda_is_static = is_static;
250 	if (!is_static)
251 		ssp->sda = alloc_percpu(struct srcu_data);
252 	if (!ssp->sda)
253 		return -ENOMEM;
254 	init_srcu_struct_data(ssp);
255 	ssp->srcu_gp_seq_needed_exp = 0;
256 	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
257 	if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
258 		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
259 			if (!ssp->sda_is_static) {
260 				free_percpu(ssp->sda);
261 				ssp->sda = NULL;
262 				return -ENOMEM;
263 			}
264 		} else {
265 			WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
266 		}
267 	}
268 	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
269 	return 0;
270 }
271 
272 #ifdef CONFIG_DEBUG_LOCK_ALLOC
273 
274 int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
275 		       struct lock_class_key *key)
276 {
277 	/* Don't re-initialize a lock while it is held. */
278 	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
279 	lockdep_init_map(&ssp->dep_map, name, key, 0);
280 	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
281 	return init_srcu_struct_fields(ssp, false);
282 }
283 EXPORT_SYMBOL_GPL(__init_srcu_struct);
284 
285 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
286 
287 /**
288  * init_srcu_struct - initialize a sleep-RCU structure
289  * @ssp: structure to initialize.
290  *
291  * Must invoke this on a given srcu_struct before passing that srcu_struct
292  * to any other function.  Each srcu_struct represents a separate domain
293  * of SRCU protection.
294  */
295 int init_srcu_struct(struct srcu_struct *ssp)
296 {
297 	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
298 	return init_srcu_struct_fields(ssp, false);
299 }
300 EXPORT_SYMBOL_GPL(init_srcu_struct);
301 
302 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
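
/*
 * A minimal usage sketch with hypothetical names: a dynamically initialized
 * domain pairs init_srcu_struct() with cleanup_srcu_struct(), whereas a
 * statically allocated domain can instead be created with DEFINE_SRCU()
 * or DEFINE_STATIC_SRCU() and needs neither call:
 *
 *	struct srcu_struct my_srcu;
 *
 *	ret = init_srcu_struct(&my_srcu);
 *	if (ret)
 *		return ret;
 *	...
 *	cleanup_srcu_struct(&my_srcu);
 */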
303 
304 /*
305  * Initiate a transition to SRCU_SIZE_BIG with lock held.
306  */
307 static void __srcu_transition_to_big(struct srcu_struct *ssp)
308 {
309 	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
310 	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_ALLOC);
311 }
312 
313 /*
314  * Initiate an idempotent transition to SRCU_SIZE_BIG.
315  */
316 static void srcu_transition_to_big(struct srcu_struct *ssp)
317 {
318 	unsigned long flags;
319 
320 	/* Double-checked locking on ->srcu_size_state. */
321 	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL)
322 		return;
323 	spin_lock_irqsave_rcu_node(ssp, flags);
324 	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL) {
325 		spin_unlock_irqrestore_rcu_node(ssp, flags);
326 		return;
327 	}
328 	__srcu_transition_to_big(ssp);
329 	spin_unlock_irqrestore_rcu_node(ssp, flags);
330 }
331 
332 /*
333  * Check to see if the just-encountered contention event justifies
334  * a transition to SRCU_SIZE_BIG.
335  */
336 static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
337 {
338 	unsigned long j;
339 
340 	if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_size_state)
341 		return;
342 	j = jiffies;
343 	if (ssp->srcu_size_jiffies != j) {
344 		ssp->srcu_size_jiffies = j;
345 		ssp->srcu_n_lock_retries = 0;
346 	}
347 	if (++ssp->srcu_n_lock_retries <= small_contention_lim)
348 		return;
349 	__srcu_transition_to_big(ssp);
350 }
351 
352 /*
353  * Acquire the specified srcu_data structure's ->lock, but check for
354  * excessive contention, which results in initiation of a transition
355  * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
356  * parameter permits this.
357  */
358 static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
359 {
360 	struct srcu_struct *ssp = sdp->ssp;
361 
362 	if (spin_trylock_irqsave_rcu_node(sdp, *flags))
363 		return;
364 	spin_lock_irqsave_rcu_node(ssp, *flags);
365 	spin_lock_irqsave_check_contention(ssp);
366 	spin_unlock_irqrestore_rcu_node(ssp, *flags);
367 	spin_lock_irqsave_rcu_node(sdp, *flags);
368 }
369 
370 /*
371  * Acquire the specified srcu_struct structure's ->lock, but check for
372  * excessive contention, which results in initiation of a transition
373  * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
374  * parameter permits this.
375  */
376 static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
377 {
378 	if (spin_trylock_irqsave_rcu_node(ssp, *flags))
379 		return;
380 	spin_lock_irqsave_rcu_node(ssp, *flags);
381 	spin_lock_irqsave_check_contention(ssp);
382 }
383 
384 /*
385  * First-use initialization of statically allocated srcu_struct
386  * structure.  Wiring up the combining tree is more than can be
387  * done with compile-time initialization, so this check is added
388  * to each update-side SRCU primitive.  Use ssp->lock, which -is-
389  * compile-time initialized, to resolve races involving multiple
390  * CPUs trying to garner first-use privileges.
391  */
392 static void check_init_srcu_struct(struct srcu_struct *ssp)
393 {
394 	unsigned long flags;
395 
396 	/* The smp_load_acquire() pairs with the smp_store_release(). */
397 	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
398 		return; /* Already initialized. */
399 	spin_lock_irqsave_rcu_node(ssp, flags);
400 	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
401 		spin_unlock_irqrestore_rcu_node(ssp, flags);
402 		return;
403 	}
404 	init_srcu_struct_fields(ssp, true);
405 	spin_unlock_irqrestore_rcu_node(ssp, flags);
406 }
407 
408 /*
409  * Returns approximate total of the readers' ->srcu_lock_count[] values
410  * for the rank of per-CPU counters specified by idx.
411  */
412 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
413 {
414 	int cpu;
415 	unsigned long sum = 0;
416 
417 	for_each_possible_cpu(cpu) {
418 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
419 
420 		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
421 	}
422 	return sum;
423 }
424 
425 /*
426  * Returns approximate total of the readers' ->srcu_unlock_count[] values
427  * for the rank of per-CPU counters specified by idx.
428  */
429 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
430 {
431 	int cpu;
432 	unsigned long sum = 0;
433 
434 	for_each_possible_cpu(cpu) {
435 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
436 
437 		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
438 	}
439 	return sum;
440 }
441 
442 /*
443  * Return true if the number of pre-existing readers is determined to
444  * be zero.
445  */
446 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
447 {
448 	unsigned long unlocks;
449 
450 	unlocks = srcu_readers_unlock_idx(ssp, idx);
451 
452 	/*
453 	 * Make sure that a lock is always counted if the corresponding
454 	 * unlock is counted. Needs to be a smp_mb() as the read side may
455 	 * contain a read from a variable that is written to before the
456 	 * synchronize_srcu() in the write side. In this case smp_mb()s
457 	 * A and B act like the store buffering pattern.
458 	 *
459 	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
460 	 * after the synchronize_srcu() from being executed before the
461 	 * grace period ends.
462 	 */
463 	smp_mb(); /* A */
464 
465 	/*
466 	 * If the locks are the same as the unlocks, then there must have
467 	 * been no readers on this index at some time in between. This does
468 	 * not mean that there are no more readers, as one could have read
469 	 * the current index but not have incremented the lock counter yet.
470 	 *
471 	 * So suppose that the updater is preempted here for so long
472 	 * that more than ULONG_MAX non-nested readers come and go in
473 	 * the meantime.  It turns out that this cannot result in overflow
474 	 * because if a reader modifies its unlock count after we read it
475 	 * above, then that reader's next load of ->srcu_idx is guaranteed
476 	 * to get the new value, which will cause it to operate on the
477 	 * other bank of counters, where it cannot contribute to the
478 	 * overflow of these counters.  This means that there is a maximum
479 	 * of 2*NR_CPUS increments, which cannot overflow given current
480 	 * systems, especially not on 64-bit systems.
481 	 *
482 	 * OK, how about nesting?  This does impose a limit on nesting
483 	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
484 	 * especially on 64-bit systems.
485 	 */
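	/*
	 * As a concrete example (a hypothetical configuration), even
	 * NR_CPUS = 8192 bounds the slippage at 2 * 8192 = 16384
	 * increments, nowhere near overflowing an unsigned long.
	 */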
486 	return srcu_readers_lock_idx(ssp, idx) == unlocks;
487 }
488 
489 /**
490  * srcu_readers_active - returns true if there are readers, and false
491  *                       otherwise
492  * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
493  *
494  * Note that this is not an atomic primitive, and can therefore suffer
495  * severe errors when invoked on an active srcu_struct.  That said, it
496  * can be useful as an error check at cleanup time.
497  */
498 static bool srcu_readers_active(struct srcu_struct *ssp)
499 {
500 	int cpu;
501 	unsigned long sum = 0;
502 
503 	for_each_possible_cpu(cpu) {
504 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
505 
506 		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
507 		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
508 		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
509 		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
510 	}
511 	return sum;
512 }
513 
514 #define SRCU_INTERVAL		1	// Base delay if no expedited GPs pending.
515 #define SRCU_MAX_INTERVAL	10	// Maximum incremental delay from slow readers.
516 #define SRCU_MAX_NODELAY_PHASE	1	// Maximum per-GP-phase consecutive no-delay instances.
517 #define SRCU_MAX_NODELAY	100	// Maximum consecutive no-delay instances.
518 
519 /*
520  * Return the grace-period delay in jiffies: zero if an expedited grace period
521  * is pending, otherwise at least SRCU_INTERVAL, capped at SRCU_MAX_INTERVAL.
522  */
523 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
524 {
525 	unsigned long jbase = SRCU_INTERVAL;
526 
527 	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
528 		jbase = 0;
529 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)))
530 		jbase += jiffies - READ_ONCE(ssp->srcu_gp_start);
531 	if (!jbase) {
532 		WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
533 		if (READ_ONCE(ssp->srcu_n_exp_nodelay) > SRCU_MAX_NODELAY_PHASE)
534 			jbase = 1;
535 	}
536 	return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
537 }
538 
539 /**
540  * cleanup_srcu_struct - deconstruct a sleep-RCU structure
541  * @ssp: structure to clean up.
542  *
543  * Must invoke this after you are finished using a given srcu_struct that
544  * was initialized via init_srcu_struct(), else you leak memory.
545  */
546 void cleanup_srcu_struct(struct srcu_struct *ssp)
547 {
548 	int cpu;
549 
550 	if (WARN_ON(!srcu_get_delay(ssp)))
551 		return; /* Just leak it! */
552 	if (WARN_ON(srcu_readers_active(ssp)))
553 		return; /* Just leak it! */
554 	flush_delayed_work(&ssp->work);
555 	for_each_possible_cpu(cpu) {
556 		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
557 
558 		del_timer_sync(&sdp->delay_work);
559 		flush_work(&sdp->work);
560 		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
561 			return; /* Forgot srcu_barrier(), so just leak it! */
562 	}
563 	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
564 	    WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
565 	    WARN_ON(srcu_readers_active(ssp))) {
566 		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
567 			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
568 			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
569 		return; /* Caller forgot to stop doing call_srcu()? */
570 	}
571 	if (!ssp->sda_is_static) {
572 		free_percpu(ssp->sda);
573 		ssp->sda = NULL;
574 	}
575 	kfree(ssp->node);
576 	ssp->node = NULL;
577 	ssp->srcu_size_state = SRCU_SIZE_SMALL;
578 }
579 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
580 
581 /*
582  * Counts the new reader in the appropriate per-CPU element of the
583  * srcu_struct.
584  * Returns an index that must be passed to the matching srcu_read_unlock().
585  */
586 int __srcu_read_lock(struct srcu_struct *ssp)
587 {
588 	int idx;
589 
590 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
591 	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
592 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
593 	return idx;
594 }
595 EXPORT_SYMBOL_GPL(__srcu_read_lock);
596 
597 /*
598  * Removes the count for the old reader from the appropriate per-CPU
599  * element of the srcu_struct.  Note that this may well be a different
600  * CPU than that which was incremented by the corresponding srcu_read_lock().
601  */
602 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
603 {
604 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
605 	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
606 }
607 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
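
/*
 * A minimal reader-side sketch, using hypothetical names "my_srcu" and
 * "gp".  Readers normally go through the srcu_read_lock()/srcu_read_unlock()
 * wrappers rather than calling the functions above directly:
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	... use p, possibly sleeping ...
 *	srcu_read_unlock(&my_srcu, idx);
 */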
608 
609 /*
610  * We use an adaptive strategy for synchronize_srcu() and especially for
611  * synchronize_srcu_expedited().  We spin for a fixed time period
612  * (defined below) to allow SRCU readers to exit their read-side critical
613  * sections.  If there are still some readers after a few microseconds,
614  * we repeatedly block for 1-millisecond time periods.
615  */
616 #define SRCU_RETRY_CHECK_DELAY		5
617 
618 /*
619  * Start an SRCU grace period.
620  */
621 static void srcu_gp_start(struct srcu_struct *ssp)
622 {
623 	struct srcu_data *sdp;
624 	int state;
625 
626 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
627 		sdp = per_cpu_ptr(ssp->sda, 0);
628 	else
629 		sdp = this_cpu_ptr(ssp->sda);
630 	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
631 	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
632 	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
633 	rcu_segcblist_advance(&sdp->srcu_cblist,
634 			      rcu_seq_current(&ssp->srcu_gp_seq));
635 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
636 				       rcu_seq_snap(&ssp->srcu_gp_seq));
637 	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
638 	WRITE_ONCE(ssp->srcu_gp_start, jiffies);
639 	WRITE_ONCE(ssp->srcu_n_exp_nodelay, 0);
640 	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
641 	rcu_seq_start(&ssp->srcu_gp_seq);
642 	state = rcu_seq_state(ssp->srcu_gp_seq);
643 	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
644 }
645 
646 
647 static void srcu_delay_timer(struct timer_list *t)
648 {
649 	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
650 
651 	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
652 }
653 
654 static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
655 				       unsigned long delay)
656 {
657 	if (!delay) {
658 		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
659 		return;
660 	}
661 
662 	timer_reduce(&sdp->delay_work, jiffies + delay);
663 }
664 
665 /*
666  * Schedule callback invocation for the specified srcu_data structure,
667  * if possible, on the corresponding CPU.
668  */
669 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
670 {
671 	srcu_queue_delayed_work_on(sdp, delay);
672 }
673 
674 /*
675  * Schedule callback invocation for all srcu_data structures associated
676  * with the specified srcu_node structure that have callbacks for the
677  * just-completed grace period, the one corresponding to idx.  If possible,
678  * schedule this invocation on the corresponding CPUs.
679  */
680 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
681 				  unsigned long mask, unsigned long delay)
682 {
683 	int cpu;
684 
685 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
686 		if (!(mask & (1 << (cpu - snp->grplo))))
687 			continue;
688 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
689 	}
690 }
691 
692 /*
693  * Note the end of an SRCU grace period.  Initiates callback invocation
694  * and starts a new grace period if needed.
695  *
696  * The ->srcu_cb_mutex acquisition does not protect any data, but
697  * instead prevents more than one grace period from starting while we
698  * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
699  * array to have a finite number of elements.
700  */
701 static void srcu_gp_end(struct srcu_struct *ssp)
702 {
703 	unsigned long cbdelay;
704 	bool cbs;
705 	bool last_lvl;
706 	int cpu;
707 	unsigned long flags;
708 	unsigned long gpseq;
709 	int idx;
710 	unsigned long mask;
711 	struct srcu_data *sdp;
712 	unsigned long sgsne;
713 	struct srcu_node *snp;
714 	int ss_state;
715 
716 	/* Prevent more than one additional grace period. */
717 	mutex_lock(&ssp->srcu_cb_mutex);
718 
719 	/* End the current grace period. */
720 	spin_lock_irq_rcu_node(ssp);
721 	idx = rcu_seq_state(ssp->srcu_gp_seq);
722 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
723 	cbdelay = !!srcu_get_delay(ssp);
724 	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
725 	rcu_seq_end(&ssp->srcu_gp_seq);
726 	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
727 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
728 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
729 	spin_unlock_irq_rcu_node(ssp);
730 	mutex_unlock(&ssp->srcu_gp_mutex);
731 	/* A new grace period can start at this point.  But only one. */
732 
733 	/* Initiate callback invocation as needed. */
734 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
735 	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
736 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
737 	} else {
738 		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
739 		srcu_for_each_node_breadth_first(ssp, snp) {
740 			spin_lock_irq_rcu_node(snp);
741 			cbs = false;
742 			last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
743 			if (last_lvl)
744 				cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
745 			snp->srcu_have_cbs[idx] = gpseq;
746 			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
747 			sgsne = snp->srcu_gp_seq_needed_exp;
748 			if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
749 				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
750 			if (ss_state < SRCU_SIZE_BIG)
751 				mask = ~0;
752 			else
753 				mask = snp->srcu_data_have_cbs[idx];
754 			snp->srcu_data_have_cbs[idx] = 0;
755 			spin_unlock_irq_rcu_node(snp);
756 			if (cbs)
757 				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
758 		}
759 	}
760 
761 	/* Occasionally prevent srcu_data counter wrap. */
762 	if (!(gpseq & counter_wrap_check))
763 		for_each_possible_cpu(cpu) {
764 			sdp = per_cpu_ptr(ssp->sda, cpu);
765 			spin_lock_irqsave_rcu_node(sdp, flags);
766 			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
767 				sdp->srcu_gp_seq_needed = gpseq;
768 			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
769 				sdp->srcu_gp_seq_needed_exp = gpseq;
770 			spin_unlock_irqrestore_rcu_node(sdp, flags);
771 		}
772 
773 	/* Callback initiation done, allow grace periods after next. */
774 	mutex_unlock(&ssp->srcu_cb_mutex);
775 
776 	/* Start a new grace period if needed. */
777 	spin_lock_irq_rcu_node(ssp);
778 	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
779 	if (!rcu_seq_state(gpseq) &&
780 	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
781 		srcu_gp_start(ssp);
782 		spin_unlock_irq_rcu_node(ssp);
783 		srcu_reschedule(ssp, 0);
784 	} else {
785 		spin_unlock_irq_rcu_node(ssp);
786 	}
787 
788 	/* Transition to big if needed. */
789 	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
790 		if (ss_state == SRCU_SIZE_ALLOC)
791 			init_srcu_struct_nodes(ssp, GFP_KERNEL);
792 		else
793 			smp_store_release(&ssp->srcu_size_state, ss_state + 1);
794 	}
795 }
796 
797 /*
798  * Funnel-locking scheme to scalably mediate many concurrent expedited
799  * grace-period requests.  This function is invoked for the first known
800  * expedited request for a grace period that has already been requested,
801  * but without expediting.  To start a completely new grace period,
802  * whether expedited or not, use srcu_funnel_gp_start() instead.
803  */
804 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
805 				  unsigned long s)
806 {
807 	unsigned long flags;
808 	unsigned long sgsne;
809 
810 	if (snp)
811 		for (; snp != NULL; snp = snp->srcu_parent) {
812 			sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
813 			if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
814 			    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
815 				return;
816 			spin_lock_irqsave_rcu_node(snp, flags);
817 			sgsne = snp->srcu_gp_seq_needed_exp;
818 			if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
819 				spin_unlock_irqrestore_rcu_node(snp, flags);
820 				return;
821 			}
822 			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
823 			spin_unlock_irqrestore_rcu_node(snp, flags);
824 		}
825 	spin_lock_irqsave_ssp_contention(ssp, &flags);
826 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
827 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
828 	spin_unlock_irqrestore_rcu_node(ssp, flags);
829 }
830 
831 /*
832  * Funnel-locking scheme to scalably mediate many concurrent grace-period
833  * requests.  The winner has to do the work of actually starting grace
834  * period s.  Losers must either ensure that their desired grace-period
835  * number is recorded on at least their leaf srcu_node structure, or they
836  * must take steps to invoke their own callbacks.
837  *
838  * Note that this function also does the work of srcu_funnel_exp_start(),
839  * in some cases by directly invoking it.
840  */
841 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
842 				 unsigned long s, bool do_norm)
843 {
844 	unsigned long flags;
845 	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
846 	unsigned long sgsne;
847 	struct srcu_node *snp;
848 	struct srcu_node *snp_leaf;
849 	unsigned long snp_seq;
850 
851 	/* Ensure that snp node tree is fully initialized before traversing it */
852 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
853 		snp_leaf = NULL;
854 	else
855 		snp_leaf = sdp->mynode;
856 
857 	if (snp_leaf)
858 		/* Each pass through the loop does one level of the srcu_node tree. */
859 		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
860 			if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
861 				return; /* GP already done and CBs recorded. */
862 			spin_lock_irqsave_rcu_node(snp, flags);
863 			snp_seq = snp->srcu_have_cbs[idx];
864 			if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
865 				if (snp == snp_leaf && snp_seq == s)
866 					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
867 				spin_unlock_irqrestore_rcu_node(snp, flags);
868 				if (snp == snp_leaf && snp_seq != s) {
869 					srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
870 					return;
871 				}
872 				if (!do_norm)
873 					srcu_funnel_exp_start(ssp, snp, s);
874 				return;
875 			}
876 			snp->srcu_have_cbs[idx] = s;
877 			if (snp == snp_leaf)
878 				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
879 			sgsne = snp->srcu_gp_seq_needed_exp;
880 			if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
881 				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
882 			spin_unlock_irqrestore_rcu_node(snp, flags);
883 		}
884 
885 	/* Top of tree, must ensure the grace period will be started. */
886 	spin_lock_irqsave_ssp_contention(ssp, &flags);
887 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
888 		/*
889 		 * Record need for grace period s.  Pair with load
890 		 * acquire setting up for initialization.
891 		 */
892 		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
893 	}
894 	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
895 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
896 
897 	/* If grace period not already done and none in progress, start it. */
898 	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
899 	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
900 		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
901 		srcu_gp_start(ssp);
902 
903 		// And how can that list_add() in the "else" clause
904 		// possibly be safe for concurrent execution?  Well,
905 		// it isn't.  And it does not have to be.  After all, it
906 		// can only be executed during early boot when there is only
907 		// the one boot CPU running with interrupts still disabled.
908 		if (likely(srcu_init_done))
909 			queue_delayed_work(rcu_gp_wq, &ssp->work,
910 					   !!srcu_get_delay(ssp));
911 		else if (list_empty(&ssp->work.work.entry))
912 			list_add(&ssp->work.work.entry, &srcu_boot_list);
913 	}
914 	spin_unlock_irqrestore_rcu_node(ssp, flags);
915 }
916 
917 /*
918  * Wait until all readers counted by array index idx complete, but
919  * loop an additional time if there is an expedited grace period pending.
920  * The caller must ensure that ->srcu_idx is not changed while checking.
921  */
922 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
923 {
924 	for (;;) {
925 		if (srcu_readers_active_idx_check(ssp, idx))
926 			return true;
927 		if (--trycount + !srcu_get_delay(ssp) <= 0)
928 			return false;
929 		udelay(SRCU_RETRY_CHECK_DELAY);
930 	}
931 }
932 
933 /*
934  * Increment the ->srcu_idx counter so that future SRCU readers will
935  * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
936  * us to wait for pre-existing readers in a starvation-free manner.
937  */
938 static void srcu_flip(struct srcu_struct *ssp)
939 {
940 	/*
941 	 * Ensure that if this updater saw a given reader's increment
942 	 * from __srcu_read_lock(), that reader was using an old value
943 	 * of ->srcu_idx.  Also ensure that if a given reader sees the
944 	 * new value of ->srcu_idx, this updater's earlier scans cannot
945 	 * have seen that reader's increments (which is OK, because this
946 	 * grace period need not wait on that reader).
947 	 */
948 	smp_mb(); /* E */  /* Pairs with B and C. */
949 
950 	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
951 
952 	/*
953 	 * Ensure that if the updater misses an __srcu_read_unlock()
954 	 * increment, that task's next __srcu_read_lock() will see the
955 	 * above counter update.  Note that both this memory barrier
956 	 * and the one in srcu_readers_active_idx_check() provide the
957 	 * guarantee for __srcu_read_lock().
958 	 */
959 	smp_mb(); /* D */  /* Pairs with C. */
960 }
961 
962 /*
963  * If SRCU is likely idle, return true, otherwise return false.
964  *
965  * Note that it is OK for several concurrent from-idle requests for a new
966  * grace period to specify expediting, because they will all end
967  * up requesting the same grace period anyhow.  So no loss.
968  *
969  * Note also that if any CPU (including the current one) is still invoking
970  * callbacks, this function will nevertheless say "idle".  This is not
971  * ideal, but the overhead of checking all CPUs' callback lists is even
972  * less ideal, especially on large systems.  Furthermore, the wakeup
973  * can happen before the callback is fully removed, so we have no choice
974  * but to accept this type of error.
975  *
976  * This function is also subject to counter-wrap errors, but let's face
977  * it, if this function was preempted for enough time for the counters
978  * to wrap, it really doesn't matter whether or not we expedite the grace
979  * period.  The extra overhead of a needlessly expedited grace period is
980  * negligible when amortized over that time period, and the extra latency
981  * of a needlessly non-expedited grace period is similarly negligible.
982  */
983 static bool srcu_might_be_idle(struct srcu_struct *ssp)
984 {
985 	unsigned long curseq;
986 	unsigned long flags;
987 	struct srcu_data *sdp;
988 	unsigned long t;
989 	unsigned long tlast;
990 
991 	check_init_srcu_struct(ssp);
992 	/* If the local srcu_data structure has callbacks, not idle.  */
993 	sdp = raw_cpu_ptr(ssp->sda);
994 	spin_lock_irqsave_rcu_node(sdp, flags);
995 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
996 		spin_unlock_irqrestore_rcu_node(sdp, flags);
997 		return false; /* Callbacks already present, so not idle. */
998 	}
999 	spin_unlock_irqrestore_rcu_node(sdp, flags);
1000 
1001 	/*
1002 	 * No local callbacks, so probabilistically probe global state.
1003 	 * Exact information would require acquiring locks, which would
1004 	 * kill scalability, hence the probabilistic nature of the probe.
1005 	 */
1006 
1007 	/* First, see if enough time has passed since the last GP. */
1008 	t = ktime_get_mono_fast_ns();
1009 	tlast = READ_ONCE(ssp->srcu_last_gp_end);
1010 	if (exp_holdoff == 0 ||
1011 	    time_in_range_open(t, tlast, tlast + exp_holdoff))
1012 		return false; /* Too soon after last GP. */
1013 
1014 	/* Next, check for probable idleness. */
1015 	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
1016 	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
1017 	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
1018 		return false; /* Grace period in progress, so not idle. */
1019 	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
1020 	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
1021 		return false; /* GP # changed, so not idle. */
1022 	return true; /* With reasonable probability, idle! */
1023 }
1024 
1025 /*
1026  * SRCU callback function to leak a callback.
1027  */
1028 static void srcu_leak_callback(struct rcu_head *rhp)
1029 {
1030 }
1031 
1032 /*
1033  * Start an SRCU grace period, and also queue the callback if non-NULL.
1034  */
1035 static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
1036 					     struct rcu_head *rhp, bool do_norm)
1037 {
1038 	unsigned long flags;
1039 	int idx;
1040 	bool needexp = false;
1041 	bool needgp = false;
1042 	unsigned long s;
1043 	struct srcu_data *sdp;
1044 	struct srcu_node *sdp_mynode;
1045 	int ss_state;
1046 
1047 	check_init_srcu_struct(ssp);
1048 	idx = srcu_read_lock(ssp);
1049 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
1050 	if (ss_state < SRCU_SIZE_WAIT_CALL)
1051 		sdp = per_cpu_ptr(ssp->sda, 0);
1052 	else
1053 		sdp = raw_cpu_ptr(ssp->sda);
1054 	spin_lock_irqsave_sdp_contention(sdp, &flags);
1055 	if (rhp)
1056 		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
1057 	rcu_segcblist_advance(&sdp->srcu_cblist,
1058 			      rcu_seq_current(&ssp->srcu_gp_seq));
1059 	s = rcu_seq_snap(&ssp->srcu_gp_seq);
1060 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
1061 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
1062 		sdp->srcu_gp_seq_needed = s;
1063 		needgp = true;
1064 	}
1065 	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
1066 		sdp->srcu_gp_seq_needed_exp = s;
1067 		needexp = true;
1068 	}
1069 	spin_unlock_irqrestore_rcu_node(sdp, flags);
1070 
1071 	/* Ensure that snp node tree is fully initialized before traversing it */
1072 	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
1073 		sdp_mynode = NULL;
1074 	else
1075 		sdp_mynode = sdp->mynode;
1076 
1077 	if (needgp)
1078 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
1079 	else if (needexp)
1080 		srcu_funnel_exp_start(ssp, sdp_mynode, s);
1081 	srcu_read_unlock(ssp, idx);
1082 	return s;
1083 }
1084 
1085 /*
1086  * Enqueue an SRCU callback on the srcu_data structure associated with
1087  * the current CPU and the specified srcu_struct structure, initiating
1088  * grace-period processing if it is not already running.
1089  *
1090  * Note that all CPUs must agree that the grace period extended beyond
1091  * all pre-existing SRCU read-side critical sections.  On systems with
1092  * more than one CPU, this means that when "func()" is invoked, each CPU
1093  * is guaranteed to have executed a full memory barrier since the end of
1094  * its last corresponding SRCU read-side critical section whose beginning
1095  * preceded the call to call_srcu().  It also means that each CPU executing
1096  * an SRCU read-side critical section that continues beyond the start of
1097  * "func()" must have executed a memory barrier after the call_srcu()
1098  * but before the beginning of that SRCU read-side critical section.
1099  * Note that these guarantees include CPUs that are offline, idle, or
1100  * executing in user mode, as well as CPUs that are executing in the kernel.
1101  *
1102  * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
1103  * resulting SRCU callback function "func()", then both CPU A and CPU
1104  * B are guaranteed to execute a full memory barrier during the time
1105  * interval between the call to call_srcu() and the invocation of "func()".
1106  * This guarantee applies even if CPU A and CPU B are the same CPU (but
1107  * again only if the system has more than one CPU).
1108  *
1109  * Of course, these guarantees apply only for invocations of call_srcu(),
1110  * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
1111  * srcu_struct structure.
1112  */
1113 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1114 			rcu_callback_t func, bool do_norm)
1115 {
1116 	if (debug_rcu_head_queue(rhp)) {
1117 		/* Probable double call_srcu(), so leak the callback. */
1118 		WRITE_ONCE(rhp->func, srcu_leak_callback);
1119 		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
1120 		return;
1121 	}
1122 	rhp->func = func;
1123 	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
1124 }
1125 
1126 /**
1127  * call_srcu() - Queue a callback for invocation after an SRCU grace period
1128  * @ssp: srcu_struct on which to queue the callback
1129  * @rhp: structure to be used for queueing the SRCU callback.
1130  * @func: function to be invoked after the SRCU grace period
1131  *
1132  * The callback function will be invoked some time after a full SRCU
1133  * grace period elapses, in other words after all pre-existing SRCU
1134  * read-side critical sections have completed.  However, the callback
1135  * function might well execute concurrently with other SRCU read-side
1136  * critical sections that started after call_srcu() was invoked.  SRCU
1137  * read-side critical sections are delimited by srcu_read_lock() and
1138  * srcu_read_unlock(), and may be nested.
1139  *
1140  * The callback will be invoked from process context, but must nevertheless
1141  * be fast and must not block.
1142  */
1143 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1144 	       rcu_callback_t func)
1145 {
1146 	__call_srcu(ssp, rhp, func, true);
1147 }
1148 EXPORT_SYMBOL_GPL(call_srcu);
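
/*
 * A minimal call_srcu() usage sketch with hypothetical names ("my_srcu",
 * "struct foo", "free_foo_cb"): the rcu_head is embedded in the protected
 * structure and the callback reclaims it once the grace period has elapsed:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void free_foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	...
 *	call_srcu(&my_srcu, &old_foo->rh, free_foo_cb);
 */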
1149 
1150 /*
1151  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
1152  */
1153 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
1154 {
1155 	struct rcu_synchronize rcu;
1156 
1157 	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
1158 			 lock_is_held(&rcu_bh_lock_map) ||
1159 			 lock_is_held(&rcu_lock_map) ||
1160 			 lock_is_held(&rcu_sched_lock_map),
1161 			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
1162 
1163 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
1164 		return;
1165 	might_sleep();
1166 	check_init_srcu_struct(ssp);
1167 	init_completion(&rcu.completion);
1168 	init_rcu_head_on_stack(&rcu.head);
1169 	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
1170 	wait_for_completion(&rcu.completion);
1171 	destroy_rcu_head_on_stack(&rcu.head);
1172 
1173 	/*
1174 	 * Make sure that later code is ordered after the SRCU grace
1175 	 * period.  This pairs with the spin_lock_irq_rcu_node()
1176 	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
1177 	 * because the current CPU might have been totally uninvolved with
1178 	 * (and thus unordered against) that grace period.
1179 	 */
1180 	smp_mb();
1181 }
1182 
1183 /**
1184  * synchronize_srcu_expedited - Brute-force SRCU grace period
1185  * @ssp: srcu_struct with which to synchronize.
1186  *
1187  * Wait for an SRCU grace period to elapse, but be more aggressive about
1188  * spinning rather than blocking when waiting.
1189  *
1190  * Note that synchronize_srcu_expedited() has the same deadlock and
1191  * memory-ordering properties as does synchronize_srcu().
1192  */
1193 void synchronize_srcu_expedited(struct srcu_struct *ssp)
1194 {
1195 	__synchronize_srcu(ssp, rcu_gp_is_normal());
1196 }
1197 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
1198 
1199 /**
1200  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
1201  * @ssp: srcu_struct with which to synchronize.
1202  *
1203  * Wait for the counts of both indexes to drain to zero.  To avoid
1204  * possible starvation of synchronize_srcu(), it first waits for the
1205  * count of index=((->srcu_idx & 1) ^ 1) to drain to zero, and then
1206  * flips ->srcu_idx and waits for the count of the other index.
1207  *
1208  * Can block; must be called from process context.
1209  *
1210  * Note that it is illegal to call synchronize_srcu() from the corresponding
1211  * SRCU read-side critical section; doing so will result in deadlock.
1212  * However, it is perfectly legal to call synchronize_srcu() on one
1213  * srcu_struct from some other srcu_struct's read-side critical section,
1214  * as long as the resulting graph of srcu_structs is acyclic.
1215  *
1216  * There are memory-ordering constraints implied by synchronize_srcu().
1217  * On systems with more than one CPU, when synchronize_srcu() returns,
1218  * each CPU is guaranteed to have executed a full memory barrier since
1219  * the end of its last corresponding SRCU read-side critical section
1220  * whose beginning preceded the call to synchronize_srcu().  In addition,
1221  * each CPU having an SRCU read-side critical section that extends beyond
1222  * the return from synchronize_srcu() is guaranteed to have executed a
1223  * full memory barrier after the beginning of synchronize_srcu() and before
1224  * the beginning of that SRCU read-side critical section.  Note that these
1225  * guarantees include CPUs that are offline, idle, or executing in user mode,
1226  * as well as CPUs that are executing in the kernel.
1227  *
1228  * Furthermore, if CPU A invoked synchronize_srcu(), which returned
1229  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
1230  * to have executed a full memory barrier during the execution of
1231  * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
1232  * are the same CPU, but again only if the system has more than one CPU.
1233  *
1234  * Of course, these memory-ordering guarantees apply only when
1235  * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
1236  * passed the same srcu_struct structure.
1237  *
1238  * Implementation of these memory-ordering guarantees is similar to
1239  * that of synchronize_rcu().
1240  *
1241  * If SRCU is likely idle, expedite the first request.  This semantic
1242  * was provided by Classic SRCU, and is relied upon by its users, so TREE
1243  * SRCU must also provide it.  Note that detecting idleness is heuristic
1244  * and subject to both false positives and negatives.
1245  */
1246 void synchronize_srcu(struct srcu_struct *ssp)
1247 {
1248 	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1249 		synchronize_srcu_expedited(ssp);
1250 	else
1251 		__synchronize_srcu(ssp, true);
1252 }
1253 EXPORT_SYMBOL_GPL(synchronize_srcu);
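
/*
 * A minimal updater-side sketch with hypothetical names ("my_lock",
 * "my_srcu", "gp"): publish the new version under the update-side lock,
 * drop the lock, wait for pre-existing readers, then free the old version:
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */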
1254 
1255 /**
1256  * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
1257  * @ssp: srcu_struct to provide cookie for.
1258  *
1259  * This function returns a cookie that can be passed to
1260  * poll_state_synchronize_srcu(), which will return true if a full grace
1261  * period has elapsed in the meantime.  It is the caller's responsibility
1262  * to make sure that grace period happens, for example, by invoking
1263  * call_srcu() after return from get_state_synchronize_srcu().
1264  */
1265 unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
1266 {
1267 	// Any prior manipulation of SRCU-protected data must happen
1268 	// before the load from ->srcu_gp_seq.
1269 	smp_mb();
1270 	return rcu_seq_snap(&ssp->srcu_gp_seq);
1271 }
1272 EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
1273 
1274 /**
1275  * start_poll_synchronize_srcu - Provide cookie and start grace period
1276  * @ssp: srcu_struct to provide cookie for.
1277  *
1278  * This function returns a cookie that can be passed to
1279  * poll_state_synchronize_srcu(), which will return true if a full grace
1280  * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
1281  * this function also ensures that any needed SRCU grace period will be
1282  * started.  This convenience does come at a cost in terms of CPU overhead.
1283  */
1284 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
1285 {
1286 	return srcu_gp_start_if_needed(ssp, NULL, true);
1287 }
1288 EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
1289 
1290 /**
1291  * poll_state_synchronize_srcu - Has cookie's grace period ended?
1292  * @ssp: srcu_struct to provide cookie for.
1293  * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
1294  *
1295  * This function takes the cookie that was returned from either
1296  * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
1297  * returns @true if an SRCU grace period elapsed since the time that the
1298  * cookie was created.
1299  *
1300  * Because cookies are finite in size, wrapping/overflow is possible.
1301  * This is more pronounced on 32-bit systems, whose 32-bit cookies could
1302  * in theory wrap in about 14 hours assuming
1303  * 25-microsecond expedited SRCU grace periods.  However, a more likely
1304  * overflow lower bound is on the order of 24 days in the case of
1305  * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
1306  * system requires geologic timespans, as in more than seven million years
1307  * even for expedited SRCU grace periods.
1308  *
1309  * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
1310  * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
1311  * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
1312  * few minutes.  If this proves to be a problem, this counter will be
1313  * expanded to the same size as for Tree SRCU.
1314  */
1315 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
1316 {
1317 	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
1318 		return false;
1319 	// Ensure that the end of the SRCU grace period happens before
1320 	// any subsequent code that the caller might execute.
1321 	smp_mb(); // ^^^
1322 	return true;
1323 }
1324 EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
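
/*
 * A minimal polling sketch with hypothetical names: grab a cookie (starting
 * a grace period if needed), do other work, then check whether that grace
 * period has since completed:
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(&my_srcu);
 *	... do other work ...
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		pr_info("grace period completed\n");
 *
 * Alternatively, get_state_synchronize_srcu() provides a cookie without
 * starting a grace period, in which case the caller must itself ensure,
 * for example via call_srcu(), that a grace period eventually happens.
 */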
1325 
1326 /*
1327  * Callback function for srcu_barrier() use.
1328  */
1329 static void srcu_barrier_cb(struct rcu_head *rhp)
1330 {
1331 	struct srcu_data *sdp;
1332 	struct srcu_struct *ssp;
1333 
1334 	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1335 	ssp = sdp->ssp;
1336 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1337 		complete(&ssp->srcu_barrier_completion);
1338 }
1339 
1340 /*
1341  * Enqueue an srcu_barrier() callback on the specified srcu_data
1342  * structure's ->cblist, but only if that ->cblist already has at least one
1343  * callback enqueued.  Note that if a CPU already has callbacks enqueued,
1344  * it must have already registered the need for a future grace period,
1345  * so all we need do is enqueue a callback that will use the same grace
1346  * period as the last callback already in the queue.
1347  */
1348 static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1349 {
1350 	spin_lock_irq_rcu_node(sdp);
1351 	atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1352 	sdp->srcu_barrier_head.func = srcu_barrier_cb;
1353 	debug_rcu_head_queue(&sdp->srcu_barrier_head);
1354 	if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1355 				   &sdp->srcu_barrier_head)) {
1356 		debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1357 		atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1358 	}
1359 	spin_unlock_irq_rcu_node(sdp);
1360 }
1361 
1362 /**
1363  * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1364  * @ssp: srcu_struct on which to wait for in-flight callbacks.
1365  */
1366 void srcu_barrier(struct srcu_struct *ssp)
1367 {
1368 	int cpu;
1369 	int idx;
1370 	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1371 
1372 	check_init_srcu_struct(ssp);
1373 	mutex_lock(&ssp->srcu_barrier_mutex);
1374 	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1375 		smp_mb(); /* Force ordering following return. */
1376 		mutex_unlock(&ssp->srcu_barrier_mutex);
1377 		return; /* Someone else did our work for us. */
1378 	}
1379 	rcu_seq_start(&ssp->srcu_barrier_seq);
1380 	init_completion(&ssp->srcu_barrier_completion);
1381 
1382 	/* Initial count prevents reaching zero until all CBs are posted. */
1383 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1384 
1385 	idx = srcu_read_lock(ssp);
1386 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1387 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
1388 	else
1389 		for_each_possible_cpu(cpu)
1390 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1391 	srcu_read_unlock(ssp, idx);
1392 
1393 	/* Remove the initial count, at which point reaching zero can happen. */
1394 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1395 		complete(&ssp->srcu_barrier_completion);
1396 	wait_for_completion(&ssp->srcu_barrier_completion);
1397 
1398 	rcu_seq_end(&ssp->srcu_barrier_seq);
1399 	mutex_unlock(&ssp->srcu_barrier_mutex);
1400 }
1401 EXPORT_SYMBOL_GPL(srcu_barrier);
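
/*
 * A minimal teardown sketch with hypothetical names, for example in a
 * module-exit path: stop posting new callbacks, wait for those already
 * posted, and only then clean up the srcu_struct:
 *
 *	... prevent further call_srcu() invocations ...
 *	srcu_barrier(&my_srcu);
 *	cleanup_srcu_struct(&my_srcu);
 */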
1402 
1403 /**
1404  * srcu_batches_completed - return batches completed.
1405  * @ssp: srcu_struct on which to report batch completion.
1406  *
1407  * Report the number of batches, correlated with, but not necessarily
1408  * precisely the same as, the number of grace periods that have elapsed.
1409  */
1410 unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1411 {
1412 	return READ_ONCE(ssp->srcu_idx);
1413 }
1414 EXPORT_SYMBOL_GPL(srcu_batches_completed);
1415 
1416 /*
1417  * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
1418  * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1419  * completed in that state.
1420  */
1421 static void srcu_advance_state(struct srcu_struct *ssp)
1422 {
1423 	int idx;
1424 
1425 	mutex_lock(&ssp->srcu_gp_mutex);
1426 
1427 	/*
1428 	 * Because readers might be delayed for an extended period after
1429 	 * fetching ->srcu_idx for their index, at any point in time there
1430 	 * might well be readers using both idx=0 and idx=1.  We therefore
1431 	 * need to wait for readers to clear from both index values before
1432 	 * invoking a callback.
1433 	 *
1434 	 * The load-acquire ensures that we see the accesses performed
1435 	 * by the prior grace period.
1436 	 */
1437 	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
1438 	if (idx == SRCU_STATE_IDLE) {
1439 		spin_lock_irq_rcu_node(ssp);
1440 		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1441 			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1442 			spin_unlock_irq_rcu_node(ssp);
1443 			mutex_unlock(&ssp->srcu_gp_mutex);
1444 			return;
1445 		}
1446 		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1447 		if (idx == SRCU_STATE_IDLE)
1448 			srcu_gp_start(ssp);
1449 		spin_unlock_irq_rcu_node(ssp);
1450 		if (idx != SRCU_STATE_IDLE) {
1451 			mutex_unlock(&ssp->srcu_gp_mutex);
1452 			return; /* Someone else started the grace period. */
1453 		}
1454 	}
1455 
1456 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1457 		idx = 1 ^ (ssp->srcu_idx & 1);
1458 		if (!try_check_zero(ssp, idx, 1)) {
1459 			mutex_unlock(&ssp->srcu_gp_mutex);
1460 			return; /* readers present, retry later. */
1461 		}
1462 		srcu_flip(ssp);
1463 		spin_lock_irq_rcu_node(ssp);
1464 		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
1465 		ssp->srcu_n_exp_nodelay = 0;
1466 		spin_unlock_irq_rcu_node(ssp);
1467 	}
1468 
1469 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1470 
1471 		/*
1472 		 * SRCU read-side critical sections are normally short,
1473 		 * so check at least twice in quick succession after a flip.
1474 		 */
1475 		idx = 1 ^ (ssp->srcu_idx & 1);
1476 		if (!try_check_zero(ssp, idx, 2)) {
1477 			mutex_unlock(&ssp->srcu_gp_mutex);
1478 			return; /* readers present, retry later. */
1479 		}
1480 		ssp->srcu_n_exp_nodelay = 0;
1481 		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
1482 	}
1483 }
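
/*
 * Editorial example, not part of this file: the reader side that the
 * two-index scans above must tolerate.  A reader samples ->srcu_idx in
 * srcu_read_lock() and may then be delayed arbitrarily, so readers on
 * idx=0 and idx=1 can coexist at any instant, which is why both index
 * values must drain before a grace period ends.  The mydrv_* names are
 * hypothetical.
 */
struct mydrv_cfg {
	int setting;
};

static struct mydrv_cfg __rcu *mydrv_cfg;

static int mydrv_read_setting(struct srcu_struct *ssp)
{
	int idx;
	int ret = 0;
	struct mydrv_cfg *cfg;

	idx = srcu_read_lock(ssp);	/* Samples the current ->srcu_idx. */
	cfg = srcu_dereference(mydrv_cfg, ssp);
	if (cfg)
		ret = cfg->setting;
	srcu_read_unlock(ssp, idx);	/* Must pass back the same idx. */
	return ret;
}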
1484 
1485 /*
1486  * Invoke a limited number of SRCU callbacks that have passed through
1487  * their grace period.  If there are more to do, SRCU will reschedule
1488  * the workqueue.  Note that needed memory barriers have been executed
1489  * in this task's context by srcu_readers_active_idx_check().
1490  */
1491 static void srcu_invoke_callbacks(struct work_struct *work)
1492 {
1493 	long len;
1494 	bool more;
1495 	struct rcu_cblist ready_cbs;
1496 	struct rcu_head *rhp;
1497 	struct srcu_data *sdp;
1498 	struct srcu_struct *ssp;
1499 
1500 	sdp = container_of(work, struct srcu_data, work);
1501 
1502 	ssp = sdp->ssp;
1503 	rcu_cblist_init(&ready_cbs);
1504 	spin_lock_irq_rcu_node(sdp);
1505 	rcu_segcblist_advance(&sdp->srcu_cblist,
1506 			      rcu_seq_current(&ssp->srcu_gp_seq));
1507 	if (sdp->srcu_cblist_invoking ||
1508 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1509 		spin_unlock_irq_rcu_node(sdp);
1510 		return;  /* Someone else on the job or nothing to do. */
1511 	}
1512 
1513 	/* We are on the job!  Extract and invoke ready callbacks. */
1514 	sdp->srcu_cblist_invoking = true;
1515 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1516 	len = ready_cbs.len;
1517 	spin_unlock_irq_rcu_node(sdp);
1518 	rhp = rcu_cblist_dequeue(&ready_cbs);
1519 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1520 		debug_rcu_head_unqueue(rhp);
1521 		local_bh_disable();
1522 		rhp->func(rhp);
1523 		local_bh_enable();
1524 	}
1525 	WARN_ON_ONCE(ready_cbs.len);
1526 
1527 	/*
1528 	 * Update counts, accelerate new callbacks, and if needed,
1529 	 * schedule another round of callback invocation.
1530 	 */
1531 	spin_lock_irq_rcu_node(sdp);
1532 	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
1533 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1534 				       rcu_seq_snap(&ssp->srcu_gp_seq));
1535 	sdp->srcu_cblist_invoking = false;
1536 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1537 	spin_unlock_irq_rcu_node(sdp);
1538 	if (more)
1539 		srcu_schedule_cbs_sdp(sdp, 0);
1540 }
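
/*
 * Editorial example, not part of this file: the loop above runs each
 * callback under local_bh_disable(), so SRCU callbacks must not sleep.
 * A callback that needs to block typically hands the work off to a
 * workqueue instead, as in this hypothetical mydrv_* sketch.
 */
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>

struct mydrv_victim {
	struct rcu_head rh;
	struct work_struct wk;
};

static void mydrv_slow_free(struct work_struct *wk)
{
	struct mydrv_victim *v = container_of(wk, struct mydrv_victim, wk);

	/* Ordinary workqueue context: sleeping operations are fine here. */
	kfree(v);
}

static void mydrv_srcu_cb(struct rcu_head *rhp)
{
	struct mydrv_victim *v = container_of(rhp, struct mydrv_victim, rh);

	INIT_WORK(&v->wk, mydrv_slow_free);
	schedule_work(&v->wk);	/* No sleeping in the callback itself. */
}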
1541 
1542 /*
1543  * Finished one round of SRCU grace-period processing.  Start another if more
1544  * SRCU callbacks are queued; otherwise put SRCU into the not-running state.
1545  */
1546 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1547 {
1548 	bool pushgp = true;
1549 
1550 	spin_lock_irq_rcu_node(ssp);
1551 	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1552 		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1553 			/* All requests fulfilled, time to go idle. */
1554 			pushgp = false;
1555 		}
1556 	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1557 		/* Outstanding request and no GP.  Start one. */
1558 		srcu_gp_start(ssp);
1559 	}
1560 	spin_unlock_irq_rcu_node(ssp);
1561 
1562 	if (pushgp)
1563 		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1564 }
1565 
1566 /*
1567  * This is the work-queue function that handles SRCU grace periods.
1568  */
1569 static void process_srcu(struct work_struct *work)
1570 {
1571 	unsigned long curdelay;
1572 	unsigned long j;
1573 	struct srcu_struct *ssp;
1574 
1575 	ssp = container_of(work, struct srcu_struct, work.work);
1576 
1577 	srcu_advance_state(ssp);
1578 	curdelay = srcu_get_delay(ssp);
1579 	if (curdelay) {
1580 		WRITE_ONCE(ssp->reschedule_count, 0);
1581 	} else {
1582 		j = jiffies;
1583 		if (READ_ONCE(ssp->reschedule_jiffies) == j) {
1584 			WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
1585 			if (READ_ONCE(ssp->reschedule_count) > SRCU_MAX_NODELAY)
1586 				curdelay = 1;
1587 		} else {
1588 			WRITE_ONCE(ssp->reschedule_count, 1);
1589 			WRITE_ONCE(ssp->reschedule_jiffies, j);
1590 		}
1591 	}
1592 	srcu_reschedule(ssp, curdelay);
1593 }
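
/*
 * Editorial example, not part of this file: the reschedule_count /
 * reschedule_jiffies logic above is a generic "N zero-delay requeues
 * per jiffy, then back off" throttle.  A stand-alone sketch of the
 * same pattern follows; my_throttle_delay() and MY_MAX_NODELAY are
 * hypothetical, and unlike the code above this sketch ignores the
 * READ_ONCE()/WRITE_ONCE() needed for concurrent updaters.
 */
#include <linux/jiffies.h>

#define MY_MAX_NODELAY 100

static unsigned long my_last_jiffy;
static unsigned long my_requeue_count;

static unsigned long my_throttle_delay(void)
{
	unsigned long j = jiffies;

	if (my_last_jiffy == j) {
		/* Still in the same jiffy: count another zero-delay pass. */
		if (++my_requeue_count > MY_MAX_NODELAY)
			return 1;	/* Too many; insert a one-jiffy delay. */
	} else {
		/* New jiffy: restart the count. */
		my_last_jiffy = j;
		my_requeue_count = 1;
	}
	return 0;
}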
1594 
1595 void srcutorture_get_gp_data(enum rcutorture_type test_type,
1596 			     struct srcu_struct *ssp, int *flags,
1597 			     unsigned long *gp_seq)
1598 {
1599 	if (test_type != SRCU_FLAVOR)
1600 		return;
1601 	*flags = 0;
1602 	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
1603 }
1604 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1605 
1606 static const char * const srcu_size_state_name[] = {
1607 	"SRCU_SIZE_SMALL",
1608 	"SRCU_SIZE_ALLOC",
1609 	"SRCU_SIZE_WAIT_BARRIER",
1610 	"SRCU_SIZE_WAIT_CALL",
1611 	"SRCU_SIZE_WAIT_CBS1",
1612 	"SRCU_SIZE_WAIT_CBS2",
1613 	"SRCU_SIZE_WAIT_CBS3",
1614 	"SRCU_SIZE_WAIT_CBS4",
1615 	"SRCU_SIZE_BIG",
1616 	"SRCU_SIZE_???",
1617 };
1618 
1619 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1620 {
1621 	int cpu;
1622 	int idx;
1623 	unsigned long s0 = 0, s1 = 0;
1624 	int ss_state = READ_ONCE(ssp->srcu_size_state);
1625 	int ss_state_idx = ss_state;
1626 
1627 	idx = ssp->srcu_idx & 0x1;
1628 	if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
1629 		ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
1630 	pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
1631 		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
1632 		 srcu_size_state_name[ss_state_idx]);
1633 	if (!ssp->sda) {
1634 		// Called after cleanup_srcu_struct(), perhaps.
1635 		pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
1636 	} else {
1637 		pr_cont(" per-CPU(idx=%d):", idx);
1638 		for_each_possible_cpu(cpu) {
1639 			unsigned long l0, l1;
1640 			unsigned long u0, u1;
1641 			long c0, c1;
1642 			struct srcu_data *sdp;
1643 
1644 			sdp = per_cpu_ptr(ssp->sda, cpu);
1645 			u0 = data_race(sdp->srcu_unlock_count[!idx]);
1646 			u1 = data_race(sdp->srcu_unlock_count[idx]);
1647 
1648 			/*
1649 			 * Make sure that a lock is always counted if the corresponding
1650 			 * unlock is counted.
1651 			 */
1652 			smp_rmb();
1653 
1654 			l0 = data_race(sdp->srcu_lock_count[!idx]);
1655 			l1 = data_race(sdp->srcu_lock_count[idx]);
1656 
1657 			c0 = l0 - u0;
1658 			c1 = l1 - u1;
1659 			pr_cont(" %d(%ld,%ld %c)",
1660 				cpu, c0, c1,
1661 				"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1662 			s0 += c0;
1663 			s1 += c1;
1664 		}
1665 		pr_cont(" T(%ld,%ld)\n", s0, s1);
1666 	}
1667 	if (SRCU_SIZING_IS_TORTURE())
1668 		srcu_transition_to_big(ssp);
1669 }
1670 EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1671 
1672 static int __init srcu_bootup_announce(void)
1673 {
1674 	pr_info("Hierarchical SRCU implementation.\n");
1675 	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1676 		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1677 	return 0;
1678 }
1679 early_initcall(srcu_bootup_announce);
1680 
1681 void __init srcu_init(void)
1682 {
1683 	struct srcu_struct *ssp;
1684 
1685 	/* Decide on srcu_struct-size strategy. */
1686 	if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
1687 		if (nr_cpu_ids >= big_cpu_lim) {
1688 			convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
1689 			pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
1690 		} else {
1691 			convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
1692 			pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
1693 		}
1694 	}
1695 
1696 	/*
1697 	 * Once srcu_init_done is set, call_srcu() can follow the normal path
1698 	 * and queue delayed work.  This must follow the creation of the RCU
1699 	 * workqueues and timer initialization.
1700 	 */
1701 	srcu_init_done = true;
1702 	while (!list_empty(&srcu_boot_list)) {
1703 		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
1704 				      work.work.entry);
1705 		list_del_init(&ssp->work.work.entry);
1706 		if (SRCU_SIZING_IS(SRCU_SIZING_INIT) && ssp->srcu_size_state == SRCU_SIZE_SMALL)
1707 			ssp->srcu_size_state = SRCU_SIZE_ALLOC;
1708 		queue_work(rcu_gp_wq, &ssp->work.work);
1709 	}
1710 }
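
/*
 * Editorial example, not part of this file: a call_srcu() issued before
 * srcu_init() only parks the srcu_struct on srcu_boot_list, so a caller
 * running that early does not lose its callback; it is queued to a real
 * workqueue once the loop above drains the list.  This sketch, the
 * mydrv_* names, and the assumption that mydrv_early_setup() runs
 * before srcu_init() are all hypothetical.
 */
DEFINE_STATIC_SRCU(mydrv_early_srcu);

static struct rcu_head mydrv_early_rh;

static void mydrv_early_cb(struct rcu_head *rhp)
{
	pr_info("early SRCU callback finally ran\n");
}

static void __init mydrv_early_setup(void)
{
	/* Safe even before srcu_init(): the work is deferred, not dropped. */
	call_srcu(&mydrv_early_srcu, &mydrv_early_rh, mydrv_early_cb);
}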
1711 
1712 #ifdef CONFIG_MODULES
1713 
1714 /* Initialize any global-scope srcu_struct structures used by this module. */
1715 static int srcu_module_coming(struct module *mod)
1716 {
1717 	int i;
1718 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1719 	int ret;
1720 
1721 	for (i = 0; i < mod->num_srcu_structs; i++) {
1722 		ret = init_srcu_struct(*(sspp++));
1723 		if (WARN_ON_ONCE(ret))
1724 			return ret;
1725 	}
1726 	return 0;
1727 }
1728 
1729 /* Clean up any global-scope srcu_struct structures used by this module. */
1730 static void srcu_module_going(struct module *mod)
1731 {
1732 	int i;
1733 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1734 
1735 	for (i = 0; i < mod->num_srcu_structs; i++)
1736 		cleanup_srcu_struct(*(sspp++));
1737 }
1738 
1739 /* Handle one module, either coming or going. */
1740 static int srcu_module_notify(struct notifier_block *self,
1741 			      unsigned long val, void *data)
1742 {
1743 	struct module *mod = data;
1744 	int ret = 0;
1745 
1746 	switch (val) {
1747 	case MODULE_STATE_COMING:
1748 		ret = srcu_module_coming(mod);
1749 		break;
1750 	case MODULE_STATE_GOING:
1751 		srcu_module_going(mod);
1752 		break;
1753 	default:
1754 		break;
1755 	}
1756 	return ret;
1757 }
1758 
1759 static struct notifier_block srcu_module_nb = {
1760 	.notifier_call = srcu_module_notify,
1761 	.priority = 0,
1762 };
1763 
1764 static __init int init_srcu_module_notifier(void)
1765 {
1766 	int ret;
1767 
1768 	ret = register_module_notifier(&srcu_module_nb);
1769 	if (ret)
1770 		pr_warn("Failed to register srcu module notifier\n");
1771 	return ret;
1772 }
1773 late_initcall(init_srcu_module_notifier);
1774 
1775 #endif /* #ifdef CONFIG_MODULES */
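
/*
 * Editorial example, not part of this file: the module notifier above
 * is what lets a module use a bare DEFINE_STATIC_SRCU().  The pointer
 * to the srcu_struct lands in the module's srcu_struct_ptrs array, so
 * init_srcu_struct() and cleanup_srcu_struct() are run for it on
 * MODULE_STATE_COMING and MODULE_STATE_GOING.  The mydrv_* names are
 * hypothetical.
 */
#include <linux/module.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(mydrv_mod_srcu);	/* No explicit init/cleanup needed. */

static int __init mydrv_mod_init(void)
{
	int idx;

	idx = srcu_read_lock(&mydrv_mod_srcu);
	/* Read-side use is already valid; COMING notifiers ran before this. */
	srcu_read_unlock(&mydrv_mod_srcu, idx);
	return 0;
}
module_init(mydrv_mod_init);

MODULE_LICENSE("GPL");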
1776