// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/kmemleak.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/panic_notifier.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <linux/context_tracking.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */
static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.gpwrap = true,
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
	.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
		rcu_sr_normal_gp_cleanup_work),
	.srs_cleanups_pending = ATOMIC_INIT(0),
#ifdef CONFIG_RCU_NOCB_CPU
	.nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex),
#endif
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static struct task_struct *rcu_boost_task(struct rcu_node *rnp);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
static bool rcu_init_invoked(void);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);

/*
 * rcuc/rcub/rcuop kthread realtime priority. Whether the "rcuop"
 * kthreads receive this real-time priority is additionally controlled
 * by the CONFIG_RCU_NOCB_CPU_CB_BOOST Kconfig option.
 */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
static int nohz_full_patience_delay;
module_param(nohz_full_patience_delay, int, 0444);
static int nohz_full_patience_delay_jiffies;

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
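
/*
 * Worked example of the normalization above (illustrative numbers):
 * with a debug delay of 5 jiffies, PER_RCU_NODE_PERIOD = 3, and
 * rcu_num_nodes = 10, rcu_gp_slow() below sleeps 5 jiffies once every
 * 10 * 3 * 5 = 150 grace periods, an average of 1/30 jiffy per grace
 * period.  A delay of 20 jiffies instead fires once every 600 grace
 * periods, again 1/30 jiffy per grace period, hence the constant
 * overall slowdown.
 */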

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}
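
/*
 * For reference, a ->gp_seq value packs a grace-period counter into
 * its upper bits and a progress state into its low-order
 * RCU_SEQ_CTR_SHIFT bits; rcu_seq_state() extracts the state bits,
 * which are nonzero only while a grace period is in progress.  A
 * sketch of the decoding (the authoritative definitions live in
 * kernel/rcu/rcu.h):
 *
 *	state   = gp_seq & RCU_SEQ_STATE_MASK;	// Nonzero: GP in flight.
 *	counter = gp_seq >> RCU_SEQ_CTR_SHIFT;	// Grace periods so far.
 */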

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

/**
 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
 *
 * Mark a quiescent state for RCU, Tasks RCU, and Tasks Trace RCU.
 * This is a special-purpose function to be used in the softirq
 * infrastructure and perhaps the occasional long-running softirq
 * handler.
 *
 * Note that from RCU's viewpoint, a call to rcu_softirq_qs() is
 * equivalent to momentarily completely enabling preemption.  For
 * example, given this code::
 *
 *	local_bh_disable();
 *	do_something();
 *	rcu_softirq_qs();  // A
 *	do_something_else();
 *	local_bh_enable();  // B
 *
 * A call to synchronize_rcu() that began concurrently with the
 * call to do_something() would be guaranteed to wait only until
 * execution reached statement A.  Without that rcu_softirq_qs(),
 * that same synchronize_rcu() would instead be guaranteed to wait
 * until execution reached statement B.
 */
void rcu_softirq_qs(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal rcu_softirq_qs() in RCU read-side critical section");
	rcu_qs();
	rcu_preempt_deferred_qs(current);
	rcu_tasks_qs(current, false);
}

/*
 * Reset the current CPU's RCU_WATCHING counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->state variable are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_watching_online(void)
{
	if (ct_rcu_watching() & CT_RCU_WATCHING)
		return;
	ct_state_inc(CT_RCU_WATCHING);
}

/*
 * Return true if the snapshot returned from ct_rcu_watching()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_watching_snap_in_eqs(int snap)
{
	return !(snap & CT_RCU_WATCHING);
}

/**
 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
 * since the specified @snap?
 *
 * @rdp: The rcu_data corresponding to the CPU for which to check EQS.
 * @snap: rcu_watching snapshot taken when the CPU wasn't in an EQS.
 *
 * Returns true if the CPU corresponding to @rdp has spent some time in an
 * extended quiescent state since @snap. Note that this doesn't check if it
 * /still/ is in an EQS, just that it went through one since @snap.
 *
 * This is meant to be used in a loop waiting for a CPU to go through an EQS.
 */
static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap)
{
	/*
	 * The first failing snapshot is already ordered against the accesses
	 * performed by the remote CPU after it exits idle.
	 *
	 * The second snapshot therefore only needs to order against accesses
	 * performed by the remote CPU prior to entering idle and therefore can
	 * rely solely on acquire semantics.
	 */
	if (WARN_ON_ONCE(rcu_watching_snap_in_eqs(snap)))
		return true;

	return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
}
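
/*
 * Hypothetical polling pattern for the helper above (for illustration
 * only; in this file the real pairing is rcu_watching_snap_save()
 * followed later by rcu_watching_snap_recheck()):
 *
 *	snap = ct_rcu_watching_cpu_acquire(rdp->cpu);	// CPU not in an EQS.
 *	while (!rcu_watching_snap_stopped_since(rdp, snap))
 *		schedule_timeout_idle(1);	// No EQS yet, keep waiting.
 */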

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_watching_zero_in_eqs(int cpu, int *vp)
{
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
	smp_rmb(); // Order CT state and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure.
	smp_rmb(); // Order *vp read and CT state re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == ct_rcu_watching_cpu(cpu);
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_eqs(void)
{
	int seq;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	seq = ct_state_inc(2 * CT_RCU_WATCHING);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_eqs);
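
/*
 * A sketch of why incrementing by 2 * CT_RCU_WATCHING above works,
 * treating CT_RCU_WATCHING as the lowest bit of the RCU-watching
 * counter within the context-tracking state: adding twice that value
 * advances the RCU-watching counter by two, leaving the CT_RCU_WATCHING
 * bit itself unchanged (hence the WARN_ON_ONCE() if it reads as zero)
 * while making any previously taken snapshot compare unequal, exactly
 * as if this CPU had passed through a momentary extended quiescent
 * state.
 */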

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_call_function()
	 * for expedited grace periods. The latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(ct_nesting() < 0,
			 "RCU nesting counter underflow!");
	RCU_LOCKDEP_WARN(ct_nmi_nesting() <= 0,
			 "RCU nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = ct_nmi_nesting();
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return ct_nesting() == 0;
}

#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
				// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
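
/*
 * Worked example under assumed values: with HZ = 1000, nr_cpu_ids = 256,
 * and RCU_JIFFIES_FQS_DIV = 256 (see kernel/rcu/rcu.h), the lower bound
 * above is 1000/10 + 256/256 = 101 jiffies.  So if, say,
 * jiffies_till_first_fqs = 1 and jiffies_till_next_fqs = 1, the computed
 * j = 1 + 2 * 1 = 3 would be raised to 101, giving roughly the promised
 * 100-millisecond floor.
 */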

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
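
/*
 * Because MODULE_PARAM_PREFIX is "rcutree.", the knobs above appear
 * with that prefix on the kernel command line, for example
 * (illustrative values):
 *
 *	rcutree.jiffies_till_first_fqs=2 rcutree.rcu_kick_kthreads=1
 *
 * and, given the 0644 permissions, may also be inspected and set at
 * run time via /sys/module/rcutree/parameters/.
 */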

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = READ_ONCE(rcu_state.gp_flags);
	*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
/*
 * An empty function that will trigger a reschedule on
 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
 */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

/*
 * If either:
 *
 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
 *
 * In these cases the late RCU wakeups aren't supported in the resched loops and our
 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 * get re-enabled again.
 */
noinstr void rcu_irq_work_resched(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
		return;

	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
		return;

	instrumentation_begin();
	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
	}
	instrumentation_end();
}
#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(ct_nesting() <= 0,
			 "RCU nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(ct_nmi_nesting() !=
			 CT_NESTING_IRQ_NONIDLE,
			 "Bad RCU nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_NO_HZ_FULL
/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
#endif /* CONFIG_NO_HZ_FULL */

/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so.  This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API.  This is used by
 * the idle-entry code to figure out whether it is safe to disable the
 * scheduler-clock interrupt.
 *
 * Just check whether or not this CPU has non-offloaded RCU callbacks
 * queued.
 */
int rcu_needs_cpu(void)
{
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

/**
 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
 *
 * Return @true if RCU is watching the running CPU and @false otherwise.
 * A @true return means that this CPU can safely enter RCU read-side
 * critical sections.
 *
 * Although calls to rcu_is_watching() from most parts of the kernel
 * will return @true, there are important exceptions.  For example, if the
 * current CPU is deep within its idle loop, in kernel entry/exit code,
 * or offline, rcu_is_watching() will return @false.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = rcu_is_watching_curr_cpu();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

/*
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
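
/*
 * The ULONG_MAX / 4 guard band above can be pictured with a hypothetical
 * 16-bit unsigned long: the first check fires once rnp->gp_seq has run
 * more than 65535 / 4 = 16383 counts ahead of the rcu_data value, well
 * short of the half-space point at which wrapped comparisons become
 * ambiguous, so ->gpwrap is set while the counters can still be ordered
 * reliably.
 */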

/*
 * Snapshot the specified CPU's RCU_WATCHING counter so that we can later
 * credit it with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int rcu_watching_snap_save(struct rcu_data *rdp)
{
	/*
	 * Full ordering between remote CPU's post idle accesses and updater's
	 * accesses prior to current GP (and also the started GP sequence number)
	 * is enforced by rcu_seq_start() implicit barrier and even further by
	 * smp_mb__after_unlock_lock() barriers chained all the way throughout the
	 * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
	 * locking.
	 *
	 * Ordering between remote CPU's pre idle accesses and post grace period
	 * updater's accesses is enforced by the below acquire semantic.
	 */
	rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
	if (rcu_watching_snap_in_eqs(rdp->watching_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Returns positive if the specified CPU has passed through a quiescent state
 * by virtue of being in or having passed through a dynticks idle state since
 * the last call to rcu_watching_snap_save() for this same CPU, or by
 * virtue of having been offline.
 *
 * Returns negative if the specified CPU needs a force resched.
 *
 * Returns zero otherwise.
 */
static int rcu_watching_snap_recheck(struct rcu_data *rdp)
{
	unsigned long jtsq;
	int ret = 0;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state.  After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline.  This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(&rdp->rcu_urgent_qs, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		ret = -1;
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
			ret = -1;
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}

		if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
			int cpu = rdp->cpu;
			struct rcu_snap_record *rsrp;
			struct kernel_cpustat *kcsp;

			kcsp = &kcpustat_cpu(cpu);

			rsrp = &rdp->snap_record;
			rsrp->cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
			rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
			rsrp->cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
			rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
			rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
			rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
			rsrp->jiffies = jiffies;
			rsrp->gp_seq = rdp->gp_seq;
		}
	}

	return ret;
}

/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP kthread needs to be awakened, else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started.  If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking.  Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

static void swake_up_one_online_ipi(void *arg)
{
	struct swait_queue_head *wqh = arg;

	swake_up_one(wqh);
}

static void swake_up_one_online(struct swait_queue_head *wqh)
{
	int cpu = get_cpu();

	/*
	 * If called from rcutree_report_cpu_starting(), a wakeup is
	 * dangerous that late in the CPU-down hotplug process: the
	 * scheduler might queue an hrtimer that would then be ignored.
	 * Defer the wakeup to an online CPU instead.
	 */
	if (unlikely(cpu_is_offline(cpu))) {
		int target;

		target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
					 cpu_online_mask);

		smp_call_function_single(target, swake_up_one_online_ipi,
					 wqh, 0);
		put_cpu();
	} else {
		put_cpu();
		swake_up_one(wqh);
	}
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, we lost), and finally don't try
 * to awaken a kthread that has not yet been created.  If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one_online(&rcu_state.gp_wq);
}

/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned.  Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure.  This function is idempotent, so it does not hurt
 * to call it repeatedly.  Returns a flag saying that we should awaken
 * the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	unsigned long gp_seq_req;
	bool ret = false;

	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));

	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information.  Something about the fact that getting exact
	 * information requires acquiring a global lock...  RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke.  The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));

	return ret;
}

/*
 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
 * rcu_node structure's ->lock be held.  It consults the cached value
 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
 * while holding the leaf rcu_node structure's ->lock.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
					struct rcu_data *rdp)
{
	unsigned long c;
	bool needwake;

	rcu_lockdep_assert_cblist_protected(rdp);
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		/* Old request still live, so mark recent callbacks. */
		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
		return;
	}
	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
	needwake = rcu_accelerate_cbs(rnp, rdp);
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	if (needwake)
		rcu_gp_kthread_wake();
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  As long as it is not invoked -too- often...
 * Returns true if the RCU grace-period kthread needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);

	/* Classify any remaining callbacks. */
	return rcu_accelerate_cbs(rnp, rdp);
}
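
/*
 * For orientation, the rcu_segcblist that the two functions above
 * operate on is segmented into four sublists (see
 * include/linux/rcu_segcblist.h):
 *
 *	RCU_DONE_TAIL:       callbacks whose grace period has elapsed.
 *	RCU_WAIT_TAIL:       callbacks waiting for the current grace period.
 *	RCU_NEXT_READY_TAIL: callbacks waiting for the next grace period.
 *	RCU_NEXT_TAIL:       callbacks not yet associated with a grace period.
 *
 * rcu_segcblist_advance() drains newly completed sublists into
 * RCU_DONE_TAIL, and rcu_segcblist_accelerate() assigns ->gp_seq
 * numbers to the remaining stragglers.
 */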

/*
 * Move and classify callbacks, but only if doing so won't require
 * that the RCU grace-period kthread be awakened.
 */
static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
						  struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
		return;
	// The grace period cannot end while we hold the rcu_node lock.
	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
	raw_spin_unlock_rcu_node(rnp);
}

/*
 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
 * quiescent state.  This is intended to be invoked when the CPU notices
 * a new grace period.
 */
static void rcu_strict_gp_check_qs(void)
{
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{
	bool ret = false;
	bool need_qs;
	const bool offloaded = rcu_rdp_is_offloaded(rdp);

	raw_lockdep_assert_held_rcu_node(rnp);

	if (rdp->gp_seq == rnp->gp_seq)
		return false; /* Nothing to do. */

	/* Handle the ends of any preceding grace periods first. */
	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		if (!offloaded)
			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
		rdp->core_needs_qs = false;
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
	} else {
		if (!offloaded)
			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
		if (rdp->core_needs_qs)
			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
	}

	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
		need_qs = !!(rnp->qsmask & rdp->grpmask);
		rdp->cpu_no_qs.b.norm = need_qs;
		rdp->core_needs_qs = need_qs;
		zero_cpu_stall_ticks(rdp);
	}
	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
		WRITE_ONCE(rdp->last_sched_clock, jiffies);
	WRITE_ONCE(rdp->gpwrap, false);
	rcu_gpnum_ovf(rnp, rdp);
	return ret;
}

static void note_gp_changes(struct rcu_data *rdp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	needwake = __note_gp_changes(rnp, rdp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	rcu_strict_gp_check_qs();
	if (needwake)
		rcu_gp_kthread_wake();
}

static atomic_t *rcu_gp_slow_suppress;

/* Register a counter to suppress debugging grace-period delays. */
void rcu_gp_slow_register(atomic_t *rgssp)
{
	WARN_ON_ONCE(rcu_gp_slow_suppress);

	WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
}
EXPORT_SYMBOL_GPL(rcu_gp_slow_register);

/* Unregister a counter, with NULL for not caring which. */
void rcu_gp_slow_unregister(atomic_t *rgssp)
{
	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);

	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
}
EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);

static bool rcu_gp_slow_is_suppressed(void)
{
	atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);

	return rgssp && atomic_read(rgssp);
}

static void rcu_gp_slow(int delay)
{
	if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
	    !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
		schedule_timeout_idle(delay);
}

static unsigned long sleep_duration;

/* Allow rcutorture to stall the grace-period kthread. */
void rcu_gp_set_torture_wait(int duration)
{
	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
		WRITE_ONCE(sleep_duration, duration);
}
EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);

/* Actually implement the aforementioned wait. */
static void rcu_gp_torture_wait(void)
{
	unsigned long duration;

	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
		return;
	duration = xchg(&sleep_duration, 0UL);
	if (duration > 0) {
		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
		schedule_timeout_idle(duration);
		pr_alert("%s: Wait complete\n", __func__);
	}
}

/*
 * Handler for on_each_cpu() to invoke the target CPU's RCU core
 * processing.
 */
static void rcu_strict_gp_boundary(void *unused)
{
	invoke_rcu_core();
}

// Make the polled API aware of the beginning of a grace period.
static void rcu_poll_gp_seq_start(unsigned long *snap)
{
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
		raw_lockdep_assert_held_rcu_node(rnp);

	// If RCU was idle, note beginning of GP.
	if (!rcu_seq_state(rcu_state.gp_seq_polled))
		rcu_seq_start(&rcu_state.gp_seq_polled);

	// Either way, record current state.
	*snap = rcu_state.gp_seq_polled;
}

// Make the polled API aware of the end of a grace period.
static void rcu_poll_gp_seq_end(unsigned long *snap)
{
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
		raw_lockdep_assert_held_rcu_node(rnp);

	// If the previously noted GP is still in effect, record the
	// end of that GP.  Either way, zero counter to avoid counter-wrap
	// problems.
	if (*snap && *snap == rcu_state.gp_seq_polled) {
		rcu_seq_end(&rcu_state.gp_seq_polled);
		rcu_state.gp_seq_polled_snap = 0;
		rcu_state.gp_seq_polled_exp_snap = 0;
	} else {
		*snap = 0;
	}
}

// Make the polled API aware of the beginning of a grace period, but
// where caller does not hold the root rcu_node structure's lock.
static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked()) {
		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
			lockdep_assert_irqs_enabled();
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	rcu_poll_gp_seq_start(snap);
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Make the polled API aware of the end of a grace period, but where
// caller does not hold the root rcu_node structure's lock.
static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked()) {
		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
			lockdep_assert_irqs_enabled();
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	rcu_poll_gp_seq_end(snap);
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
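
/*
 * The gp_seq_polled bookkeeping above backs RCU's polled grace-period
 * interface.  A typical user looks something like this (illustrative
 * only; see get_state_synchronize_rcu() and friends for the real API
 * documentation):
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	// ... time passes ...
 *
 *	if (poll_state_synchronize_rcu(cookie))
 *		;  // A full grace period has elapsed, no need to block.
 *	else
 *		synchronize_rcu();  // Wait for one directly.
 */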

/*
 * There is a single llist, which is used for handling
 * synchronize_rcu() users' enqueued rcu_synchronize nodes.
 * Within this llist, there are two tail pointers:
 *
 * wait tail: Tracks the set of nodes which need to
 *            wait for the current GP to complete.
 * done tail: Tracks the set of nodes for which the grace
 *            period has elapsed. Processing of these nodes
 *            is done as part of the cleanup work executed
 *            by a kworker.
 *
 * At every grace-period init, a new wait node is added
 * to the llist. This wait node is used as the wait tail
 * for the new grace period. Given that there are a fixed
 * number of wait nodes, all of them can be in use at once
 * (which can happen when kworker callback processing is
 * delayed) while an additional grace period is requested;
 * this means the system is slow in processing callbacks.
 *
 * TODO: If slow processing is detected, the first node
 * in the llist should be used as the wait tail for this
 * grace period, so that users which must wait due to the
 * slow processing are handled by _this_ grace period
 * and not the next.
1479  * Below is an illustration of how the done and wait
1480  * tail pointers move from one set of rcu_synchronize nodes
1481  * to the other, as grace periods start and finish and
1482  * nodes are processed by kworker.
1483  *
1484  *
1485  * a. Initial llist callbacks list:
1486  *
1487  * +----------+           +--------+          +-------+
1488  * |          |           |        |          |       |
1489  * |   head   |---------> |   cb2  |--------->| cb1   |
1490  * |          |           |        |          |       |
1491  * +----------+           +--------+          +-------+
1492  *
1493  *
1494  *
1495  * b. New GP1 Start:
1496  *
1497  *                    WAIT TAIL
1498  *                      |
1499  *                      |
1500  *                      v
1501  * +----------+     +--------+      +--------+        +-------+
1502  * |          |     |        |      |        |        |       |
1503  * |   head   ------> wait   |------>   cb2  |------> |  cb1  |
1504  * |          |     | head1  |      |        |        |       |
1505  * +----------+     +--------+      +--------+        +-------+
1506  *
1507  *
1508  *
1509  * c. GP completion:
1510  *
1511  * WAIT_TAIL == DONE_TAIL
1512  *
1513  *                   DONE TAIL
1514  *                     |
1515  *                     |
1516  *                     v
1517  * +----------+     +--------+      +--------+        +-------+
1518  * |          |     |        |      |        |        |       |
1519  * |   head   ------> wait   |------>   cb2  |------> |  cb1  |
1520  * |          |     | head1  |      |        |        |       |
1521  * +----------+     +--------+      +--------+        +-------+
1522  *
1523  *
1524  *
1525  * d. New callbacks and GP2 start:
1526  *
1527  *                    WAIT TAIL                          DONE TAIL
1528  *                      |                                 |
1529  *                      |                                 |
1530  *                      v                                 v
1531  * +----------+     +------+    +------+    +------+    +-----+    +-----+    +-----+
1532  * |          |     |      |    |      |    |      |    |     |    |     |    |     |
1533  * |   head   ------> wait |--->|  cb4 |--->| cb3  |--->|wait |--->| cb2 |--->| cb1 |
1534  * |          |     | head2|    |      |    |      |    |head1|    |     |    |     |
1535  * +----------+     +------+    +------+    +------+    +-----+    +-----+    +-----+
1536  *
1537  *
1538  *
1539  * e. GP2 completion:
1540  *
1541  * WAIT_TAIL == DONE_TAIL
1542  *                   DONE TAIL
1543  *                      |
1544  *                      |
1545  *                      v
1546  * +----------+     +------+    +------+    +------+    +-----+    +-----+    +-----+
1547  * |          |     |      |    |      |    |      |    |     |    |     |    |     |
1548  * |   head   ------> wait |--->|  cb4 |--->| cb3  |--->|wait |--->| cb2 |--->| cb1 |
1549  * |          |     | head2|    |      |    |      |    |head1|    |     |    |     |
1550  * +----------+     +------+    +------+    +------+    +-----+    +-----+    +-----+
1551  *
1552  *
 * While the llist state transitions from d to e, a kworker
 * can start executing rcu_sr_normal_gp_cleanup_work() and
 * can observe either the old done tail (@c) or the new
 * done tail (@e). So, updates and reads of the done tail
 * need to use release-acquire semantics. If the concurrent
 * kworker observes the old done tail, the newly queued work
 * execution will process the updated done tail. If the
 * concurrent kworker observes the new done tail, then
 * the newly queued work will skip processing the done
 * tail, as workqueue semantics guarantee that the new
 * work is executed only after the previous one completes.
1564  *
1565  * f. kworker callbacks processing complete:
1566  *
1567  *
1568  *                   DONE TAIL
1569  *                     |
1570  *                     |
1571  *                     v
1572  * +----------+     +--------+
1573  * |          |     |        |
1574  * |   head   ------> wait   |
1575  * |          |     | head2  |
1576  * +----------+     +--------+
1577  *
1578  */
1579 static bool rcu_sr_is_wait_head(struct llist_node *node)
1580 {
1581 	return &(rcu_state.srs_wait_nodes)[0].node <= node &&
1582 		node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node;
1583 }
1584 
1585 static struct llist_node *rcu_sr_get_wait_head(void)
1586 {
1587 	struct sr_wait_node *sr_wn;
1588 	int i;
1589 
1590 	for (i = 0; i < SR_NORMAL_GP_WAIT_HEAD_MAX; i++) {
1591 		sr_wn = &(rcu_state.srs_wait_nodes)[i];
1592 
1593 		if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1))
1594 			return &sr_wn->node;
1595 	}
1596 
1597 	return NULL;
1598 }
1599 
1600 static void rcu_sr_put_wait_head(struct llist_node *node)
1601 {
1602 	struct sr_wait_node *sr_wn = container_of(node, struct sr_wait_node, node);
1603 
1604 	atomic_set_release(&sr_wn->inuse, 0);
1605 }
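
/*
 * Editor's sketch of the claim/release protocol above in isolation
 * ("example_slot" is an invented name): the acquire on a successful
 * cmpxchg pairs with the release on the put, so a node's prior use
 * cannot be reordered past its reuse.
 */
static atomic_t example_slot = ATOMIC_INIT(0);

static bool __maybe_unused example_claim_slot(void)
{
	/* An old value of 0 means the cmpxchg succeeded and we own it. */
	return !atomic_cmpxchg_acquire(&example_slot, 0, 1);
}

static void __maybe_unused example_release_slot(void)
{
	atomic_set_release(&example_slot, 0);
}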
1606 
1607 /* Disabled by default. */
1608 static int rcu_normal_wake_from_gp;
1609 module_param(rcu_normal_wake_from_gp, int, 0644);
1610 static struct workqueue_struct *sync_wq;
1611 
1612 static void rcu_sr_normal_complete(struct llist_node *node)
1613 {
1614 	struct rcu_synchronize *rs = container_of(
1615 		(struct rcu_head *) node, struct rcu_synchronize, head);
1616 	unsigned long oldstate = (unsigned long) rs->head.func;
1617 
1618 	WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
1619 		!poll_state_synchronize_rcu(oldstate),
1620 		"A full grace period is not passed yet: %lu",
1621 		rcu_seq_diff(get_state_synchronize_rcu(), oldstate));
1622 
1623 	/* Finally. */
1624 	complete(&rs->completion);
1625 }
1626 
1627 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
1628 {
1629 	struct llist_node *done, *rcu, *next, *head;
1630 
1631 	/*
	 * This work can execute while a new done tail is
	 * being updated by the grace-period kthread in
	 * rcu_sr_normal_gp_cleanup(), so reads and updates
	 * of the done tail need to follow acquire-release
	 * semantics.
	 *
	 * Given that workqueue semantics guarantee that a single
	 * work item cannot be executed concurrently by multiple
	 * kworkers, the done-tail list manipulations are safe here.
1641 	 */
1642 	done = smp_load_acquire(&rcu_state.srs_done_tail);
1643 	if (WARN_ON_ONCE(!done))
1644 		return;
1645 
1646 	WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
1647 	head = done->next;
1648 	done->next = NULL;
1649 
1650 	/*
	 * The dummy node pointed to by the acquire-read done
	 * tail above is not removed here.  This allows lockless
	 * additions of new rcu_synchronize nodes in
	 * rcu_sr_normal_add_req() while the cleanup work
	 * executes.  The dummy node is instead removed in the
	 * next round of cleanup-work execution.
1658 	 */
1659 	llist_for_each_safe(rcu, next, head) {
1660 		if (!rcu_sr_is_wait_head(rcu)) {
1661 			rcu_sr_normal_complete(rcu);
1662 			continue;
1663 		}
1664 
1665 		rcu_sr_put_wait_head(rcu);
1666 	}
1667 
1668 	/* Order list manipulations with atomic access. */
1669 	atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
1670 }
1671 
1672 /*
1673  * Helper function for rcu_gp_cleanup().
1674  */
1675 static void rcu_sr_normal_gp_cleanup(void)
1676 {
1677 	struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
1678 	int done = 0;
1679 
1680 	wait_tail = rcu_state.srs_wait_tail;
1681 	if (wait_tail == NULL)
1682 		return;
1683 
1684 	rcu_state.srs_wait_tail = NULL;
1685 	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1686 	WARN_ON_ONCE(!rcu_sr_is_wait_head(wait_tail));
1687 
1688 	/*
1689 	 * Process (a) and (d) cases. See an illustration.
1690 	 */
1691 	llist_for_each_safe(rcu, next, wait_tail->next) {
1692 		if (rcu_sr_is_wait_head(rcu))
1693 			break;
1694 
1695 		rcu_sr_normal_complete(rcu);
		// This node can be the last one, so keep ->next updated at each step.
1697 		wait_tail->next = next;
1698 
1699 		if (++done == SR_MAX_USERS_WAKE_FROM_GP)
1700 			break;
1701 	}
1702 
1703 	/*
	 * Fast path: no more users to process except putting the second-to-last
	 * wait head if there are no in-flight workers. If there are in-flight
	 * workers, they will remove the last wait head.
1707 	 *
1708 	 * Note that the ACQUIRE orders atomic access with list manipulation.
1709 	 */
1710 	if (wait_tail->next && wait_tail->next->next == NULL &&
1711 	    rcu_sr_is_wait_head(wait_tail->next) &&
1712 	    !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
1713 		rcu_sr_put_wait_head(wait_tail->next);
1714 		wait_tail->next = NULL;
1715 	}
1716 
1717 	/* Concurrent sr_normal_gp_cleanup work might observe this update. */
1718 	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
1719 	smp_store_release(&rcu_state.srs_done_tail, wait_tail);
1720 
1721 	/*
	 * Schedule a work item in order to perform final processing of
	 * any outstanding users (if some are left) and to release the
	 * wait heads added by rcu_sr_normal_gp_init().
1725 	 */
1726 	if (wait_tail->next) {
1727 		atomic_inc(&rcu_state.srs_cleanups_pending);
1728 		if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
1729 			atomic_dec(&rcu_state.srs_cleanups_pending);
1730 	}
1731 }
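
/*
 * Editor's sketch of the srs_done_tail handoff: rcu_sr_normal_gp_cleanup()
 * publishes the new done tail with a release store, and the kworker in
 * rcu_sr_normal_gp_cleanup_work() takes it with an acquire load, so the
 * list edits preceding the publish are visible before the list is walked.
 * The bare pairing, with invented names:
 */
static struct llist_node *example_published;	/* hypothetical shared slot */

static void __maybe_unused example_publish(struct llist_node *tail)
{
	/* All prior stores to the list are ordered before this one. */
	smp_store_release(&example_published, tail);
}

static __maybe_unused struct llist_node *example_consume(void)
{
	/* Pairs with the release above; the list contents are stable. */
	return smp_load_acquire(&example_published);
}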
1732 
1733 /*
1734  * Helper function for rcu_gp_init().
1735  */
1736 static bool rcu_sr_normal_gp_init(void)
1737 {
1738 	struct llist_node *first;
1739 	struct llist_node *wait_head;
1740 	bool start_new_poll = false;
1741 
1742 	first = READ_ONCE(rcu_state.srs_next.first);
1743 	if (!first || rcu_sr_is_wait_head(first))
1744 		return start_new_poll;
1745 
1746 	wait_head = rcu_sr_get_wait_head();
1747 	if (!wait_head) {
1748 		// Kick another GP to retry.
1749 		start_new_poll = true;
1750 		return start_new_poll;
1751 	}
1752 
1753 	/* Inject a wait-dummy-node. */
1754 	llist_add(wait_head, &rcu_state.srs_next);
1755 
1756 	/*
	 * The wait list of rcu_synchronize nodes should be empty at
	 * this point, since the GP kthread's rcu_gp_init() -> gp_cleanup()
	 * sequence rolls it over.  If not, it is a bug, so warn the user.
1760 	 */
1761 	WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL);
1762 	rcu_state.srs_wait_tail = wait_head;
1763 	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1764 
1765 	return start_new_poll;
1766 }
1767 
1768 static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
1769 {
1770 	llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
1771 }
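
/*
 * Editor's sketch of the requester side that feeds the llist above.  This
 * is a hedged reconstruction of what the synchronize_rcu() slow path is
 * expected to do when rcu_normal_wake_from_gp is set, not a quote of that
 * code: stash a GP cookie in the otherwise-unused ->func field (checked
 * later by rcu_sr_normal_complete()), enqueue, kick a GP, then block.
 */
static void __maybe_unused example_sr_wait(void)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		rs.head.func = (void *) get_state_synchronize_rcu();
	rcu_sr_normal_add_req(&rs);
	(void) start_poll_synchronize_rcu();	/* Make sure a GP is started. */
	wait_for_completion(&rs.completion);	/* Completed by the GP machinery. */
	destroy_rcu_head_on_stack(&rs.head);
}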
1772 
1773 /*
1774  * Initialize a new grace period.  Return false if no grace period required.
1775  */
1776 static noinline_for_stack bool rcu_gp_init(void)
1777 {
1778 	unsigned long flags;
1779 	unsigned long oldmask;
1780 	unsigned long mask;
1781 	struct rcu_data *rdp;
1782 	struct rcu_node *rnp = rcu_get_root();
1783 	bool start_new_poll;
1784 
1785 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1786 	raw_spin_lock_irq_rcu_node(rnp);
1787 	if (!rcu_state.gp_flags) {
1788 		/* Spurious wakeup, tell caller to go back to sleep.  */
1789 		raw_spin_unlock_irq_rcu_node(rnp);
1790 		return false;
1791 	}
1792 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1793 
1794 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1795 		/*
1796 		 * Grace period already in progress, don't start another.
1797 		 * Not supposed to be able to happen.
1798 		 */
1799 		raw_spin_unlock_irq_rcu_node(rnp);
1800 		return false;
1801 	}
1802 
1803 	/* Advance to a new grace period and initialize state. */
1804 	record_gp_stall_check_time();
1805 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1806 	rcu_seq_start(&rcu_state.gp_seq);
1807 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1808 	start_new_poll = rcu_sr_normal_gp_init();
1809 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1810 	rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1811 	raw_spin_unlock_irq_rcu_node(rnp);
1812 
1813 	/*
	 * "start_new_poll" is set to true only when this GP is not able
	 * to handle anything and there are outstanding users. That happens
	 * when rcu_sr_normal_gp_init() was not able to insert a dummy
	 * separator into the llist, because no dummy nodes were left.
	 *
	 * The number of dummy nodes is fixed, so it is possible to run out
	 * of them; if so, we start a new poll request to retry. This is
	 * rare, and it means that the system is slow in processing callbacks.
1822 	 */
1823 	if (start_new_poll)
1824 		(void) start_poll_synchronize_rcu();
1825 
1826 	/*
1827 	 * Apply per-leaf buffered online and offline operations to
1828 	 * the rcu_node tree. Note that this new grace period need not
1829 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1830 	 * offlining path, when combined with checks in this function,
1831 	 * will handle CPUs that are currently going offline or that will
1832 	 * go offline later.  Please also refer to "Hotplug CPU" section
1833 	 * of RCU's Requirements documentation.
1834 	 */
1835 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1836 	/* Exclude CPU hotplug operations. */
1837 	rcu_for_each_leaf_node(rnp) {
1838 		local_irq_disable();
1839 		arch_spin_lock(&rcu_state.ofl_lock);
1840 		raw_spin_lock_rcu_node(rnp);
1841 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1842 		    !rnp->wait_blkd_tasks) {
1843 			/* Nothing to do on this leaf rcu_node structure. */
1844 			raw_spin_unlock_rcu_node(rnp);
1845 			arch_spin_unlock(&rcu_state.ofl_lock);
1846 			local_irq_enable();
1847 			continue;
1848 		}
1849 
1850 		/* Record old state, apply changes to ->qsmaskinit field. */
1851 		oldmask = rnp->qsmaskinit;
1852 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1853 
1854 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1855 		if (!oldmask != !rnp->qsmaskinit) {
1856 			if (!oldmask) { /* First online CPU for rcu_node. */
1857 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1858 					rcu_init_new_rnp(rnp);
1859 			} else if (rcu_preempt_has_tasks(rnp)) {
1860 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1861 			} else { /* Last offline CPU and can propagate. */
1862 				rcu_cleanup_dead_rnp(rnp);
1863 			}
1864 		}
1865 
1866 		/*
1867 		 * If all waited-on tasks from prior grace period are
1868 		 * done, and if all this rcu_node structure's CPUs are
1869 		 * still offline, propagate up the rcu_node tree and
1870 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1871 		 * rcu_node structure's CPUs has since come back online,
1872 		 * simply clear ->wait_blkd_tasks.
1873 		 */
1874 		if (rnp->wait_blkd_tasks &&
1875 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1876 			rnp->wait_blkd_tasks = false;
1877 			if (!rnp->qsmaskinit)
1878 				rcu_cleanup_dead_rnp(rnp);
1879 		}
1880 
1881 		raw_spin_unlock_rcu_node(rnp);
1882 		arch_spin_unlock(&rcu_state.ofl_lock);
1883 		local_irq_enable();
1884 	}
1885 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1886 
1887 	/*
1888 	 * Set the quiescent-state-needed bits in all the rcu_node
1889 	 * structures for all currently online CPUs in breadth-first
1890 	 * order, starting from the root rcu_node structure, relying on the
1891 	 * layout of the tree within the rcu_state.node[] array.  Note that
1892 	 * other CPUs will access only the leaves of the hierarchy, thus
1893 	 * seeing that no grace period is in progress, at least until the
1894 	 * corresponding leaf node has been initialized.
1895 	 *
1896 	 * The grace period cannot complete until the initialization
1897 	 * process finishes, because this kthread handles both.
1898 	 */
1899 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1900 	rcu_for_each_node_breadth_first(rnp) {
1901 		rcu_gp_slow(gp_init_delay);
1902 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1903 		rdp = this_cpu_ptr(&rcu_data);
1904 		rcu_preempt_check_blocked_tasks(rnp);
1905 		rnp->qsmask = rnp->qsmaskinit;
1906 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1907 		if (rnp == rdp->mynode)
1908 			(void)__note_gp_changes(rnp, rdp);
1909 		rcu_preempt_boost_start_gp(rnp);
1910 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1911 					    rnp->level, rnp->grplo,
1912 					    rnp->grphi, rnp->qsmask);
1913 		/* Quiescent states for tasks on any now-offline CPUs. */
1914 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1915 		rnp->rcu_gp_init_mask = mask;
1916 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1917 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1918 		else
1919 			raw_spin_unlock_irq_rcu_node(rnp);
1920 		cond_resched_tasks_rcu_qs();
1921 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1922 	}
1923 
1924 	// If strict, make all CPUs aware of new grace period.
1925 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1926 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1927 
1928 	return true;
1929 }
1930 
1931 /*
1932  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1933  * time.
1934  */
1935 static bool rcu_gp_fqs_check_wake(int *gfp)
1936 {
1937 	struct rcu_node *rnp = rcu_get_root();
1938 
1939 	// If under overload conditions, force an immediate FQS scan.
1940 	if (*gfp & RCU_GP_FLAG_OVLD)
1941 		return true;
1942 
1943 	// Someone like call_rcu() requested a force-quiescent-state scan.
1944 	*gfp = READ_ONCE(rcu_state.gp_flags);
1945 	if (*gfp & RCU_GP_FLAG_FQS)
1946 		return true;
1947 
1948 	// The current grace period has completed.
1949 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1950 		return true;
1951 
1952 	return false;
1953 }
1954 
1955 /*
1956  * Do one round of quiescent-state forcing.
1957  */
1958 static void rcu_gp_fqs(bool first_time)
1959 {
1960 	int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
1961 	struct rcu_node *rnp = rcu_get_root();
1962 
1963 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1964 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1965 
1966 	WARN_ON_ONCE(nr_fqs > 3);
1967 	/* Only countdown nr_fqs for stall purposes if jiffies moves. */
1968 	if (nr_fqs) {
1969 		if (nr_fqs == 1) {
1970 			WRITE_ONCE(rcu_state.jiffies_stall,
1971 				   jiffies + rcu_jiffies_till_stall_check());
1972 		}
1973 		WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
1974 	}
1975 
1976 	if (first_time) {
1977 		/* Collect dyntick-idle snapshots. */
1978 		force_qs_rnp(rcu_watching_snap_save);
1979 	} else {
1980 		/* Handle dyntick-idle and offline CPUs. */
1981 		force_qs_rnp(rcu_watching_snap_recheck);
1982 	}
1983 	/* Clear flag to prevent immediate re-entry. */
1984 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1985 		raw_spin_lock_irq_rcu_node(rnp);
1986 		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & ~RCU_GP_FLAG_FQS);
1987 		raw_spin_unlock_irq_rcu_node(rnp);
1988 	}
1989 }
1990 
1991 /*
1992  * Loop doing repeated quiescent-state forcing until the grace period ends.
1993  */
1994 static noinline_for_stack void rcu_gp_fqs_loop(void)
1995 {
1996 	bool first_gp_fqs = true;
1997 	int gf = 0;
1998 	unsigned long j;
1999 	int ret;
2000 	struct rcu_node *rnp = rcu_get_root();
2001 
2002 	j = READ_ONCE(jiffies_till_first_fqs);
2003 	if (rcu_state.cbovld)
2004 		gf = RCU_GP_FLAG_OVLD;
2005 	ret = 0;
2006 	for (;;) {
2007 		if (rcu_state.cbovld) {
2008 			j = (j + 2) / 3;
2009 			if (j <= 0)
2010 				j = 1;
2011 		}
2012 		if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
2013 			WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
2014 			/*
2015 			 * jiffies_force_qs before RCU_GP_WAIT_FQS state
2016 			 * update; required for stall checks.
2017 			 */
2018 			smp_wmb();
2019 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
2020 				   jiffies + (j ? 3 * j : 2));
2021 		}
2022 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2023 				       TPS("fqswait"));
2024 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
2025 		(void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
2026 				 rcu_gp_fqs_check_wake(&gf), j);
2027 		rcu_gp_torture_wait();
2028 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
2029 		/* Locking provides needed memory barriers. */
2030 		/*
2031 		 * Exit the loop if the root rcu_node structure indicates that the grace period
		 * Exit the loop if the root rcu_node structure indicates that the grace period
		 * has ended.  The rcu_preempt_blocked_readers_cgp(rnp) check
2033 		 * is required only for single-node rcu_node trees because readers blocking
2034 		 * the current grace period are queued only on leaf rcu_node structures.
2035 		 * For multi-node trees, checking the root node's ->qsmask suffices, because a
2036 		 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
2037 		 * the corresponding leaf nodes have passed through their quiescent state.
2038 		 */
2039 		if (!READ_ONCE(rnp->qsmask) &&
2040 		    !rcu_preempt_blocked_readers_cgp(rnp))
2041 			break;
2042 		/* If time for quiescent-state forcing, do it. */
2043 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
2044 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
2045 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2046 					       TPS("fqsstart"));
2047 			rcu_gp_fqs(first_gp_fqs);
2048 			gf = 0;
2049 			if (first_gp_fqs) {
2050 				first_gp_fqs = false;
2051 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
2052 			}
2053 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2054 					       TPS("fqsend"));
2055 			cond_resched_tasks_rcu_qs();
2056 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2057 			ret = 0; /* Force full wait till next FQS. */
2058 			j = READ_ONCE(jiffies_till_next_fqs);
2059 		} else {
2060 			/* Deal with stray signal. */
2061 			cond_resched_tasks_rcu_qs();
2062 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2063 			WARN_ON(signal_pending(current));
2064 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2065 					       TPS("fqswaitsig"));
2066 			ret = 1; /* Keep old FQS timing. */
2067 			j = jiffies;
2068 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
2069 				j = 1;
2070 			else
2071 				j = rcu_state.jiffies_force_qs - j;
2072 			gf = 0;
2073 		}
2074 	}
2075 }
2076 
2077 /*
2078  * Clean up after the old grace period.
2079  */
2080 static noinline void rcu_gp_cleanup(void)
2081 {
2082 	int cpu;
2083 	bool needgp = false;
2084 	unsigned long gp_duration;
2085 	unsigned long new_gp_seq;
2086 	bool offloaded;
2087 	struct rcu_data *rdp;
2088 	struct rcu_node *rnp = rcu_get_root();
2089 	struct swait_queue_head *sq;
2090 
2091 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
2092 	raw_spin_lock_irq_rcu_node(rnp);
2093 	rcu_state.gp_end = jiffies;
2094 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2095 	if (gp_duration > rcu_state.gp_max)
2096 		rcu_state.gp_max = gp_duration;
2097 
2098 	/*
2099 	 * We know the grace period is complete, but to everyone else
2100 	 * it appears to still be ongoing.  But it is also the case
2101 	 * that to everyone else it looks like there is nothing that
2102 	 * they can do to advance the grace period.  It is therefore
2103 	 * safe for us to drop the lock in order to mark the grace
2104 	 * period as completed in all of the rcu_node structures.
2105 	 */
2106 	rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
2107 	raw_spin_unlock_irq_rcu_node(rnp);
2108 
2109 	/*
2110 	 * Propagate new ->gp_seq value to rcu_node structures so that
2111 	 * other CPUs don't have to wait until the start of the next grace
2112 	 * period to process their callbacks.  This also avoids some nasty
2113 	 * RCU grace-period initialization races by forcing the end of
2114 	 * the current grace period to be completely recorded in all of
2115 	 * the rcu_node structures before the beginning of the next grace
2116 	 * period is recorded in any of the rcu_node structures.
2117 	 */
2118 	new_gp_seq = rcu_state.gp_seq;
2119 	rcu_seq_end(&new_gp_seq);
2120 	rcu_for_each_node_breadth_first(rnp) {
2121 		raw_spin_lock_irq_rcu_node(rnp);
2122 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2123 			dump_blkd_tasks(rnp, 10);
2124 		WARN_ON_ONCE(rnp->qsmask);
2125 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2126 		if (!rnp->parent)
2127 			smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
2128 		rdp = this_cpu_ptr(&rcu_data);
2129 		if (rnp == rdp->mynode)
2130 			needgp = __note_gp_changes(rnp, rdp) || needgp;
2131 		/* smp_mb() provided by prior unlock-lock pair. */
2132 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
2133 		// Reset overload indication for CPUs no longer overloaded
2134 		if (rcu_is_leaf_node(rnp))
2135 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2136 				rdp = per_cpu_ptr(&rcu_data, cpu);
2137 				check_cb_ovld_locked(rdp, rnp);
2138 			}
2139 		sq = rcu_nocb_gp_get(rnp);
2140 		raw_spin_unlock_irq_rcu_node(rnp);
2141 		rcu_nocb_gp_cleanup(sq);
2142 		cond_resched_tasks_rcu_qs();
2143 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
2144 		rcu_gp_slow(gp_cleanup_delay);
2145 	}
2146 	rnp = rcu_get_root();
2147 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2148 
2149 	/* Declare grace period done, trace first to use old GP number. */
2150 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2151 	rcu_seq_end(&rcu_state.gp_seq);
2152 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2153 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2154 	/* Check for GP requests since above loop. */
2155 	rdp = this_cpu_ptr(&rcu_data);
2156 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2157 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2158 				  TPS("CleanupMore"));
2159 		needgp = true;
2160 	}
2161 	/* Advance CBs to reduce false positives below. */
2162 	offloaded = rcu_rdp_is_offloaded(rdp);
2163 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2164 
2165 		// We get here if a grace period was needed (“needgp”)
2166 		// and the above call to rcu_accelerate_cbs() did not set
		// the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
2168 		// the need for another grace period).  The purpose
2169 		// of the “offloaded” check is to avoid invoking
2170 		// rcu_accelerate_cbs() on an offloaded CPU because we do not
2171 		// hold the ->nocb_lock needed to safely access an offloaded
2172 		// ->cblist.  We do not want to acquire that lock because
2173 		// it can be heavily contended during callback floods.
2174 
2175 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2176 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2177 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
2178 	} else {
2179 
2180 		// We get here either if there is no need for an
2181 		// additional grace period or if rcu_accelerate_cbs() has
2182 		// already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 
2183 		// So all we need to do is to clear all of the other
2184 		// ->gp_flags bits.
2185 
2186 		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2187 	}
2188 	raw_spin_unlock_irq_rcu_node(rnp);
2189 
2190 	// Make synchronize_rcu() users aware of the end of old grace period.
2191 	rcu_sr_normal_gp_cleanup();
2192 
2193 	// If strict, make all CPUs aware of the end of the old grace period.
2194 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2195 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2196 }
2197 
2198 /*
2199  * Body of kthread that handles grace periods.
2200  */
2201 static int __noreturn rcu_gp_kthread(void *unused)
2202 {
2203 	rcu_bind_gp_kthread();
2204 	for (;;) {
2205 
2206 		/* Handle grace-period start. */
2207 		for (;;) {
2208 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2209 					       TPS("reqwait"));
2210 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2211 			swait_event_idle_exclusive(rcu_state.gp_wq,
2212 					 READ_ONCE(rcu_state.gp_flags) &
2213 					 RCU_GP_FLAG_INIT);
2214 			rcu_gp_torture_wait();
2215 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2216 			/* Locking provides needed memory barrier. */
2217 			if (rcu_gp_init())
2218 				break;
2219 			cond_resched_tasks_rcu_qs();
2220 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2221 			WARN_ON(signal_pending(current));
2222 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2223 					       TPS("reqwaitsig"));
2224 		}
2225 
2226 		/* Handle quiescent-state forcing. */
2227 		rcu_gp_fqs_loop();
2228 
2229 		/* Handle grace-period end. */
2230 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2231 		rcu_gp_cleanup();
2232 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
2233 	}
2234 }
2235 
2236 /*
2237  * Report a full set of quiescent states to the rcu_state data structure.
2238  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2239  * another grace period is required.  Whether we wake the grace-period
2240  * kthread or it awakens itself for the next round of quiescent-state
2241  * forcing, that kthread will clean up after the just-completed grace
2242  * period.  Note that the caller must hold rnp->lock, which is released
2243  * before return.
2244  */
2245 static void rcu_report_qs_rsp(unsigned long flags)
2246 	__releases(rcu_get_root()->lock)
2247 {
2248 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
2249 	WARN_ON_ONCE(!rcu_gp_in_progress());
2250 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2251 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2252 	rcu_gp_kthread_wake();
2253 }
2254 
2255 /*
2256  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2257  * Allows quiescent states for a group of CPUs to be reported at one go
2258  * to the specified rcu_node structure, though all the CPUs in the group
2259  * must be represented by the same rcu_node structure (which need not be a
2260  * leaf rcu_node structure, though it often will be).  The gps parameter
2261  * is the grace-period snapshot, which means that the quiescent states
2262  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
2263  * must be held upon entry, and it is released before return.
2264  *
2265  * As a special case, if mask is zero, the bit-already-cleared check is
2266  * disabled.  This allows propagating quiescent state due to resumed tasks
2267  * during grace-period initialization.
2268  */
2269 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2270 			      unsigned long gps, unsigned long flags)
2271 	__releases(rnp->lock)
2272 {
2273 	unsigned long oldmask = 0;
2274 	struct rcu_node *rnp_c;
2275 
2276 	raw_lockdep_assert_held_rcu_node(rnp);
2277 
2278 	/* Walk up the rcu_node hierarchy. */
2279 	for (;;) {
2280 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2281 
2282 			/*
2283 			 * Our bit has already been cleared, or the
2284 			 * relevant grace period is already over, so done.
2285 			 */
2286 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2287 			return;
2288 		}
2289 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2290 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2291 			     rcu_preempt_blocked_readers_cgp(rnp));
2292 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2293 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2294 						 mask, rnp->qsmask, rnp->level,
2295 						 rnp->grplo, rnp->grphi,
2296 						 !!rnp->gp_tasks);
2297 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2298 
2299 			/* Other bits still set at this level, so done. */
2300 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2301 			return;
2302 		}
2303 		rnp->completedqs = rnp->gp_seq;
2304 		mask = rnp->grpmask;
2305 		if (rnp->parent == NULL) {
2306 
2307 			/* No more levels.  Exit loop holding root lock. */
2308 
2309 			break;
2310 		}
2311 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2312 		rnp_c = rnp;
2313 		rnp = rnp->parent;
2314 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2315 		oldmask = READ_ONCE(rnp_c->qsmask);
2316 	}
2317 
2318 	/*
2319 	 * Get here if we are the last CPU to pass through a quiescent
2320 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2321 	 * to clean up and start the next grace period if one is needed.
2322 	 */
2323 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2324 }
2325 
2326 /*
2327  * Record a quiescent state for all tasks that were previously queued
2328  * on the specified rcu_node structure and that were blocking the current
2329  * RCU grace period.  The caller must hold the corresponding rnp->lock with
2330  * irqs disabled, and this lock is released upon return, but irqs remain
2331  * disabled.
2332  */
2333 static void __maybe_unused
2334 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2335 	__releases(rnp->lock)
2336 {
2337 	unsigned long gps;
2338 	unsigned long mask;
2339 	struct rcu_node *rnp_p;
2340 
2341 	raw_lockdep_assert_held_rcu_node(rnp);
2342 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2343 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2344 	    rnp->qsmask != 0) {
2345 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2346 		return;  /* Still need more quiescent states! */
2347 	}
2348 
2349 	rnp->completedqs = rnp->gp_seq;
2350 	rnp_p = rnp->parent;
2351 	if (rnp_p == NULL) {
2352 		/*
2353 		 * Only one rcu_node structure in the tree, so don't
2354 		 * try to report up to its nonexistent parent!
2355 		 */
2356 		rcu_report_qs_rsp(flags);
2357 		return;
2358 	}
2359 
2360 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2361 	gps = rnp->gp_seq;
2362 	mask = rnp->grpmask;
2363 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2364 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2365 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2366 }
2367 
2368 /*
2369  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2370  * structure.  This must be called from the specified CPU.
2371  */
2372 static void
2373 rcu_report_qs_rdp(struct rcu_data *rdp)
2374 {
2375 	unsigned long flags;
2376 	unsigned long mask;
2377 	struct rcu_node *rnp;
2378 
2379 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2380 	rnp = rdp->mynode;
2381 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2382 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2383 	    rdp->gpwrap) {
2384 
2385 		/*
2386 		 * The grace period in which this quiescent state was
2387 		 * recorded has ended, so don't report it upwards.
2388 		 * We will instead need a new quiescent state that lies
2389 		 * within the current grace period.
2390 		 */
2391 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2392 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2393 		return;
2394 	}
2395 	mask = rdp->grpmask;
2396 	rdp->core_needs_qs = false;
2397 	if ((rnp->qsmask & mask) == 0) {
2398 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2399 	} else {
2400 		/*
		 * This GP can't end until this CPU checks in, so all of our
2402 		 * callbacks can be processed during the next GP.
2403 		 *
2404 		 * NOCB kthreads have their own way to deal with that...
2405 		 */
2406 		if (!rcu_rdp_is_offloaded(rdp)) {
2407 			/*
2408 			 * The current GP has not yet ended, so it
2409 			 * should not be possible for rcu_accelerate_cbs()
2410 			 * to return true.  So complain, but don't awaken.
2411 			 */
2412 			WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2413 		}
2414 
2415 		rcu_disable_urgency_upon_qs(rdp);
2416 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2417 		/* ^^^ Released rnp->lock */
2418 	}
2419 }
2420 
2421 /*
2422  * Check to see if there is a new grace period of which this CPU
2423  * is not yet aware, and if so, set up local rcu_data state for it.
2424  * Otherwise, see if this CPU has just passed through its first
2425  * quiescent state for this grace period, and record that fact if so.
2426  */
2427 static void
2428 rcu_check_quiescent_state(struct rcu_data *rdp)
2429 {
2430 	/* Check for grace-period ends and beginnings. */
2431 	note_gp_changes(rdp);
2432 
2433 	/*
2434 	 * Does this CPU still need to do its part for current grace period?
2435 	 * If no, return and let the other CPUs do their part as well.
2436 	 */
2437 	if (!rdp->core_needs_qs)
2438 		return;
2439 
2440 	/*
2441 	 * Was there a quiescent state since the beginning of the grace
2442 	 * period? If no, then exit and wait for the next call.
2443 	 */
2444 	if (rdp->cpu_no_qs.b.norm)
2445 		return;
2446 
2447 	/*
2448 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2449 	 * judge of that).
2450 	 */
2451 	rcu_report_qs_rdp(rdp);
2452 }
2453 
2454 /* Return true if callback-invocation time limit exceeded. */
2455 static bool rcu_do_batch_check_time(long count, long tlimit,
2456 				    bool jlimit_check, unsigned long jlimit)
2457 {
2458 	// Invoke local_clock() only once per 32 consecutive callbacks.
2459 	return unlikely(tlimit) &&
2460 	       (!likely(count & 31) ||
2461 		(IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) &&
2462 		 jlimit_check && time_after(jiffies, jlimit))) &&
2463 	       local_clock() >= tlimit;
2464 }
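
/*
 * Editor's sketch of the amortization idiom above: local_clock() is
 * comparatively expensive, so it is consulted on only one of every 32
 * callbacks, with the jiffies comparison as a cheap safety net in
 * between.  Invented names, illustrative only.
 */
static bool __maybe_unused example_deadline_hit(long iter, u64 deadline_ns)
{
	if (likely(iter & 31))
		return false;	/* 31 of 32 calls skip the clock read. */
	return local_clock() >= deadline_ns;
}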
2465 
2466 /*
2467  * Invoke any RCU callbacks that have made it to the end of their grace
2468  * period.  Throttle as specified by rdp->blimit.
2469  */
2470 static void rcu_do_batch(struct rcu_data *rdp)
2471 {
2472 	long bl;
2473 	long count = 0;
2474 	int div;
2475 	bool __maybe_unused empty;
2476 	unsigned long flags;
2477 	unsigned long jlimit;
2478 	bool jlimit_check = false;
2479 	long pending;
2480 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2481 	struct rcu_head *rhp;
2482 	long tlimit = 0;
2483 
2484 	/* If no callbacks are ready, just return. */
2485 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2486 		trace_rcu_batch_start(rcu_state.name,
2487 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2488 		trace_rcu_batch_end(rcu_state.name, 0,
2489 				    !rcu_segcblist_empty(&rdp->cblist),
2490 				    need_resched(), is_idle_task(current),
2491 				    rcu_is_callbacks_kthread(rdp));
2492 		return;
2493 	}
2494 
2495 	/*
2496 	 * Extract the list of ready callbacks, disabling IRQs to prevent
2497 	 * races with call_rcu() from interrupt handlers.  Leave the
2498 	 * callback counts, as rcu_barrier() needs to be conservative.
2499 	 *
2500 	 * Callbacks execution is fully ordered against preceding grace period
2501 	 * completion (materialized by rnp->gp_seq update) thanks to the
2502 	 * smp_mb__after_unlock_lock() upon node locking required for callbacks
2503 	 * advancing. In NOCB mode this ordering is then further relayed through
2504 	 * the nocb locking that protects both callbacks advancing and extraction.
2505 	 */
2506 	rcu_nocb_lock_irqsave(rdp, flags);
2507 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2508 	pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2509 	div = READ_ONCE(rcu_divisor);
2510 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2511 	bl = max(rdp->blimit, pending >> div);
2512 	if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2513 	    (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) {
2514 		const long npj = NSEC_PER_SEC / HZ;
2515 		long rrn = READ_ONCE(rcu_resched_ns);
2516 
2517 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2518 		tlimit = local_clock() + rrn;
2519 		jlimit = jiffies + (rrn + npj + 1) / npj;
2520 		jlimit_check = true;
2521 	}
2522 	trace_rcu_batch_start(rcu_state.name,
2523 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2524 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2525 	if (rcu_rdp_is_offloaded(rdp))
2526 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2527 
2528 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2529 	rcu_nocb_unlock_irqrestore(rdp, flags);
2530 
2531 	/* Invoke callbacks. */
2532 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2533 	rhp = rcu_cblist_dequeue(&rcl);
2534 
2535 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2536 		rcu_callback_t f;
2537 
2538 		count++;
2539 		debug_rcu_head_unqueue(rhp);
2540 
2541 		rcu_lock_acquire(&rcu_callback_map);
2542 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2543 
2544 		f = rhp->func;
2545 		debug_rcu_head_callback(rhp);
2546 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2547 		f(rhp);
2548 
2549 		rcu_lock_release(&rcu_callback_map);
2550 
2551 		/*
2552 		 * Stop only if limit reached and CPU has something to do.
2553 		 */
2554 		if (in_serving_softirq()) {
2555 			if (count >= bl && (need_resched() || !is_idle_task(current)))
2556 				break;
2557 			/*
2558 			 * Make sure we don't spend too much time here and deprive other
2559 			 * softirq vectors of CPU cycles.
2560 			 */
2561 			if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit))
2562 				break;
2563 		} else {
2564 			// In rcuc/rcuoc context, so no worries about
2565 			// depriving other softirq vectors of CPU cycles.
2566 			local_bh_enable();
2567 			lockdep_assert_irqs_enabled();
2568 			cond_resched_tasks_rcu_qs();
2569 			lockdep_assert_irqs_enabled();
2570 			local_bh_disable();
2571 			// But rcuc kthreads can delay quiescent-state
2572 			// reporting, so check time limits for them.
2573 			if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2574 			    rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) {
2575 				rdp->rcu_cpu_has_work = 1;
2576 				break;
2577 			}
2578 		}
2579 	}
2580 
2581 	rcu_nocb_lock_irqsave(rdp, flags);
2582 	rdp->n_cbs_invoked += count;
2583 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2584 			    is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2585 
2586 	/* Update counts and requeue any remaining callbacks. */
2587 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2588 	rcu_segcblist_add_len(&rdp->cblist, -count);
2589 
2590 	/* Reinstate batch limit if we have worked down the excess. */
2591 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2592 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2593 		rdp->blimit = blimit;
2594 
2595 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2596 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2597 		rdp->qlen_last_fqs_check = 0;
2598 		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2599 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2600 		rdp->qlen_last_fqs_check = count;
2601 
2602 	/*
2603 	 * The following usually indicates a double call_rcu().  To track
2604 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2605 	 */
2606 	empty = rcu_segcblist_empty(&rdp->cblist);
2607 	WARN_ON_ONCE(count == 0 && !empty);
2608 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2609 		     count != 0 && empty);
2610 	WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2611 	WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2612 
2613 	rcu_nocb_unlock_irqrestore(rdp, flags);
2614 
2615 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2616 }
2617 
2618 /*
2619  * This function is invoked from each scheduling-clock interrupt,
2620  * and checks to see if this CPU is in a non-context-switch quiescent
2621  * state, for example, user mode or idle loop.  It also schedules RCU
2622  * core processing.  If the current grace period has gone on too long,
2623  * it will ask the scheduler to manufacture a context switch for the sole
2624  * purpose of providing the needed quiescent state.
2625  */
2626 void rcu_sched_clock_irq(int user)
2627 {
2628 	unsigned long j;
2629 
2630 	if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2631 		j = jiffies;
2632 		WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2633 		__this_cpu_write(rcu_data.last_sched_clock, j);
2634 	}
2635 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2636 	lockdep_assert_irqs_disabled();
2637 	raw_cpu_inc(rcu_data.ticks_this_gp);
2638 	/* The load-acquire pairs with the store-release setting to true. */
2639 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2640 		/* Idle and userspace execution already are quiescent states. */
2641 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2642 			set_tsk_need_resched(current);
2643 			set_preempt_need_resched();
2644 		}
2645 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2646 	}
2647 	rcu_flavor_sched_clock_irq(user);
2648 	if (rcu_pending(user))
2649 		invoke_rcu_core();
2650 	if (user || rcu_is_cpu_rrupt_from_idle())
2651 		rcu_note_voluntary_context_switch(current);
2652 	lockdep_assert_irqs_disabled();
2653 
2654 	trace_rcu_utilization(TPS("End scheduler-tick"));
2655 }
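
/*
 * Editor's sketch of the pairing noted above: the side requesting urgency
 * sets the flag with a store-release, so that this tick's load-acquire also
 * sees whatever state was published before the flag.  Hedged producer side,
 * with an invented name:
 */
static void __maybe_unused example_set_urgent_qs(int cpu)
{
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}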
2656 
2657 /*
2658  * Scan the leaf rcu_node structures.  For each structure on which all
2659  * CPUs have reported a quiescent state and on which there are tasks
2660  * blocking the current grace period, initiate RCU priority boosting.
2661  * Otherwise, invoke the specified function to check dyntick state for
2662  * each CPU that has not yet reported a quiescent state.
2663  */
2664 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2665 {
2666 	int cpu;
2667 	unsigned long flags;
2668 	struct rcu_node *rnp;
2669 
2670 	rcu_state.cbovld = rcu_state.cbovldnext;
2671 	rcu_state.cbovldnext = false;
2672 	rcu_for_each_leaf_node(rnp) {
2673 		unsigned long mask = 0;
2674 		unsigned long rsmask = 0;
2675 
2676 		cond_resched_tasks_rcu_qs();
2677 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2678 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2679 		if (rnp->qsmask == 0) {
2680 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2681 				/*
2682 				 * No point in scanning bits because they
2683 				 * are all zero.  But we might need to
2684 				 * priority-boost blocked readers.
2685 				 */
2686 				rcu_initiate_boost(rnp, flags);
2687 				/* rcu_initiate_boost() releases rnp->lock */
2688 				continue;
2689 			}
2690 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2691 			continue;
2692 		}
2693 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2694 			struct rcu_data *rdp;
2695 			int ret;
2696 
2697 			rdp = per_cpu_ptr(&rcu_data, cpu);
2698 			ret = f(rdp);
2699 			if (ret > 0) {
2700 				mask |= rdp->grpmask;
2701 				rcu_disable_urgency_upon_qs(rdp);
2702 			}
2703 			if (ret < 0)
2704 				rsmask |= rdp->grpmask;
2705 		}
2706 		if (mask != 0) {
2707 			/* Idle/offline CPUs, report (releases rnp->lock). */
2708 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2709 		} else {
2710 			/* Nothing to do here, so just drop the lock. */
2711 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2712 		}
2713 
2714 		for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
2715 			resched_cpu(cpu);
2716 	}
2717 }
2718 
2719 /*
2720  * Force quiescent states on reluctant CPUs, and also detect which
2721  * CPUs are in dyntick-idle mode.
2722  */
2723 void rcu_force_quiescent_state(void)
2724 {
2725 	unsigned long flags;
2726 	bool ret;
2727 	struct rcu_node *rnp;
2728 	struct rcu_node *rnp_old = NULL;
2729 
2730 	if (!rcu_gp_in_progress())
2731 		return;
2732 	/* Funnel through hierarchy to reduce memory contention. */
2733 	rnp = raw_cpu_read(rcu_data.mynode);
2734 	for (; rnp != NULL; rnp = rnp->parent) {
2735 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2736 		       !raw_spin_trylock(&rnp->fqslock);
2737 		if (rnp_old != NULL)
2738 			raw_spin_unlock(&rnp_old->fqslock);
2739 		if (ret)
2740 			return;
2741 		rnp_old = rnp;
2742 	}
2743 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2744 
2745 	/* Reached the root of the rcu_node tree, acquire lock. */
2746 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2747 	raw_spin_unlock(&rnp_old->fqslock);
2748 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2749 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2750 		return;  /* Someone beat us to it. */
2751 	}
2752 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2753 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2754 	rcu_gp_kthread_wake();
2755 }
2756 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
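
/*
 * Editor's sketch of the funnel-locking idiom above, in isolation: climb
 * from a leaf toward the root, trylocking each level and dropping the one
 * below it; losing a trylock means another CPU is already funneling the
 * same request upward, so the loser simply gives up.  Hedged and
 * illustrative only; the real function also rechecks RCU_GP_FLAG_FQS.
 */
static bool __maybe_unused example_funnel_to_root(struct rcu_node *leaf)
{
	struct rcu_node *rnp;
	struct rcu_node *held = NULL;

	for (rnp = leaf; rnp; rnp = rnp->parent) {
		bool lost = !raw_spin_trylock(&rnp->fqslock);

		if (held)
			raw_spin_unlock(&held->fqslock);
		if (lost)
			return false;	/* Some other CPU will do the work. */
		held = rnp;
	}
	raw_spin_unlock(&held->fqslock);	/* held is now the root. */
	return true;	/* Winner: do the root-level work. */
}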
2757 
// Workqueue handler for an RCU reader for kernels enforcing strict RCU
// grace periods.
2760 static void strict_work_handler(struct work_struct *work)
2761 {
2762 	rcu_read_lock();
2763 	rcu_read_unlock();
2764 }
2765 
2766 /* Perform RCU core processing work for the current CPU.  */
2767 static __latent_entropy void rcu_core(void)
2768 {
2769 	unsigned long flags;
2770 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2771 	struct rcu_node *rnp = rdp->mynode;
2772 
2773 	if (cpu_is_offline(smp_processor_id()))
2774 		return;
2775 	trace_rcu_utilization(TPS("Start RCU core"));
2776 	WARN_ON_ONCE(!rdp->beenonline);
2777 
2778 	/* Report any deferred quiescent states if preemption enabled. */
2779 	if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2780 		rcu_preempt_deferred_qs(current);
2781 	} else if (rcu_preempt_need_deferred_qs(current)) {
2782 		set_tsk_need_resched(current);
2783 		set_preempt_need_resched();
2784 	}
2785 
2786 	/* Update RCU state based on any recent quiescent states. */
2787 	rcu_check_quiescent_state(rdp);
2788 
2789 	/* No grace period and unregistered callbacks? */
2790 	if (!rcu_gp_in_progress() &&
2791 	    rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
2792 		local_irq_save(flags);
2793 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2794 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2795 		local_irq_restore(flags);
2796 	}
2797 
2798 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2799 
2800 	/* If there are callbacks ready, invoke them. */
2801 	if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2802 	    likely(READ_ONCE(rcu_scheduler_fully_active))) {
2803 		rcu_do_batch(rdp);
2804 		/* Re-invoke RCU core processing if there are callbacks remaining. */
2805 		if (rcu_segcblist_ready_cbs(&rdp->cblist))
2806 			invoke_rcu_core();
2807 	}
2808 
2809 	/* Do any needed deferred wakeups of rcuo kthreads. */
2810 	do_nocb_deferred_wakeup(rdp);
2811 	trace_rcu_utilization(TPS("End RCU core"));
2812 
2813 	// If strict GPs, schedule an RCU reader in a clean environment.
2814 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2815 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2816 }
2817 
2818 static void rcu_core_si(void)
2819 {
2820 	rcu_core();
2821 }
2822 
2823 static void rcu_wake_cond(struct task_struct *t, int status)
2824 {
2825 	/*
2826 	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
2828 	 */
2829 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2830 		wake_up_process(t);
2831 }
2832 
2833 static void invoke_rcu_core_kthread(void)
2834 {
2835 	struct task_struct *t;
2836 	unsigned long flags;
2837 
2838 	local_irq_save(flags);
2839 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2840 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2841 	if (t != NULL && t != current)
2842 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2843 	local_irq_restore(flags);
2844 }
2845 
2846 /*
2847  * Wake up this CPU's rcuc kthread to do RCU core processing.
2848  */
2849 static void invoke_rcu_core(void)
2850 {
2851 	if (!cpu_online(smp_processor_id()))
2852 		return;
2853 	if (use_softirq)
2854 		raise_softirq(RCU_SOFTIRQ);
2855 	else
2856 		invoke_rcu_core_kthread();
2857 }
2858 
2859 static void rcu_cpu_kthread_park(unsigned int cpu)
2860 {
2861 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2862 }
2863 
2864 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2865 {
2866 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2867 }
2868 
2869 /*
2870  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2871  * the RCU softirq used in configurations of RCU that do not support RCU
2872  * priority boosting.
2873  */
2874 static void rcu_cpu_kthread(unsigned int cpu)
2875 {
2876 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2877 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2878 	unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2879 	int spincnt;
2880 
2881 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2882 	for (spincnt = 0; spincnt < 10; spincnt++) {
2883 		WRITE_ONCE(*j, jiffies);
2884 		local_bh_disable();
2885 		*statusp = RCU_KTHREAD_RUNNING;
2886 		local_irq_disable();
2887 		work = *workp;
2888 		WRITE_ONCE(*workp, 0);
2889 		local_irq_enable();
2890 		if (work)
2891 			rcu_core();
2892 		local_bh_enable();
2893 		if (!READ_ONCE(*workp)) {
2894 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2895 			*statusp = RCU_KTHREAD_WAITING;
2896 			return;
2897 		}
2898 	}
2899 	*statusp = RCU_KTHREAD_YIELDING;
2900 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2901 	schedule_timeout_idle(2);
2902 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2903 	*statusp = RCU_KTHREAD_WAITING;
2904 	WRITE_ONCE(*j, jiffies);
2905 }
2906 
2907 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2908 	.store			= &rcu_data.rcu_cpu_kthread_task,
2909 	.thread_should_run	= rcu_cpu_kthread_should_run,
2910 	.thread_fn		= rcu_cpu_kthread,
2911 	.thread_comm		= "rcuc/%u",
2912 	.setup			= rcu_cpu_kthread_setup,
2913 	.park			= rcu_cpu_kthread_park,
2914 };
2915 
2916 /*
2917  * Spawn per-CPU RCU core processing kthreads.
2918  */
2919 static int __init rcu_spawn_core_kthreads(void)
2920 {
2921 	int cpu;
2922 
2923 	for_each_possible_cpu(cpu)
2924 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2925 	if (use_softirq)
2926 		return 0;
2927 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2928 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2929 	return 0;
2930 }
2931 
2932 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func)
2933 {
2934 	rcu_segcblist_enqueue(&rdp->cblist, head);
2935 	if (__is_kvfree_rcu_offset((unsigned long)func))
2936 		trace_rcu_kvfree_callback(rcu_state.name, head,
2937 					 (unsigned long)func,
2938 					 rcu_segcblist_n_cbs(&rdp->cblist));
2939 	else
2940 		trace_rcu_callback(rcu_state.name, head,
2941 				   rcu_segcblist_n_cbs(&rdp->cblist));
2942 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2943 }
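
/*
 * Editor's aside on __is_kvfree_rcu_offset() above: kvfree_rcu() encodes
 * the offset of the rcu_head within its enclosing object as the "callback"
 * pointer, and no kernel function lives in the first page of the address
 * space, so a small value marks a kvfree request rather than a real
 * callback.  Hedged sketch of the corresponding decode step:
 */
static __maybe_unused void *example_kvfree_obj(struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	/* Back up from the embedded rcu_head to the enclosing object. */
	return (void *)head - offset;
}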
2944 
2945 /*
2946  * Handle any core-RCU processing required by a call_rcu() invocation.
2947  */
2948 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2949 			  rcu_callback_t func, unsigned long flags)
2950 {
2951 	rcutree_enqueue(rdp, head, func);
2952 	/*
2953 	 * If called from an extended quiescent state, invoke the RCU
2954 	 * core in order to force a re-evaluation of RCU's idleness.
2955 	 */
2956 	if (!rcu_is_watching())
2957 		invoke_rcu_core();
2958 
2959 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2960 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2961 		return;
2962 
2963 	/*
2964 	 * Force the grace period if too many callbacks or too long waiting.
2965 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2966 	 * if some other CPU has recently done so.  Also, don't bother
2967 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2968 	 * is the only one waiting for a grace period to complete.
2969 	 */
2970 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2971 		     rdp->qlen_last_fqs_check + qhimark)) {
2972 
2973 		/* Are we ignoring a completed grace period? */
2974 		note_gp_changes(rdp);
2975 
2976 		/* Start a new grace period if one not already started. */
2977 		if (!rcu_gp_in_progress()) {
2978 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2979 		} else {
2980 			/* Give the grace period a kick. */
2981 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2982 			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2983 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2984 				rcu_force_quiescent_state();
2985 			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2986 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2987 		}
2988 	}
2989 }
2990 
2991 /*
2992  * RCU callback function to leak a callback.
2993  */
2994 static void rcu_leak_callback(struct rcu_head *rhp)
2995 {
2996 }
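/*
 * Illustrative sketch of the bug guarded against here (hypothetical
 * names, not part of this file): passing the same rcu_head to call_rcu()
 * twice before the first callback has been invoked:
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 *	...
 *	call_rcu(&fp->rcu, foo_reclaim);	// BUG: head still queued.
 *
 * With CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, debug_rcu_head_queue() detects
 * the second invocation in __call_rcu_common() below, which then
 * substitutes rcu_leak_callback() and leaks the callback rather than
 * corrupting the callback list.
 */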
2997 
2998 /*
2999  * Check and if necessary update the leaf rcu_node structure's
3000  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3001  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
3002  * structure's ->lock.
3003  */
3004 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
3005 {
3006 	raw_lockdep_assert_held_rcu_node(rnp);
3007 	if (qovld_calc <= 0)
3008 		return; // Early boot and wildcard value set.
3009 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
3010 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
3011 	else
3012 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
3013 }
3014 
3015 /*
3016  * Check and if necessary update the leaf rcu_node structure's
3017  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3018  * number of queued RCU callbacks.  No locks need be held, but the
3019  * caller must have disabled interrupts.
3020  *
3021  * Note that this function ignores the possibility that there are a lot
3022  * of callbacks all of which have already seen the end of their respective
3023  * grace periods.  This omission is due to the need for no-CBs CPUs to
3024  * be holding ->nocb_lock to do this check, which is too heavy for a
3025  * common-case operation.
3026  */
3027 static void check_cb_ovld(struct rcu_data *rdp)
3028 {
3029 	struct rcu_node *const rnp = rdp->mynode;
3030 
3031 	if (qovld_calc <= 0 ||
3032 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
3033 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3034 		return; // Early boot wildcard value or already set correctly.
3035 	raw_spin_lock_rcu_node(rnp);
3036 	check_cb_ovld_locked(rdp, rnp);
3037 	raw_spin_unlock_rcu_node(rnp);
3038 }
3039 
3040 static void
3041 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
3042 {
3043 	static atomic_t doublefrees;
3044 	unsigned long flags;
3045 	bool lazy;
3046 	struct rcu_data *rdp;
3047 
3048 	/* Misaligned rcu_head! */
3049 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3050 
3051 	if (debug_rcu_head_queue(head)) {
3052 		/*
3053 		 * Probable double call_rcu(), so leak the callback.
3054 		 * Use rcu:rcu_callback trace event to find the previous
3055 		 * time callback was passed to call_rcu().
3056 		 */
3057 		if (atomic_inc_return(&doublefrees) < 4) {
3058 			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
3059 			mem_dump_obj(head);
3060 		}
3061 		WRITE_ONCE(head->func, rcu_leak_callback);
3062 		return;
3063 	}
3064 	head->func = func;
3065 	head->next = NULL;
3066 	kasan_record_aux_stack_noalloc(head);
3067 
3068 	local_irq_save(flags);
3069 	rdp = this_cpu_ptr(&rcu_data);
3070 	RCU_LOCKDEP_WARN(!rcu_rdp_cpu_online(rdp), "Callback enqueued on offline CPU!");
3071 
3072 	lazy = lazy_in && !rcu_async_should_hurry();
3073 
3074 	/* Add the callback to our list. */
3075 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3076 		// This can trigger due to call_rcu() from offline CPU:
3077 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
3078 		WARN_ON_ONCE(!rcu_is_watching());
3079 		// Very early boot, before rcu_init().  Initialize if needed
3080 		// and then drop through to queue the callback.
3081 		if (rcu_segcblist_empty(&rdp->cblist))
3082 			rcu_segcblist_init(&rdp->cblist);
3083 	}
3084 
3085 	check_cb_ovld(rdp);
3086 
3087 	if (unlikely(rcu_rdp_is_offloaded(rdp)))
3088 		call_rcu_nocb(rdp, head, func, flags, lazy);
3089 	else
3090 		call_rcu_core(rdp, head, func, flags);
3091 	local_irq_restore(flags);
3092 }
3093 
3094 #ifdef CONFIG_RCU_LAZY
3095 static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF);
3096 module_param(enable_rcu_lazy, bool, 0444);
3097 
3098 /**
3099  * call_rcu_hurry() - Queue RCU callback for invocation after grace period.
3100  * @head: structure to be used for queueing the RCU updates.
3101  * @func: actual callback function to be invoked after the grace period
3102  *
3103  * Unlike call_rcu(), this flushes all lazy callbacks (including the
3104  * new one) to the main ->cblist while queueing.
3105  *
3106  * The callback function will be invoked some time after a full grace
3107  * period elapses, in other words after all pre-existing RCU read-side
3108  * critical sections have completed.
3109  *
3110  * Use this API instead of call_rcu() if you don't want the callback to be
3111  * invoked after very long periods of time, which can happen on systems without
3112  * memory pressure and on systems which are lightly loaded or mostly idle.
3113  * This function will cause callbacks to be invoked sooner than later at the
3114  * expense of extra power. Other than that, this function is identical to, and
3115  * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
3116  * ordering and other functionality.
3117  */
3118 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
3119 {
3120 	__call_rcu_common(head, func, false);
3121 }
3122 EXPORT_SYMBOL_GPL(call_rcu_hurry);
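/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * memory-pressure path that cannot tolerate lazy batching queues its
 * callback with call_rcu_hurry() rather than call_rcu():
 *
 *	call_rcu_hurry(&old_fp->rcu, foo_reclaim);
 *
 * The callback still waits for a full grace period; only the eagerness
 * with which that grace period is started differs.
 */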
3123 #else
3124 #define enable_rcu_lazy		false
3125 #endif
3126 
3127 /**
3128  * call_rcu() - Queue an RCU callback for invocation after a grace period.
3129  * @head: structure to be used for queueing the RCU updates.
3130  * @func: actual callback function to be invoked after the grace period
3131  *
3132  * By default the callbacks are 'lazy' and are kept hidden from the main
3133  * ->cblist to prevent starting of grace periods too soon.  If you need
3134  * grace periods to start very soon, use call_rcu_hurry().
3135  *
3136  * The callback function will be invoked some time after a full grace
3137  * period elapses, in other words after all pre-existing RCU read-side
3138  * critical sections have completed.  However, the callback function
3139  * might well execute concurrently with RCU read-side critical sections
3140  * that started after call_rcu() was invoked.
3141  *
3142  * RCU read-side critical sections are delimited by rcu_read_lock()
3143  * and rcu_read_unlock(), and may be nested.  In addition, but only in
3144  * v5.0 and later, regions of code across which interrupts, preemption,
3145  * or softirqs have been disabled also serve as RCU read-side critical
3146  * sections.  This includes hardware interrupt handlers, softirq handlers,
3147  * and NMI handlers.
3148  *
3149  * Note that all CPUs must agree that the grace period extended beyond
3150  * all pre-existing RCU read-side critical sections.  On systems with more
3151  * than one CPU, this means that when "func()" is invoked, each CPU is
3152  * guaranteed to have executed a full memory barrier since the end of its
3153  * last RCU read-side critical section whose beginning preceded the call
3154  * to call_rcu().  It also means that each CPU executing an RCU read-side
3155  * critical section that continues beyond the start of "func()" must have
3156  * executed a memory barrier after the call_rcu() but before the beginning
3157  * of that RCU read-side critical section.  Note that these guarantees
3158  * include CPUs that are offline, idle, or executing in user mode, as
3159  * well as CPUs that are executing in the kernel.
3160  *
3161  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3162  * resulting RCU callback function "func()", then both CPU A and CPU B are
3163  * guaranteed to execute a full memory barrier during the time interval
3164  * between the call to call_rcu() and the invocation of "func()" -- even
3165  * if CPU A and CPU B are the same CPU (but again only if the system has
3166  * more than one CPU).
3167  *
3168  * Implementation of these memory-ordering guarantees is described here:
3169  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3170  */
3171 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3172 {
3173 	__call_rcu_common(head, func, enable_rcu_lazy);
3174 }
3175 EXPORT_SYMBOL_GPL(call_rcu);
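/*
 * Illustrative usage sketch (hypothetical "struct foo", foo_lock, and
 * helpers, none of which are part of this file): replace an RCU-protected
 * pointer and defer freeing the old structure until a grace period has
 * elapsed:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		kfree(container_of(rp, struct foo, rcu));
 *	}
 *
 *	void foo_update(struct foo __rcu **fpp, struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		old_fp = rcu_dereference_protected(*fpp,
 *						   lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(*fpp, new_fp);
 *		call_rcu(&old_fp->rcu, foo_reclaim);
 *	}
 *
 * Readers traverse the pointer under rcu_read_lock(), so foo_reclaim()
 * runs only after all pre-existing readers have finished.
 */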
3176 
3177 /*
3178  * During early boot, any blocking grace-period wait automatically
3179  * implies a grace period.
3180  *
3181  * Later on, this could in theory be the case for kernels built with
3182  * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3183  * is not a common case.  Furthermore, this optimization would cause
3184  * the rcu_gp_oldstate structure to expand by 50%, so this potential
3185  * grace-period optimization is ignored once the scheduler is running.
3186  */
3187 static int rcu_blocking_is_gp(void)
3188 {
3189 	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) {
3190 		might_sleep();
3191 		return false;
3192 	}
3193 	return true;
3194 }
3195 
3196 /*
3197  * Helper function for the synchronize_rcu() API.
3198  */
3199 static void synchronize_rcu_normal(void)
3200 {
3201 	struct rcu_synchronize rs;
3202 
3203 	trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("request"));
3204 
3205 	if (!READ_ONCE(rcu_normal_wake_from_gp)) {
3206 		wait_rcu_gp(call_rcu_hurry);
3207 		goto trace_complete_out;
3208 	}
3209 
3210 	init_rcu_head_on_stack(&rs.head);
3211 	init_completion(&rs.completion);
3212 
3213 	/*
3214 	 * This code might be preempted, so take a GP
3215 	 * snapshot before adding a request.
3216 	 */
3217 	if (IS_ENABLED(CONFIG_PROVE_RCU))
3218 		rs.head.func = (void *) get_state_synchronize_rcu();
3219 
3220 	rcu_sr_normal_add_req(&rs);
3221 
3222 	/* Kick a GP and start waiting. */
3223 	(void) start_poll_synchronize_rcu();
3224 
3225 	/* Now we can wait. */
3226 	wait_for_completion(&rs.completion);
3227 	destroy_rcu_head_on_stack(&rs.head);
3228 
3229 trace_complete_out:
3230 	trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("complete"));
3231 }
3232 
3233 /**
3234  * synchronize_rcu - wait until a grace period has elapsed.
3235  *
3236  * Control will return to the caller some time after a full grace
3237  * period has elapsed, in other words after all currently executing RCU
3238  * read-side critical sections have completed.  Note, however, that
3239  * upon return from synchronize_rcu(), the caller might well be executing
3240  * concurrently with new RCU read-side critical sections that began while
3241  * synchronize_rcu() was waiting.
3242  *
3243  * RCU read-side critical sections are delimited by rcu_read_lock()
3244  * and rcu_read_unlock(), and may be nested.  In addition, but only in
3245  * v5.0 and later, regions of code across which interrupts, preemption,
3246  * or softirqs have been disabled also serve as RCU read-side critical
3247  * sections.  This includes hardware interrupt handlers, softirq handlers,
3248  * and NMI handlers.
3249  *
3250  * Note that this guarantee implies further memory-ordering guarantees.
3251  * On systems with more than one CPU, when synchronize_rcu() returns,
3252  * each CPU is guaranteed to have executed a full memory barrier since
3253  * the end of its last RCU read-side critical section whose beginning
3254  * preceded the call to synchronize_rcu().  In addition, each CPU having
3255  * an RCU read-side critical section that extends beyond the return from
3256  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3257  * after the beginning of synchronize_rcu() and before the beginning of
3258  * that RCU read-side critical section.  Note that these guarantees include
3259  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3260  * that are executing in the kernel.
3261  *
3262  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3263  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3264  * to have executed a full memory barrier during the execution of
3265  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3266  * again only if the system has more than one CPU).
3267  *
3268  * Implementation of these memory-ordering guarantees is described here:
3269  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3270  */
3271 void synchronize_rcu(void)
3272 {
3273 	unsigned long flags;
3274 	struct rcu_node *rnp;
3275 
3276 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3277 			 lock_is_held(&rcu_lock_map) ||
3278 			 lock_is_held(&rcu_sched_lock_map),
3279 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3280 	if (!rcu_blocking_is_gp()) {
3281 		if (rcu_gp_is_expedited())
3282 			synchronize_rcu_expedited();
3283 		else
3284 			synchronize_rcu_normal();
3285 		return;
3286 	}
3287 
3288 	// Context allows vacuous grace periods.
3289 	// Note well that this code runs with !PREEMPT && !SMP.
3290 	// In addition, all code that advances grace periods runs at
3291 	// process level.  Therefore, this normal GP overlaps with other
3292 	// normal GPs only by being fully nested within them, which allows
3293 	// reuse of ->gp_seq_polled_snap.
3294 	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3295 	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3296 
3297 	// Update the normal grace-period counters to record
3298 	// this grace period, but only those used by the boot CPU.
3299 	// rcu_scheduler_starting() will take care of the rest of
3300 	// these counters.
3301 	local_irq_save(flags);
3302 	WARN_ON_ONCE(num_online_cpus() > 1);
3303 	rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3304 	for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3305 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3306 	local_irq_restore(flags);
3307 }
3308 EXPORT_SYMBOL_GPL(synchronize_rcu);
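/*
 * Illustrative usage sketch (hypothetical names): the classic update-side
 * pattern unlinks an element, waits out pre-existing readers, and only
 * then frees the element:
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&fp->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(fp);
 *
 * Unlike call_rcu(), this blocks the caller and therefore may be used
 * only in sleepable context.
 */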
3309 
3310 /**
3311  * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3312  * @rgosp: Place to put state cookie
3313  *
3314  * Stores into @rgosp a value that will always be treated by functions
3315  * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3316  * has already completed.
3317  */
3318 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3319 {
3320 	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3321 	rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3322 }
3323 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
3324 
3325 /**
3326  * get_state_synchronize_rcu - Snapshot current RCU state
3327  *
3328  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3329  * or poll_state_synchronize_rcu() to determine whether or not a full
3330  * grace period has elapsed in the meantime.
3331  */
3332 unsigned long get_state_synchronize_rcu(void)
3333 {
3334 	/*
3335 	 * Any prior manipulation of RCU-protected data must happen
3336 	 * before the load from ->gp_seq.
3337 	 */
3338 	smp_mb();  /* ^^^ */
3339 	return rcu_seq_snap(&rcu_state.gp_seq_polled);
3340 }
3341 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3342 
3343 /**
3344  * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3345  * @rgosp: location to place combined normal/expedited grace-period state
3346  *
3347  * Places the normal and expedited grace-period states in @rgosp.  This
3348  * state value can be passed to a later call to cond_synchronize_rcu_full()
3349  * or poll_state_synchronize_rcu_full() to determine whether or not a
3350  * grace period (whether normal or expedited) has elapsed in the meantime.
3351  * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3352  * long, but is guaranteed to see all grace periods.  In contrast, the
3353  * combined state occupies less memory, but can sometimes fail to take
3354  * grace periods into account.
3355  *
3356  * This does not guarantee that the needed grace period will actually
3357  * start.
3358  */
3359 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3360 {
3361 	struct rcu_node *rnp = rcu_get_root();
3362 
3363 	/*
3364 	 * Any prior manipulation of RCU-protected data must happen
3365 	 * before the loads from ->gp_seq and ->expedited_sequence.
3366 	 */
3367 	smp_mb();  /* ^^^ */
3368 	rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3369 	rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3370 }
3371 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3372 
3373 /*
3374  * Helper function for start_poll_synchronize_rcu() and
3375  * start_poll_synchronize_rcu_full().
3376  */
3377 static void start_poll_synchronize_rcu_common(void)
3378 {
3379 	unsigned long flags;
3380 	bool needwake;
3381 	struct rcu_data *rdp;
3382 	struct rcu_node *rnp;
3383 
3384 	local_irq_save(flags);
3385 	rdp = this_cpu_ptr(&rcu_data);
3386 	rnp = rdp->mynode;
3387 	raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3388 	// Note it is possible for a grace period to have elapsed between
3389 	// the above call to get_state_synchronize_rcu() and the below call
3390 	// to rcu_seq_snap.  This is OK, the worst that happens is that we
3391 	// get a grace period that no one needed.  These accesses are ordered
3392 	// by smp_mb(), and we are accessing them in the opposite order
3393 	// from which they are updated at grace-period start, as required.
3394 	needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3395 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3396 	if (needwake)
3397 		rcu_gp_kthread_wake();
3398 }
3399 
3400 /**
3401  * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3402  *
3403  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3404  * or poll_state_synchronize_rcu() to determine whether or not a full
3405  * grace period has elapsed in the meantime.  If the needed grace period
3406  * is not already slated to start, notifies RCU core of the need for that
3407  * grace period.
3408  */
3409 unsigned long start_poll_synchronize_rcu(void)
3410 {
3411 	unsigned long gp_seq = get_state_synchronize_rcu();
3412 
3413 	start_poll_synchronize_rcu_common();
3414 	return gp_seq;
3415 }
3416 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3417 
3418 /**
3419  * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3420  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3421  *
3422  * Places the normal and expedited grace-period states in @rgosp.  This
3423  * state value can be passed to a later call to cond_synchronize_rcu_full()
3424  * or poll_state_synchronize_rcu_full() to determine whether or not a
3425  * grace period (whether normal or expedited) has elapsed in the meantime.
3426  * If the needed grace period is not already slated to start, notifies
3427  * RCU core of the need for that grace period.
3428  */
3429 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3430 {
3431 	get_state_synchronize_rcu_full(rgosp);
3432 
3433 	start_poll_synchronize_rcu_common();
3434 }
3435 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3436 
3437 /**
3438  * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3439  * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3440  *
3441  * If a full RCU grace period has elapsed since the earlier call from
3442  * which @oldstate was obtained, return @true; otherwise, return @false.
3443  * If @false is returned, it is the caller's responsibility to invoke this
3444  * function later on until it does return @true.  Alternatively, the caller
3445  * can explicitly wait for a grace period, for example, by passing @oldstate
3446  * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3447  * on the one hand or by directly invoking either synchronize_rcu() or
3448  * synchronize_rcu_expedited() on the other.
3449  *
3450  * Yes, this function does not take counter wrap into account.
3451  * But counter wrap is harmless.  If the counter wraps, we have waited for
3452  * more than a billion grace periods (and way more on a 64-bit system!).
3453  * Those needing to keep old state values for very long time periods
3454  * (many hours even on 32-bit systems) should check them occasionally and
3455  * either refresh them or set a flag indicating that the grace period has
3456  * completed.  Alternatively, they can use get_completed_synchronize_rcu()
3457  * to get a guaranteed-completed grace-period state.
3458  *
3459  * In addition, because oldstate compresses the grace-period state for
3460  * both normal and expedited grace periods into a single unsigned long,
3461  * it can miss a grace period when synchronize_rcu() runs concurrently
3462  * with synchronize_rcu_expedited().  If this is unacceptable, please
3463  * instead use the _full() variant of these polling APIs.
3464  *
3465  * This function provides the same memory-ordering guarantees that
3466  * would be provided by a synchronize_rcu() that was invoked at the call
3467  * to the function that provided @oldstate, and that returned at the end
3468  * of this function.
3469  */
3470 bool poll_state_synchronize_rcu(unsigned long oldstate)
3471 {
3472 	if (oldstate == RCU_GET_STATE_COMPLETED ||
3473 	    rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3474 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3475 		return true;
3476 	}
3477 	return false;
3478 }
3479 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
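/*
 * Illustrative polling sketch (hypothetical names): snapshot a cookie,
 * do other work, and free an old structure only once a full grace period
 * is known to have elapsed:
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *
 *	// ... unrelated processing ...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(old_fp);		// Grace period has elapsed.
 *	else
 *		defer_free(old_fp);	// Hypothetical: try again later.
 *
 * Use get_state_synchronize_rcu() instead of start_poll_synchronize_rcu()
 * when a grace period is expected to start soon anyway.
 */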
3480 
3481 /**
3482  * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3483  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3484  *
3485  * If a full RCU grace period has elapsed since the earlier call from
3486  * which @rgosp was obtained, return @true; otherwise, return @false.
3487  * If @false is returned, it is the caller's responsibility to invoke this
3488  * function later on until it does return @true.  Alternatively, the caller
3489  * can explicitly wait for a grace period, for example, by passing @rgosp
3490  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3491  *
3492  * Yes, this function does not take counter wrap into account.
3493  * But counter wrap is harmless.  If the counter wraps, we have waited
3494  * for more than a billion grace periods (and way more on a 64-bit
3495  * system!).  Those needing to keep rcu_gp_oldstate values for very
3496  * long time periods (many hours even on 32-bit systems) should check
3497  * them occasionally and either refresh them or set a flag indicating
3498  * that the grace period has completed.  Alternatively, they can use
3499  * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3500  * grace-period state.
3501  *
3502  * This function provides the same memory-ordering guarantees that would
3503  * be provided by a synchronize_rcu() that was invoked at the call to
3504  * the function that provided @rgosp, and that returned at the end of this
3505  * function.  And this guarantee requires that the root rcu_node structure's
3506  * ->gp_seq field be checked instead of that of the rcu_state structure.
3507  * The problem is that the just-ending grace-period's callbacks can be
3508  * invoked between the time that the root rcu_node structure's ->gp_seq
3509  * field is updated and the time that the rcu_state structure's ->gp_seq
3510  * field is updated.  Therefore, if a single synchronize_rcu() is to
3511  * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3512  * then the root rcu_node structure is the one that needs to be polled.
3513  */
3514 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3515 {
3516 	struct rcu_node *rnp = rcu_get_root();
3517 
3518 	smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3519 	if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3520 	    rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3521 	    rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3522 	    rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3523 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3524 		return true;
3525 	}
3526 	return false;
3527 }
3528 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
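/*
 * Illustrative sketch of the _full() variants (hypothetical usage): the
 * two-word cookie avoids the normal/expedited aliasing described above:
 *
 *	struct rcu_gp_oldstate rgos;
 *
 *	get_completed_synchronize_rcu_full(&rgos);	// "Already done".
 *	...
 *	get_state_synchronize_rcu_full(&rgos);		// Take a snapshot.
 *	...
 *	if (poll_state_synchronize_rcu_full(&rgos))
 *		...;	// A full grace period has elapsed since the snapshot.
 */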
3529 
3530 /**
3531  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3532  * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3533  *
3534  * If a full RCU grace period has elapsed since the earlier call to
3535  * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3536  * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3537  *
3538  * Yes, this function does not take counter wrap into account.
3539  * But counter wrap is harmless.  If the counter wraps, we have waited for
3540  * more than 2 billion grace periods (and way more on a 64-bit system!),
3541  * so waiting for a couple of additional grace periods should be just fine.
3542  *
3543  * This function provides the same memory-ordering guarantees that
3544  * would be provided by a synchronize_rcu() that was invoked at the call
3545  * to the function that provided @oldstate and that returned at the end
3546  * of this function.
3547  */
3548 void cond_synchronize_rcu(unsigned long oldstate)
3549 {
3550 	if (!poll_state_synchronize_rcu(oldstate))
3551 		synchronize_rcu();
3552 }
3553 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3554 
3555 /**
3556  * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3557  * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3558  *
3559  * If a full RCU grace period has elapsed since the call to
3560  * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3561  * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3562  * obtained, just return.  Otherwise, invoke synchronize_rcu() to wait
3563  * for a full grace period.
3564  *
3565  * Yes, this function does not take counter wrap into account.
3566  * But counter wrap is harmless.  If the counter wraps, we have waited for
3567  * more than 2 billion grace periods (and way more on a 64-bit system!),
3568  * so waiting for a couple of additional grace periods should be just fine.
3569  *
3570  * This function provides the same memory-ordering guarantees that
3571  * would be provided by a synchronize_rcu() that was invoked at the call
3572  * to the function that provided @rgosp and that returned at the end of
3573  * this function.
3574  */
3575 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3576 {
3577 	if (!poll_state_synchronize_rcu_full(rgosp))
3578 		synchronize_rcu();
3579 }
3580 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3581 
3582 /*
3583  * Check to see if there is any immediate RCU-related work to be done by
3584  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3585  * in order of increasing expense: checks that can be carried out against
3586  * CPU-local state are performed first.  However, we must check for CPU
3587  * stalls first, else we might not get a chance.
3588  */
3589 static int rcu_pending(int user)
3590 {
3591 	bool gp_in_progress;
3592 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3593 	struct rcu_node *rnp = rdp->mynode;
3594 
3595 	lockdep_assert_irqs_disabled();
3596 
3597 	/* Check for CPU stalls, if enabled. */
3598 	check_cpu_stall(rdp);
3599 
3600 	/* Does this CPU need a deferred NOCB wakeup? */
3601 	if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3602 		return 1;
3603 
3604 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3605 	gp_in_progress = rcu_gp_in_progress();
3606 	if ((user || rcu_is_cpu_rrupt_from_idle() ||
3607 	     (gp_in_progress &&
3608 	      time_before(jiffies, READ_ONCE(rcu_state.gp_start) +
3609 			  nohz_full_patience_delay_jiffies))) &&
3610 	    rcu_nohz_full_cpu())
3611 		return 0;
3612 
3613 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3614 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3615 		return 1;
3616 
3617 	/* Does this CPU have callbacks ready to invoke? */
3618 	if (!rcu_rdp_is_offloaded(rdp) &&
3619 	    rcu_segcblist_ready_cbs(&rdp->cblist))
3620 		return 1;
3621 
3622 	/* Has RCU gone idle with this CPU needing another grace period? */
3623 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3624 	    !rcu_rdp_is_offloaded(rdp) &&
3625 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3626 		return 1;
3627 
3628 	/* Have RCU grace period completed or started?  */
3629 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3630 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3631 		return 1;
3632 
3633 	/* nothing to do */
3634 	return 0;
3635 }
3636 
3637 /*
3638  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3639  * the compiler is expected to optimize this away.
3640  */
3641 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3642 {
3643 	trace_rcu_barrier(rcu_state.name, s, cpu,
3644 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3645 }
3646 
3647 /*
3648  * RCU callback function for rcu_barrier().  If we are last, wake
3649  * up the task executing rcu_barrier().
3650  *
3651  * Note that the value of rcu_state.barrier_sequence must be captured
3652  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3653  * other CPUs might count the value down to zero before this CPU gets
3654  * around to invoking rcu_barrier_trace(), which might result in bogus
3655  * data from the next instance of rcu_barrier().
3656  */
3657 static void rcu_barrier_callback(struct rcu_head *rhp)
3658 {
3659 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3660 
3661 	rhp->next = rhp; // Mark the callback as having been invoked.
3662 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3663 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3664 		complete(&rcu_state.barrier_completion);
3665 	} else {
3666 		rcu_barrier_trace(TPS("CB"), -1, s);
3667 	}
3668 }
3669 
3670 /*
3671  * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3672  */
3673 static void rcu_barrier_entrain(struct rcu_data *rdp)
3674 {
3675 	unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3676 	unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3677 	bool wake_nocb = false;
3678 	bool was_alldone = false;
3679 
3680 	lockdep_assert_held(&rcu_state.barrier_lock);
3681 	if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3682 		return;
3683 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3684 	rdp->barrier_head.func = rcu_barrier_callback;
3685 	debug_rcu_head_queue(&rdp->barrier_head);
3686 	rcu_nocb_lock(rdp);
3687 	/*
3688 	 * Flush the bypass list and wake up rcuog if we add callbacks to an
3689 	 * empty regular queue.  This way we don't wait for the bypass timer,
3690 	 * which can reach seconds if it's fully lazy.
3691 	 */
3692 	was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3693 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3694 	wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3695 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3696 		atomic_inc(&rcu_state.barrier_cpu_count);
3697 	} else {
3698 		debug_rcu_head_unqueue(&rdp->barrier_head);
3699 		rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3700 	}
3701 	rcu_nocb_unlock(rdp);
3702 	if (wake_nocb)
3703 		wake_nocb_gp(rdp, false);
3704 	smp_store_release(&rdp->barrier_seq_snap, gseq);
3705 }
3706 
3707 /*
3708  * Called with preemption disabled, and from cross-cpu IRQ context.
3709  */
3710 static void rcu_barrier_handler(void *cpu_in)
3711 {
3712 	uintptr_t cpu = (uintptr_t)cpu_in;
3713 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3714 
3715 	lockdep_assert_irqs_disabled();
3716 	WARN_ON_ONCE(cpu != rdp->cpu);
3717 	WARN_ON_ONCE(cpu != smp_processor_id());
3718 	raw_spin_lock(&rcu_state.barrier_lock);
3719 	rcu_barrier_entrain(rdp);
3720 	raw_spin_unlock(&rcu_state.barrier_lock);
3721 }
3722 
3723 /**
3724  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3725  *
3726  * Note that this primitive does not necessarily wait for an RCU grace period
3727  * to complete.  For example, if there are no RCU callbacks queued anywhere
3728  * in the system, then rcu_barrier() is within its rights to return
3729  * immediately, without waiting for anything, much less an RCU grace period.
3730  */
3731 void rcu_barrier(void)
3732 {
3733 	uintptr_t cpu;
3734 	unsigned long flags;
3735 	unsigned long gseq;
3736 	struct rcu_data *rdp;
3737 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3738 
3739 	rcu_barrier_trace(TPS("Begin"), -1, s);
3740 
3741 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3742 	mutex_lock(&rcu_state.barrier_mutex);
3743 
3744 	/* Did someone else do our work for us? */
3745 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3746 		rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
3747 		smp_mb(); /* caller's subsequent code after above check. */
3748 		mutex_unlock(&rcu_state.barrier_mutex);
3749 		return;
3750 	}
3751 
3752 	/* Mark the start of the barrier operation. */
3753 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3754 	rcu_seq_start(&rcu_state.barrier_sequence);
3755 	gseq = rcu_state.barrier_sequence;
3756 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3757 
3758 	/*
3759 	 * Initialize the count to two rather than to zero in order
3760 	 * to avoid a too-soon return to zero in case of an immediate
3761 	 * invocation of the just-enqueued callback (or preemption of
3762 	 * this task).  Exclude CPU-hotplug operations to ensure that no
3763 	 * offline non-offloaded CPU has callbacks queued.
3764 	 */
3765 	init_completion(&rcu_state.barrier_completion);
3766 	atomic_set(&rcu_state.barrier_cpu_count, 2);
3767 	raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3768 
3769 	/*
3770 	 * Force each CPU with callbacks to register a new callback.
3771 	 * When that callback is invoked, we will know that all of the
3772 	 * corresponding CPU's preceding callbacks have been invoked.
3773 	 */
3774 	for_each_possible_cpu(cpu) {
3775 		rdp = per_cpu_ptr(&rcu_data, cpu);
3776 retry:
3777 		if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
3778 			continue;
3779 		raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3780 		if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
3781 			WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3782 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3783 			rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
3784 			continue;
3785 		}
3786 		if (!rcu_rdp_cpu_online(rdp)) {
3787 			rcu_barrier_entrain(rdp);
3788 			WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3789 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3790 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
3791 			continue;
3792 		}
3793 		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3794 		if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
3795 			schedule_timeout_uninterruptible(1);
3796 			goto retry;
3797 		}
3798 		WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3799 		rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
3800 	}
3801 
3802 	/*
3803 	 * Now that we have an rcu_barrier_callback() callback on each
3804 	 * CPU, and thus each counted, remove the initial count.
3805 	 */
3806 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3807 		complete(&rcu_state.barrier_completion);
3808 
3809 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3810 	wait_for_completion(&rcu_state.barrier_completion);
3811 
3812 	/* Mark the end of the barrier operation. */
3813 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3814 	rcu_seq_end(&rcu_state.barrier_sequence);
3815 	gseq = rcu_state.barrier_sequence;
3816 	for_each_possible_cpu(cpu) {
3817 		rdp = per_cpu_ptr(&rcu_data, cpu);
3818 
3819 		WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3820 	}
3821 
3822 	/* Other rcu_barrier() invocations can now safely proceed. */
3823 	mutex_unlock(&rcu_state.barrier_mutex);
3824 }
3825 EXPORT_SYMBOL_GPL(rcu_barrier);
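/*
 * Illustrative sketch (hypothetical module): a module whose callbacks
 * reference its own code or data must wait for them before unloading:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// Stop queueing new callbacks first, then:
 *		rcu_barrier();	// Wait for in-flight callbacks.
 *		kmem_cache_destroy(foo_cache);
 *	}
 *
 * As noted above, this waits for callback invocation, not necessarily
 * for a full grace period.
 */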
3826 
3827 static unsigned long rcu_barrier_last_throttle;
3828 
3829 /**
3830  * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
3831  *
3832  * This can be thought of as guard rails around rcu_barrier() that
3833  * permit unrestricted userspace use, at least assuming the hardware's
3834  * try_cmpxchg() is robust.  There will be at most one call per second to
3835  * rcu_barrier() system-wide from use of this function, which means that
3836  * callers might needlessly wait a second or three.
3837  *
3838  * This is intended for use by test suites to avoid OOM by flushing RCU
3839  * callbacks from the previous test before starting the next.  See the
3840  * rcutree.do_rcu_barrier module parameter for more information.
3841  *
3842  * Why not simply make rcu_barrier() more scalable?  That might be
3843  * the eventual endpoint, but let's keep it simple for the time being.
3844  * Note that the module parameter infrastructure serializes calls to a
3845  * given .set() function, but should concurrent .set() invocation ever be
3846  * possible, we are ready!
3847  */
3848 static void rcu_barrier_throttled(void)
3849 {
3850 	unsigned long j = jiffies;
3851 	unsigned long old = READ_ONCE(rcu_barrier_last_throttle);
3852 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3853 
3854 	while (time_in_range(j, old, old + HZ / 16) ||
3855 	       !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) {
3856 		schedule_timeout_idle(HZ / 16);
3857 		if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3858 			smp_mb(); /* caller's subsequent code after above check. */
3859 			return;
3860 		}
3861 		j = jiffies;
3862 		old = READ_ONCE(rcu_barrier_last_throttle);
3863 	}
3864 	rcu_barrier();
3865 }
3866 
3867 /*
3868  * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier
3869  * request arrives.  We insist on a true value to allow for possible
3870  * future expansion.
3871  */
3872 static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp)
3873 {
3874 	bool b;
3875 	int ret;
3876 
3877 	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
3878 		return -EAGAIN;
3879 	ret = kstrtobool(val, &b);
3880 	if (!ret && b) {
3881 		atomic_inc((atomic_t *)kp->arg);
3882 		rcu_barrier_throttled();
3883 		atomic_dec((atomic_t *)kp->arg);
3884 	}
3885 	return ret;
3886 }
3887 
3888 /*
3889  * Output the number of outstanding rcutree.do_rcu_barrier requests.
3890  */
3891 static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp)
3892 {
3893 	return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
3894 }
3895 
3896 static const struct kernel_param_ops do_rcu_barrier_ops = {
3897 	.set = param_set_do_rcu_barrier,
3898 	.get = param_get_do_rcu_barrier,
3899 };
3900 static atomic_t do_rcu_barrier;
3901 module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644);
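/*
 * Given the "rcutree." MODULE_PARAM_PREFIX, a test harness can trigger a
 * throttled barrier from userspace, for example:
 *
 *	echo 1 > /sys/module/rcutree/parameters/do_rcu_barrier
 *
 * Reading the parameter back reports the number of requests currently
 * outstanding.
 */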
3902 
3903 /*
3904  * Compute the mask of online CPUs for the specified rcu_node structure.
3905  * This will not be stable unless the rcu_node structure's ->lock is
3906  * held, but the bit corresponding to the current CPU will be stable
3907  * in most contexts.
3908  */
3909 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
3910 {
3911 	return READ_ONCE(rnp->qsmaskinitnext);
3912 }
3913 
3914 /*
3915  * Is the CPU corresponding to the specified rcu_data structure online
3916  * from RCU's perspective?  This perspective is given by that structure's
3917  * ->qsmaskinitnext field rather than by the global cpu_online_mask.
3918  */
3919 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
3920 {
3921 	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
3922 }
3923 
3924 bool rcu_cpu_online(int cpu)
3925 {
3926 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3927 
3928 	return rcu_rdp_cpu_online(rdp);
3929 }
3930 
3931 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
3932 
3933 /*
3934  * Is the current CPU online as far as RCU is concerned?
3935  *
3936  * Disable preemption to avoid false positives that could otherwise
3937  * happen due to the current CPU number being sampled, this task being
3938  * preempted, its old CPU being taken offline, resuming on some other CPU,
3939  * then determining that its old CPU is now offline.
3940  *
3941  * Disable checking if in an NMI handler because we cannot safely
3942  * report errors from NMI handlers anyway.  In addition, it is OK to use
3943  * RCU on an offline processor during initial boot, hence the check for
3944  * rcu_scheduler_fully_active.
3945  */
3946 bool rcu_lockdep_current_cpu_online(void)
3947 {
3948 	struct rcu_data *rdp;
3949 	bool ret = false;
3950 
3951 	if (in_nmi() || !rcu_scheduler_fully_active)
3952 		return true;
3953 	preempt_disable_notrace();
3954 	rdp = this_cpu_ptr(&rcu_data);
3955 	/*
3956 	 * Strictly, we care here about the case where the current CPU is
3957 	 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
3958 	 * not being up to date. So arch_spin_is_locked() might have a
3959 	 * false positive if it's held by some *other* CPU, but that's
3960 	 * OK because that just means a false *negative* on the warning.
3961 	 */
3962 	if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
3963 		ret = true;
3964 	preempt_enable_notrace();
3965 	return ret;
3966 }
3967 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
3968 
3969 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
3970 
3971 // Has rcu_init() been invoked?  This is used (for example) to determine
3972 // whether spinlocks may be acquired safely.
3973 static bool rcu_init_invoked(void)
3974 {
3975 	return !!READ_ONCE(rcu_state.n_online_cpus);
3976 }
3977 
3978 /*
3979  * All CPUs for the specified rcu_node structure have gone offline,
3980  * and all tasks that were preempted within an RCU read-side critical
3981  * section while running on one of those CPUs have since exited their RCU
3982  * read-side critical section.  Some other CPU is reporting this fact with
3983  * the specified rcu_node structure's ->lock held and interrupts disabled.
3984  * This function therefore goes up the tree of rcu_node structures,
3985  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
3986  * the leaf rcu_node structure's ->qsmaskinit field has already been
3987  * updated.
3988  *
3989  * This function does check that the specified rcu_node structure has
3990  * all CPUs offline and no blocked tasks, so it is OK to invoke it
3991  * prematurely.  That said, invoking it after the fact will cost you
3992  * a needless lock acquisition.  So once it has done its work, don't
3993  * invoke it again.
3994  */
3995 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
3996 {
3997 	long mask;
3998 	struct rcu_node *rnp = rnp_leaf;
3999 
4000 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4001 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4002 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4003 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4004 		return;
4005 	for (;;) {
4006 		mask = rnp->grpmask;
4007 		rnp = rnp->parent;
4008 		if (!rnp)
4009 			break;
4010 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4011 		rnp->qsmaskinit &= ~mask;
4012 		/* Between grace periods, so better already be zero! */
4013 		WARN_ON_ONCE(rnp->qsmask);
4014 		if (rnp->qsmaskinit) {
4015 			raw_spin_unlock_rcu_node(rnp);
4016 			/* irqs remain disabled. */
4017 			return;
4018 		}
4019 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4020 	}
4021 }
4022 
4023 /*
4024  * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4025  * first CPU in a given leaf rcu_node structure coming online.  The caller
4026  * must hold the corresponding leaf rcu_node ->lock with interrupts
4027  * disabled.
4028  */
4029 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4030 {
4031 	long mask;
4032 	long oldmask;
4033 	struct rcu_node *rnp = rnp_leaf;
4034 
4035 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4036 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4037 	for (;;) {
4038 		mask = rnp->grpmask;
4039 		rnp = rnp->parent;
4040 		if (rnp == NULL)
4041 			return;
4042 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4043 		oldmask = rnp->qsmaskinit;
4044 		rnp->qsmaskinit |= mask;
4045 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4046 		if (oldmask)
4047 			return;
4048 	}
4049 }
4050 
4051 /*
4052  * Do boot-time initialization of a CPU's per-CPU RCU data.
4053  */
4054 static void __init
4055 rcu_boot_init_percpu_data(int cpu)
4056 {
4057 	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4058 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4059 
4060 	/* Set up local state, ensuring consistent view of global state. */
4061 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4062 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4063 	WARN_ON_ONCE(ct->nesting != 1);
4064 	WARN_ON_ONCE(rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu)));
4065 	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4066 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4067 	rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
4068 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4069 	rdp->rcu_onl_gp_state = RCU_GP_CLEANED;
4070 	rdp->last_sched_clock = jiffies;
4071 	rdp->cpu = cpu;
4072 	rcu_boot_init_nocb_percpu_data(rdp);
4073 }
4074 
4075 struct kthread_worker *rcu_exp_gp_kworker;
4076 
4077 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
4078 {
4079 	struct kthread_worker *kworker;
4080 	const char *name = "rcu_exp_par_gp_kthread_worker/%d";
4081 	struct sched_param param = { .sched_priority = kthread_prio };
4082 	int rnp_index = rnp - rcu_get_root();
4083 
4084 	if (rnp->exp_kworker)
4085 		return;
4086 
4087 	kworker = kthread_create_worker(0, name, rnp_index);
4088 	if (IS_ERR_OR_NULL(kworker)) {
4089 		pr_err("Failed to create par gp kworker on %d/%d\n",
4090 		       rnp->grplo, rnp->grphi);
4091 		return;
4092 	}
4093 	WRITE_ONCE(rnp->exp_kworker, kworker);
4094 
4095 	if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4096 		sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
4097 }
4098 
4099 static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
4100 {
4101 	struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
4102 
4103 	if (!kworker)
4104 		return NULL;
4105 
4106 	return kworker->task;
4107 }
4108 
4109 static void __init rcu_start_exp_gp_kworker(void)
4110 {
4111 	const char *name = "rcu_exp_gp_kthread_worker";
4112 	struct sched_param param = { .sched_priority = kthread_prio };
4113 
4114 	rcu_exp_gp_kworker = kthread_create_worker(0, name);
4115 	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4116 		pr_err("Failed to create %s!\n", name);
4117 		rcu_exp_gp_kworker = NULL;
4118 		return;
4119 	}
4120 
4121 	if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4122 		sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4123 }
4124 
4125 static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
4126 {
4127 	if (rcu_scheduler_fully_active) {
4128 		mutex_lock(&rnp->kthread_mutex);
4129 		rcu_spawn_one_boost_kthread(rnp);
4130 		rcu_spawn_exp_par_gp_kworker(rnp);
4131 		mutex_unlock(&rnp->kthread_mutex);
4132 	}
4133 }
4134 
4135 /*
4136  * Invoked early in the CPU-online process, when pretty much all services
4137  * are available.  The incoming CPU is not present.
4138  *
4139  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4140  * offline event can be happening at a given time.  Note also that we can
4141  * accept some slop in the rcu_state.gp_seq access due to the fact that this
4142  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4143  * And any offloaded callbacks are being numbered elsewhere.
4144  */
4145 int rcutree_prepare_cpu(unsigned int cpu)
4146 {
4147 	unsigned long flags;
4148 	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4149 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4150 	struct rcu_node *rnp = rcu_get_root();
4151 
4152 	/* Set up local state, ensuring consistent view of global state. */
4153 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4154 	rdp->qlen_last_fqs_check = 0;
4155 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4156 	rdp->blimit = blimit;
4157 	ct->nesting = 1;	/* CPU not up, no tearing. */
4158 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4159 
4160 	/*
4161 	 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4162 	 * (re-)initialized.
4163 	 */
4164 	if (!rcu_segcblist_is_enabled(&rdp->cblist))
4165 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4166 
4167 	/*
4168 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4169 	 * propagation up the rcu_node tree will happen at the beginning
4170 	 * of the next grace period.
4171 	 */
4172 	rnp = rdp->mynode;
4173 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4174 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4175 	rdp->gp_seq_needed = rdp->gp_seq;
4176 	rdp->cpu_no_qs.b.norm = true;
4177 	rdp->core_needs_qs = false;
4178 	rdp->rcu_iw_pending = false;
4179 	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4180 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4181 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4182 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4183 	rcu_spawn_rnp_kthreads(rnp);
4184 	rcu_spawn_cpu_nocb_kthread(cpu);
4185 	ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
4186 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4187 
4188 	return 0;
4189 }
4190 
4191 /*
4192  * Update kthreads affinity during CPU-hotplug changes.
4193  *
4194  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
4195  * served by the rcu_node in question.  The CPU hotplug lock is still
4196  * held, so the value of rnp->qsmaskinit will be stable.
4197  *
4198  * We don't include outgoingcpu in the affinity set; use -1 if there is
4199  * no outgoing CPU.  If there are no CPUs left in the affinity set,
4200  * this function allows the kthread to execute on any CPU.
4201  *
4202  * Any future concurrent calls are serialized via ->kthread_mutex.
4203  */
4204 static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
4205 {
4206 	cpumask_var_t cm;
4207 	unsigned long mask;
4208 	struct rcu_data *rdp;
4209 	struct rcu_node *rnp;
4210 	struct task_struct *task_boost, *task_exp;
4211 
4212 	rdp = per_cpu_ptr(&rcu_data, cpu);
4213 	rnp = rdp->mynode;
4214 
4215 	task_boost = rcu_boost_task(rnp);
4216 	task_exp = rcu_exp_par_gp_task(rnp);
4217 
4218 	/*
4219 	 * If this is the boot CPU, these tasks are created later, from an
4220 	 * early initcall, since kthreadd must be created first.
4221 	 */
4222 	if (!task_boost && !task_exp)
4223 		return;
4224 
4225 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
4226 		return;
4227 
4228 	mutex_lock(&rnp->kthread_mutex);
4229 	mask = rcu_rnp_online_cpus(rnp);
4230 	for_each_leaf_node_possible_cpu(rnp, cpu)
4231 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
4232 		    cpu != outgoingcpu)
4233 			cpumask_set_cpu(cpu, cm);
4234 	cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
4235 	if (cpumask_empty(cm)) {
4236 		cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
4237 		if (outgoingcpu >= 0)
4238 			cpumask_clear_cpu(outgoingcpu, cm);
4239 	}
4240 
4241 	if (task_exp)
4242 		set_cpus_allowed_ptr(task_exp, cm);
4243 
4244 	if (task_boost)
4245 		set_cpus_allowed_ptr(task_boost, cm);
4246 
4247 	mutex_unlock(&rnp->kthread_mutex);
4248 
4249 	free_cpumask_var(cm);
4250 }
4251 
4252 /*
4253  * Has the specified (known valid) CPU ever been fully online?
4254  */
4255 bool rcu_cpu_beenfullyonline(int cpu)
4256 {
4257 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4258 
4259 	return smp_load_acquire(&rdp->beenonline);
4260 }
4261 
4262 /*
4263  * Near the end of the CPU-online process.  Pretty much all services
4264  * enabled, and the CPU is now very much alive.
4265  */
4266 int rcutree_online_cpu(unsigned int cpu)
4267 {
4268 	unsigned long flags;
4269 	struct rcu_data *rdp;
4270 	struct rcu_node *rnp;
4271 
4272 	rdp = per_cpu_ptr(&rcu_data, cpu);
4273 	rnp = rdp->mynode;
4274 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4275 	rnp->ffmask |= rdp->grpmask;
4276 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4277 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4278 		return 0; /* Too early in boot for scheduler work. */
4279 	sync_sched_exp_online_cleanup(cpu);
4280 	rcutree_affinity_setting(cpu, -1);
4281 
4282 	// Stop-machine done, so allow nohz_full to disable tick.
4283 	tick_dep_clear(TICK_DEP_BIT_RCU);
4284 	return 0;
4285 }
4286 
4287 /*
4288  * Mark the specified CPU as being online so that subsequent grace periods
4289  * (both expedited and normal) will wait on it.  Note that this means that
4290  * incoming CPUs are not allowed to use RCU read-side critical sections
4291  * until this function is called.  Failing to observe this restriction
4292  * will result in lockdep splats.
4293  *
4294  * Note that this function is special in that it is invoked directly
4295  * from the incoming CPU rather than from the cpuhp_step mechanism.
4296  * This is because this function must be invoked at a precise location.
4297  * This incoming CPU must not have enabled interrupts yet.
4298  *
4299  * This mirrors the effects of rcutree_report_cpu_dead().
4300  */
4301 void rcutree_report_cpu_starting(unsigned int cpu)
4302 {
4303 	unsigned long mask;
4304 	struct rcu_data *rdp;
4305 	struct rcu_node *rnp;
4306 	bool newcpu;
4307 
4308 	lockdep_assert_irqs_disabled();
4309 	rdp = per_cpu_ptr(&rcu_data, cpu);
4310 	if (rdp->cpu_started)
4311 		return;
4312 	rdp->cpu_started = true;
4313 
4314 	rnp = rdp->mynode;
4315 	mask = rdp->grpmask;
4316 	arch_spin_lock(&rcu_state.ofl_lock);
4317 	rcu_watching_online();
4318 	raw_spin_lock(&rcu_state.barrier_lock);
4319 	raw_spin_lock_rcu_node(rnp);
4320 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4321 	raw_spin_unlock(&rcu_state.barrier_lock);
4322 	newcpu = !(rnp->expmaskinitnext & mask);
4323 	rnp->expmaskinitnext |= mask;
4324 	/* Allow lockless access for expedited grace periods. */
4325 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4326 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4327 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4328 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4329 	rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state);
4330 
4331 	/* An incoming CPU should never be blocking a grace period. */
4332 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4333 		/* rcu_report_qs_rnp() *really* wants some flags to restore */
4334 		unsigned long flags;
4335 
4336 		local_irq_save(flags);
4337 		rcu_disable_urgency_upon_qs(rdp);
4338 		/* Report QS -after- changing ->qsmaskinitnext! */
4339 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4340 	} else {
4341 		raw_spin_unlock_rcu_node(rnp);
4342 	}
4343 	arch_spin_unlock(&rcu_state.ofl_lock);
4344 	smp_store_release(&rdp->beenonline, true);
4345 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4346 }
4347 
4348 /*
4349  * The outgoing CPU has no further need of RCU, so remove it from
4350  * the rcu_node tree's ->qsmaskinitnext bit masks.
4351  *
4352  * Note that this function is special in that it is invoked directly
4353  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4354  * This is because this function must be invoked at a precise location.
4355  *
4356  * This mirrors the effect of rcutree_report_cpu_starting().
4357  */
4358 void rcutree_report_cpu_dead(void)
4359 {
4360 	unsigned long flags;
4361 	unsigned long mask;
4362 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4363 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4364 
4365 	/*
4366 	 * IRQs must be disabled from now on and until the CPU dies, or an interrupt
4367 	 * may open a new read-side critical section while this CPU is off the QS masks.
4368 	 */
4369 	lockdep_assert_irqs_disabled();
4370 	// Do any dangling deferred wakeups.
4371 	do_nocb_deferred_wakeup(rdp);
4372 
4373 	rcu_preempt_deferred_qs(current);
4374 
4375 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4376 	mask = rdp->grpmask;
4377 	arch_spin_lock(&rcu_state.ofl_lock);
4378 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4379 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4380 	rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state);
4381 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4382 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4383 		rcu_disable_urgency_upon_qs(rdp);
4384 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4385 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4386 	}
4387 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4388 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4389 	arch_spin_unlock(&rcu_state.ofl_lock);
4390 	rdp->cpu_started = false;
4391 }
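/*
 * Editorial sketch (hypothetical): the mirror-image call on the outgoing
 * side, made from the dying CPU with interrupts disabled and before the
 * architecture-specific death sequence:
 *
 *	void hypothetical_arch_cpu_die(void)
 *	{
 *		local_irq_disable();
 *		rcutree_report_cpu_dead();	// GPs stop waiting on this CPU.
 *		// ... arch-specific power-down; this CPU never returns ...
 *	}
 */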
4392 
4393 #ifdef CONFIG_HOTPLUG_CPU
4394 /*
4395  * The outgoing CPU has just passed through the dying-idle state, and we
4396  * are being invoked from the CPU that was IPIed to continue the offline
4397  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4398  */
4399 void rcutree_migrate_callbacks(int cpu)
4400 {
4401 	unsigned long flags;
4402 	struct rcu_data *my_rdp;
4403 	struct rcu_node *my_rnp;
4404 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4405 	bool needwake;
4406 
4407 	if (rcu_rdp_is_offloaded(rdp))
4408 		return;
4409 
4410 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4411 	if (rcu_segcblist_empty(&rdp->cblist)) {
4412 		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4413 		return;  /* No callbacks to migrate. */
4414 	}
4415 
4416 	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4417 	rcu_barrier_entrain(rdp);
4418 	my_rdp = this_cpu_ptr(&rcu_data);
4419 	my_rnp = my_rdp->mynode;
4420 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4421 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4422 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4423 	/* Leverage recent GPs and set GP for new callbacks. */
4424 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4425 		   rcu_advance_cbs(my_rnp, my_rdp);
4426 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4427 	raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4428 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4429 	rcu_segcblist_disable(&rdp->cblist);
4430 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4431 	check_cb_ovld_locked(my_rdp, my_rnp);
4432 	if (rcu_rdp_is_offloaded(my_rdp)) {
4433 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4434 		__call_rcu_nocb_wake(my_rdp, true, flags);
4435 	} else {
4436 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4437 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4438 	}
4439 	local_irq_restore(flags);
4440 	if (needwake)
4441 		rcu_gp_kthread_wake();
4442 	lockdep_assert_irqs_enabled();
4443 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4444 		  !rcu_segcblist_empty(&rdp->cblist),
4445 		  "%s: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4446 		  __func__, cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4447 		  rcu_segcblist_first_cb(&rdp->cblist));
4448 }
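/*
 * Editorial note on the double advance above: both callback lists appear to
 * be advanced before the merge so that each segment carries a current
 * grace-period number, and my_rdp is advanced once more afterwards in case
 * the newly merged callbacks can already make progress.
 */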
4449 
4450 /*
4451  * The CPU has been completely removed, and some other CPU is reporting
4452  * this fact from process context.  Do the remainder of the cleanup.
4453  * There can only be one CPU hotplug operation at a time, so no need for
4454  * explicit locking.
4455  */
4456 int rcutree_dead_cpu(unsigned int cpu)
4457 {
4458 	ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
4459 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4460 	// Stop-machine done, so allow nohz_full to disable tick.
4461 	tick_dep_clear(TICK_DEP_BIT_RCU);
4462 	return 0;
4463 }
4464 
4465 /*
4466  * Near the end of the offline process.  Trace the fact that this CPU
4467  * is going offline.
4468  */
4469 int rcutree_dying_cpu(unsigned int cpu)
4470 {
4471 	bool blkd;
4472 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4473 	struct rcu_node *rnp = rdp->mynode;
4474 
4475 	blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4476 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4477 			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4478 	return 0;
4479 }
4480 
4481 /*
4482  * Near the beginning of the offline process.  The CPU is still very much alive
4483  * with pretty much all services enabled.
4484  */
4485 int rcutree_offline_cpu(unsigned int cpu)
4486 {
4487 	unsigned long flags;
4488 	struct rcu_data *rdp;
4489 	struct rcu_node *rnp;
4490 
4491 	rdp = per_cpu_ptr(&rcu_data, cpu);
4492 	rnp = rdp->mynode;
4493 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4494 	rnp->ffmask &= ~rdp->grpmask;
4495 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4496 
4497 	rcutree_affinity_setting(cpu, cpu);
4498 
4499 	// nohz_full CPUs need the tick for stop-machine to work quickly
4500 	tick_dep_set(TICK_DEP_BIT_RCU);
4501 	return 0;
4502 }
4503 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
4504 
4505 /*
4506  * On non-huge systems, use expedited RCU grace periods to make suspend
4507  * and hibernation run faster.
4508  */
4509 static int rcu_pm_notify(struct notifier_block *self,
4510 			 unsigned long action, void *hcpu)
4511 {
4512 	switch (action) {
4513 	case PM_HIBERNATION_PREPARE:
4514 	case PM_SUSPEND_PREPARE:
4515 		rcu_async_hurry();
4516 		rcu_expedite_gp();
4517 		break;
4518 	case PM_POST_HIBERNATION:
4519 	case PM_POST_SUSPEND:
4520 		rcu_unexpedite_gp();
4521 		rcu_async_relax();
4522 		break;
4523 	default:
4524 		break;
4525 	}
4526 	return NOTIFY_OK;
4527 }
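/*
 * Editorial note: the notifier above is registered from rcu_init() via
 * pm_notifier(rcu_pm_notify, 0).  A minimal sketch of the same pattern for
 * some other subsystem (hypothetical foo_*() helpers) would be:
 *
 *	static int foo_pm_notify(struct notifier_block *self,
 *				 unsigned long action, void *unused)
 *	{
 *		if (action == PM_SUSPEND_PREPARE)
 *			foo_quiesce();		// hypothetical
 *		else if (action == PM_POST_SUSPEND)
 *			foo_resume();		// hypothetical
 *		return NOTIFY_OK;
 *	}
 *
 *	pm_notifier(foo_pm_notify, 0);	// priority 0, as used below
 */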
4528 
4529 /*
4530  * Spawn the kthreads that handle RCU's grace periods.
4531  */
4532 static int __init rcu_spawn_gp_kthread(void)
4533 {
4534 	unsigned long flags;
4535 	struct rcu_node *rnp;
4536 	struct sched_param sp;
4537 	struct task_struct *t;
4538 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4539 
4540 	rcu_scheduler_fully_active = 1;
4541 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4542 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4543 		return 0;
4544 	if (kthread_prio) {
4545 		sp.sched_priority = kthread_prio;
4546 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4547 	}
4548 	rnp = rcu_get_root();
4549 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4550 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4551 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4552 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4553 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4554 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4555 	wake_up_process(t);
4556 	/* This is a pre-SMP initcall, so we expect a single CPU. */
4557 	WARN_ON(num_online_cpus() > 1);
4558 	/*
4559 	 * These kthreads could not be created during rcu_init() -> rcutree_prepare_cpu()
4560 	 * because rcu_scheduler_fully_active was not yet set.
4561 	 */
4562 	rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4563 	rcu_spawn_rnp_kthreads(rdp->mynode);
4564 	rcu_spawn_core_kthreads();
4565 	/* Create kthread worker for expedited GPs */
4566 	rcu_start_exp_gp_kworker();
4567 	return 0;
4568 }
4569 early_initcall(rcu_spawn_gp_kthread);
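/*
 * Editorial sketch: the create/set-priority/publish/wake sequence above is a
 * common pattern for configuring a kthread before its first run (hypothetical
 * names):
 *
 *	struct sched_param sp = { .sched_priority = prio };
 *	struct task_struct *t = kthread_create(thread_fn, NULL, "name");
 *
 *	if (!IS_ERR(t)) {
 *		if (prio)
 *			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 *		smp_store_release(&published_task, t);	// Publish before waking.
 *		wake_up_process(t);			// First run happens here.
 *	}
 */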
4570 
4571 /*
4572  * This function is invoked towards the end of the scheduler's
4573  * initialization process.  Before this is called, the idle task might
4574  * invoke synchronous grace-period primitives (while it is still booting
4575  * the system, such primitives are no-ops).  After this
4576  * function is called, any synchronous grace-period primitives are run as
4577  * expedited, with the requesting task driving the grace period forward.
4578  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4579  * runtime RCU functionality.
4580  */
4581 void rcu_scheduler_starting(void)
4582 {
4583 	unsigned long flags;
4584 	struct rcu_node *rnp;
4585 
4586 	WARN_ON(num_online_cpus() != 1);
4587 	WARN_ON(nr_context_switches() > 0);
4588 	rcu_test_sync_prims();
4589 
4590 	// Fix up the ->gp_seq counters.
4591 	local_irq_save(flags);
4592 	rcu_for_each_node_breadth_first(rnp)
4593 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4594 	local_irq_restore(flags);
4595 
4596 	// Switch out of early boot mode.
4597 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4598 	rcu_test_sync_prims();
4599 }
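/*
 * Editorial example: a synchronize_rcu() issued before this point is a no-op
 * (there is a single task and no context switches have occurred), whereas
 * one issued afterwards is driven to completion as an expedited grace period
 * by the requesting task.
 */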
4600 
4601 /*
4602  * Helper function for rcu_init() that initializes the rcu_state structure.
4603  */
4604 static void __init rcu_init_one(void)
4605 {
4606 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4607 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4608 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4609 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4610 
4611 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4612 	int cpustride = 1;
4613 	int i;
4614 	int j;
4615 	struct rcu_node *rnp;
4616 
4617 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4618 
4619 	/* Silence gcc 4.8 false positive about array index out of range. */
4620 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4621 		panic("rcu_init_one: rcu_num_lvls out of range");
4622 
4623 	/* Initialize the level-tracking arrays. */
4624 
4625 	for (i = 1; i < rcu_num_lvls; i++)
4626 		rcu_state.level[i] =
4627 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4628 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4629 
4630 	/* Initialize the elements themselves, starting from the leaves. */
4631 
4632 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4633 		cpustride *= levelspread[i];
4634 		rnp = rcu_state.level[i];
4635 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4636 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4637 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4638 						   &rcu_node_class[i], buf[i]);
4639 			raw_spin_lock_init(&rnp->fqslock);
4640 			lockdep_set_class_and_name(&rnp->fqslock,
4641 						   &rcu_fqs_class[i], fqs[i]);
4642 			rnp->gp_seq = rcu_state.gp_seq;
4643 			rnp->gp_seq_needed = rcu_state.gp_seq;
4644 			rnp->completedqs = rcu_state.gp_seq;
4645 			rnp->qsmask = 0;
4646 			rnp->qsmaskinit = 0;
4647 			rnp->grplo = j * cpustride;
4648 			rnp->grphi = (j + 1) * cpustride - 1;
4649 			if (rnp->grphi >= nr_cpu_ids)
4650 				rnp->grphi = nr_cpu_ids - 1;
4651 			if (i == 0) {
4652 				rnp->grpnum = 0;
4653 				rnp->grpmask = 0;
4654 				rnp->parent = NULL;
4655 			} else {
4656 				rnp->grpnum = j % levelspread[i - 1];
4657 				rnp->grpmask = BIT(rnp->grpnum);
4658 				rnp->parent = rcu_state.level[i - 1] +
4659 					      j / levelspread[i - 1];
4660 			}
4661 			rnp->level = i;
4662 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4663 			rcu_init_one_nocb(rnp);
4664 			init_waitqueue_head(&rnp->exp_wq[0]);
4665 			init_waitqueue_head(&rnp->exp_wq[1]);
4666 			init_waitqueue_head(&rnp->exp_wq[2]);
4667 			init_waitqueue_head(&rnp->exp_wq[3]);
4668 			spin_lock_init(&rnp->exp_lock);
4669 			mutex_init(&rnp->kthread_mutex);
4670 			raw_spin_lock_init(&rnp->exp_poll_lock);
4671 			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4672 			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4673 		}
4674 	}
4675 
4676 	init_swait_queue_head(&rcu_state.gp_wq);
4677 	init_swait_queue_head(&rcu_state.expedited_wq);
4678 	rnp = rcu_first_leaf_node();
4679 	for_each_possible_cpu(i) {
4680 		while (i > rnp->grphi)
4681 			rnp++;
4682 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4683 		per_cpu_ptr(&rcu_data, i)->barrier_head.next =
4684 			&per_cpu_ptr(&rcu_data, i)->barrier_head;
4685 		rcu_boot_init_percpu_data(i);
4686 	}
4687 }
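/*
 * Editorial worked example for the CPU-to-leaf loop above (hypothetical
 * 16-CPU leaves): CPUs 0..15 satisfy i <= rnp->grphi and map to the first
 * leaf, CPU 16 fails the test so rnp++ advances to the second leaf, and so
 * on across all possible CPUs.
 */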
4688 
4689 /*
4690  * Force priority from the kernel command-line into range.
4691  */
4692 static void __init sanitize_kthread_prio(void)
4693 {
4694 	int kthread_prio_in = kthread_prio;
4695 
4696 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4697 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4698 		kthread_prio = 2;
4699 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4700 		kthread_prio = 1;
4701 	else if (kthread_prio < 0)
4702 		kthread_prio = 0;
4703 	else if (kthread_prio > 99)
4704 		kthread_prio = 99;
4705 
4706 	if (kthread_prio != kthread_prio_in)
4707 		pr_alert("%s: Limited prio to %d from %d\n",
4708 			 __func__, kthread_prio, kthread_prio_in);
4709 }
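/*
 * Editorial example: booting with rcutree.kthread_prio=150 clamps the value
 * to 99 and logs "rcu: sanitize_kthread_prio: Limited prio to 99 from 150".
 * With CONFIG_RCU_BOOST=y and the default of 0, the result is 1 (or 2 when
 * rcutorture is built in).
 */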
4710 
4711 /*
4712  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4713  * replace the definitions in tree.h because those are needed to size
4714  * the ->node array in the rcu_state structure.
4715  */
4716 void rcu_init_geometry(void)
4717 {
4718 	ulong d;
4719 	int i;
4720 	static unsigned long old_nr_cpu_ids;
4721 	int rcu_capacity[RCU_NUM_LVLS];
4722 	static bool initialized;
4723 
4724 	if (initialized) {
4725 		/*
4726 		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4727 		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4728 		 */
4729 		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4730 		return;
4731 	}
4732 
4733 	old_nr_cpu_ids = nr_cpu_ids;
4734 	initialized = true;
4735 
4736 	/*
4737 	 * Initialize any unspecified boot parameters.
4738 	 * The default values of jiffies_till_first_fqs and
4739 	 * jiffies_till_next_fqs are the RCU_JIFFIES_TILL_FORCE_QS value
4740 	 * (which is a function of HZ), plus one for each RCU_JIFFIES_FQS_DIV
4741 	 * CPUs that might be on the system.
4742 	 */
4743 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4744 	if (jiffies_till_first_fqs == ULONG_MAX)
4745 		jiffies_till_first_fqs = d;
4746 	if (jiffies_till_next_fqs == ULONG_MAX)
4747 		jiffies_till_next_fqs = d;
4748 	adjust_jiffies_till_sched_qs();
4749 
4750 	/* If the compile-time values are accurate, just leave. */
4751 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4752 	    nr_cpu_ids == NR_CPUS)
4753 		return;
4754 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4755 		rcu_fanout_leaf, nr_cpu_ids);
4756 
4757 	/*
4758 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4759 	 * and cannot exceed the number of bits in the rcu_node masks.
4760 	 * Complain and fall back to the compile-time values if this
4761 	 * limit is exceeded.
4762 	 */
4763 	if (rcu_fanout_leaf < 2 || rcu_fanout_leaf > BITS_PER_LONG) {
4764 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4765 		WARN_ON(1);
4766 		return;
4767 	}
4768 
4769 	/*
4770 	 * Compute the number of nodes that can be handled by an rcu_node tree
4771 	 * with the given number of levels.
4772 	 */
4773 	rcu_capacity[0] = rcu_fanout_leaf;
4774 	for (i = 1; i < RCU_NUM_LVLS; i++)
4775 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4776 
4777 	/*
4778 	 * The tree must be able to accommodate the configured number of CPUs.
4779 	 * If this limit is exceeded, fall back to the compile-time values.
4780 	 */
4781 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4782 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4783 		WARN_ON(1);
4784 		return;
4785 	}
4786 
4787 	/* Calculate the number of levels in the tree. */
4788 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4789 	}
4790 	rcu_num_lvls = i + 1;
4791 
4792 	/* Calculate the number of rcu_nodes at each level of the tree. */
4793 	for (i = 0; i < rcu_num_lvls; i++) {
4794 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4795 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4796 	}
4797 
4798 	/* Calculate the total number of rcu_node structures. */
4799 	rcu_num_nodes = 0;
4800 	for (i = 0; i < rcu_num_lvls; i++)
4801 		rcu_num_nodes += num_rcu_lvl[i];
4802 }
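/*
 * Editorial worked example (hypothetical values): with rcu_fanout_leaf=16,
 * RCU_FANOUT=64, and nr_cpu_ids=100, rcu_capacity = {16, 1024, ...}, so the
 * level-counting loop stops at i=1 and rcu_num_lvls=2.  Then
 * num_rcu_lvl[0] = DIV_ROUND_UP(100, 1024) = 1 (the root) and
 * num_rcu_lvl[1] = DIV_ROUND_UP(100, 16) = 7 (the leaves), for
 * rcu_num_nodes = 8.
 */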
4803 
4804 /*
4805  * Dump out the structure of the rcu_node combining tree associated
4806  * with the rcu_state structure.
4807  */
4808 static void __init rcu_dump_rcu_node_tree(void)
4809 {
4810 	int level = 0;
4811 	struct rcu_node *rnp;
4812 
4813 	pr_info("rcu_node tree layout dump\n");
4814 	pr_info(" ");
4815 	rcu_for_each_node_breadth_first(rnp) {
4816 		if (rnp->level != level) {
4817 			pr_cont("\n");
4818 			pr_info(" ");
4819 			level = rnp->level;
4820 		}
4821 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4822 	}
4823 	pr_cont("\n");
4824 }
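/*
 * Editorial example: for the hypothetical two-level, 100-CPU geometry above,
 * the dump (one line per level, "grplo:grphi ^grpnum") would look roughly
 * like:
 *
 *	rcu: rcu_node tree layout dump
 *	rcu:  0:99 ^0
 *	rcu:  0:15 ^0  16:31 ^1  32:47 ^2  48:63 ^3  64:79 ^4  80:95 ^5  96:99 ^6
 */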
4825 
4826 struct workqueue_struct *rcu_gp_wq;
4827 
4828 void __init rcu_init(void)
4829 {
4830 	int cpu = smp_processor_id();
4831 
4832 	rcu_early_boot_tests();
4833 
4834 	rcu_bootup_announce();
4835 	sanitize_kthread_prio();
4836 	rcu_init_geometry();
4837 	rcu_init_one();
4838 	if (dump_tree)
4839 		rcu_dump_rcu_node_tree();
4840 	if (use_softirq)
4841 		open_softirq(RCU_SOFTIRQ, rcu_core_si);
4842 
4843 	/*
4844 	 * We don't need protection against CPU-hotplug here because
4845 	 * this is called early in boot, before either interrupts
4846 	 * or the scheduler are operational.
4847 	 */
4848 	pm_notifier(rcu_pm_notify, 0);
4849 	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
4850 	rcutree_prepare_cpu(cpu);
4851 	rcutree_report_cpu_starting(cpu);
4852 	rcutree_online_cpu(cpu);
4853 
4854 	/* Create workqueue for Tree SRCU and for expedited GPs. */
4855 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4856 	WARN_ON(!rcu_gp_wq);
4857 
4858 	sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
4859 	WARN_ON(!sync_wq);
4860 
4861 	/* Fill in default value for rcutree.qovld boot parameter. */
4862 	/* -After- the rcu_node ->lock fields are initialized! */
4863 	if (qovld < 0)
4864 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4865 	else
4866 		qovld_calc = qovld;
4867 
4868 	// Kick-start in case any polled grace periods started early.
4869 	(void)start_poll_synchronize_rcu_expedited();
4870 
4871 	rcu_test_sync_prims();
4872 
4873 	tasks_cblist_init_generic();
4874 }
4875 
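/*
 * The "header" files included below contain C code rather than only
 * declarations; they are compiled as part of this translation unit.
 */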
4876 #include "tree_stall.h"
4877 #include "tree_exp.h"
4878 #include "tree_nocb.h"
4879 #include "tree_plugin.h"
4880