xref: /linux/kernel/rcu/tree.c (revision 031fba65fc202abf1f193e321be7a2c274fd88ba)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  *
5  * Copyright IBM Corporation, 2008
6  *
7  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8  *	    Manfred Spraul <manfred@colorfullife.com>
9  *	    Paul E. McKenney <paulmck@linux.ibm.com>
10  *
11  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13  *
14  * For detailed explanation of Read-Copy Update mechanism see -
15  *	Documentation/RCU
16  */
17 
18 #define pr_fmt(fmt) "rcu: " fmt
19 
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/kmemleak.h>
35 #include <linux/moduleparam.h>
36 #include <linux/panic.h>
37 #include <linux/panic_notifier.h>
38 #include <linux/percpu.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/mutex.h>
42 #include <linux/time.h>
43 #include <linux/kernel_stat.h>
44 #include <linux/wait.h>
45 #include <linux/kthread.h>
46 #include <uapi/linux/sched/types.h>
47 #include <linux/prefetch.h>
48 #include <linux/delay.h>
49 #include <linux/random.h>
50 #include <linux/trace_events.h>
51 #include <linux/suspend.h>
52 #include <linux/ftrace.h>
53 #include <linux/tick.h>
54 #include <linux/sysrq.h>
55 #include <linux/kprobes.h>
56 #include <linux/gfp.h>
57 #include <linux/oom.h>
58 #include <linux/smpboot.h>
59 #include <linux/jiffies.h>
60 #include <linux/slab.h>
61 #include <linux/sched/isolation.h>
62 #include <linux/sched/clock.h>
63 #include <linux/vmalloc.h>
64 #include <linux/mm.h>
65 #include <linux/kasan.h>
66 #include <linux/context_tracking.h>
67 #include "../time/tick-internal.h"
68 
69 #include "tree.h"
70 #include "rcu.h"
71 
72 #ifdef MODULE_PARAM_PREFIX
73 #undef MODULE_PARAM_PREFIX
74 #endif
75 #define MODULE_PARAM_PREFIX "rcutree."
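/*
 * With this prefix, the module parameters declared below are set from the
 * kernel boot command line as, for example, "rcutree.kthread_prio=1" or
 * "rcutree.gp_init_delay=3" (illustrative values; a given parameter is
 * available only when the Kconfig options guarding it are enabled).
 */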
76 
77 /* Data structures. */
78 
79 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
80 	.gpwrap = true,
81 #ifdef CONFIG_RCU_NOCB_CPU
82 	.cblist.flags = SEGCBLIST_RCU_CORE,
83 #endif
84 };
85 static struct rcu_state rcu_state = {
86 	.level = { &rcu_state.node[0] },
87 	.gp_state = RCU_GP_IDLE,
88 	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
89 	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
90 	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
91 	.name = RCU_NAME,
92 	.abbr = RCU_ABBR,
93 	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
94 	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
95 	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
96 };
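/*
 * Note that ->gp_seq above starts only a few hundred grace periods short
 * of the counter's wrap point ((0UL - 300UL) << RCU_SEQ_CTR_SHIFT), so
 * wraparound happens soon after boot, which presumably serves to exercise
 * the ULONG_CMP_*()-based wrap handling early rather than only after years
 * of uptime.
 */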
97 
98 /* Dump rcu_node combining tree at boot to verify correct setup. */
99 static bool dump_tree;
100 module_param(dump_tree, bool, 0444);
101 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
102 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
103 #ifndef CONFIG_PREEMPT_RT
104 module_param(use_softirq, bool, 0444);
105 #endif
106 /* Control rcu_node-tree auto-balancing at boot time. */
107 static bool rcu_fanout_exact;
108 module_param(rcu_fanout_exact, bool, 0444);
109 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
110 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
111 module_param(rcu_fanout_leaf, int, 0444);
112 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
113 /* Number of rcu_nodes at specified level. */
114 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
115 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
116 
117 /*
118  * The rcu_scheduler_active variable is initialized to the value
119  * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
120  * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
121  * RCU can assume that there is but one task, allowing RCU to (for example)
122  * optimize synchronize_rcu() to a simple barrier().  When this variable
123  * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
124  * to detect real grace periods.  This variable is also used to suppress
125  * boot-time false positives from lockdep-RCU error checking.  Finally, it
126  * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
127  * is fully initialized, including all of its kthreads having been spawned.
128  */
129 int rcu_scheduler_active __read_mostly;
130 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
131 
132 /*
133  * The rcu_scheduler_fully_active variable transitions from zero to one
134  * during the early_initcall() processing, which is after the scheduler
135  * is capable of creating new tasks.  So RCU processing (for example,
136  * creating tasks for RCU priority boosting) must be delayed until after
137  * rcu_scheduler_fully_active transitions from zero to one.  We also
138  * currently delay invocation of any RCU callbacks until after this point.
139  *
140  * It might later prove better for people registering RCU callbacks during
141  * early boot to take responsibility for these callbacks, but one step at
142  * a time.
143  */
144 static int rcu_scheduler_fully_active __read_mostly;
145 
146 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
147 			      unsigned long gps, unsigned long flags);
148 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
149 static void invoke_rcu_core(void);
150 static void rcu_report_exp_rdp(struct rcu_data *rdp);
151 static void sync_sched_exp_online_cleanup(int cpu);
152 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
153 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
154 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
155 static bool rcu_init_invoked(void);
156 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
157 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
158 
159 /*
160  * rcuc/rcub/rcuop kthread realtime priority. Whether the "rcuop"
161  * kthreads actually run at this real-time priority is additionally
162  * controlled by the CONFIG_RCU_NOCB_CPU_CB_BOOST Kconfig option.
163  */
164 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
165 module_param(kthread_prio, int, 0444);
166 
167 /* Delay in jiffies for grace-period initialization delays, debug only. */
168 
169 static int gp_preinit_delay;
170 module_param(gp_preinit_delay, int, 0444);
171 static int gp_init_delay;
172 module_param(gp_init_delay, int, 0444);
173 static int gp_cleanup_delay;
174 module_param(gp_cleanup_delay, int, 0444);
175 
176 // Add delay to rcu_read_unlock() for strict grace periods.
177 static int rcu_unlock_delay;
178 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
179 module_param(rcu_unlock_delay, int, 0444);
180 #endif
181 
182 /*
183  * This rcu parameter is runtime-read-only. It sets the minimum
184  * number of objects which can be cached per CPU. Each object
185  * is one page in size. The value can be changed only at
186  * boot time.
187  */
188 static int rcu_min_cached_objs = 5;
189 module_param(rcu_min_cached_objs, int, 0444);
190 
191 // A page shrinker can ask for pages to be freed to make them
192 // available for other parts of the system. This usually happens
193 // under low memory conditions, and in that case we should also
194 // defer page-cache filling for a short time period.
195 //
196 // The default value is 5 seconds, which is long enough to reduce
197 // interference with the shrinker while it asks other systems to
198 // drain their caches.
199 static int rcu_delay_page_cache_fill_msec = 5000;
200 module_param(rcu_delay_page_cache_fill_msec, int, 0444);
201 
202 /* Retrieve RCU kthreads priority for rcutorture */
203 int rcu_get_gp_kthreads_prio(void)
204 {
205 	return kthread_prio;
206 }
207 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
208 
209 /*
210  * Number of grace periods between delays, normalized by the duration of
211  * the delay.  The longer the delay, the more grace periods elapse between
212  * successive delays.  The reason for this normalization is that it means that,
213  * for non-zero delays, the overall slowdown of grace periods is constant
214  * regardless of the duration of the delay.  This arrangement balances
215  * the need for long delays to increase some race probabilities with the
216  * need for fast grace periods to increase other race probabilities.
217  */
218 #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
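/*
 * For example (illustrative numbers only), with rcu_num_nodes = 3 and a
 * debug delay of 5 jiffies, rcu_gp_slow() below sleeps once every
 * 3 * PER_RCU_NODE_PERIOD * 5 = 45 grace periods, so the average added
 * latency works out to 5/45 of a jiffy per grace period no matter how
 * large the configured delay is.
 */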
219 
220 /*
221  * Return true if an RCU grace period is in progress.  The READ_ONCE()s
222  * permit this function to be invoked without holding the root rcu_node
223  * structure's ->lock, but of course results can be subject to change.
224  */
225 static int rcu_gp_in_progress(void)
226 {
227 	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
228 }
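/*
 * For example, using the encoding from kernel/rcu/rcu.h (the low
 * RCU_SEQ_CTR_SHIFT bits hold the phase, the remaining bits the count),
 * a ->gp_seq value of 0x1c is grace period 7 in the idle phase, so this
 * function returns zero, while 0x1d is grace period 7 with initialization
 * in progress, so it returns nonzero.
 */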
229 
230 /*
231  * Return the number of callbacks queued on the specified CPU.
232  * Handles both the nocbs and normal cases.
233  */
234 static long rcu_get_n_cbs_cpu(int cpu)
235 {
236 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
237 
238 	if (rcu_segcblist_is_enabled(&rdp->cblist))
239 		return rcu_segcblist_n_cbs(&rdp->cblist);
240 	return 0;
241 }
242 
243 void rcu_softirq_qs(void)
244 {
245 	rcu_qs();
246 	rcu_preempt_deferred_qs(current);
247 	rcu_tasks_qs(current, false);
248 }
249 
250 /*
251  * Reset the current CPU's ->dynticks counter to indicate that the
252  * newly onlined CPU is no longer in an extended quiescent state.
253  * This will either leave the counter unchanged, or increment it
254  * to the next non-quiescent value.
255  *
256  * The non-atomic test/increment sequence works because the upper bits
257  * of the ->dynticks counter are manipulated only by the corresponding CPU,
258  * or when the corresponding CPU is offline.
259  */
260 static void rcu_dynticks_eqs_online(void)
261 {
262 	if (ct_dynticks() & RCU_DYNTICKS_IDX)
263 		return;
264 	ct_state_inc(RCU_DYNTICKS_IDX);
265 }
266 
267 /*
268  * Snapshot the ->dynticks counter with full ordering so as to allow
269  * stable comparison of this counter with past and future snapshots.
270  */
271 static int rcu_dynticks_snap(int cpu)
272 {
273 	smp_mb();  // Fundamental RCU ordering guarantee.
274 	return ct_dynticks_cpu_acquire(cpu);
275 }
276 
277 /*
278  * Return true if the snapshot returned from rcu_dynticks_snap()
279  * indicates that RCU is in an extended quiescent state.
280  */
281 static bool rcu_dynticks_in_eqs(int snap)
282 {
283 	return !(snap & RCU_DYNTICKS_IDX);
284 }
285 
286 /*
287  * Return true if the CPU corresponding to the specified rcu_data
288  * structure has spent some time in an extended quiescent state since
289  * rcu_dynticks_snap() returned the specified snapshot.
290  */
291 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
292 {
293 	return snap != rcu_dynticks_snap(rdp->cpu);
294 }
295 
296 /*
297  * Return true if the referenced integer is zero while the specified
298  * CPU remains within a single extended quiescent state.
299  */
300 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
301 {
302 	int snap;
303 
304 	// If not quiescent, force back to earlier extended quiescent state.
305 	snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
306 	smp_rmb(); // Order ->dynticks and *vp reads.
307 	if (READ_ONCE(*vp))
308 		return false;  // Non-zero, so report failure.
309 	smp_rmb(); // Order *vp read and ->dynticks re-read.
310 
311 	// If still in the same extended quiescent state, we are good!
312 	return snap == ct_dynticks_cpu(cpu);
313 }
314 
315 /*
316  * Let the RCU core know that this CPU has gone through the scheduler,
317  * which is a quiescent state.  This is called when the need for a
318  * quiescent state is urgent, so we burn an atomic operation and full
319  * memory barriers to let the RCU core know about it, regardless of what
320  * this CPU might (or might not) do in the near future.
321  *
322  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
323  *
324  * The caller must have disabled interrupts and must not be idle.
325  */
326 notrace void rcu_momentary_dyntick_idle(void)
327 {
328 	int seq;
329 
330 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
331 	seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
332 	/* It is illegal to call this from idle state. */
333 	WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
334 	rcu_preempt_deferred_qs(current);
335 }
336 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
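/*
 * The 2 * RCU_DYNTICKS_IDX increment above leaves the "RCU is watching"
 * bit set but changes the counter value, so any CPU comparing an earlier
 * ->dynticks snapshot against a later one (as rcu_dynticks_in_eqs_since()
 * does) sees the same counter change that a real dyntick-idle round trip
 * would have produced.
 */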
337 
338 /**
339  * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
340  *
341  * If the current CPU is idle and running at a first-level (not nested)
342  * interrupt, or running directly from the idle loop, return true.
343  *
344  * The caller must have at least disabled IRQs.
345  */
346 static int rcu_is_cpu_rrupt_from_idle(void)
347 {
348 	long nesting;
349 
350 	/*
351 	 * Usually called from the tick; but also used from smp_call_function()
352 	 * for expedited grace periods. This latter can result in running from
353 	 * the idle task, instead of an actual IPI.
354 	 */
355 	lockdep_assert_irqs_disabled();
356 
357 	/* Check for counter underflows */
358 	RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
359 			 "RCU dynticks_nesting counter underflow!");
360 	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
361 			 "RCU dynticks_nmi_nesting counter underflow/zero!");
362 
363 	/* Are we at first interrupt nesting level? */
364 	nesting = ct_dynticks_nmi_nesting();
365 	if (nesting > 1)
366 		return false;
367 
368 	/*
369 	 * If we're not in an interrupt, we must be in the idle task!
370 	 */
371 	WARN_ON_ONCE(!nesting && !is_idle_task(current));
372 
373 	/* Does CPU appear to be idle from an RCU standpoint? */
374 	return ct_dynticks_nesting() == 0;
375 }
376 
377 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
378 				// Maximum callbacks per rcu_do_batch ...
379 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
380 static long blimit = DEFAULT_RCU_BLIMIT;
381 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
382 static long qhimark = DEFAULT_RCU_QHIMARK;
383 #define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
384 static long qlowmark = DEFAULT_RCU_QLOMARK;
385 #define DEFAULT_RCU_QOVLD_MULT 2
386 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
387 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
388 static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!
389 
390 module_param(blimit, long, 0444);
391 module_param(qhimark, long, 0444);
392 module_param(qlowmark, long, 0444);
393 module_param(qovld, long, 0444);
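/*
 * Taken together with the non-strict defaults above: callbacks are
 * normally invoked at most blimit (10) at a time; once more than
 * qhimark (10000) are pending, that limit is ignored until the backlog
 * drops back to qlowmark (100); and once qovld (2 * 10000) callbacks are
 * pending, RCU starts aggressively forcing quiescent states.
 */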
394 
395 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
396 static ulong jiffies_till_next_fqs = ULONG_MAX;
397 static bool rcu_kick_kthreads;
398 static int rcu_divisor = 7;
399 module_param(rcu_divisor, int, 0644);
400 
401 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
402 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
403 module_param(rcu_resched_ns, long, 0644);
404 
405 /*
406  * How long the grace period must be before we start recruiting
407  * quiescent-state help from rcu_note_context_switch().
408  */
409 static ulong jiffies_till_sched_qs = ULONG_MAX;
410 module_param(jiffies_till_sched_qs, ulong, 0444);
411 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
412 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
413 
414 /*
415  * Make sure that we give the grace-period kthread time to detect any
416  * idle CPUs before taking active measures to force quiescent states.
417  * However, don't go below 100 milliseconds, adjusted upwards for really
418  * large systems.
419  */
420 static void adjust_jiffies_till_sched_qs(void)
421 {
422 	unsigned long j;
423 
424 	/* If jiffies_till_sched_qs was specified, respect the request. */
425 	if (jiffies_till_sched_qs != ULONG_MAX) {
426 		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
427 		return;
428 	}
429 	/* Otherwise, set to third fqs scan, but bound below on large systems. */
430 	j = READ_ONCE(jiffies_till_first_fqs) +
431 		      2 * READ_ONCE(jiffies_till_next_fqs);
432 	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
433 		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
434 	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
435 	WRITE_ONCE(jiffies_to_sched_qs, j);
436 }
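/*
 * For example (illustrative numbers only), if jiffies_till_first_fqs and
 * jiffies_till_next_fqs are both 3 jiffies, the computed value is
 * 3 + 2 * 3 = 9 jiffies, which is then raised to the
 * HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV floor (at least 100 jiffies
 * at HZ=1000), so jiffies_to_sched_qs ends up at that floor.
 */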
437 
438 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
439 {
440 	ulong j;
441 	int ret = kstrtoul(val, 0, &j);
442 
443 	if (!ret) {
444 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
445 		adjust_jiffies_till_sched_qs();
446 	}
447 	return ret;
448 }
449 
450 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
451 {
452 	ulong j;
453 	int ret = kstrtoul(val, 0, &j);
454 
455 	if (!ret) {
456 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
457 		adjust_jiffies_till_sched_qs();
458 	}
459 	return ret;
460 }
461 
462 static const struct kernel_param_ops first_fqs_jiffies_ops = {
463 	.set = param_set_first_fqs_jiffies,
464 	.get = param_get_ulong,
465 };
466 
467 static const struct kernel_param_ops next_fqs_jiffies_ops = {
468 	.set = param_set_next_fqs_jiffies,
469 	.get = param_get_ulong,
470 };
471 
472 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
473 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
474 module_param(rcu_kick_kthreads, bool, 0644);
475 
476 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
477 static int rcu_pending(int user);
478 
479 /*
480  * Return the number of RCU GPs completed thus far for debug & stats.
481  */
482 unsigned long rcu_get_gp_seq(void)
483 {
484 	return READ_ONCE(rcu_state.gp_seq);
485 }
486 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
487 
488 /*
489  * Return the number of RCU expedited batches completed thus far for
490  * debug & stats.  Odd numbers mean that a batch is in progress, even
491  * numbers mean idle.  The value returned will thus be roughly double
492  * the cumulative batches since boot.
493  */
494 unsigned long rcu_exp_batches_completed(void)
495 {
496 	return rcu_state.expedited_sequence;
497 }
498 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
499 
500 /*
501  * Return the root node of the rcu_state structure.
502  */
503 static struct rcu_node *rcu_get_root(void)
504 {
505 	return &rcu_state.node[0];
506 }
507 
508 /*
509  * Send along grace-period-related data for rcutorture diagnostics.
510  */
511 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
512 			    unsigned long *gp_seq)
513 {
514 	switch (test_type) {
515 	case RCU_FLAVOR:
516 		*flags = READ_ONCE(rcu_state.gp_flags);
517 		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
518 		break;
519 	default:
520 		break;
521 	}
522 }
523 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
524 
525 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
526 /*
527  * An empty function that will trigger a reschedule on
528  * IRQ tail once IRQs get re-enabled on userspace/guest resume.
529  */
530 static void late_wakeup_func(struct irq_work *work)
531 {
532 }
533 
534 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
535 	IRQ_WORK_INIT(late_wakeup_func);
536 
537 /*
538  * If either:
539  *
540  * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
541  * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
542  *
543  * In these cases the late RCU wakeups aren't supported in the resched loops and our
544  * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
545  * get re-enabled.
546  */
547 noinstr void rcu_irq_work_resched(void)
548 {
549 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
550 
551 	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
552 		return;
553 
554 	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
555 		return;
556 
557 	instrumentation_begin();
558 	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
559 		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
560 	}
561 	instrumentation_end();
562 }
563 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
564 
565 #ifdef CONFIG_PROVE_RCU
566 /**
567  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
568  */
569 void rcu_irq_exit_check_preempt(void)
570 {
571 	lockdep_assert_irqs_disabled();
572 
573 	RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
574 			 "RCU dynticks_nesting counter underflow/zero!");
575 	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
576 			 DYNTICK_IRQ_NONIDLE,
577 			 "Bad RCU dynticks_nmi_nesting counter\n");
578 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
579 			 "RCU in extended quiescent state!");
580 }
581 #endif /* #ifdef CONFIG_PROVE_RCU */
582 
583 #ifdef CONFIG_NO_HZ_FULL
584 /**
585  * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
586  *
587  * The scheduler tick is not normally enabled when CPUs enter the kernel
588  * from nohz_full userspace execution.  After all, nohz_full userspace
589  * execution is an RCU quiescent state and the time executing in the kernel
590  * is quite short.  Except of course when it isn't.  And it is not hard to
591  * cause a large system to spend tens of seconds or even minutes looping
592  * in the kernel, which can cause a number of problems, including RCU CPU
593  * stall warnings.
594  *
595  * Therefore, if a nohz_full CPU fails to report a quiescent state
596  * in a timely manner, the RCU grace-period kthread sets that CPU's
597  * ->rcu_urgent_qs flag with the expectation that the next interrupt or
598  * exception will invoke this function, which will turn on the scheduler
599  * tick, which will enable RCU to detect that CPU's quiescent states,
600  * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
601  * The tick will be disabled once a quiescent state is reported for
602  * this CPU.
603  *
604  * Of course, in carefully tuned systems, there might never be an
605  * interrupt or exception.  In that case, the RCU grace-period kthread
606  * will eventually cause one to happen.  However, in less carefully
607  * controlled environments, this function allows RCU to get what it
608  * needs without creating otherwise useless interruptions.
609  */
610 void __rcu_irq_enter_check_tick(void)
611 {
612 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
613 
614 	// If we're here from NMI there's nothing to do.
615 	if (in_nmi())
616 		return;
617 
618 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
619 			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
620 
621 	if (!tick_nohz_full_cpu(rdp->cpu) ||
622 	    !READ_ONCE(rdp->rcu_urgent_qs) ||
623 	    READ_ONCE(rdp->rcu_forced_tick)) {
624 		// RCU doesn't need nohz_full help from this CPU, or it is
625 		// already getting that help.
626 		return;
627 	}
628 
629 	// We get here only when not in an extended quiescent state and
630 	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
631 	// already watching and (2) The fact that we are in an interrupt
632 	// handler and that the rcu_node lock is an irq-disabled lock
633 	// prevents self-deadlock.  So we can safely recheck under the lock.
634 	// Note that the nohz_full state currently cannot change.
635 	raw_spin_lock_rcu_node(rdp->mynode);
636 	if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
637 		// A nohz_full CPU is in the kernel and RCU needs a
638 		// quiescent state.  Turn on the tick!
639 		WRITE_ONCE(rdp->rcu_forced_tick, true);
640 		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
641 	}
642 	raw_spin_unlock_rcu_node(rdp->mynode);
643 }
644 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
645 #endif /* CONFIG_NO_HZ_FULL */
646 
647 /*
648  * Check to see if any future non-offloaded RCU-related work will need
649  * to be done by the current CPU, even if none need be done immediately,
650  * returning 1 if so.  This function is part of the RCU implementation;
651  * it is -not- an exported member of the RCU API.  This is used by
652  * the idle-entry code to figure out whether it is safe to disable the
653  * scheduler-clock interrupt.
654  *
655  * Just check whether or not this CPU has non-offloaded RCU callbacks
656  * queued.
657  */
658 int rcu_needs_cpu(void)
659 {
660 	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
661 		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
662 }
663 
664 /*
665  * If any sort of urgency was applied to the current CPU (for example,
666  * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
667  * to get to a quiescent state, disable it.
668  */
669 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
670 {
671 	raw_lockdep_assert_held_rcu_node(rdp->mynode);
672 	WRITE_ONCE(rdp->rcu_urgent_qs, false);
673 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
674 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
675 		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
676 		WRITE_ONCE(rdp->rcu_forced_tick, false);
677 	}
678 }
679 
680 /**
681  * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
682  *
683  * Return @true if RCU is watching the running CPU and @false otherwise.
684  * A @true return means that this CPU can safely enter RCU read-side
685  * critical sections.
686  *
687  * Although calls to rcu_is_watching() from most parts of the kernel
688  * will return @true, there are important exceptions.  For example, if the
689  * current CPU is deep within its idle loop, in kernel entry/exit code,
690  * or offline, rcu_is_watching() will return @false.
691  *
692  * Make this function notrace because it can be called from ftrace
693  * internals; marking it notrace avoids unnecessary recursive calls.
694  */
695 notrace bool rcu_is_watching(void)
696 {
697 	bool ret;
698 
699 	preempt_disable_notrace();
700 	ret = !rcu_dynticks_curr_cpu_in_eqs();
701 	preempt_enable_notrace();
702 	return ret;
703 }
704 EXPORT_SYMBOL_GPL(rcu_is_watching);
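/*
 * A typical use (a sketch, not code from this file) is low-level entry or
 * tracing code that must avoid RCU read-side primitives while RCU is not
 * watching:
 *
 *	if (!rcu_is_watching())
 *		return;		// Too deep in idle/entry for RCU use.
 *	rcu_read_lock();
 *	// ... dereference RCU-protected data ...
 *	rcu_read_unlock();
 */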
705 
706 /*
707  * If a holdout task is actually running, request an urgent quiescent
708  * state from its CPU.  This is unsynchronized, so migrations can cause
709  * the request to go to the wrong CPU.  Which is OK, all that will happen
710  * is that the CPU's next context switch will be a bit slower and next
711  * time around this task will generate another request.
712  */
713 void rcu_request_urgent_qs_task(struct task_struct *t)
714 {
715 	int cpu;
716 
717 	barrier();
718 	cpu = task_cpu(t);
719 	if (!task_curr(t))
720 		return; /* This task is not running on that CPU. */
721 	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
722 }
723 
724 /*
725  * When trying to report a quiescent state on behalf of some other CPU,
726  * it is our responsibility to check for and handle potential overflow
727  * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
728  * After all, the CPU might be in deep idle state, and thus executing no
729  * code whatsoever.
730  */
731 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
732 {
733 	raw_lockdep_assert_held_rcu_node(rnp);
734 	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
735 			 rnp->gp_seq))
736 		WRITE_ONCE(rdp->gpwrap, true);
737 	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
738 		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
739 }
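/*
 * The ULONG_MAX / 4 offset above flags a CPU whose ->gp_seq snapshot has
 * fallen more than a quarter of the counter space behind the rcu_node
 * structure's ->gp_seq (hundreds of millions of grace periods even on a
 * 32-bit system), after which ->gpwrap tells later code not to trust the
 * stale per-CPU sequence numbers.
 */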
740 
741 /*
742  * Snapshot the specified CPU's dynticks counter so that we can later
743  * credit them with an implicit quiescent state.  Return 1 if this CPU
744  * is in dynticks idle mode, which is an extended quiescent state.
745  */
746 static int dyntick_save_progress_counter(struct rcu_data *rdp)
747 {
748 	rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
749 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
750 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
751 		rcu_gpnum_ovf(rdp->mynode, rdp);
752 		return 1;
753 	}
754 	return 0;
755 }
756 
757 /*
758  * Return true if the specified CPU has passed through a quiescent
759  * state by virtue of being in or having passed through a dynticks
760  * idle state since the last call to dyntick_save_progress_counter()
761  * for this same CPU, or by virtue of having been offline.
762  */
763 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
764 {
765 	unsigned long jtsq;
766 	struct rcu_node *rnp = rdp->mynode;
767 
768 	/*
769 	 * If the CPU passed through or entered a dynticks idle phase with
770 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
771 	 * already acknowledged the request to pass through a quiescent
772 	 * state.  Either way, that CPU cannot possibly be in an RCU
773 	 * read-side critical section that started before the beginning
774 	 * of the current RCU grace period.
775 	 */
776 	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
777 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
778 		rcu_gpnum_ovf(rnp, rdp);
779 		return 1;
780 	}
781 
782 	/*
783 	 * Complain if a CPU that is considered to be offline from RCU's
784 	 * perspective has not yet reported a quiescent state.  After all,
785 	 * the offline CPU should have reported a quiescent state during
786 	 * the CPU-offline process, or, failing that, by rcu_gp_init()
787 	 * if it ran concurrently with either the CPU going offline or the
788 	 * last task on a leaf rcu_node structure exiting its RCU read-side
789 	 * critical section while all CPUs corresponding to that structure
790 	 * are offline.  This added warning detects bugs in any of these
791 	 * code paths.
792 	 *
793 	 * The rcu_node structure's ->lock is held here, which excludes
794  * the relevant portions of the CPU-hotplug code, the grace-period
795 	 * initialization code, and the rcu_read_unlock() code paths.
796 	 *
797 	 * For more detail, please refer to the "Hotplug CPU" section
798 	 * of RCU's Requirements documentation.
799 	 */
800 	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
801 		struct rcu_node *rnp1;
802 
803 		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
804 			__func__, rnp->grplo, rnp->grphi, rnp->level,
805 			(long)rnp->gp_seq, (long)rnp->completedqs);
806 		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
807 			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
808 				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
809 		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
810 			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
811 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
812 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
813 		return 1; /* Break things loose after complaining. */
814 	}
815 
816 	/*
817 	 * A CPU running for an extended time within the kernel can
818 	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
819 	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
820 	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
821 	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
822 	 * variable are safe because the assignments are repeated if this
823 	 * CPU failed to pass through a quiescent state.  This code
824 	 * also checks .jiffies_resched in case jiffies_to_sched_qs
825 	 * is set way high.
826 	 */
827 	jtsq = READ_ONCE(jiffies_to_sched_qs);
828 	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
829 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
830 	     time_after(jiffies, rcu_state.jiffies_resched) ||
831 	     rcu_state.cbovld)) {
832 		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
833 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
834 		smp_store_release(&rdp->rcu_urgent_qs, true);
835 	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
836 		WRITE_ONCE(rdp->rcu_urgent_qs, true);
837 	}
838 
839 	/*
840 	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
841 	 * The above code handles this, but only for straight cond_resched().
842 	 * And some in-kernel loops check need_resched() before calling
843 	 * cond_resched(), which defeats the above code for CPUs that are
844 	 * running in-kernel with scheduling-clock interrupts disabled.
845 	 * So hit them over the head with the resched_cpu() hammer!
846 	 */
847 	if (tick_nohz_full_cpu(rdp->cpu) &&
848 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
849 	     rcu_state.cbovld)) {
850 		WRITE_ONCE(rdp->rcu_urgent_qs, true);
851 		resched_cpu(rdp->cpu);
852 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
853 	}
854 
855 	/*
856 	 * If more than halfway to RCU CPU stall-warning time, invoke
857 	 * resched_cpu() more frequently to try to loosen things up a bit.
858 	 * Also check to see if the CPU is getting hammered with interrupts,
859 	 * but only once per grace period, just to keep the IPIs down to
860 	 * a dull roar.
861 	 */
862 	if (time_after(jiffies, rcu_state.jiffies_resched)) {
863 		if (time_after(jiffies,
864 			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
865 			resched_cpu(rdp->cpu);
866 			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
867 		}
868 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
869 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
870 		    (rnp->ffmask & rdp->grpmask)) {
871 			rdp->rcu_iw_pending = true;
872 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
873 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
874 		}
875 
876 		if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
877 			int cpu = rdp->cpu;
878 			struct rcu_snap_record *rsrp;
879 			struct kernel_cpustat *kcsp;
880 
881 			kcsp = &kcpustat_cpu(cpu);
882 
883 			rsrp = &rdp->snap_record;
884 			rsrp->cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
885 			rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
886 			rsrp->cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
887 			rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
888 			rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
889 			rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
890 			rsrp->jiffies = jiffies;
891 			rsrp->gp_seq = rdp->gp_seq;
892 		}
893 	}
894 
895 	return 0;
896 }
897 
898 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
899 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
900 			      unsigned long gp_seq_req, const char *s)
901 {
902 	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
903 				      gp_seq_req, rnp->level,
904 				      rnp->grplo, rnp->grphi, s);
905 }
906 
907 /*
908  * rcu_start_this_gp - Request the start of a particular grace period
909  * @rnp_start: The leaf node of the CPU from which to start.
910  * @rdp: The rcu_data corresponding to the CPU from which to start.
911  * @gp_seq_req: The gp_seq of the grace period to start.
912  *
913  * Start the specified grace period, as needed to handle newly arrived
914  * callbacks.  The required future grace periods are recorded in each
915  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
916  * is reason to awaken the grace-period kthread.
917  *
918  * The caller must hold the specified rcu_node structure's ->lock, which
919  * is why the caller is responsible for waking the grace-period kthread.
920  *
921  * Returns true if the GP kthread needs to be awakened, else false.
922  */
923 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
924 			      unsigned long gp_seq_req)
925 {
926 	bool ret = false;
927 	struct rcu_node *rnp;
928 
929 	/*
930 	 * Use funnel locking to either acquire the root rcu_node
931 	 * structure's lock or bail out if the need for this grace period
932 	 * has already been recorded -- or if that grace period has in
933 	 * fact already started.  If there is already a grace period in
934 	 * progress in a non-leaf node, no recording is needed because the
935 	 * end of the grace period will scan the leaf rcu_node structures.
936 	 * Note that rnp_start->lock must not be released.
937 	 */
938 	raw_lockdep_assert_held_rcu_node(rnp_start);
939 	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
940 	for (rnp = rnp_start; 1; rnp = rnp->parent) {
941 		if (rnp != rnp_start)
942 			raw_spin_lock_rcu_node(rnp);
943 		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
944 		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
945 		    (rnp != rnp_start &&
946 		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
947 			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
948 					  TPS("Prestarted"));
949 			goto unlock_out;
950 		}
951 		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
952 		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
953 			/*
954 			 * We just marked the leaf or internal node, and a
955 			 * grace period is in progress, which means that
956 			 * rcu_gp_cleanup() will see the marking.  Bail to
957 			 * reduce contention.
958 			 */
959 			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
960 					  TPS("Startedleaf"));
961 			goto unlock_out;
962 		}
963 		if (rnp != rnp_start && rnp->parent != NULL)
964 			raw_spin_unlock_rcu_node(rnp);
965 		if (!rnp->parent)
966 			break;  /* At root, and perhaps also leaf. */
967 	}
968 
969 	/* If GP already in progress, just leave, otherwise start one. */
970 	if (rcu_gp_in_progress()) {
971 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
972 		goto unlock_out;
973 	}
974 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
975 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
976 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
977 	if (!READ_ONCE(rcu_state.gp_kthread)) {
978 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
979 		goto unlock_out;
980 	}
981 	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
982 	ret = true;  /* Caller must wake GP kthread. */
983 unlock_out:
984 	/* Push furthest requested GP to leaf node and rcu_data structure. */
985 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
986 		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
987 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
988 	}
989 	if (rnp != rnp_start)
990 		raw_spin_unlock_rcu_node(rnp);
991 	return ret;
992 }
993 
994 /*
995  * Clean up any old requests for the just-ended grace period.  Also return
996  * whether any additional grace periods have been requested.
997  */
998 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
999 {
1000 	bool needmore;
1001 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1002 
1003 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1004 	if (!needmore)
1005 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1006 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1007 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1008 	return needmore;
1009 }
1010 
1011 /*
1012  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1013  * interrupt or softirq handler, in which case we just might immediately
1014  * sleep upon return, resulting in a grace-period hang), and don't bother
1015  * awakening when there is nothing for the grace-period kthread to do
1016  * (as in several CPUs raced to awaken, we lost), and finally don't try
1017  * to awaken a kthread that has not yet been created.  If all those checks
1018  * are passed, track some debug information and awaken.
1019  *
1020  * So why do the self-wakeup when in an interrupt or softirq handler
1021  * in the grace-period kthread's context?  Because the kthread might have
1022  * been interrupted just as it was going to sleep, and just after the final
1023  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1024  * is required, and is therefore supplied.
1025  */
1026 static void rcu_gp_kthread_wake(void)
1027 {
1028 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1029 
1030 	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1031 	    !READ_ONCE(rcu_state.gp_flags) || !t)
1032 		return;
1033 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1034 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1035 	swake_up_one(&rcu_state.gp_wq);
1036 }
1037 
1038 /*
1039  * If there is room, assign a ->gp_seq number to any callbacks on this
1040  * CPU that have not already been assigned.  Also accelerate any callbacks
1041  * that were previously assigned a ->gp_seq number that has since proven
1042  * to be too conservative, which can happen if callbacks get assigned a
1043  * ->gp_seq number while RCU is idle, but with reference to a non-root
1044  * rcu_node structure.  This function is idempotent, so it does not hurt
1045  * to call it repeatedly.  Returns an flag saying that we should awaken
1046  * the RCU grace-period kthread.
1047  *
1048  * The caller must hold rnp->lock with interrupts disabled.
1049  */
1050 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1051 {
1052 	unsigned long gp_seq_req;
1053 	bool ret = false;
1054 
1055 	rcu_lockdep_assert_cblist_protected(rdp);
1056 	raw_lockdep_assert_held_rcu_node(rnp);
1057 
1058 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1059 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1060 		return false;
1061 
1062 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1063 
1064 	/*
1065 	 * Callbacks are often registered with incomplete grace-period
1066 	 * information.  Something about the fact that getting exact
1067 	 * information requires acquiring a global lock...  RCU therefore
1068 	 * makes a conservative estimate of the grace period number at which
1069 	 * a given callback will become ready to invoke.	The following
1070 	 * code checks this estimate and improves it when possible, thus
1071 	 * accelerating callback invocation to an earlier grace-period
1072 	 * number.
1073 	 */
1074 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1075 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1076 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1077 
1078 	/* Trace depending on how much we were able to accelerate. */
1079 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1080 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1081 	else
1082 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1083 
1084 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1085 
1086 	return ret;
1087 }
1088 
1089 /*
1090  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1091  * rcu_node structure's ->lock be held.  It consults the cached value
1092  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1093  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1094  * while holding the leaf rcu_node structure's ->lock.
1095  */
1096 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1097 					struct rcu_data *rdp)
1098 {
1099 	unsigned long c;
1100 	bool needwake;
1101 
1102 	rcu_lockdep_assert_cblist_protected(rdp);
1103 	c = rcu_seq_snap(&rcu_state.gp_seq);
1104 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1105 		/* Old request still live, so mark recent callbacks. */
1106 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1107 		return;
1108 	}
1109 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1110 	needwake = rcu_accelerate_cbs(rnp, rdp);
1111 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1112 	if (needwake)
1113 		rcu_gp_kthread_wake();
1114 }
1115 
1116 /*
1117  * Move any callbacks whose grace period has completed to the
1118  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1119  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1120  * sublist.  This function is idempotent, so it does not hurt to
1121  * invoke it repeatedly.  As long as it is not invoked -too- often...
1122  * Returns true if the RCU grace-period kthread needs to be awakened.
1123  *
1124  * The caller must hold rnp->lock with interrupts disabled.
1125  */
1126 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1127 {
1128 	rcu_lockdep_assert_cblist_protected(rdp);
1129 	raw_lockdep_assert_held_rcu_node(rnp);
1130 
1131 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1132 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1133 		return false;
1134 
1135 	/*
1136 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1137 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1138 	 */
1139 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1140 
1141 	/* Classify any remaining callbacks. */
1142 	return rcu_accelerate_cbs(rnp, rdp);
1143 }
1144 
1145 /*
1146  * Move and classify callbacks, but only if doing so won't require
1147  * that the RCU grace-period kthread be awakened.
1148  */
1149 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1150 						  struct rcu_data *rdp)
1151 {
1152 	rcu_lockdep_assert_cblist_protected(rdp);
1153 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1154 		return;
1155 	// The grace period cannot end while we hold the rcu_node lock.
1156 	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1157 		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1158 	raw_spin_unlock_rcu_node(rnp);
1159 }
1160 
1161 /*
1162  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1163  * quiescent state.  This is intended to be invoked when the CPU notices
1164  * a new grace period.
1165  */
1166 static void rcu_strict_gp_check_qs(void)
1167 {
1168 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1169 		rcu_read_lock();
1170 		rcu_read_unlock();
1171 	}
1172 }
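/*
 * The empty critical section above is not a no-op in
 * CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels: there the rcu_read_unlock()
 * path itself reports a quiescent state (an inference from the strict-GP
 * design, not spelled out in this file).
 */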
1173 
1174 /*
1175  * Update CPU-local rcu_data state to record the beginnings and ends of
1176  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1177  * structure corresponding to the current CPU, and must have irqs disabled.
1178  * Returns true if the grace-period kthread needs to be awakened.
1179  */
1180 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1181 {
1182 	bool ret = false;
1183 	bool need_qs;
1184 	const bool offloaded = rcu_rdp_is_offloaded(rdp);
1185 
1186 	raw_lockdep_assert_held_rcu_node(rnp);
1187 
1188 	if (rdp->gp_seq == rnp->gp_seq)
1189 		return false; /* Nothing to do. */
1190 
1191 	/* Handle the ends of any preceding grace periods first. */
1192 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1193 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1194 		if (!offloaded)
1195 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1196 		rdp->core_needs_qs = false;
1197 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1198 	} else {
1199 		if (!offloaded)
1200 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1201 		if (rdp->core_needs_qs)
1202 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1203 	}
1204 
1205 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1206 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1207 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1208 		/*
1209 		 * If the current grace period is waiting for this CPU,
1210 		 * set up to detect a quiescent state, otherwise don't
1211 		 * go looking for one.
1212 		 */
1213 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1214 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1215 		rdp->cpu_no_qs.b.norm = need_qs;
1216 		rdp->core_needs_qs = need_qs;
1217 		zero_cpu_stall_ticks(rdp);
1218 	}
1219 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1220 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1221 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1222 	if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1223 		WRITE_ONCE(rdp->last_sched_clock, jiffies);
1224 	WRITE_ONCE(rdp->gpwrap, false);
1225 	rcu_gpnum_ovf(rnp, rdp);
1226 	return ret;
1227 }
1228 
1229 static void note_gp_changes(struct rcu_data *rdp)
1230 {
1231 	unsigned long flags;
1232 	bool needwake;
1233 	struct rcu_node *rnp;
1234 
1235 	local_irq_save(flags);
1236 	rnp = rdp->mynode;
1237 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1238 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1239 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1240 		local_irq_restore(flags);
1241 		return;
1242 	}
1243 	needwake = __note_gp_changes(rnp, rdp);
1244 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1245 	rcu_strict_gp_check_qs();
1246 	if (needwake)
1247 		rcu_gp_kthread_wake();
1248 }
1249 
1250 static atomic_t *rcu_gp_slow_suppress;
1251 
1252 /* Register a counter to suppress debugging grace-period delays. */
1253 void rcu_gp_slow_register(atomic_t *rgssp)
1254 {
1255 	WARN_ON_ONCE(rcu_gp_slow_suppress);
1256 
1257 	WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1258 }
1259 EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1260 
1261 /* Unregister a counter, with NULL for not caring which. */
1262 void rcu_gp_slow_unregister(atomic_t *rgssp)
1263 {
1264 	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);
1265 
1266 	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1267 }
1268 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1269 
1270 static bool rcu_gp_slow_is_suppressed(void)
1271 {
1272 	atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1273 
1274 	return rgssp && atomic_read(rgssp);
1275 }
1276 
1277 static void rcu_gp_slow(int delay)
1278 {
1279 	if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1280 	    !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1281 		schedule_timeout_idle(delay);
1282 }
1283 
1284 static unsigned long sleep_duration;
1285 
1286 /* Allow rcutorture to stall the grace-period kthread. */
1287 void rcu_gp_set_torture_wait(int duration)
1288 {
1289 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1290 		WRITE_ONCE(sleep_duration, duration);
1291 }
1292 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1293 
1294 /* Actually implement the aforementioned wait. */
1295 static void rcu_gp_torture_wait(void)
1296 {
1297 	unsigned long duration;
1298 
1299 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1300 		return;
1301 	duration = xchg(&sleep_duration, 0UL);
1302 	if (duration > 0) {
1303 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1304 		schedule_timeout_idle(duration);
1305 		pr_alert("%s: Wait complete\n", __func__);
1306 	}
1307 }
1308 
1309 /*
1310  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1311  * processing.
1312  */
1313 static void rcu_strict_gp_boundary(void *unused)
1314 {
1315 	invoke_rcu_core();
1316 }
1317 
1318 // Make the polled API aware of the beginning of a grace period.
1319 static void rcu_poll_gp_seq_start(unsigned long *snap)
1320 {
1321 	struct rcu_node *rnp = rcu_get_root();
1322 
1323 	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1324 		raw_lockdep_assert_held_rcu_node(rnp);
1325 
1326 	// If RCU was idle, note beginning of GP.
1327 	if (!rcu_seq_state(rcu_state.gp_seq_polled))
1328 		rcu_seq_start(&rcu_state.gp_seq_polled);
1329 
1330 	// Either way, record current state.
1331 	*snap = rcu_state.gp_seq_polled;
1332 }
1333 
1334 // Make the polled API aware of the end of a grace period.
1335 static void rcu_poll_gp_seq_end(unsigned long *snap)
1336 {
1337 	struct rcu_node *rnp = rcu_get_root();
1338 
1339 	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1340 		raw_lockdep_assert_held_rcu_node(rnp);
1341 
1342 	// If the previously noted GP is still in effect, record the
1343 	// end of that GP.  Either way, zero counter to avoid counter-wrap
1344 	// problems.
1345 	if (*snap && *snap == rcu_state.gp_seq_polled) {
1346 		rcu_seq_end(&rcu_state.gp_seq_polled);
1347 		rcu_state.gp_seq_polled_snap = 0;
1348 		rcu_state.gp_seq_polled_exp_snap = 0;
1349 	} else {
1350 		*snap = 0;
1351 	}
1352 }
1353 
1354 // Make the polled API aware of the beginning of a grace period, but
1355 // where caller does not hold the root rcu_node structure's lock.
1356 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1357 {
1358 	unsigned long flags;
1359 	struct rcu_node *rnp = rcu_get_root();
1360 
1361 	if (rcu_init_invoked()) {
1362 		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1363 			lockdep_assert_irqs_enabled();
1364 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1365 	}
1366 	rcu_poll_gp_seq_start(snap);
1367 	if (rcu_init_invoked())
1368 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1369 }
1370 
1371 // Make the polled API aware of the end of a grace period, but where
1372 // caller does not hold the root rcu_node structure's lock.
1373 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1374 {
1375 	unsigned long flags;
1376 	struct rcu_node *rnp = rcu_get_root();
1377 
1378 	if (rcu_init_invoked()) {
1379 		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1380 			lockdep_assert_irqs_enabled();
1381 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1382 	}
1383 	rcu_poll_gp_seq_end(snap);
1384 	if (rcu_init_invoked())
1385 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1386 }
1387 
1388 /*
1389  * Initialize a new grace period.  Return false if no grace period required.
1390  */
1391 static noinline_for_stack bool rcu_gp_init(void)
1392 {
1393 	unsigned long flags;
1394 	unsigned long oldmask;
1395 	unsigned long mask;
1396 	struct rcu_data *rdp;
1397 	struct rcu_node *rnp = rcu_get_root();
1398 
1399 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1400 	raw_spin_lock_irq_rcu_node(rnp);
1401 	if (!READ_ONCE(rcu_state.gp_flags)) {
1402 		/* Spurious wakeup, tell caller to go back to sleep.  */
1403 		raw_spin_unlock_irq_rcu_node(rnp);
1404 		return false;
1405 	}
1406 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1407 
1408 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1409 		/*
1410 		 * Grace period already in progress, don't start another.
1411 		 * Not supposed to be able to happen.
1412 		 */
1413 		raw_spin_unlock_irq_rcu_node(rnp);
1414 		return false;
1415 	}
1416 
1417 	/* Advance to a new grace period and initialize state. */
1418 	record_gp_stall_check_time();
1419 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1420 	rcu_seq_start(&rcu_state.gp_seq);
1421 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1422 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1423 	rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1424 	raw_spin_unlock_irq_rcu_node(rnp);
1425 
1426 	/*
1427 	 * Apply per-leaf buffered online and offline operations to
1428 	 * the rcu_node tree. Note that this new grace period need not
1429 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1430 	 * offlining path, when combined with checks in this function,
1431 	 * will handle CPUs that are currently going offline or that will
1432 	 * go offline later.  Please also refer to "Hotplug CPU" section
1433 	 * of RCU's Requirements documentation.
1434 	 */
1435 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1436 	/* Exclude CPU hotplug operations. */
1437 	rcu_for_each_leaf_node(rnp) {
1438 		local_irq_save(flags);
1439 		arch_spin_lock(&rcu_state.ofl_lock);
1440 		raw_spin_lock_rcu_node(rnp);
1441 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1442 		    !rnp->wait_blkd_tasks) {
1443 			/* Nothing to do on this leaf rcu_node structure. */
1444 			raw_spin_unlock_rcu_node(rnp);
1445 			arch_spin_unlock(&rcu_state.ofl_lock);
1446 			local_irq_restore(flags);
1447 			continue;
1448 		}
1449 
1450 		/* Record old state, apply changes to ->qsmaskinit field. */
1451 		oldmask = rnp->qsmaskinit;
1452 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1453 
1454 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1455 		if (!oldmask != !rnp->qsmaskinit) {
1456 			if (!oldmask) { /* First online CPU for rcu_node. */
1457 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1458 					rcu_init_new_rnp(rnp);
1459 			} else if (rcu_preempt_has_tasks(rnp)) {
1460 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1461 			} else { /* Last offline CPU and can propagate. */
1462 				rcu_cleanup_dead_rnp(rnp);
1463 			}
1464 		}
1465 
1466 		/*
1467 		 * If all waited-on tasks from prior grace period are
1468 		 * done, and if all this rcu_node structure's CPUs are
1469 		 * still offline, propagate up the rcu_node tree and
1470 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1471 		 * rcu_node structure's CPUs has since come back online,
1472 		 * simply clear ->wait_blkd_tasks.
1473 		 */
1474 		if (rnp->wait_blkd_tasks &&
1475 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1476 			rnp->wait_blkd_tasks = false;
1477 			if (!rnp->qsmaskinit)
1478 				rcu_cleanup_dead_rnp(rnp);
1479 		}
1480 
1481 		raw_spin_unlock_rcu_node(rnp);
1482 		arch_spin_unlock(&rcu_state.ofl_lock);
1483 		local_irq_restore(flags);
1484 	}
1485 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1486 
1487 	/*
1488 	 * Set the quiescent-state-needed bits in all the rcu_node
1489 	 * structures for all currently online CPUs in breadth-first
1490 	 * order, starting from the root rcu_node structure, relying on the
1491 	 * layout of the tree within the rcu_state.node[] array.  Note that
1492 	 * other CPUs will access only the leaves of the hierarchy, thus
1493 	 * seeing that no grace period is in progress, at least until the
1494 	 * corresponding leaf node has been initialized.
1495 	 *
1496 	 * The grace period cannot complete until the initialization
1497 	 * process finishes, because this kthread handles both.
1498 	 */
1499 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1500 	rcu_for_each_node_breadth_first(rnp) {
1501 		rcu_gp_slow(gp_init_delay);
1502 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1503 		rdp = this_cpu_ptr(&rcu_data);
1504 		rcu_preempt_check_blocked_tasks(rnp);
1505 		rnp->qsmask = rnp->qsmaskinit;
1506 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1507 		if (rnp == rdp->mynode)
1508 			(void)__note_gp_changes(rnp, rdp);
1509 		rcu_preempt_boost_start_gp(rnp);
1510 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1511 					    rnp->level, rnp->grplo,
1512 					    rnp->grphi, rnp->qsmask);
1513 		/* Quiescent states for tasks on any now-offline CPUs. */
1514 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1515 		rnp->rcu_gp_init_mask = mask;
1516 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1517 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1518 		else
1519 			raw_spin_unlock_irq_rcu_node(rnp);
1520 		cond_resched_tasks_rcu_qs();
1521 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1522 	}
1523 
1524 	// If strict, make all CPUs aware of new grace period.
1525 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1526 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1527 
1528 	return true;
1529 }
1530 
1531 /*
1532  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1533  * time.
1534  */
1535 static bool rcu_gp_fqs_check_wake(int *gfp)
1536 {
1537 	struct rcu_node *rnp = rcu_get_root();
1538 
1539 	// If under overload conditions, force an immediate FQS scan.
1540 	if (*gfp & RCU_GP_FLAG_OVLD)
1541 		return true;
1542 
1543 	// Someone like call_rcu() requested a force-quiescent-state scan.
1544 	*gfp = READ_ONCE(rcu_state.gp_flags);
1545 	if (*gfp & RCU_GP_FLAG_FQS)
1546 		return true;
1547 
1548 	// The current grace period has completed.
1549 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1550 		return true;
1551 
1552 	return false;
1553 }
1554 
1555 /*
1556  * Do one round of quiescent-state forcing.
1557  */
1558 static void rcu_gp_fqs(bool first_time)
1559 {
1560 	int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
1561 	struct rcu_node *rnp = rcu_get_root();
1562 
1563 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1564 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1565 
1566 	WARN_ON_ONCE(nr_fqs > 3);
1567 	/* Only count down nr_fqs for stall purposes if jiffies moves. */
1568 	if (nr_fqs) {
1569 		if (nr_fqs == 1) {
1570 			WRITE_ONCE(rcu_state.jiffies_stall,
1571 				   jiffies + rcu_jiffies_till_stall_check());
1572 		}
1573 		WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
1574 	}
1575 
1576 	if (first_time) {
1577 		/* Collect dyntick-idle snapshots. */
1578 		force_qs_rnp(dyntick_save_progress_counter);
1579 	} else {
1580 		/* Handle dyntick-idle and offline CPUs. */
1581 		force_qs_rnp(rcu_implicit_dynticks_qs);
1582 	}
1583 	/* Clear flag to prevent immediate re-entry. */
1584 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1585 		raw_spin_lock_irq_rcu_node(rnp);
1586 		WRITE_ONCE(rcu_state.gp_flags,
1587 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1588 		raw_spin_unlock_irq_rcu_node(rnp);
1589 	}
1590 }
1591 
1592 /*
1593  * Loop doing repeated quiescent-state forcing until the grace period ends.
1594  */
1595 static noinline_for_stack void rcu_gp_fqs_loop(void)
1596 {
1597 	bool first_gp_fqs = true;
1598 	int gf = 0;
1599 	unsigned long j;
1600 	int ret;
1601 	struct rcu_node *rnp = rcu_get_root();
1602 
1603 	j = READ_ONCE(jiffies_till_first_fqs);
1604 	if (rcu_state.cbovld)
1605 		gf = RCU_GP_FLAG_OVLD;
1606 	ret = 0;
1607 	for (;;) {
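		/*
		 * Under callback overload, cut the wait to roughly a third
		 * (rounding up), but never below one jiffy, so that
		 * quiescent-state forcing runs more often.
		 */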
1608 		if (rcu_state.cbovld) {
1609 			j = (j + 2) / 3;
1610 			if (j <= 0)
1611 				j = 1;
1612 		}
1613 		if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
1614 			WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1615 			/*
1616 			 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1617 			 * update; required for stall checks.
1618 			 */
1619 			smp_wmb();
1620 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1621 				   jiffies + (j ? 3 * j : 2));
1622 		}
1623 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1624 				       TPS("fqswait"));
1625 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1626 		(void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1627 				 rcu_gp_fqs_check_wake(&gf), j);
1628 		rcu_gp_torture_wait();
1629 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1630 		/* Locking provides needed memory barriers. */
1631 		/*
1632 		 * If the root rcu_node structure indicates that the grace period has
1633 		 * ended, exit the loop.  The rcu_preempt_blocked_readers_cgp(rnp) check
1634 		 * is required only for single-node rcu_node trees because readers blocking
1635 		 * the current grace period are queued only on leaf rcu_node structures.
1636 		 * For multi-node trees, checking the root node's ->qsmask suffices, because a
1637 		 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
1638 		 * the corresponding leaf nodes have passed through their quiescent state.
1639 		 */
1640 		if (!READ_ONCE(rnp->qsmask) &&
1641 		    !rcu_preempt_blocked_readers_cgp(rnp))
1642 			break;
1643 		/* If time for quiescent-state forcing, do it. */
1644 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1645 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1646 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1647 					       TPS("fqsstart"));
1648 			rcu_gp_fqs(first_gp_fqs);
1649 			gf = 0;
1650 			if (first_gp_fqs) {
1651 				first_gp_fqs = false;
1652 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1653 			}
1654 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1655 					       TPS("fqsend"));
1656 			cond_resched_tasks_rcu_qs();
1657 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1658 			ret = 0; /* Force full wait till next FQS. */
1659 			j = READ_ONCE(jiffies_till_next_fqs);
1660 		} else {
1661 			/* Deal with stray signal. */
1662 			cond_resched_tasks_rcu_qs();
1663 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1664 			WARN_ON(signal_pending(current));
1665 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1666 					       TPS("fqswaitsig"));
1667 			ret = 1; /* Keep old FQS timing. */
1668 			j = jiffies;
1669 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1670 				j = 1;
1671 			else
1672 				j = rcu_state.jiffies_force_qs - j;
1673 			gf = 0;
1674 		}
1675 	}
1676 }
1677 
1678 /*
1679  * Clean up after the old grace period.
1680  */
1681 static noinline void rcu_gp_cleanup(void)
1682 {
1683 	int cpu;
1684 	bool needgp = false;
1685 	unsigned long gp_duration;
1686 	unsigned long new_gp_seq;
1687 	bool offloaded;
1688 	struct rcu_data *rdp;
1689 	struct rcu_node *rnp = rcu_get_root();
1690 	struct swait_queue_head *sq;
1691 
1692 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1693 	raw_spin_lock_irq_rcu_node(rnp);
1694 	rcu_state.gp_end = jiffies;
1695 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1696 	if (gp_duration > rcu_state.gp_max)
1697 		rcu_state.gp_max = gp_duration;
1698 
1699 	/*
1700 	 * We know the grace period is complete, but to everyone else
1701 	 * it appears to still be ongoing.  But it is also the case
1702 	 * that to everyone else it looks like there is nothing that
1703 	 * they can do to advance the grace period.  It is therefore
1704 	 * safe for us to drop the lock in order to mark the grace
1705 	 * period as completed in all of the rcu_node structures.
1706 	 */
1707 	rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
1708 	raw_spin_unlock_irq_rcu_node(rnp);
1709 
1710 	/*
1711 	 * Propagate new ->gp_seq value to rcu_node structures so that
1712 	 * other CPUs don't have to wait until the start of the next grace
1713 	 * period to process their callbacks.  This also avoids some nasty
1714 	 * RCU grace-period initialization races by forcing the end of
1715 	 * the current grace period to be completely recorded in all of
1716 	 * the rcu_node structures before the beginning of the next grace
1717 	 * period is recorded in any of the rcu_node structures.
1718 	 */
1719 	new_gp_seq = rcu_state.gp_seq;
1720 	rcu_seq_end(&new_gp_seq);
1721 	rcu_for_each_node_breadth_first(rnp) {
1722 		raw_spin_lock_irq_rcu_node(rnp);
1723 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1724 			dump_blkd_tasks(rnp, 10);
1725 		WARN_ON_ONCE(rnp->qsmask);
1726 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1727 		if (!rnp->parent)
1728 			smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
1729 		rdp = this_cpu_ptr(&rcu_data);
1730 		if (rnp == rdp->mynode)
1731 			needgp = __note_gp_changes(rnp, rdp) || needgp;
1732 		/* smp_mb() provided by prior unlock-lock pair. */
1733 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
1734 		// Reset overload indication for CPUs no longer overloaded
1735 		if (rcu_is_leaf_node(rnp))
1736 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1737 				rdp = per_cpu_ptr(&rcu_data, cpu);
1738 				check_cb_ovld_locked(rdp, rnp);
1739 			}
1740 		sq = rcu_nocb_gp_get(rnp);
1741 		raw_spin_unlock_irq_rcu_node(rnp);
1742 		rcu_nocb_gp_cleanup(sq);
1743 		cond_resched_tasks_rcu_qs();
1744 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1745 		rcu_gp_slow(gp_cleanup_delay);
1746 	}
1747 	rnp = rcu_get_root();
1748 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1749 
1750 	/* Declare grace period done, trace first to use old GP number. */
1751 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1752 	rcu_seq_end(&rcu_state.gp_seq);
1753 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1754 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
1755 	/* Check for GP requests since above loop. */
1756 	rdp = this_cpu_ptr(&rcu_data);
1757 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1758 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1759 				  TPS("CleanupMore"));
1760 		needgp = true;
1761 	}
1762 	/* Advance CBs to reduce false positives below. */
1763 	offloaded = rcu_rdp_is_offloaded(rdp);
1764 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1765 
1766 		// We get here if a grace period was needed ("needgp")
1767 		// and the above call to rcu_accelerate_cbs() did not set
1768 		// the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
1769 		// the need for another grace period).  The purpose
1770 		// of the "offloaded" check is to avoid invoking
1771 		// rcu_accelerate_cbs() on an offloaded CPU because we do not
1772 		// hold the ->nocb_lock needed to safely access an offloaded
1773 		// ->cblist.  We do not want to acquire that lock because
1774 		// it can be heavily contended during callback floods.
1775 
1776 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1777 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1778 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
1779 	} else {
1780 
1781 		// We get here either if there is no need for an
1782 		// additional grace period or if rcu_accelerate_cbs() has
1783 		// already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 
1784 		// So all we need to do is to clear all of the other
1785 		// ->gp_flags bits.
1786 
1787 		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1788 	}
1789 	raw_spin_unlock_irq_rcu_node(rnp);
1790 
1791 	// If strict, make all CPUs aware of the end of the old grace period.
1792 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1793 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1794 }
1795 
1796 /*
1797  * Body of kthread that handles grace periods.
1798  */
1799 static int __noreturn rcu_gp_kthread(void *unused)
1800 {
1801 	rcu_bind_gp_kthread();
1802 	for (;;) {
1803 
1804 		/* Handle grace-period start. */
1805 		for (;;) {
1806 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1807 					       TPS("reqwait"));
1808 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
1809 			swait_event_idle_exclusive(rcu_state.gp_wq,
1810 					 READ_ONCE(rcu_state.gp_flags) &
1811 					 RCU_GP_FLAG_INIT);
1812 			rcu_gp_torture_wait();
1813 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
1814 			/* Locking provides needed memory barrier. */
1815 			if (rcu_gp_init())
1816 				break;
1817 			cond_resched_tasks_rcu_qs();
1818 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1819 			WARN_ON(signal_pending(current));
1820 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1821 					       TPS("reqwaitsig"));
1822 		}
1823 
1824 		/* Handle quiescent-state forcing. */
1825 		rcu_gp_fqs_loop();
1826 
1827 		/* Handle grace-period end. */
1828 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
1829 		rcu_gp_cleanup();
1830 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
1831 	}
1832 }
1833 
1834 /*
1835  * Report a full set of quiescent states to the rcu_state data structure.
1836  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1837  * another grace period is required.  Whether we wake the grace-period
1838  * kthread or it awakens itself for the next round of quiescent-state
1839  * forcing, that kthread will clean up after the just-completed grace
1840  * period.  Note that the caller must hold rnp->lock, which is released
1841  * before return.
1842  */
1843 static void rcu_report_qs_rsp(unsigned long flags)
1844 	__releases(rcu_get_root()->lock)
1845 {
1846 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
1847 	WARN_ON_ONCE(!rcu_gp_in_progress());
1848 	WRITE_ONCE(rcu_state.gp_flags,
1849 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1850 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1851 	rcu_gp_kthread_wake();
1852 }
1853 
1854 /*
1855  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1856  * Allows quiescent states for a group of CPUs to be reported at one go
1857  * to the specified rcu_node structure, though all the CPUs in the group
1858  * must be represented by the same rcu_node structure (which need not be a
1859  * leaf rcu_node structure, though it often will be).  The gps parameter
1860  * is the grace-period snapshot, which means that the quiescent states
1861  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
1862  * must be held upon entry, and it is released before return.
1863  *
1864  * As a special case, if mask is zero, the bit-already-cleared check is
1865  * disabled.  This allows propagating quiescent state due to resumed tasks
1866  * during grace-period initialization.
1867  */
1868 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1869 			      unsigned long gps, unsigned long flags)
1870 	__releases(rnp->lock)
1871 {
1872 	unsigned long oldmask = 0;
1873 	struct rcu_node *rnp_c;
1874 
1875 	raw_lockdep_assert_held_rcu_node(rnp);
1876 
1877 	/* Walk up the rcu_node hierarchy. */
1878 	for (;;) {
1879 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1880 
1881 			/*
1882 			 * Our bit has already been cleared, or the
1883 			 * relevant grace period is already over, so done.
1884 			 */
1885 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1886 			return;
1887 		}
1888 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1889 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1890 			     rcu_preempt_blocked_readers_cgp(rnp));
1891 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
1892 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1893 						 mask, rnp->qsmask, rnp->level,
1894 						 rnp->grplo, rnp->grphi,
1895 						 !!rnp->gp_tasks);
1896 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1897 
1898 			/* Other bits still set at this level, so done. */
1899 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1900 			return;
1901 		}
1902 		rnp->completedqs = rnp->gp_seq;
1903 		mask = rnp->grpmask;
1904 		if (rnp->parent == NULL) {
1905 
1906 			/* No more levels.  Exit loop holding root lock. */
1907 
1908 			break;
1909 		}
1910 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1911 		rnp_c = rnp;
1912 		rnp = rnp->parent;
1913 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1914 		oldmask = READ_ONCE(rnp_c->qsmask);
1915 	}
1916 
1917 	/*
1918 	 * Get here if we are the last CPU to pass through a quiescent
1919 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
1920 	 * to clean up and start the next grace period if one is needed.
1921 	 */
1922 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
1923 }
1924 
1925 /*
1926  * Record a quiescent state for all tasks that were previously queued
1927  * on the specified rcu_node structure and that were blocking the current
1928  * RCU grace period.  The caller must hold the corresponding rnp->lock with
1929  * irqs disabled, and this lock is released upon return, but irqs remain
1930  * disabled.
1931  */
1932 static void __maybe_unused
1933 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1934 	__releases(rnp->lock)
1935 {
1936 	unsigned long gps;
1937 	unsigned long mask;
1938 	struct rcu_node *rnp_p;
1939 
1940 	raw_lockdep_assert_held_rcu_node(rnp);
1941 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
1942 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1943 	    rnp->qsmask != 0) {
1944 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1945 		return;  /* Still need more quiescent states! */
1946 	}
1947 
1948 	rnp->completedqs = rnp->gp_seq;
1949 	rnp_p = rnp->parent;
1950 	if (rnp_p == NULL) {
1951 		/*
1952 		 * Only one rcu_node structure in the tree, so don't
1953 		 * try to report up to its nonexistent parent!
1954 		 */
1955 		rcu_report_qs_rsp(flags);
1956 		return;
1957 	}
1958 
1959 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1960 	gps = rnp->gp_seq;
1961 	mask = rnp->grpmask;
1962 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
1963 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
1964 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
1965 }
1966 
1967 /*
1968  * Record a quiescent state for the specified CPU to that CPU's rcu_data
1969  * structure.  This must be called from the specified CPU.
1970  */
1971 static void
1972 rcu_report_qs_rdp(struct rcu_data *rdp)
1973 {
1974 	unsigned long flags;
1975 	unsigned long mask;
1976 	bool needacc = false;
1977 	struct rcu_node *rnp;
1978 
1979 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
1980 	rnp = rdp->mynode;
1981 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
1982 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
1983 	    rdp->gpwrap) {
1984 
1985 		/*
1986 		 * The grace period in which this quiescent state was
1987 		 * recorded has ended, so don't report it upwards.
1988 		 * We will instead need a new quiescent state that lies
1989 		 * within the current grace period.
1990 		 */
1991 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
1992 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1993 		return;
1994 	}
1995 	mask = rdp->grpmask;
1996 	rdp->core_needs_qs = false;
1997 	if ((rnp->qsmask & mask) == 0) {
1998 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1999 	} else {
2000 		/*
2001 		 * This GP can't end until cpu checks in, so all of our
2002 		 * callbacks can be processed during the next GP.
2003 		 *
2004 		 * NOCB kthreads have their own way to deal with that...
2005 		 */
2006 		if (!rcu_rdp_is_offloaded(rdp)) {
2007 			/*
2008 			 * The current GP has not yet ended, so it
2009 			 * should not be possible for rcu_accelerate_cbs()
2010 			 * to return true.  So complain, but don't awaken.
2011 			 */
2012 			WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2013 		} else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2014 			/*
2015 			 * ...but NOCB kthreads may miss or delay callback acceleration
2016 			 * if in the middle of a (de-)offloading process.
2017 			 */
2018 			needacc = true;
2019 		}
2020 
2021 		rcu_disable_urgency_upon_qs(rdp);
2022 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2023 		/* ^^^ Released rnp->lock */
2024 
2025 		if (needacc) {
2026 			rcu_nocb_lock_irqsave(rdp, flags);
2027 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2028 			rcu_nocb_unlock_irqrestore(rdp, flags);
2029 		}
2030 	}
2031 }
2032 
2033 /*
2034  * Check to see if there is a new grace period of which this CPU
2035  * is not yet aware, and if so, set up local rcu_data state for it.
2036  * Otherwise, see if this CPU has just passed through its first
2037  * quiescent state for this grace period, and record that fact if so.
2038  */
2039 static void
2040 rcu_check_quiescent_state(struct rcu_data *rdp)
2041 {
2042 	/* Check for grace-period ends and beginnings. */
2043 	note_gp_changes(rdp);
2044 
2045 	/*
2046 	 * Does this CPU still need to do its part for current grace period?
2047 	 * If no, return and let the other CPUs do their part as well.
2048 	 */
2049 	if (!rdp->core_needs_qs)
2050 		return;
2051 
2052 	/*
2053 	 * Was there a quiescent state since the beginning of the grace
2054 	 * period? If no, then exit and wait for the next call.
2055 	 */
2056 	if (rdp->cpu_no_qs.b.norm)
2057 		return;
2058 
2059 	/*
2060 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2061 	 * judge of that).
2062 	 */
2063 	rcu_report_qs_rdp(rdp);
2064 }
2065 
2066 /* Return true if callback-invocation time limit exceeded. */
2067 static bool rcu_do_batch_check_time(long count, long tlimit,
2068 				    bool jlimit_check, unsigned long jlimit)
2069 {
2070 	// Invoke local_clock() only once per 32 consecutive callbacks.
2071 	return unlikely(tlimit) &&
2072 	       (!likely(count & 31) ||
2073 		(IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) &&
2074 		 jlimit_check && time_after(jiffies, jlimit))) &&
2075 	       local_clock() >= tlimit;
2076 }
2077 
2078 /*
2079  * Invoke any RCU callbacks that have made it to the end of their grace
2080  * period.  Throttle as specified by rdp->blimit.
2081  */
2082 static void rcu_do_batch(struct rcu_data *rdp)
2083 {
2084 	long bl;
2085 	long count = 0;
2086 	int div;
2087 	bool __maybe_unused empty;
2088 	unsigned long flags;
2089 	unsigned long jlimit;
2090 	bool jlimit_check = false;
2091 	long pending;
2092 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2093 	struct rcu_head *rhp;
2094 	long tlimit = 0;
2095 
2096 	/* If no callbacks are ready, just return. */
2097 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2098 		trace_rcu_batch_start(rcu_state.name,
2099 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2100 		trace_rcu_batch_end(rcu_state.name, 0,
2101 				    !rcu_segcblist_empty(&rdp->cblist),
2102 				    need_resched(), is_idle_task(current),
2103 				    rcu_is_callbacks_kthread(rdp));
2104 		return;
2105 	}
2106 
2107 	/*
2108 	 * Extract the list of ready callbacks, disabling IRQs to prevent
2109 	 * races with call_rcu() from interrupt handlers.  Leave the
2110 	 * callback counts, as rcu_barrier() needs to be conservative.
2111 	 */
2112 	rcu_nocb_lock_irqsave(rdp, flags);
2113 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2114 	pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2115 	div = READ_ONCE(rcu_divisor);
2116 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2117 	bl = max(rdp->blimit, pending >> div);
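	/*
	 * Illustrative numbers only: with the fallback divisor of 7 used
	 * above and 10000 ready callbacks, pending >> div is 78, so bl
	 * becomes the larger of ->blimit and 78.
	 */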
2118 	if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2119 	    (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) {
2120 		const long npj = NSEC_PER_SEC / HZ;
2121 		long rrn = READ_ONCE(rcu_resched_ns);
2122 
2123 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2124 		tlimit = local_clock() + rrn;
2125 		jlimit = jiffies + (rrn + npj + 1) / npj;
2126 		jlimit_check = true;
2127 	}
2128 	trace_rcu_batch_start(rcu_state.name,
2129 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2130 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2131 	if (rcu_rdp_is_offloaded(rdp))
2132 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2133 
2134 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2135 	rcu_nocb_unlock_irqrestore(rdp, flags);
2136 
2137 	/* Invoke callbacks. */
2138 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2139 	rhp = rcu_cblist_dequeue(&rcl);
2140 
2141 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2142 		rcu_callback_t f;
2143 
2144 		count++;
2145 		debug_rcu_head_unqueue(rhp);
2146 
2147 		rcu_lock_acquire(&rcu_callback_map);
2148 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2149 
2150 		f = rhp->func;
2151 		debug_rcu_head_callback(rhp);
2152 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2153 		f(rhp);
2154 
2155 		rcu_lock_release(&rcu_callback_map);
2156 
2157 		/*
2158 		 * Stop only if limit reached and CPU has something to do.
2159 		 */
2160 		if (in_serving_softirq()) {
2161 			if (count >= bl && (need_resched() || !is_idle_task(current)))
2162 				break;
2163 			/*
2164 			 * Make sure we don't spend too much time here and deprive other
2165 			 * softirq vectors of CPU cycles.
2166 			 */
2167 			if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit))
2168 				break;
2169 		} else {
2170 			// In rcuc/rcuoc context, so no worries about
2171 			// depriving other softirq vectors of CPU cycles.
2172 			local_bh_enable();
2173 			lockdep_assert_irqs_enabled();
2174 			cond_resched_tasks_rcu_qs();
2175 			lockdep_assert_irqs_enabled();
2176 			local_bh_disable();
2177 			// But rcuc kthreads can delay quiescent-state
2178 			// reporting, so check time limits for them.
2179 			if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2180 			    rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) {
2181 				rdp->rcu_cpu_has_work = 1;
2182 				break;
2183 			}
2184 		}
2185 	}
2186 
2187 	rcu_nocb_lock_irqsave(rdp, flags);
2188 	rdp->n_cbs_invoked += count;
2189 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2190 			    is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2191 
2192 	/* Update counts and requeue any remaining callbacks. */
2193 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2194 	rcu_segcblist_add_len(&rdp->cblist, -count);
2195 
2196 	/* Reinstate batch limit if we have worked down the excess. */
2197 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2198 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2199 		rdp->blimit = blimit;
2200 
2201 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2202 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2203 		rdp->qlen_last_fqs_check = 0;
2204 		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2205 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2206 		rdp->qlen_last_fqs_check = count;
2207 
2208 	/*
2209 	 * The following usually indicates a double call_rcu().  To track
2210 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2211 	 */
2212 	empty = rcu_segcblist_empty(&rdp->cblist);
2213 	WARN_ON_ONCE(count == 0 && !empty);
2214 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2215 		     count != 0 && empty);
2216 	WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2217 	WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2218 
2219 	rcu_nocb_unlock_irqrestore(rdp, flags);
2220 
2221 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2222 }
2223 
2224 /*
2225  * This function is invoked from each scheduling-clock interrupt,
2226  * and checks to see if this CPU is in a non-context-switch quiescent
2227  * state, for example, user mode or idle loop.  It also schedules RCU
2228  * core processing.  If the current grace period has gone on too long,
2229  * it will ask the scheduler to manufacture a context switch for the sole
2230  * purpose of providing the needed quiescent state.
2231  */
2232 void rcu_sched_clock_irq(int user)
2233 {
2234 	unsigned long j;
2235 
2236 	if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2237 		j = jiffies;
2238 		WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2239 		__this_cpu_write(rcu_data.last_sched_clock, j);
2240 	}
2241 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2242 	lockdep_assert_irqs_disabled();
2243 	raw_cpu_inc(rcu_data.ticks_this_gp);
2244 	/* The load-acquire pairs with the store-release setting to true. */
2245 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2246 		/* Idle and userspace execution already are quiescent states. */
2247 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2248 			set_tsk_need_resched(current);
2249 			set_preempt_need_resched();
2250 		}
2251 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2252 	}
2253 	rcu_flavor_sched_clock_irq(user);
2254 	if (rcu_pending(user))
2255 		invoke_rcu_core();
2256 	if (user || rcu_is_cpu_rrupt_from_idle())
2257 		rcu_note_voluntary_context_switch(current);
2258 	lockdep_assert_irqs_disabled();
2259 
2260 	trace_rcu_utilization(TPS("End scheduler-tick"));
2261 }
2262 
2263 /*
2264  * Scan the leaf rcu_node structures.  For each structure on which all
2265  * CPUs have reported a quiescent state and on which there are tasks
2266  * blocking the current grace period, initiate RCU priority boosting.
2267  * Otherwise, invoke the specified function to check dyntick state for
2268  * each CPU that has not yet reported a quiescent state.
2269  */
2270 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2271 {
2272 	int cpu;
2273 	unsigned long flags;
2274 	unsigned long mask;
2275 	struct rcu_data *rdp;
2276 	struct rcu_node *rnp;
2277 
2278 	rcu_state.cbovld = rcu_state.cbovldnext;
2279 	rcu_state.cbovldnext = false;
2280 	rcu_for_each_leaf_node(rnp) {
2281 		cond_resched_tasks_rcu_qs();
2282 		mask = 0;
2283 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2284 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2285 		if (rnp->qsmask == 0) {
2286 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2287 				/*
2288 				 * No point in scanning bits because they
2289 				 * are all zero.  But we might need to
2290 				 * priority-boost blocked readers.
2291 				 */
2292 				rcu_initiate_boost(rnp, flags);
2293 				/* rcu_initiate_boost() releases rnp->lock */
2294 				continue;
2295 			}
2296 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2297 			continue;
2298 		}
2299 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2300 			rdp = per_cpu_ptr(&rcu_data, cpu);
2301 			if (f(rdp)) {
2302 				mask |= rdp->grpmask;
2303 				rcu_disable_urgency_upon_qs(rdp);
2304 			}
2305 		}
2306 		if (mask != 0) {
2307 			/* Idle/offline CPUs, report (releases rnp->lock). */
2308 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2309 		} else {
2310 			/* Nothing to do here, so just drop the lock. */
2311 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2312 		}
2313 	}
2314 }
2315 
2316 /*
2317  * Force quiescent states on reluctant CPUs, and also detect which
2318  * CPUs are in dyntick-idle mode.
2319  */
2320 void rcu_force_quiescent_state(void)
2321 {
2322 	unsigned long flags;
2323 	bool ret;
2324 	struct rcu_node *rnp;
2325 	struct rcu_node *rnp_old = NULL;
2326 
2327 	/* Funnel through hierarchy to reduce memory contention. */
2328 	rnp = raw_cpu_read(rcu_data.mynode);
2329 	for (; rnp != NULL; rnp = rnp->parent) {
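		/*
		 * Give up at this level if quiescent-state forcing has
		 * already been requested or if another CPU holds this
		 * level's ->fqslock, because in either case some other
		 * invocation is already funneling a request rootwards.
		 */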
2330 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2331 		       !raw_spin_trylock(&rnp->fqslock);
2332 		if (rnp_old != NULL)
2333 			raw_spin_unlock(&rnp_old->fqslock);
2334 		if (ret)
2335 			return;
2336 		rnp_old = rnp;
2337 	}
2338 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2339 
2340 	/* Reached the root of the rcu_node tree, acquire lock. */
2341 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2342 	raw_spin_unlock(&rnp_old->fqslock);
2343 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2344 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2345 		return;  /* Someone beat us to it. */
2346 	}
2347 	WRITE_ONCE(rcu_state.gp_flags,
2348 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2349 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2350 	rcu_gp_kthread_wake();
2351 }
2352 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2353 
2354 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2355 // grace periods.
2356 static void strict_work_handler(struct work_struct *work)
2357 {
2358 	rcu_read_lock();
2359 	rcu_read_unlock();
2360 }
2361 
2362 /* Perform RCU core processing work for the current CPU.  */
2363 static __latent_entropy void rcu_core(void)
2364 {
2365 	unsigned long flags;
2366 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2367 	struct rcu_node *rnp = rdp->mynode;
2368 	/*
2369 	 * On RT rcu_core() can be preempted when IRQs aren't disabled.
2370 	 * Therefore this function can race with concurrent NOCB (de-)offloading
2371 	 * on this CPU and the below condition must be considered volatile.
2372 	 * However if we race with:
2373 	 *
2374 	 * _ Offloading:   In the worst case we accelerate or process callbacks
2375 	 *                 concurrently with NOCB kthreads. We are guaranteed to
2376 	 *                 call rcu_nocb_lock() if that happens.
2377 	 *
2378 	 * _ Deoffloading: In the worst case we miss callbacks acceleration or
2379 	 * _ Deoffloading: In the worst case we miss callback acceleration or
2380 	 *                 of deoffloading invokes rcu_core() after setting
2381 	 *                 SEGCBLIST_RCU_CORE. So we guarantee that we'll process
2382 	 *                 what could have been dismissed without the need to wait
2383 	 *                 for the next rcu_pending() check in the next jiffy.
2384 	 */
2385 	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2386 
2387 	if (cpu_is_offline(smp_processor_id()))
2388 		return;
2389 	trace_rcu_utilization(TPS("Start RCU core"));
2390 	WARN_ON_ONCE(!rdp->beenonline);
2391 
2392 	/* Report any deferred quiescent states if preemption enabled. */
2393 	if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2394 		rcu_preempt_deferred_qs(current);
2395 	} else if (rcu_preempt_need_deferred_qs(current)) {
2396 		set_tsk_need_resched(current);
2397 		set_preempt_need_resched();
2398 	}
2399 
2400 	/* Update RCU state based on any recent quiescent states. */
2401 	rcu_check_quiescent_state(rdp);
2402 
2403 	/* No grace period and unregistered callbacks? */
2404 	if (!rcu_gp_in_progress() &&
2405 	    rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2406 		rcu_nocb_lock_irqsave(rdp, flags);
2407 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2408 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2409 		rcu_nocb_unlock_irqrestore(rdp, flags);
2410 	}
2411 
2412 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2413 
2414 	/* If there are callbacks ready, invoke them. */
2415 	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2416 	    likely(READ_ONCE(rcu_scheduler_fully_active))) {
2417 		rcu_do_batch(rdp);
2418 		/* Re-invoke RCU core processing if there are callbacks remaining. */
2419 		if (rcu_segcblist_ready_cbs(&rdp->cblist))
2420 			invoke_rcu_core();
2421 	}
2422 
2423 	/* Do any needed deferred wakeups of rcuo kthreads. */
2424 	do_nocb_deferred_wakeup(rdp);
2425 	trace_rcu_utilization(TPS("End RCU core"));
2426 
2427 	// If strict GPs, schedule an RCU reader in a clean environment.
2428 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2429 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2430 }
2431 
2432 static void rcu_core_si(struct softirq_action *h)
2433 {
2434 	rcu_core();
2435 }
2436 
2437 static void rcu_wake_cond(struct task_struct *t, int status)
2438 {
2439 	/*
2440 	 * If the thread is yielding, only wake it when this
2441 	 * is invoked from idle.
2442 	 */
2443 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2444 		wake_up_process(t);
2445 }
2446 
2447 static void invoke_rcu_core_kthread(void)
2448 {
2449 	struct task_struct *t;
2450 	unsigned long flags;
2451 
2452 	local_irq_save(flags);
2453 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2454 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2455 	if (t != NULL && t != current)
2456 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2457 	local_irq_restore(flags);
2458 }
2459 
2460 /*
2461  * Wake up this CPU's rcuc kthread to do RCU core processing.
2462  */
2463 static void invoke_rcu_core(void)
2464 {
2465 	if (!cpu_online(smp_processor_id()))
2466 		return;
2467 	if (use_softirq)
2468 		raise_softirq(RCU_SOFTIRQ);
2469 	else
2470 		invoke_rcu_core_kthread();
2471 }
2472 
2473 static void rcu_cpu_kthread_park(unsigned int cpu)
2474 {
2475 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2476 }
2477 
2478 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2479 {
2480 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2481 }
2482 
2483 /*
2484  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2485  * the RCU softirq used in configurations of RCU that do not support RCU
2486  * priority boosting.
2487  */
2488 static void rcu_cpu_kthread(unsigned int cpu)
2489 {
2490 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2491 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2492 	unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2493 	int spincnt;
2494 
2495 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
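	/* Poll for work up to ten times before yielding the CPU below. */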
2496 	for (spincnt = 0; spincnt < 10; spincnt++) {
2497 		WRITE_ONCE(*j, jiffies);
2498 		local_bh_disable();
2499 		*statusp = RCU_KTHREAD_RUNNING;
2500 		local_irq_disable();
2501 		work = *workp;
2502 		WRITE_ONCE(*workp, 0);
2503 		local_irq_enable();
2504 		if (work)
2505 			rcu_core();
2506 		local_bh_enable();
2507 		if (!READ_ONCE(*workp)) {
2508 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2509 			*statusp = RCU_KTHREAD_WAITING;
2510 			return;
2511 		}
2512 	}
2513 	*statusp = RCU_KTHREAD_YIELDING;
2514 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2515 	schedule_timeout_idle(2);
2516 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2517 	*statusp = RCU_KTHREAD_WAITING;
2518 	WRITE_ONCE(*j, jiffies);
2519 }
2520 
2521 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2522 	.store			= &rcu_data.rcu_cpu_kthread_task,
2523 	.thread_should_run	= rcu_cpu_kthread_should_run,
2524 	.thread_fn		= rcu_cpu_kthread,
2525 	.thread_comm		= "rcuc/%u",
2526 	.setup			= rcu_cpu_kthread_setup,
2527 	.park			= rcu_cpu_kthread_park,
2528 };
2529 
2530 /*
2531  * Spawn per-CPU RCU core processing kthreads.
2532  */
2533 static int __init rcu_spawn_core_kthreads(void)
2534 {
2535 	int cpu;
2536 
2537 	for_each_possible_cpu(cpu)
2538 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2539 	if (use_softirq)
2540 		return 0;
2541 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2542 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2543 	return 0;
2544 }
2545 
2546 /*
2547  * Handle any core-RCU processing required by a call_rcu() invocation.
2548  */
2549 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2550 			    unsigned long flags)
2551 {
2552 	/*
2553 	 * If called from an extended quiescent state, invoke the RCU
2554 	 * core in order to force a re-evaluation of RCU's idleness.
2555 	 */
2556 	if (!rcu_is_watching())
2557 		invoke_rcu_core();
2558 
2559 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2560 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2561 		return;
2562 
2563 	/*
2564 	 * Force the grace period if too many callbacks or too long waiting.
2565 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2566 	 * if some other CPU has recently done so.  Also, don't bother
2567 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2568 	 * is the only one waiting for a grace period to complete.
2569 	 */
2570 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2571 		     rdp->qlen_last_fqs_check + qhimark)) {
2572 
2573 		/* Are we ignoring a completed grace period? */
2574 		note_gp_changes(rdp);
2575 
2576 		/* Start a new grace period if one not already started. */
2577 		if (!rcu_gp_in_progress()) {
2578 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2579 		} else {
2580 			/* Give the grace period a kick. */
2581 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
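			/*
			 * The raised ->blimit is restored by rcu_do_batch()
			 * once the callback backlog drains to qlowmark or
			 * below.
			 */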
2582 			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2583 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2584 				rcu_force_quiescent_state();
2585 			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2586 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2587 		}
2588 	}
2589 }
2590 
2591 /*
2592  * RCU callback function to leak a callback.
2593  */
2594 static void rcu_leak_callback(struct rcu_head *rhp)
2595 {
2596 }
2597 
2598 /*
2599  * Check and if necessary update the leaf rcu_node structure's
2600  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2601  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2602  * structure's ->lock.
2603  */
2604 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2605 {
2606 	raw_lockdep_assert_held_rcu_node(rnp);
2607 	if (qovld_calc <= 0)
2608 		return; // Early boot and wildcard value set.
2609 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2610 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2611 	else
2612 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2613 }
2614 
2615 /*
2616  * Check and if necessary update the leaf rcu_node structure's
2617  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2618  * number of queued RCU callbacks.  No locks need be held, but the
2619  * caller must have disabled interrupts.
2620  *
2621  * Note that this function ignores the possibility that there are a lot
2622  * of callbacks all of which have already seen the end of their respective
2623  * grace periods.  This omission is due to the need for no-CBs CPUs to
2624  * be holding ->nocb_lock to do this check, which is too heavy for a
2625  * common-case operation.
2626  */
2627 static void check_cb_ovld(struct rcu_data *rdp)
2628 {
2629 	struct rcu_node *const rnp = rdp->mynode;
2630 
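	/*
	 * The rcu_node lock can be skipped when the overload state implied
	 * by the current callback count already matches this CPU's bit in
	 * ->cbovldmask.
	 */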
2631 	if (qovld_calc <= 0 ||
2632 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2633 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2634 		return; // Early boot wildcard value or already set correctly.
2635 	raw_spin_lock_rcu_node(rnp);
2636 	check_cb_ovld_locked(rdp, rnp);
2637 	raw_spin_unlock_rcu_node(rnp);
2638 }
2639 
2640 static void
2641 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
2642 {
2643 	static atomic_t doublefrees;
2644 	unsigned long flags;
2645 	bool lazy;
2646 	struct rcu_data *rdp;
2647 	bool was_alldone;
2648 
2649 	/* Misaligned rcu_head! */
2650 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2651 
2652 	if (debug_rcu_head_queue(head)) {
2653 		/*
2654 		 * Probable double call_rcu(), so leak the callback.
2655 		 * Use rcu:rcu_callback trace event to find the previous
2656 		 * time callback was passed to call_rcu().
2657 		 */
2658 		if (atomic_inc_return(&doublefrees) < 4) {
2659 			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
2660 			mem_dump_obj(head);
2661 		}
2662 		WRITE_ONCE(head->func, rcu_leak_callback);
2663 		return;
2664 	}
2665 	head->func = func;
2666 	head->next = NULL;
2667 	kasan_record_aux_stack_noalloc(head);
2668 	local_irq_save(flags);
2669 	rdp = this_cpu_ptr(&rcu_data);
2670 	lazy = lazy_in && !rcu_async_should_hurry();
2671 
2672 	/* Add the callback to our list. */
2673 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2674 		// This can trigger due to call_rcu() from offline CPU:
2675 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2676 		WARN_ON_ONCE(!rcu_is_watching());
2677 		// Very early boot, before rcu_init().  Initialize if needed
2678 		// and then drop through to queue the callback.
2679 		if (rcu_segcblist_empty(&rdp->cblist))
2680 			rcu_segcblist_init(&rdp->cblist);
2681 	}
2682 
2683 	check_cb_ovld(rdp);
2684 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
2685 		return; // Enqueued onto ->nocb_bypass, so just leave.
2686 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2687 	rcu_segcblist_enqueue(&rdp->cblist, head);
2688 	if (__is_kvfree_rcu_offset((unsigned long)func))
2689 		trace_rcu_kvfree_callback(rcu_state.name, head,
2690 					 (unsigned long)func,
2691 					 rcu_segcblist_n_cbs(&rdp->cblist));
2692 	else
2693 		trace_rcu_callback(rcu_state.name, head,
2694 				   rcu_segcblist_n_cbs(&rdp->cblist));
2695 
2696 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2697 
2698 	/* Go handle any RCU core processing required. */
2699 	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
2700 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2701 	} else {
2702 		__call_rcu_core(rdp, head, flags);
2703 		local_irq_restore(flags);
2704 	}
2705 }
2706 
2707 #ifdef CONFIG_RCU_LAZY
2708 /**
2709  * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
2710  * flush all lazy callbacks (including the new one) to the main ->cblist while
2711  * doing so.
2712  *
2713  * @head: structure to be used for queueing the RCU updates.
2714  * @func: actual callback function to be invoked after the grace period
2715  *
2716  * The callback function will be invoked some time after a full grace
2717  * period elapses, in other words after all pre-existing RCU read-side
2718  * critical sections have completed.
2719  *
2720  * Use this API instead of call_rcu() if you don't want the callback to be
2721  * invoked after very long periods of time, which can happen on systems without
2722  * memory pressure and on systems which are lightly loaded or mostly idle.
2723  * This function will cause callbacks to be invoked sooner rather than later at the
2724  * expense of extra power. Other than that, this function is identical to, and
2725  * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
2726  * ordering and other functionality.
2727  */
2728 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
2729 {
2730 	__call_rcu_common(head, func, false);
2731 }
2732 EXPORT_SYMBOL_GPL(call_rcu_hurry);
2733 #endif
2734 
2735 /**
2736  * call_rcu() - Queue an RCU callback for invocation after a grace period.
2737  * By default the callbacks are 'lazy' and are kept hidden from the main
2738  * ->cblist to prevent starting of grace periods too soon.
2739  * If you desire grace periods to start very soon, use call_rcu_hurry().
2740  *
2741  * @head: structure to be used for queueing the RCU updates.
2742  * @func: actual callback function to be invoked after the grace period
2743  *
2744  * The callback function will be invoked some time after a full grace
2745  * period elapses, in other words after all pre-existing RCU read-side
2746  * critical sections have completed.  However, the callback function
2747  * might well execute concurrently with RCU read-side critical sections
2748  * that started after call_rcu() was invoked.
2749  *
2750  * RCU read-side critical sections are delimited by rcu_read_lock()
2751  * and rcu_read_unlock(), and may be nested.  In addition, but only in
2752  * v5.0 and later, regions of code across which interrupts, preemption,
2753  * or softirqs have been disabled also serve as RCU read-side critical
2754  * sections.  This includes hardware interrupt handlers, softirq handlers,
2755  * and NMI handlers.
2756  *
2757  * Note that all CPUs must agree that the grace period extended beyond
2758  * all pre-existing RCU read-side critical sections.  On systems with more
2759  * than one CPU, this means that when "func()" is invoked, each CPU is
2760  * guaranteed to have executed a full memory barrier since the end of its
2761  * last RCU read-side critical section whose beginning preceded the call
2762  * to call_rcu().  It also means that each CPU executing an RCU read-side
2763  * critical section that continues beyond the start of "func()" must have
2764  * executed a memory barrier after the call_rcu() but before the beginning
2765  * of that RCU read-side critical section.  Note that these guarantees
2766  * include CPUs that are offline, idle, or executing in user mode, as
2767  * well as CPUs that are executing in the kernel.
2768  *
2769  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2770  * resulting RCU callback function "func()", then both CPU A and CPU B are
2771  * guaranteed to execute a full memory barrier during the time interval
2772  * between the call to call_rcu() and the invocation of "func()" -- even
2773  * if CPU A and CPU B are the same CPU (but again only if the system has
2774  * more than one CPU).
2775  *
2776  * Implementation of these memory-ordering guarantees is described here:
2777  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
2778  */
2779 void call_rcu(struct rcu_head *head, rcu_callback_t func)
2780 {
2781 	__call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
2782 }
2783 EXPORT_SYMBOL_GPL(call_rcu);
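
/*
 * Illustrative use only, assuming a caller-defined "struct foo" that embeds
 * a struct rcu_head named "rh" and whose published pointer "gp" is updated
 * under a caller-provided "gp_lock":
 *
 *	static void free_foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	call_rcu(&old->rh, free_foo_cb);
 *
 * Use call_rcu_hurry() instead when prompt invocation matters more than the
 * power savings provided by lazy callbacks.
 */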
2784 
2785 /* Maximum number of jiffies to wait before draining a batch. */
2786 #define KFREE_DRAIN_JIFFIES (5 * HZ)
2787 #define KFREE_N_BATCHES 2
2788 #define FREE_N_CHANNELS 2
2789 
2790 /**
2791  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
2792  * @list: List node. All blocks are linked between each other
2793  * @gp_snap: Snapshot of RCU state for objects placed to this bulk
2794  * @nr_records: Number of active pointers in the array
2795  * @records: Array of the kvfree_rcu() pointers
2796  */
2797 struct kvfree_rcu_bulk_data {
2798 	struct list_head list;
2799 	struct rcu_gp_oldstate gp_snap;
2800 	unsigned long nr_records;
2801 	void *records[];
2802 };
2803 
2804 /*
2805  * This macro defines how many entries the "records" array
2806  * will contain. It is chosen so that the resulting
2807  * kvfree_rcu_bulk_data structure occupies exactly one page.
2808  */
2809 #define KVFREE_BULK_MAX_ENTR \
2810 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
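
/*
 * For example, on a 64-bit system with 4 KiB pages and no structure padding,
 * the header occupies 40 bytes (16 for @list, 16 for @gp_snap and 8 for
 * @nr_records), leaving room for 507 pointers per page.
 */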
2811 
2812 /**
2813  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2814  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2815  * @head_free: List of kfree_rcu() objects waiting for a grace period
2816  * @head_free_gp_snap: Grace-period snapshot to check for attempted premature frees.
2817  * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
2818  * @krcp: Pointer to @kfree_rcu_cpu structure
2819  */
2820 
2821 struct kfree_rcu_cpu_work {
2822 	struct rcu_work rcu_work;
2823 	struct rcu_head *head_free;
2824 	struct rcu_gp_oldstate head_free_gp_snap;
2825 	struct list_head bulk_head_free[FREE_N_CHANNELS];
2826 	struct kfree_rcu_cpu *krcp;
2827 };
2828 
2829 /**
2830  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2831  * @head: List of kfree_rcu() objects not yet waiting for a grace period
2832  * @head_gp_snap: Snapshot of RCU state for objects placed to "@head"
2833  * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
2834  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2835  * @lock: Synchronize access to this structure
2836  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2837  * @initialized: The @rcu_work fields have been initialized
2838  * @head_count: Number of objects in rcu_head singular list
2839  * @bulk_count: Number of objects in bulk-list
2840  * @bkvcache:
2841  *	A simple cache list that contains objects for reuse purpose.
2842  *	In order to save some per-cpu space the list is singular.
2843  *	Even though the list itself is lockless, accesses to it have to be
2844  *	protected by the per-cpu lock.
2845  * @page_cache_work: A work to refill the cache when it is empty
2846  * @backoff_page_cache_fill: Delay cache refills
2847  * @work_in_progress: Indicates that page_cache_work is running
2848  * @hrtimer: A hrtimer for scheduling a page_cache_work
2849  * @nr_bkv_objs: number of allocated objects at @bkvcache.
2850  *
2851  * This is a per-CPU structure.  The reason that it is not included in
2852  * the rcu_data structure is to permit this code to be extracted from
2853  * the RCU files.  Such extraction could allow further optimization of
2854  * the interactions with the slab allocators.
2855  */
2856 struct kfree_rcu_cpu {
2857 	// Objects queued on a linked list
2858 	// through their rcu_head structures.
2859 	struct rcu_head *head;
2860 	unsigned long head_gp_snap;
2861 	atomic_t head_count;
2862 
2863 	// Objects queued on a bulk-list.
2864 	struct list_head bulk_head[FREE_N_CHANNELS];
2865 	atomic_t bulk_count[FREE_N_CHANNELS];
2866 
2867 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
2868 	raw_spinlock_t lock;
2869 	struct delayed_work monitor_work;
2870 	bool initialized;
2871 
2872 	struct delayed_work page_cache_work;
2873 	atomic_t backoff_page_cache_fill;
2874 	atomic_t work_in_progress;
2875 	struct hrtimer hrtimer;
2876 
2877 	struct llist_head bkvcache;
2878 	int nr_bkv_objs;
2879 };
2880 
2881 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
2882 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
2883 };
2884 
2885 static __always_inline void
2886 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
2887 {
2888 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2889 	int i;
2890 
2891 	for (i = 0; i < bhead->nr_records; i++)
2892 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
2893 #endif
2894 }
2895 
2896 static inline struct kfree_rcu_cpu *
2897 krc_this_cpu_lock(unsigned long *flags)
2898 {
2899 	struct kfree_rcu_cpu *krcp;
2900 
2901 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
2902 	krcp = this_cpu_ptr(&krc);
2903 	raw_spin_lock(&krcp->lock);
2904 
2905 	return krcp;
2906 }
2907 
2908 static inline void
2909 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
2910 {
2911 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
2912 }
2913 
2914 static inline struct kvfree_rcu_bulk_data *
2915 get_cached_bnode(struct kfree_rcu_cpu *krcp)
2916 {
2917 	if (!krcp->nr_bkv_objs)
2918 		return NULL;
2919 
2920 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
2921 	return (struct kvfree_rcu_bulk_data *)
2922 		llist_del_first(&krcp->bkvcache);
2923 }
2924 
2925 static inline bool
2926 put_cached_bnode(struct kfree_rcu_cpu *krcp,
2927 	struct kvfree_rcu_bulk_data *bnode)
2928 {
2929 	// Check the limit.
2930 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
2931 		return false;
2932 
2933 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
2934 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
2935 	return true;
2936 }
2937 
2938 static int
2939 drain_page_cache(struct kfree_rcu_cpu *krcp)
2940 {
2941 	unsigned long flags;
2942 	struct llist_node *page_list, *pos, *n;
2943 	int freed = 0;
2944 
2945 	if (!rcu_min_cached_objs)
2946 		return 0;
2947 
2948 	raw_spin_lock_irqsave(&krcp->lock, flags);
2949 	page_list = llist_del_all(&krcp->bkvcache);
2950 	WRITE_ONCE(krcp->nr_bkv_objs, 0);
2951 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
2952 
2953 	llist_for_each_safe(pos, n, page_list) {
2954 		free_page((unsigned long)pos);
2955 		freed++;
2956 	}
2957 
2958 	return freed;
2959 }
2960 
2961 static void
2962 kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
2963 	struct kvfree_rcu_bulk_data *bnode, int idx)
2964 {
2965 	unsigned long flags;
2966 	int i;
2967 
2968 	if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
2969 		debug_rcu_bhead_unqueue(bnode);
2970 		rcu_lock_acquire(&rcu_callback_map);
2971 		if (idx == 0) { // kmalloc() / kfree().
2972 			trace_rcu_invoke_kfree_bulk_callback(
2973 				rcu_state.name, bnode->nr_records,
2974 				bnode->records);
2975 
2976 			kfree_bulk(bnode->nr_records, bnode->records);
2977 		} else { // vmalloc() / vfree().
2978 			for (i = 0; i < bnode->nr_records; i++) {
2979 				trace_rcu_invoke_kvfree_callback(
2980 					rcu_state.name, bnode->records[i], 0);
2981 
2982 				vfree(bnode->records[i]);
2983 			}
2984 		}
2985 		rcu_lock_release(&rcu_callback_map);
2986 	}
2987 
2988 	raw_spin_lock_irqsave(&krcp->lock, flags);
2989 	if (put_cached_bnode(krcp, bnode))
2990 		bnode = NULL;
2991 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
2992 
2993 	if (bnode)
2994 		free_page((unsigned long) bnode);
2995 
2996 	cond_resched_tasks_rcu_qs();
2997 }
2998 
2999 static void
3000 kvfree_rcu_list(struct rcu_head *head)
3001 {
3002 	struct rcu_head *next;
3003 
3004 	for (; head; head = next) {
3005 		void *ptr = (void *) head->func;
3006 		unsigned long offset = (void *) head - ptr;
3007 
3008 		next = head->next;
3009 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3010 		rcu_lock_acquire(&rcu_callback_map);
3011 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3012 
3013 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3014 			kvfree(ptr);
3015 
3016 		rcu_lock_release(&rcu_callback_map);
3017 		cond_resched_tasks_rcu_qs();
3018 	}
3019 }
3020 
3021 /*
3022  * This function is invoked in workqueue context after a grace period.
3023  * It frees all the objects queued on ->bulk_head_free or ->head_free.
3024  */
3025 static void kfree_rcu_work(struct work_struct *work)
3026 {
3027 	unsigned long flags;
3028 	struct kvfree_rcu_bulk_data *bnode, *n;
3029 	struct list_head bulk_head[FREE_N_CHANNELS];
3030 	struct rcu_head *head;
3031 	struct kfree_rcu_cpu *krcp;
3032 	struct kfree_rcu_cpu_work *krwp;
3033 	struct rcu_gp_oldstate head_gp_snap;
3034 	int i;
3035 
3036 	krwp = container_of(to_rcu_work(work),
3037 		struct kfree_rcu_cpu_work, rcu_work);
3038 	krcp = krwp->krcp;
3039 
3040 	raw_spin_lock_irqsave(&krcp->lock, flags);
3041 	// Channels 1 and 2.
3042 	for (i = 0; i < FREE_N_CHANNELS; i++)
3043 		list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);
3044 
3045 	// Channel 3.
3046 	head = krwp->head_free;
3047 	krwp->head_free = NULL;
3048 	head_gp_snap = krwp->head_free_gp_snap;
3049 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3050 
3051 	// Handle the first two channels.
3052 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3053 		// Start from the tail page, so a grace period has likely already elapsed for it.
3054 		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
3055 			kvfree_rcu_bulk(krcp, bnode, i);
3056 	}
3057 
3058 	/*
3059 	 * This is used when the "bulk" path cannot be used for the
3060 	 * double-argument variant of kvfree_rcu().  This happens when the
3061 	 * page-cache is empty, which means that objects are instead
3062 	 * queued on a linked list through their rcu_head structures.
3063 	 * This list is named "Channel 3".
3064 	 */
3065 	if (head && !WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&head_gp_snap)))
3066 		kvfree_rcu_list(head);
3067 }
3068 
3069 static bool
3070 need_offload_krc(struct kfree_rcu_cpu *krcp)
3071 {
3072 	int i;
3073 
3074 	for (i = 0; i < FREE_N_CHANNELS; i++)
3075 		if (!list_empty(&krcp->bulk_head[i]))
3076 			return true;
3077 
3078 	return !!READ_ONCE(krcp->head);
3079 }
3080 
3081 static bool
3082 need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
3083 {
3084 	int i;
3085 
3086 	for (i = 0; i < FREE_N_CHANNELS; i++)
3087 		if (!list_empty(&krwp->bulk_head_free[i]))
3088 			return true;
3089 
3090 	return !!krwp->head_free;
3091 }
3092 
3093 static int krc_count(struct kfree_rcu_cpu *krcp)
3094 {
3095 	int sum = atomic_read(&krcp->head_count);
3096 	int i;
3097 
3098 	for (i = 0; i < FREE_N_CHANNELS; i++)
3099 		sum += atomic_read(&krcp->bulk_count[i]);
3100 
3101 	return sum;
3102 }
3103 
3104 static void
3105 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
3106 {
3107 	long delay, delay_left;
3108 
3109 	delay = krc_count(krcp) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
3110 	if (delayed_work_pending(&krcp->monitor_work)) {
3111 		delay_left = krcp->monitor_work.timer.expires - jiffies;
3112 		if (delay < delay_left)
3113 			mod_delayed_work(system_wq, &krcp->monitor_work, delay);
3114 		return;
3115 	}
3116 	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
3117 }
3118 
3119 static void
3120 kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
3121 {
3122 	struct list_head bulk_ready[FREE_N_CHANNELS];
3123 	struct kvfree_rcu_bulk_data *bnode, *n;
3124 	struct rcu_head *head_ready = NULL;
3125 	unsigned long flags;
3126 	int i;
3127 
3128 	raw_spin_lock_irqsave(&krcp->lock, flags);
3129 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3130 		INIT_LIST_HEAD(&bulk_ready[i]);
3131 
3132 		list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
3133 			if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
3134 				break;
3135 
3136 			atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
3137 			list_move(&bnode->list, &bulk_ready[i]);
3138 		}
3139 	}
3140 
3141 	if (krcp->head && poll_state_synchronize_rcu(krcp->head_gp_snap)) {
3142 		head_ready = krcp->head;
3143 		atomic_set(&krcp->head_count, 0);
3144 		WRITE_ONCE(krcp->head, NULL);
3145 	}
3146 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3147 
3148 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3149 		list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)
3150 			kvfree_rcu_bulk(krcp, bnode, i);
3151 	}
3152 
3153 	if (head_ready)
3154 		kvfree_rcu_list(head_ready);
3155 }
3156 
3157 /*
3158  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3159  */
3160 static void kfree_rcu_monitor(struct work_struct *work)
3161 {
3162 	struct kfree_rcu_cpu *krcp = container_of(work,
3163 		struct kfree_rcu_cpu, monitor_work.work);
3164 	unsigned long flags;
3165 	int i, j;
3166 
3167 	// Drain ready for reclaim.
3168 	kvfree_rcu_drain_ready(krcp);
3169 
3170 	raw_spin_lock_irqsave(&krcp->lock, flags);
3171 
3172 	// Attempt to start a new batch.
3173 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3174 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3175 
3176 		// Try to detach bulk_head or head and attach it to krwp, but
3177 		// only when all channels are free.  If any channel is not free,
3178 		// on-going RCU work at krwp is still handling its free requests.
3179 		if (need_wait_for_krwp_work(krwp))
3180 			continue;
3181 
3182 		// kvfree_rcu_drain_ready() might have already drained this krcp; if so, give up.
3183 		if (need_offload_krc(krcp)) {
3184 			// Channel 1 corresponds to the SLAB-pointer bulk path.
3185 			// Channel 2 corresponds to vmalloc-pointer bulk path.
3186 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3187 				if (list_empty(&krwp->bulk_head_free[j])) {
3188 					atomic_set(&krcp->bulk_count[j], 0);
3189 					list_replace_init(&krcp->bulk_head[j],
3190 						&krwp->bulk_head_free[j]);
3191 				}
3192 			}
3193 
3194 			// Channel 3 corresponds to both SLAB and vmalloc
3195 			// objects queued on the linked list.
3196 			if (!krwp->head_free) {
3197 				krwp->head_free = krcp->head;
3198 				get_state_synchronize_rcu_full(&krwp->head_free_gp_snap);
3199 				atomic_set(&krcp->head_count, 0);
3200 				WRITE_ONCE(krcp->head, NULL);
3201 			}
3202 
3203 			// One work item serves one batch, and one batch
3204 			// can handle all three "free channels".  The work
3205 			// might already be in the pending state when the
3206 			// channels have been detached one after the
3207 			// other.
3208 			queue_rcu_work(system_wq, &krwp->rcu_work);
3209 		}
3210 	}
3211 
3212 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3213 
3214 	// If there is nothing left to detach, our job here is done.
3215 	// If at least one of the channels is still busy, rearm the
3216 	// work to repeat the attempt later, because previous batches
3217 	// are still in progress.
3219 	if (need_offload_krc(krcp))
3220 		schedule_delayed_monitor_work(krcp);
3221 }
3222 
3223 static enum hrtimer_restart
3224 schedule_page_work_fn(struct hrtimer *t)
3225 {
3226 	struct kfree_rcu_cpu *krcp =
3227 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3228 
3229 	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3230 	return HRTIMER_NORESTART;
3231 }
3232 
3233 static void fill_page_cache_func(struct work_struct *work)
3234 {
3235 	struct kvfree_rcu_bulk_data *bnode;
3236 	struct kfree_rcu_cpu *krcp =
3237 		container_of(work, struct kfree_rcu_cpu,
3238 			page_cache_work.work);
3239 	unsigned long flags;
3240 	int nr_pages;
3241 	bool pushed;
3242 	int i;
3243 
3244 	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3245 		1 : rcu_min_cached_objs;
3246 
3247 	for (i = READ_ONCE(krcp->nr_bkv_objs); i < nr_pages; i++) {
3248 		bnode = (struct kvfree_rcu_bulk_data *)
3249 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3250 
3251 		if (!bnode)
3252 			break;
3253 
3254 		raw_spin_lock_irqsave(&krcp->lock, flags);
3255 		pushed = put_cached_bnode(krcp, bnode);
3256 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3257 
3258 		if (!pushed) {
3259 			free_page((unsigned long) bnode);
3260 			break;
3261 		}
3262 	}
3263 
3264 	atomic_set(&krcp->work_in_progress, 0);
3265 	atomic_set(&krcp->backoff_page_cache_fill, 0);
3266 }
3267 
3268 static void
3269 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3270 {
3271 	// If cache disabled, bail out.
3272 	if (!rcu_min_cached_objs)
3273 		return;
3274 
3275 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3276 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3277 		if (atomic_read(&krcp->backoff_page_cache_fill)) {
3278 			queue_delayed_work(system_wq,
3279 				&krcp->page_cache_work,
3280 					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3281 		} else {
3282 			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3283 			krcp->hrtimer.function = schedule_page_work_fn;
3284 			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3285 		}
3286 	}
3287 }
3288 
3289 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3290 // state specified by flags.  If can_alloc is true, the caller must
3291 // be schedulable and not be holding any locks or mutexes that might be
3292 // acquired by the memory allocator or anything that it might invoke.
3293 // Returns true if ptr was successfully recorded, else the caller must
3294 // use a fallback.
3295 static inline bool
3296 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3297 	unsigned long *flags, void *ptr, bool can_alloc)
3298 {
3299 	struct kvfree_rcu_bulk_data *bnode;
3300 	int idx;
3301 
3302 	*krcp = krc_this_cpu_lock(flags);
3303 	if (unlikely(!(*krcp)->initialized))
3304 		return false;
3305 
3306 	idx = !!is_vmalloc_addr(ptr);
3307 	bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
3308 		struct kvfree_rcu_bulk_data, list);
3309 
3310 	/* Check if a new block is required. */
3311 	if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
3312 		bnode = get_cached_bnode(*krcp);
3313 		if (!bnode && can_alloc) {
3314 			krc_this_cpu_unlock(*krcp, *flags);
3315 
3316 			// __GFP_NORETRY - allows a light-weight direct reclaim,
3317 			// which helps minimize how often the fallback path is
3318 			// hit.  It also forbids invoking the OOM killer, which
3319 			// is beneficial since we are about to release memory soon.
3320 			//
3321 			// __GFP_NOMEMALLOC - prevents consuming all of the
3322 			// memory reserves.  Please note we have a fallback path.
3323 			//
3324 			// __GFP_NOWARN - the allocation is expected to fail
3325 			// under low-memory or high memory-pressure scenarios,
3326 			// so do not warn about it.
3327 			bnode = (struct kvfree_rcu_bulk_data *)
3328 				__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3329 			raw_spin_lock_irqsave(&(*krcp)->lock, *flags);
3330 		}
3331 
3332 		if (!bnode)
3333 			return false;
3334 
3335 		// Initialize the new block and attach it.
3336 		bnode->nr_records = 0;
3337 		list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
3338 	}
3339 
3340 	// Finally insert and update the GP for this page.
3341 	bnode->records[bnode->nr_records++] = ptr;
3342 	get_state_synchronize_rcu_full(&bnode->gp_snap);
3343 	atomic_inc(&(*krcp)->bulk_count[idx]);
3344 
3345 	return true;
3346 }
3347 
3348 /*
3349  * Queue a request for lazy invocation of the appropriate free routine
3350  * after a grace period.  Please note that three paths are maintained,
3351  * two for the common case using arrays of pointers and a third one that
3352  * is used only when the main paths cannot be used, for example, due to
3353  * memory pressure.
3354  *
3355  * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
3356  * every KFREE_DRAIN_JIFFIES jiffies. All the objects in the batch will be
3357  * freed in workqueue context. This allows requests to be batched together to
3358  * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3359  */
3360 void kvfree_call_rcu(struct rcu_head *head, void *ptr)
3361 {
3362 	unsigned long flags;
3363 	struct kfree_rcu_cpu *krcp;
3364 	bool success;
3365 
3366 	/*
3367 	 * Please note that the head-less variant has a limitation,
3368 	 * hence the clear rule for such objects: they may be used
3369 	 * only from contexts in which sleeping is allowed
3370 	 * (might_sleep()). For all other places, please embed an
3371 	 * rcu_head into your data.
3372 	 */
3373 	if (!head)
3374 		might_sleep();
3375 
3376 	// Queue the object but don't yet schedule the batch.
3377 	if (debug_rcu_head_queue(ptr)) {
3378 		// Probable double kfree_rcu(), just leak.
3379 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3380 			  __func__, head);
3381 
3382 		// Mark as success and leave.
3383 		return;
3384 	}
3385 
3386 	kasan_record_aux_stack_noalloc(ptr);
3387 	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3388 	if (!success) {
3389 		run_page_cache_worker(krcp);
3390 
3391 		if (head == NULL)
3392 			// Inline if kvfree_rcu(one_arg) call.
3393 			goto unlock_return;
3394 
3395 		head->func = ptr;
3396 		head->next = krcp->head;
3397 		WRITE_ONCE(krcp->head, head);
3398 		atomic_inc(&krcp->head_count);
3399 
3400 		// Take a snapshot for this krcp.
3401 		krcp->head_gp_snap = get_state_synchronize_rcu();
3402 		success = true;
3403 	}
3404 
3405 	/*
3406 	 * The kvfree_rcu() caller considers the pointer freed at this point
3407 	 * and likely removes any references to it. Since the actual slab
3408 	 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
3409 	 * this object (no scanning or false positives reporting).
3410 	 */
3411 	kmemleak_ignore(ptr);
3412 
3413 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3414 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3415 		schedule_delayed_monitor_work(krcp);
3416 
3417 unlock_return:
3418 	krc_this_cpu_unlock(krcp, flags);
3419 
3420 	/*
3421 	 * Inline kvfree() after synchronize_rcu(). We can do
3422 	 * it from might_sleep() context only, so the current
3423 	 * CPU can pass through a quiescent state.
3424 	 */
3425 	if (!success) {
3426 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3427 		synchronize_rcu();
3428 		kvfree(ptr);
3429 	}
3430 }
3431 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
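
/*
 * Illustrative sketch (not part of this file): a typical caller reaches
 * kvfree_call_rcu() through the kvfree_rcu() macro.  The structure and
 * field names below are made up for the example.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void drop_foo(struct foo *fp)
 *	{
 *		// Two-argument form: queues fp on the bulk or linked-list
 *		// channels above and frees it after a grace period.
 *		kvfree_rcu(fp, rh);
 *	}
 *
 * The head-less (single-pointer) variant arrives here with a NULL
 * rcu_head, so it may be used only from sleepable context, falling back
 * to synchronize_rcu() plus kvfree() when no page can be allocated.
 */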
3432 
3433 static unsigned long
3434 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3435 {
3436 	int cpu;
3437 	unsigned long count = 0;
3438 
3439 	/* Snapshot count of all CPUs */
3440 	for_each_possible_cpu(cpu) {
3441 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3442 
3443 		count += krc_count(krcp);
3444 		count += READ_ONCE(krcp->nr_bkv_objs);
3445 		atomic_set(&krcp->backoff_page_cache_fill, 1);
3446 	}
3447 
3448 	return count == 0 ? SHRINK_EMPTY : count;
3449 }
3450 
3451 static unsigned long
3452 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3453 {
3454 	int cpu, freed = 0;
3455 
3456 	for_each_possible_cpu(cpu) {
3457 		int count;
3458 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3459 
3460 		count = krc_count(krcp);
3461 		count += drain_page_cache(krcp);
3462 		kfree_rcu_monitor(&krcp->monitor_work.work);
3463 
3464 		sc->nr_to_scan -= count;
3465 		freed += count;
3466 
3467 		if (sc->nr_to_scan <= 0)
3468 			break;
3469 	}
3470 
3471 	return freed == 0 ? SHRINK_STOP : freed;
3472 }
3473 
3474 static struct shrinker kfree_rcu_shrinker = {
3475 	.count_objects = kfree_rcu_shrink_count,
3476 	.scan_objects = kfree_rcu_shrink_scan,
3477 	.batch = 0,
3478 	.seeks = DEFAULT_SEEKS,
3479 };
3480 
3481 void __init kfree_rcu_scheduler_running(void)
3482 {
3483 	int cpu;
3484 
3485 	for_each_possible_cpu(cpu) {
3486 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3487 
3488 		if (need_offload_krc(krcp))
3489 			schedule_delayed_monitor_work(krcp);
3490 	}
3491 }
3492 
3493 /*
3494  * During early boot, any blocking grace-period wait automatically
3495  * implies a grace period.
3496  *
3497  * Later on, this could in theory be the case for kernels built with
3498  * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3499  * is not a common case.  Furthermore, this optimization would cause
3500  * the rcu_gp_oldstate structure to expand by 50%, so this potential
3501  * grace-period optimization is ignored once the scheduler is running.
3502  */
3503 static int rcu_blocking_is_gp(void)
3504 {
3505 	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) {
3506 		might_sleep();
3507 		return false;
3508 	}
3509 	return true;
3510 }
3511 
3512 /**
3513  * synchronize_rcu - wait until a grace period has elapsed.
3514  *
3515  * Control will return to the caller some time after a full grace
3516  * period has elapsed, in other words after all currently executing RCU
3517  * read-side critical sections have completed.  Note, however, that
3518  * upon return from synchronize_rcu(), the caller might well be executing
3519  * concurrently with new RCU read-side critical sections that began while
3520  * synchronize_rcu() was waiting.
3521  *
3522  * RCU read-side critical sections are delimited by rcu_read_lock()
3523  * and rcu_read_unlock(), and may be nested.  In addition, but only in
3524  * v5.0 and later, regions of code across which interrupts, preemption,
3525  * or softirqs have been disabled also serve as RCU read-side critical
3526  * sections.  This includes hardware interrupt handlers, softirq handlers,
3527  * and NMI handlers.
3528  *
3529  * Note that this guarantee implies further memory-ordering guarantees.
3530  * On systems with more than one CPU, when synchronize_rcu() returns,
3531  * each CPU is guaranteed to have executed a full memory barrier since
3532  * the end of its last RCU read-side critical section whose beginning
3533  * preceded the call to synchronize_rcu().  In addition, each CPU having
3534  * an RCU read-side critical section that extends beyond the return from
3535  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3536  * after the beginning of synchronize_rcu() and before the beginning of
3537  * that RCU read-side critical section.  Note that these guarantees include
3538  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3539  * that are executing in the kernel.
3540  *
3541  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3542  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3543  * to have executed a full memory barrier during the execution of
3544  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3545  * again only if the system has more than one CPU).
3546  *
3547  * Implementation of these memory-ordering guarantees is described here:
3548  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3549  */
3550 void synchronize_rcu(void)
3551 {
3552 	unsigned long flags;
3553 	struct rcu_node *rnp;
3554 
3555 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3556 			 lock_is_held(&rcu_lock_map) ||
3557 			 lock_is_held(&rcu_sched_lock_map),
3558 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3559 	if (!rcu_blocking_is_gp()) {
3560 		if (rcu_gp_is_expedited())
3561 			synchronize_rcu_expedited();
3562 		else
3563 			wait_rcu_gp(call_rcu_hurry);
3564 		return;
3565 	}
3566 
3567 	// Context allows vacuous grace periods.
3568 	// Note well that this code runs with !PREEMPT && !SMP.
3569 	// In addition, all code that advances grace periods runs at
3570 	// process level.  Therefore, this normal GP overlaps with other
3571 	// normal GPs only by being fully nested within them, which allows
3572 	// reuse of ->gp_seq_polled_snap.
3573 	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3574 	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3575 
3576 	// Update the normal grace-period counters to record
3577 	// this grace period, but only those used by the boot CPU.
3578 	// The rcu_scheduler_starting() will take care of the rest of
3579 	// these counters.
3580 	local_irq_save(flags);
3581 	WARN_ON_ONCE(num_online_cpus() > 1);
3582 	rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3583 	for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3584 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3585 	local_irq_restore(flags);
3586 }
3587 EXPORT_SYMBOL_GPL(synchronize_rcu);
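
/*
 * Illustrative sketch (not part of this file): the classic updater-side
 * pattern that relies on synchronize_rcu().  The structure, global
 * pointer, lock, and helper names below are made up for the example.
 *
 *	struct foo {
 *		int data;
 *	};
 *	static struct foo __rcu *global_foo;
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static void update_foo(struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		spin_lock(&foo_lock);
 *		old_fp = rcu_dereference_protected(global_foo,
 *						   lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(global_foo, new_fp);
 *		spin_unlock(&foo_lock);
 *
 *		synchronize_rcu();	// Wait for all pre-existing readers.
 *		kfree(old_fp);		// Now safe to free the old version.
 *	}
 */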
3588 
3589 /**
3590  * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3591  * @rgosp: Place to put state cookie
3592  *
3593  * Stores into @rgosp a value that will always be treated by functions
3594  * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3595  * has already completed.
3596  */
3597 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3598 {
3599 	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3600 	rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3601 }
3602 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
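
/*
 * Illustrative sketch (not part of this file): a cached cookie can be
 * pre-initialized with get_completed_synchronize_rcu_full() so that the
 * very first poll_state_synchronize_rcu_full() on it reports completion.
 * The structure and field names are made up for the example.
 *
 *	struct foo_cache {
 *		struct rcu_gp_oldstate gp_state;
 *	};
 *
 *	static void foo_cache_init(struct foo_cache *fc)
 *	{
 *		// Nothing is waiting yet, so mark the state as completed.
 *		get_completed_synchronize_rcu_full(&fc->gp_state);
 *	}
 */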
3603 
3604 /**
3605  * get_state_synchronize_rcu - Snapshot current RCU state
3606  *
3607  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3608  * or poll_state_synchronize_rcu() to determine whether or not a full
3609  * grace period has elapsed in the meantime.
3610  */
3611 unsigned long get_state_synchronize_rcu(void)
3612 {
3613 	/*
3614 	 * Any prior manipulation of RCU-protected data must happen
3615 	 * before the load from ->gp_seq.
3616 	 */
3617 	smp_mb();  /* ^^^ */
3618 	return rcu_seq_snap(&rcu_state.gp_seq_polled);
3619 }
3620 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
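
/*
 * Illustrative sketch (not part of this file): the polled grace-period
 * pattern built on get_state_synchronize_rcu().  The helper and field
 * names are made up, and obj is assumed to embed a struct rcu_head
 * named rh.
 *
 *	unsigned long cookie;
 *
 *	remove_object_from_structure(obj);	// Hypothetical helper.
 *	cookie = get_state_synchronize_rcu();
 *	// ...do other work...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(obj);		// A grace period has already elapsed.
 *	else
 *		kfree_rcu(obj, rh);	// Otherwise defer the free.
 */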
3621 
3622 /**
3623  * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3624  * @rgosp: location to place combined normal/expedited grace-period state
3625  *
3626  * Places the normal and expedited grace-period states in @rgosp.  This
3627  * state value can be passed to a later call to cond_synchronize_rcu_full()
3628  * or poll_state_synchronize_rcu_full() to determine whether or not a
3629  * grace period (whether normal or expedited) has elapsed in the meantime.
3630  * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3631  * long, but is guaranteed to see all grace periods.  In contrast, the
3632  * combined state occupies less memory, but can sometimes fail to take
3633  * grace periods into account.
3634  *
3635  * This does not guarantee that the needed grace period will actually
3636  * start.
3637  */
3638 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3639 {
3640 	struct rcu_node *rnp = rcu_get_root();
3641 
3642 	/*
3643 	 * Any prior manipulation of RCU-protected data must happen
3644 	 * before the loads from ->gp_seq and ->expedited_sequence.
3645 	 */
3646 	smp_mb();  /* ^^^ */
3647 	rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3648 	rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3649 }
3650 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
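
/*
 * Illustrative sketch (not part of this file): the same polled pattern
 * as above, but using the full-state API, which cannot miss expedited
 * grace periods.  The helper name is made up for the example.
 *
 *	struct rcu_gp_oldstate rgos;
 *
 *	remove_object_from_structure(obj);	// Hypothetical helper.
 *	get_state_synchronize_rcu_full(&rgos);
 *	// ...later...
 *	cond_synchronize_rcu_full(&rgos);	// Waits only if needed.
 *	kfree(obj);
 */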
3651 
3652 /*
3653  * Helper function for start_poll_synchronize_rcu() and
3654  * start_poll_synchronize_rcu_full().
3655  */
3656 static void start_poll_synchronize_rcu_common(void)
3657 {
3658 	unsigned long flags;
3659 	bool needwake;
3660 	struct rcu_data *rdp;
3661 	struct rcu_node *rnp;
3662 
3663 	lockdep_assert_irqs_enabled();
3664 	local_irq_save(flags);
3665 	rdp = this_cpu_ptr(&rcu_data);
3666 	rnp = rdp->mynode;
3667 	raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3668 	// Note it is possible for a grace period to have elapsed between
3669 	// the above call to get_state_synchronize_rcu() and the below call
3670 	// to rcu_seq_snap.  This is OK, the worst that happens is that we
3671 	// get a grace period that no one needed.  These accesses are ordered
3672 	// by smp_mb(), and we are accessing them in the opposite order
3673 	// from which they are updated at grace-period start, as required.
3674 	needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3675 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3676 	if (needwake)
3677 		rcu_gp_kthread_wake();
3678 }
3679 
3680 /**
3681  * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3682  *
3683  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3684  * or poll_state_synchronize_rcu() to determine whether or not a full
3685  * grace period has elapsed in the meantime.  If the needed grace period
3686  * is not already slated to start, notifies RCU core of the need for that
3687  * grace period.
3688  *
3689  * Interrupts must be enabled for the case where it is necessary to awaken
3690  * the grace-period kthread.
3691  */
3692 unsigned long start_poll_synchronize_rcu(void)
3693 {
3694 	unsigned long gp_seq = get_state_synchronize_rcu();
3695 
3696 	start_poll_synchronize_rcu_common();
3697 	return gp_seq;
3698 }
3699 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
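
/*
 * Illustrative sketch (not part of this file): unlike
 * get_state_synchronize_rcu(), this variant also nudges RCU into
 * starting the needed grace period, which suits callers that never
 * block.  The field and helper names are made up for the example.
 *
 *	// From process context with interrupts enabled:
 *	obj->gp_cookie = start_poll_synchronize_rcu();
 *
 *	// Later, perhaps from a periodic worker:
 *	if (poll_state_synchronize_rcu(obj->gp_cookie))
 *		finish_reclaim(obj);		// Hypothetical helper.
 */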
3700 
3701 /**
3702  * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3703  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3704  *
3705  * Places the normal and expedited grace-period states in *@rgosp.  This
3706  * state value can be passed to a later call to cond_synchronize_rcu_full()
3707  * or poll_state_synchronize_rcu_full() to determine whether or not a
3708  * grace period (whether normal or expedited) has elapsed in the meantime.
3709  * If the needed grace period is not already slated to start, notifies
3710  * RCU core of the need for that grace period.
3711  *
3712  * Interrupts must be enabled for the case where it is necessary to awaken
3713  * the grace-period kthread.
3714  */
3715 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3716 {
3717 	get_state_synchronize_rcu_full(rgosp);
3718 
3719 	start_poll_synchronize_rcu_common();
3720 }
3721 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3722 
3723 /**
3724  * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3725  * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3726  *
3727  * If a full RCU grace period has elapsed since the earlier call from
3728  * which @oldstate was obtained, return @true, otherwise return @false.
3729  * If @false is returned, it is the caller's responsibility to invoke this
3730  * function later on until it does return @true.  Alternatively, the caller
3731  * can explicitly wait for a grace period, for example, by passing @oldstate
3732  * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3733  * on the one hand or by directly invoking either synchronize_rcu() or
3734  * synchronize_rcu_expedited() on the other.
3735  *
3736  * Yes, this function does not take counter wrap into account.
3737  * But counter wrap is harmless.  If the counter wraps, we have waited for
3738  * more than a billion grace periods (and way more on a 64-bit system!).
3739  * Those needing to keep old state values for very long time periods
3740  * (many hours even on 32-bit systems) should check them occasionally and
3741  * either refresh them or set a flag indicating that the grace period has
3742  * completed.  Alternatively, they can use get_completed_synchronize_rcu()
3743  * to get a guaranteed-completed grace-period state.
3744  *
3745  * In addition, because oldstate compresses the grace-period state for
3746  * both normal and expedited grace periods into a single unsigned long,
3747  * it can miss a grace period when synchronize_rcu() runs concurrently
3748  * with synchronize_rcu_expedited().  If this is unacceptable, please
3749  * instead use the _full() variant of these polling APIs.
3750  *
3751  * This function provides the same memory-ordering guarantees that
3752  * would be provided by a synchronize_rcu() that was invoked at the call
3753  * to the function that provided @oldstate, and that returned at the end
3754  * of this function.
3755  */
3756 bool poll_state_synchronize_rcu(unsigned long oldstate)
3757 {
3758 	if (oldstate == RCU_GET_STATE_COMPLETED ||
3759 	    rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3760 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3761 		return true;
3762 	}
3763 	return false;
3764 }
3765 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3766 
3767 /**
3768  * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3769  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3770  *
3771  * If a full RCU grace period has elapsed since the earlier call from
3772  * which *rgosp was obtained, return @true, otherwise return @false.
3773  * If @false is returned, it is the caller's responsibility to invoke this
3774  * function later on until it does return @true.  Alternatively, the caller
3775  * can explicitly wait for a grace period, for example, by passing @rgosp
3776  * to cond_synchronize_rcu_full() or by directly invoking synchronize_rcu().
3777  *
3778  * Yes, this function does not take counter wrap into account.
3779  * But counter wrap is harmless.  If the counter wraps, we have waited
3780  * for more than a billion grace periods (and way more on a 64-bit
3781  * system!).  Those needing to keep rcu_gp_oldstate values for very
3782  * long time periods (many hours even on 32-bit systems) should check
3783  * them occasionally and either refresh them or set a flag indicating
3784  * that the grace period has completed.  Alternatively, they can use
3785  * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3786  * grace-period state.
3787  *
3788  * This function provides the same memory-ordering guarantees that would
3789  * be provided by a synchronize_rcu() that was invoked at the call to
3790  * the function that provided @rgosp, and that returned at the end of this
3791  * function.  And this guarantee requires that the root rcu_node structure's
3792  * ->gp_seq field be checked instead of that of the rcu_state structure.
3793  * The problem is that the just-ending grace-period's callbacks can be
3794  * invoked between the time that the root rcu_node structure's ->gp_seq
3795  * field is updated and the time that the rcu_state structure's ->gp_seq
3796  * field is updated.  Therefore, if a single synchronize_rcu() is to
3797  * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3798  * then the root rcu_node structure is the one that needs to be polled.
3799  */
3800 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3801 {
3802 	struct rcu_node *rnp = rcu_get_root();
3803 
3804 	smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3805 	if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3806 	    rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3807 	    rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3808 	    rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3809 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3810 		return true;
3811 	}
3812 	return false;
3813 }
3814 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
3815 
3816 /**
3817  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3818  * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3819  *
3820  * If a full RCU grace period has elapsed since the earlier call to
3821  * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3822  * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3823  *
3824  * Yes, this function does not take counter wrap into account.
3825  * But counter wrap is harmless.  If the counter wraps, we have waited for
3826  * more than 2 billion grace periods (and way more on a 64-bit system!),
3827  * so waiting for a couple of additional grace periods should be just fine.
3828  *
3829  * This function provides the same memory-ordering guarantees that
3830  * would be provided by a synchronize_rcu() that was invoked at the call
3831  * to the function that provided @oldstate and that returned at the end
3832  * of this function.
3833  */
3834 void cond_synchronize_rcu(unsigned long oldstate)
3835 {
3836 	if (!poll_state_synchronize_rcu(oldstate))
3837 		synchronize_rcu();
3838 }
3839 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
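
/*
 * Illustrative sketch (not part of this file): cond_synchronize_rcu()
 * lets a slow path piggy-back on any grace period that elapsed while
 * the caller was busy elsewhere.  The helper name is made up for the
 * example.
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_lengthy_unrelated_work();		// Hypothetical helper.
 *
 *	// Blocks only if no grace period elapsed in the meantime.
 *	cond_synchronize_rcu(cookie);
 *	kfree(old_obj);
 */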
3840 
3841 /**
3842  * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3843  * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3844  *
3845  * If a full RCU grace period has elapsed since the call to
3846  * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3847  * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3848  * obtained, just return.  Otherwise, invoke synchronize_rcu() to wait
3849  * for a full grace period.
3850  *
3851  * Yes, this function does not take counter wrap into account.
3852  * But counter wrap is harmless.  If the counter wraps, we have waited for
3853  * more than 2 billion grace periods (and way more on a 64-bit system!),
3854  * so waiting for a couple of additional grace periods should be just fine.
3855  *
3856  * This function provides the same memory-ordering guarantees that
3857  * would be provided by a synchronize_rcu() that was invoked at the call
3858  * to the function that provided @rgosp and that returned at the end of
3859  * this function.
3860  */
3861 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3862 {
3863 	if (!poll_state_synchronize_rcu_full(rgosp))
3864 		synchronize_rcu();
3865 }
3866 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3867 
3868 /*
3869  * Check to see if there is any immediate RCU-related work to be done by
3870  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3871  * in order of increasing expense: checks that can be carried out against
3872  * CPU-local state are performed first.  However, we must check for CPU
3873  * stalls first, else we might not get a chance.
3874  */
3875 static int rcu_pending(int user)
3876 {
3877 	bool gp_in_progress;
3878 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3879 	struct rcu_node *rnp = rdp->mynode;
3880 
3881 	lockdep_assert_irqs_disabled();
3882 
3883 	/* Check for CPU stalls, if enabled. */
3884 	check_cpu_stall(rdp);
3885 
3886 	/* Does this CPU need a deferred NOCB wakeup? */
3887 	if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3888 		return 1;
3889 
3890 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3891 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3892 		return 0;
3893 
3894 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3895 	gp_in_progress = rcu_gp_in_progress();
3896 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3897 		return 1;
3898 
3899 	/* Does this CPU have callbacks ready to invoke? */
3900 	if (!rcu_rdp_is_offloaded(rdp) &&
3901 	    rcu_segcblist_ready_cbs(&rdp->cblist))
3902 		return 1;
3903 
3904 	/* Has RCU gone idle with this CPU needing another grace period? */
3905 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3906 	    !rcu_rdp_is_offloaded(rdp) &&
3907 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3908 		return 1;
3909 
3910 	/* Have RCU grace period completed or started?  */
3911 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3912 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3913 		return 1;
3914 
3915 	/* nothing to do */
3916 	return 0;
3917 }
3918 
3919 /*
3920  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3921  * the compiler is expected to optimize this away.
3922  */
3923 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3924 {
3925 	trace_rcu_barrier(rcu_state.name, s, cpu,
3926 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3927 }
3928 
3929 /*
3930  * RCU callback function for rcu_barrier().  If we are last, wake
3931  * up the task executing rcu_barrier().
3932  *
3933  * Note that the value of rcu_state.barrier_sequence must be captured
3934  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3935  * other CPUs might count the value down to zero before this CPU gets
3936  * around to invoking rcu_barrier_trace(), which might result in bogus
3937  * data from the next instance of rcu_barrier().
3938  */
3939 static void rcu_barrier_callback(struct rcu_head *rhp)
3940 {
3941 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3942 
3943 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3944 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3945 		complete(&rcu_state.barrier_completion);
3946 	} else {
3947 		rcu_barrier_trace(TPS("CB"), -1, s);
3948 	}
3949 }
3950 
3951 /*
3952  * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3953  */
3954 static void rcu_barrier_entrain(struct rcu_data *rdp)
3955 {
3956 	unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3957 	unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3958 	bool wake_nocb = false;
3959 	bool was_alldone = false;
3960 
3961 	lockdep_assert_held(&rcu_state.barrier_lock);
3962 	if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3963 		return;
3964 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3965 	rdp->barrier_head.func = rcu_barrier_callback;
3966 	debug_rcu_head_queue(&rdp->barrier_head);
3967 	rcu_nocb_lock(rdp);
3968 	/*
3969 	 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular
3970 	 * queue. This way we don't wait for bypass timer that can reach seconds
3971 	 * if it's fully lazy.
3972 	 */
3973 	was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3974 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3975 	wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3976 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3977 		atomic_inc(&rcu_state.barrier_cpu_count);
3978 	} else {
3979 		debug_rcu_head_unqueue(&rdp->barrier_head);
3980 		rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3981 	}
3982 	rcu_nocb_unlock(rdp);
3983 	if (wake_nocb)
3984 		wake_nocb_gp(rdp, false);
3985 	smp_store_release(&rdp->barrier_seq_snap, gseq);
3986 }
3987 
3988 /*
3989  * Called with preemption disabled, and from cross-cpu IRQ context.
3990  */
3991 static void rcu_barrier_handler(void *cpu_in)
3992 {
3993 	uintptr_t cpu = (uintptr_t)cpu_in;
3994 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3995 
3996 	lockdep_assert_irqs_disabled();
3997 	WARN_ON_ONCE(cpu != rdp->cpu);
3998 	WARN_ON_ONCE(cpu != smp_processor_id());
3999 	raw_spin_lock(&rcu_state.barrier_lock);
4000 	rcu_barrier_entrain(rdp);
4001 	raw_spin_unlock(&rcu_state.barrier_lock);
4002 }
4003 
4004 /**
4005  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
4006  *
4007  * Note that this primitive does not necessarily wait for an RCU grace period
4008  * to complete.  For example, if there are no RCU callbacks queued anywhere
4009  * in the system, then rcu_barrier() is within its rights to return
4010  * immediately, without waiting for anything, much less an RCU grace period.
4011  */
4012 void rcu_barrier(void)
4013 {
4014 	uintptr_t cpu;
4015 	unsigned long flags;
4016 	unsigned long gseq;
4017 	struct rcu_data *rdp;
4018 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4019 
4020 	rcu_barrier_trace(TPS("Begin"), -1, s);
4021 
4022 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
4023 	mutex_lock(&rcu_state.barrier_mutex);
4024 
4025 	/* Did someone else do our work for us? */
4026 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4027 		rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
4028 		smp_mb(); /* caller's subsequent code after above check. */
4029 		mutex_unlock(&rcu_state.barrier_mutex);
4030 		return;
4031 	}
4032 
4033 	/* Mark the start of the barrier operation. */
4034 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4035 	rcu_seq_start(&rcu_state.barrier_sequence);
4036 	gseq = rcu_state.barrier_sequence;
4037 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4038 
4039 	/*
4040 	 * Initialize the count to two rather than to zero in order
4041 	 * to avoid a too-soon return to zero in case of an immediate
4042 	 * invocation of the just-enqueued callback (or preemption of
4043 	 * this task).  Exclude CPU-hotplug operations to ensure that no
4044 	 * offline non-offloaded CPU has callbacks queued.
4045 	 */
4046 	init_completion(&rcu_state.barrier_completion);
4047 	atomic_set(&rcu_state.barrier_cpu_count, 2);
4048 	raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4049 
4050 	/*
4051 	 * Force each CPU with callbacks to register a new callback.
4052 	 * When that callback is invoked, we will know that all of the
4053 	 * corresponding CPU's preceding callbacks have been invoked.
4054 	 */
4055 	for_each_possible_cpu(cpu) {
4056 		rdp = per_cpu_ptr(&rcu_data, cpu);
4057 retry:
4058 		if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4059 			continue;
4060 		raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4061 		if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4062 			WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4063 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4064 			rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
4065 			continue;
4066 		}
4067 		if (!rcu_rdp_cpu_online(rdp)) {
4068 			rcu_barrier_entrain(rdp);
4069 			WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4070 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4071 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
4072 			continue;
4073 		}
4074 		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4075 		if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
4076 			schedule_timeout_uninterruptible(1);
4077 			goto retry;
4078 		}
4079 		WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4080 		rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
4081 	}
4082 
4083 	/*
4084 	 * Now that we have an rcu_barrier_callback() callback on each
4085 	 * CPU, and thus each counted, remove the initial count.
4086 	 */
4087 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4088 		complete(&rcu_state.barrier_completion);
4089 
4090 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4091 	wait_for_completion(&rcu_state.barrier_completion);
4092 
4093 	/* Mark the end of the barrier operation. */
4094 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4095 	rcu_seq_end(&rcu_state.barrier_sequence);
4096 	gseq = rcu_state.barrier_sequence;
4097 	for_each_possible_cpu(cpu) {
4098 		rdp = per_cpu_ptr(&rcu_data, cpu);
4099 
4100 		WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4101 	}
4102 
4103 	/* Other rcu_barrier() invocations can now safely proceed. */
4104 	mutex_unlock(&rcu_state.barrier_mutex);
4105 }
4106 EXPORT_SYMBOL_GPL(rcu_barrier);
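
/*
 * Illustrative sketch (not part of this file): the classic use of
 * rcu_barrier() is in module-exit code, ensuring that callbacks queued
 * via call_rcu() have finished before the callback function's text
 * disappears.  The function and helper names are made up for the example.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		stop_queueing_new_callbacks();	// Hypothetical helper.
 *		rcu_barrier();			// Wait for queued callbacks.
 *		// Now safe to unload: no foo callbacks remain in flight.
 *	}
 *	module_exit(foo_exit);
 */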
4107 
4108 static unsigned long rcu_barrier_last_throttle;
4109 
4110 /**
4111  * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
4112  *
4113  * This can be thought of as guard rails around rcu_barrier() that
4114  * permits unrestricted userspace use, at least assuming the hardware's
4115  * try_cmpxchg() is robust.  There will be at most one call per second to
4116  * rcu_barrier() system-wide from use of this function, which means that
4117  * callers might needlessly wait a second or three.
4118  *
4119  * This is intended for use by test suites to avoid OOM by flushing RCU
4120  * callbacks from the previous test before starting the next.  See the
4121  * rcutree.do_rcu_barrier module parameter for more information.
4122  *
4123  * Why not simply make rcu_barrier() more scalable?  That might be
4124  * the eventual endpoint, but let's keep it simple for the time being.
4125  * Note that the module parameter infrastructure serializes calls to a
4126  * given .set() function, but should concurrent .set() invocation ever be
4127  * possible, we are ready!
4128  */
4129 static void rcu_barrier_throttled(void)
4130 {
4131 	unsigned long j = jiffies;
4132 	unsigned long old = READ_ONCE(rcu_barrier_last_throttle);
4133 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4134 
4135 	while (time_in_range(j, old, old + HZ / 16) ||
4136 	       !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) {
4137 		schedule_timeout_idle(HZ / 16);
4138 		if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4139 			smp_mb(); /* caller's subsequent code after above check. */
4140 			return;
4141 		}
4142 		j = jiffies;
4143 		old = READ_ONCE(rcu_barrier_last_throttle);
4144 	}
4145 	rcu_barrier();
4146 }
4147 
4148 /*
4149  * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier
4150  * request arrives.  We insist on a true value to allow for possible
4151  * future expansion.
4152  */
4153 static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp)
4154 {
4155 	bool b;
4156 	int ret;
4157 
4158 	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
4159 		return -EAGAIN;
4160 	ret = kstrtobool(val, &b);
4161 	if (!ret && b) {
4162 		atomic_inc((atomic_t *)kp->arg);
4163 		rcu_barrier_throttled();
4164 		atomic_dec((atomic_t *)kp->arg);
4165 	}
4166 	return ret;
4167 }
4168 
4169 /*
4170  * Output the number of outstanding rcutree.do_rcu_barrier requests.
4171  */
4172 static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp)
4173 {
4174 	return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
4175 }
4176 
4177 static const struct kernel_param_ops do_rcu_barrier_ops = {
4178 	.set = param_set_do_rcu_barrier,
4179 	.get = param_get_do_rcu_barrier,
4180 };
4181 static atomic_t do_rcu_barrier;
4182 module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644);
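
/*
 * Illustrative sketch (not part of this file): with the above module
 * parameter in place, a test harness can request a throttled
 * rcu_barrier() from userspace, for example via:
 *
 *	echo 1 > /sys/module/rcutree/parameters/do_rcu_barrier
 *
 * Reading the parameter back reports the number of such requests that
 * are currently outstanding.
 */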
4183 
4184 /*
4185  * Compute the mask of online CPUs for the specified rcu_node structure.
4186  * This will not be stable unless the rcu_node structure's ->lock is
4187  * held, but the bit corresponding to the current CPU will be stable
4188  * in most contexts.
4189  */
4190 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
4191 {
4192 	return READ_ONCE(rnp->qsmaskinitnext);
4193 }
4194 
4195 /*
4196  * Is the CPU corresponding to the specified rcu_data structure online
4197  * from RCU's perspective?  This perspective is given by that structure's
4198  * ->qsmaskinitnext field rather than by the global cpu_online_mask.
4199  */
4200 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
4201 {
4202 	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
4203 }
4204 
4205 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
4206 
4207 /*
4208  * Is the current CPU online as far as RCU is concerned?
4209  *
4210  * Disable preemption to avoid false positives that could otherwise
4211  * happen due to the current CPU number being sampled, this task being
4212  * preempted, its old CPU being taken offline, resuming on some other CPU,
4213  * then determining that its old CPU is now offline.
4214  *
4215  * Disable checking if in an NMI handler because we cannot safely
4216  * report errors from NMI handlers anyway.  In addition, it is OK to use
4217  * RCU on an offline processor during initial boot, hence the check for
4218  * rcu_scheduler_fully_active.
4219  */
4220 bool rcu_lockdep_current_cpu_online(void)
4221 {
4222 	struct rcu_data *rdp;
4223 	bool ret = false;
4224 
4225 	if (in_nmi() || !rcu_scheduler_fully_active)
4226 		return true;
4227 	preempt_disable_notrace();
4228 	rdp = this_cpu_ptr(&rcu_data);
4229 	/*
4230 	 * Strictly, we care here about the case where the current CPU is
4231 	 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
4232 	 * not being up to date. So arch_spin_is_locked() might have a
4233 	 * false positive if it's held by some *other* CPU, but that's
4234 	 * OK because that just means a false *negative* on the warning.
4235 	 */
4236 	if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
4237 		ret = true;
4238 	preempt_enable_notrace();
4239 	return ret;
4240 }
4241 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
4242 
4243 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
4244 
4245 // Has rcu_init() been invoked?  This is used (for example) to determine
4246 // whether spinlocks may be acquired safely.
4247 static bool rcu_init_invoked(void)
4248 {
4249 	return !!rcu_state.n_online_cpus;
4250 }
4251 
4252 /*
4253  * All CPUs for the specified rcu_node structure have gone offline,
4254  * and all tasks that were preempted within an RCU read-side critical
4255  * section while running on one of those CPUs have since exited their RCU
4256  * read-side critical section.  Some other CPU is reporting this fact with
4257  * the specified rcu_node structure's ->lock held and interrupts disabled.
4258  * This function therefore goes up the tree of rcu_node structures,
4259  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
4260  * the leaf rcu_node structure's ->qsmaskinit field has already been
4261  * updated.
4262  *
4263  * This function does check that the specified rcu_node structure has
4264  * all CPUs offline and no blocked tasks, so it is OK to invoke it
4265  * prematurely.  That said, invoking it after the fact will cost you
4266  * a needless lock acquisition.  So once it has done its work, don't
4267  * invoke it again.
4268  */
4269 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
4270 {
4271 	long mask;
4272 	struct rcu_node *rnp = rnp_leaf;
4273 
4274 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4275 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4276 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4277 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4278 		return;
4279 	for (;;) {
4280 		mask = rnp->grpmask;
4281 		rnp = rnp->parent;
4282 		if (!rnp)
4283 			break;
4284 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4285 		rnp->qsmaskinit &= ~mask;
4286 		/* Between grace periods, so better already be zero! */
4287 		WARN_ON_ONCE(rnp->qsmask);
4288 		if (rnp->qsmaskinit) {
4289 			raw_spin_unlock_rcu_node(rnp);
4290 			/* irqs remain disabled. */
4291 			return;
4292 		}
4293 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4294 	}
4295 }
4296 
4297 /*
4298  * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4299  * first CPU in a given leaf rcu_node structure coming online.  The caller
4300  * must hold the corresponding leaf rcu_node ->lock with interrupts
4301  * disabled.
4302  */
4303 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4304 {
4305 	long mask;
4306 	long oldmask;
4307 	struct rcu_node *rnp = rnp_leaf;
4308 
4309 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4310 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4311 	for (;;) {
4312 		mask = rnp->grpmask;
4313 		rnp = rnp->parent;
4314 		if (rnp == NULL)
4315 			return;
4316 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4317 		oldmask = rnp->qsmaskinit;
4318 		rnp->qsmaskinit |= mask;
4319 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4320 		if (oldmask)
4321 			return;
4322 	}
4323 }
4324 
4325 /*
4326  * Do boot-time initialization of a CPU's per-CPU RCU data.
4327  */
4328 static void __init
4329 rcu_boot_init_percpu_data(int cpu)
4330 {
4331 	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4332 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4333 
4334 	/* Set up local state, ensuring consistent view of global state. */
4335 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4336 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4337 	WARN_ON_ONCE(ct->dynticks_nesting != 1);
4338 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
4339 	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4340 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4341 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4342 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4343 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4344 	rdp->last_sched_clock = jiffies;
4345 	rdp->cpu = cpu;
4346 	rcu_boot_init_nocb_percpu_data(rdp);
4347 }
4348 
4349 /*
4350  * Invoked early in the CPU-online process, when pretty much all services
4351  * are available.  The incoming CPU is not present.
4352  *
4353  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4354  * offline event can be happening at a given time.  Note also that we can
4355  * accept some slop in the rcu_state.gp_seq access due to the fact that this
4356  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4357  * And any offloaded callbacks are being numbered elsewhere.
4358  */
4359 int rcutree_prepare_cpu(unsigned int cpu)
4360 {
4361 	unsigned long flags;
4362 	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4363 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4364 	struct rcu_node *rnp = rcu_get_root();
4365 
4366 	/* Set up local state, ensuring consistent view of global state. */
4367 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4368 	rdp->qlen_last_fqs_check = 0;
4369 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4370 	rdp->blimit = blimit;
4371 	ct->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4372 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4373 
4374 	/*
4375 	 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4376 	 * (re-)initialized.
4377 	 */
4378 	if (!rcu_segcblist_is_enabled(&rdp->cblist))
4379 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4380 
4381 	/*
4382 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4383 	 * propagation up the rcu_node tree will happen at the beginning
4384 	 * of the next grace period.
4385 	 */
4386 	rnp = rdp->mynode;
4387 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4388 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4389 	rdp->gp_seq_needed = rdp->gp_seq;
4390 	rdp->cpu_no_qs.b.norm = true;
4391 	rdp->core_needs_qs = false;
4392 	rdp->rcu_iw_pending = false;
4393 	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4394 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4395 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4396 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4397 	rcu_spawn_one_boost_kthread(rnp);
4398 	rcu_spawn_cpu_nocb_kthread(cpu);
4399 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4400 
4401 	return 0;
4402 }
4403 
4404 /*
4405  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4406  */
4407 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4408 {
4409 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4410 
4411 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4412 }
4413 
4414 /*
4415  * Has the specified (known valid) CPU ever been fully online?
4416  */
4417 bool rcu_cpu_beenfullyonline(int cpu)
4418 {
4419 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4420 
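	/* Pairs with the smp_store_release() of ->beenonline in rcutree_report_cpu_starting(). */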
4421 	return smp_load_acquire(&rdp->beenonline);
4422 }
4423 
4424 /*
4425  * Near the end of the CPU-online process.  Pretty much all services
4426  * enabled, and the CPU is now very much alive.
4427  */
4428 int rcutree_online_cpu(unsigned int cpu)
4429 {
4430 	unsigned long flags;
4431 	struct rcu_data *rdp;
4432 	struct rcu_node *rnp;
4433 
4434 	rdp = per_cpu_ptr(&rcu_data, cpu);
4435 	rnp = rdp->mynode;
4436 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4437 	rnp->ffmask |= rdp->grpmask;
4438 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4439 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4440 		return 0; /* Too early in boot for scheduler work. */
4441 	sync_sched_exp_online_cleanup(cpu);
4442 	rcutree_affinity_setting(cpu, -1);
4443 
4444 	// Stop-machine done, so allow nohz_full to disable tick.
4445 	tick_dep_clear(TICK_DEP_BIT_RCU);
4446 	return 0;
4447 }
4448 
4449 /*
4450  * Mark the specified CPU as being online so that subsequent grace periods
4451  * (both expedited and normal) will wait on it.  Note that this means that
4452  * incoming CPUs are not allowed to use RCU read-side critical sections
4453  * until this function is called.  Failing to observe this restriction
4454  * will result in lockdep splats.
4455  *
4456  * Note that this function is special in that it is invoked directly
4457  * from the incoming CPU rather than from the cpuhp_step mechanism.
4458  * This is because this function must be invoked at a precise location.
4459  * The incoming CPU must not yet have enabled interrupts.
4460  *
4461  * This mirrors the effects of rcutree_report_cpu_dead().
4462  */
4463 void rcutree_report_cpu_starting(unsigned int cpu)
4464 {
4465 	unsigned long mask;
4466 	struct rcu_data *rdp;
4467 	struct rcu_node *rnp;
4468 	bool newcpu;
4469 
4470 	lockdep_assert_irqs_disabled();
4471 	rdp = per_cpu_ptr(&rcu_data, cpu);
4472 	if (rdp->cpu_started)
4473 		return;
4474 	rdp->cpu_started = true;
4475 
4476 	rnp = rdp->mynode;
4477 	mask = rdp->grpmask;
4478 	arch_spin_lock(&rcu_state.ofl_lock);
4479 	rcu_dynticks_eqs_online();
4480 	raw_spin_lock(&rcu_state.barrier_lock);
4481 	raw_spin_lock_rcu_node(rnp);
4482 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4483 	raw_spin_unlock(&rcu_state.barrier_lock);
4484 	newcpu = !(rnp->expmaskinitnext & mask);
4485 	rnp->expmaskinitnext |= mask;
4486 	/* Allow lockless access for expedited grace periods. */
4487 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4488 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4489 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4490 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4491 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4492 
4493 	/* An incoming CPU should never be blocking a grace period. */
4494 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4495 		/* rcu_report_qs_rnp() *really* wants some flags to restore */
4496 		unsigned long flags;
4497 
4498 		local_irq_save(flags);
4499 		rcu_disable_urgency_upon_qs(rdp);
4500 		/* Report QS -after- changing ->qsmaskinitnext! */
4501 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4502 	} else {
4503 		raw_spin_unlock_rcu_node(rnp);
4504 	}
4505 	arch_spin_unlock(&rcu_state.ofl_lock);
4506 	smp_store_release(&rdp->beenonline, true);
4507 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4508 }
4509 
4510 /*
4511  * The outgoing CPU has no further need of RCU, so remove it from
4512  * the rcu_node tree's ->qsmaskinitnext bit masks.
4513  *
4514  * Note that this function is special in that it is invoked directly
4515  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4516  * This is because this function must be invoked at a precise location.
4517  *
4518  * This mirrors the effect of rcutree_report_cpu_starting().
4519  */
4520 void rcutree_report_cpu_dead(void)
4521 {
4522 	unsigned long flags;
4523 	unsigned long mask;
4524 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4525 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4526 
4527 	/*
4528 	 * Interrupts must remain disabled from now on and until the CPU dies, or an
4529 	 * interrupt may open a new read-side critical section while this CPU is off the QS masks.
4530 	 */
4531 	lockdep_assert_irqs_disabled();
4532 	// Do any dangling deferred wakeups.
4533 	do_nocb_deferred_wakeup(rdp);
4534 
4535 	rcu_preempt_deferred_qs(current);
4536 
4537 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4538 	mask = rdp->grpmask;
4539 	arch_spin_lock(&rcu_state.ofl_lock);
4540 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4541 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4542 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4543 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4544 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4545 		rcu_disable_urgency_upon_qs(rdp);
4546 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4547 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4548 	}
4549 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4550 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4551 	arch_spin_unlock(&rcu_state.ofl_lock);
4552 	rdp->cpu_started = false;
4553 }
4554 
4555 #ifdef CONFIG_HOTPLUG_CPU
4556 /*
4557  * The outgoing CPU has just passed through the dying-idle state, and we
4558  * are being invoked from the CPU that was IPIed to continue the offline
4559  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4560  */
4561 void rcutree_migrate_callbacks(int cpu)
4562 {
4563 	unsigned long flags;
4564 	struct rcu_data *my_rdp;
4565 	struct rcu_node *my_rnp;
4566 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4567 	bool needwake;
4568 
4569 	if (rcu_rdp_is_offloaded(rdp) ||
4570 	    rcu_segcblist_empty(&rdp->cblist))
4571 		return;  /* No callbacks to migrate. */
4572 
4573 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4574 	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4575 	rcu_barrier_entrain(rdp);
4576 	my_rdp = this_cpu_ptr(&rcu_data);
4577 	my_rnp = my_rdp->mynode;
4578 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4579 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4580 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4581 	/* Leverage recent GPs and set GP for new callbacks. */
4582 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4583 		   rcu_advance_cbs(my_rnp, my_rdp);
4584 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4585 	raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4586 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4587 	rcu_segcblist_disable(&rdp->cblist);
4588 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4589 	check_cb_ovld_locked(my_rdp, my_rnp);
4590 	if (rcu_rdp_is_offloaded(my_rdp)) {
4591 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4592 		__call_rcu_nocb_wake(my_rdp, true, flags);
4593 	} else {
4594 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4595 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4596 	}
4597 	if (needwake)
4598 		rcu_gp_kthread_wake();
4599 	lockdep_assert_irqs_enabled();
4600 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4601 		  !rcu_segcblist_empty(&rdp->cblist),
4602 		  "rcutree_migrate_callbacks: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4603 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4604 		  rcu_segcblist_first_cb(&rdp->cblist));
4605 }
4606 
4607 /*
4608  * The CPU has been completely removed, and some other CPU is reporting
4609  * this fact from process context.  Do the remainder of the cleanup.
4610  * There can only be one CPU hotplug operation at a time, so no need for
4611  * explicit locking.
4612  */
4613 int rcutree_dead_cpu(unsigned int cpu)
4614 {
4615 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4616 	// Stop-machine done, so allow nohz_full to disable tick.
4617 	tick_dep_clear(TICK_DEP_BIT_RCU);
4618 	return 0;
4619 }
4620 
4621 /*
4622  * Near the end of the offline process.  Trace the fact that this CPU
4623  * is going offline.
4624  */
4625 int rcutree_dying_cpu(unsigned int cpu)
4626 {
4627 	bool blkd;
4628 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4629 	struct rcu_node *rnp = rdp->mynode;
4630 
4631 	blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4632 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4633 			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4634 	return 0;
4635 }
4636 
4637 /*
4638  * Near the beginning of the CPU-offline process.  The CPU is still very much alive
4639  * with pretty much all services enabled.
4640  */
4641 int rcutree_offline_cpu(unsigned int cpu)
4642 {
4643 	unsigned long flags;
4644 	struct rcu_data *rdp;
4645 	struct rcu_node *rnp;
4646 
4647 	rdp = per_cpu_ptr(&rcu_data, cpu);
4648 	rnp = rdp->mynode;
4649 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4650 	rnp->ffmask &= ~rdp->grpmask;
4651 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4652 
4653 	rcutree_affinity_setting(cpu, cpu);
4654 
4655 	// nohz_full CPUs need the tick for stop-machine to work quickly
4656 	tick_dep_set(TICK_DEP_BIT_RCU);
4657 	return 0;
4658 }
4659 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
4660 
4661 /*
4662  * On non-huge systems, use expedited RCU grace periods to make suspend
4663  * and hibernation run faster.
4664  */
4665 static int rcu_pm_notify(struct notifier_block *self,
4666 			 unsigned long action, void *hcpu)
4667 {
4668 	switch (action) {
4669 	case PM_HIBERNATION_PREPARE:
4670 	case PM_SUSPEND_PREPARE:
4671 		rcu_async_hurry();
4672 		rcu_expedite_gp();
4673 		break;
4674 	case PM_POST_HIBERNATION:
4675 	case PM_POST_SUSPEND:
4676 		rcu_unexpedite_gp();
4677 		rcu_async_relax();
4678 		break;
4679 	default:
4680 		break;
4681 	}
4682 	return NOTIFY_OK;
4683 }
4684 
4685 #ifdef CONFIG_RCU_EXP_KTHREAD
4686 struct kthread_worker *rcu_exp_gp_kworker;
4687 struct kthread_worker *rcu_exp_par_gp_kworker;
4688 
4689 static void __init rcu_start_exp_gp_kworkers(void)
4690 {
4691 	const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4692 	const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4693 	struct sched_param param = { .sched_priority = kthread_prio };
4694 
4695 	rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4696 	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4697 		pr_err("Failed to create %s!\n", gp_kworker_name);
4698 		return;
4699 	}
4700 
4701 	rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4702 	if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4703 		pr_err("Failed to create %s!\n", par_gp_kworker_name);
4704 		kthread_destroy_worker(rcu_exp_gp_kworker);
4705 		return;
4706 	}
4707 
4708 	sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4709 	sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4710 				   &param);
4711 }
4712 
4713 static inline void rcu_alloc_par_gp_wq(void)
4714 {
4715 }
4716 #else /* !CONFIG_RCU_EXP_KTHREAD */
4717 struct workqueue_struct *rcu_par_gp_wq;
4718 
4719 static void __init rcu_start_exp_gp_kworkers(void)
4720 {
4721 }
4722 
4723 static inline void rcu_alloc_par_gp_wq(void)
4724 {
4725 	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4726 	WARN_ON(!rcu_par_gp_wq);
4727 }
4728 #endif /* CONFIG_RCU_EXP_KTHREAD */
4729 
4730 /*
4731  * Spawn the kthreads that handle RCU's grace periods.
4732  */
4733 static int __init rcu_spawn_gp_kthread(void)
4734 {
4735 	unsigned long flags;
4736 	struct rcu_node *rnp;
4737 	struct sched_param sp;
4738 	struct task_struct *t;
4739 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4740 
4741 	rcu_scheduler_fully_active = 1;
4742 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4743 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4744 		return 0;
4745 	if (kthread_prio) {
4746 		sp.sched_priority = kthread_prio;
4747 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4748 	}
4749 	rnp = rcu_get_root();
4750 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4751 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4752 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4753 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4754 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4755 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4756 	wake_up_process(t);
4757 	/* This is a pre-SMP initcall, so we expect a single CPU. */
4758 	WARN_ON(num_online_cpus() > 1);
4759 	/*
4760 	 * These kthreads could not be created in rcu_init() -> rcutree_prepare_cpu()
4761 	 * because rcu_scheduler_fully_active had not yet been set.
4762 	 */
4763 	rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4764 	rcu_spawn_one_boost_kthread(rdp->mynode);
4765 	rcu_spawn_core_kthreads();
4766 	/* Create kthread worker for expedited GPs */
4767 	rcu_start_exp_gp_kworkers();
4768 	return 0;
4769 }
4770 early_initcall(rcu_spawn_gp_kthread);
4771 
4772 /*
4773  * This function is invoked towards the end of the scheduler's
4774  * initialization process.  Before this is called, the idle task might
4775  * contain synchronous grace-period primitives (during which time, this idle
4776  * task is booting the system, and such primitives are no-ops).  After this
4777  * function is called, any synchronous grace-period primitives are run as
4778  * expedited, with the requesting task driving the grace period forward.
4779  * A later core_initcall(), rcu_set_runtime_mode(), will switch to full
4780  * runtime RCU functionality.
4781  */
4782 void rcu_scheduler_starting(void)
4783 {
4784 	unsigned long flags;
4785 	struct rcu_node *rnp;
4786 
4787 	WARN_ON(num_online_cpus() != 1);
4788 	WARN_ON(nr_context_switches() > 0);
4789 	rcu_test_sync_prims();
4790 
4791 	// Fix up the ->gp_seq counters.
4792 	local_irq_save(flags);
4793 	rcu_for_each_node_breadth_first(rnp)
4794 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4795 	local_irq_restore(flags);
4796 
4797 	// Switch out of early boot mode.
4798 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4799 	rcu_test_sync_prims();
4800 }
4801 
4802 /*
4803  * Helper function for rcu_init() that initializes the rcu_state structure.
4804  */
4805 static void __init rcu_init_one(void)
4806 {
4807 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4808 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4809 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4810 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4811 
4812 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4813 	int cpustride = 1;
4814 	int i;
4815 	int j;
4816 	struct rcu_node *rnp;
4817 
4818 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4819 
4820 	/* Silence gcc 4.8 false positive about array index out of range. */
4821 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4822 		panic("rcu_init_one: rcu_num_lvls out of range");
4823 
4824 	/* Initialize the level-tracking arrays. */
4825 
4826 	for (i = 1; i < rcu_num_lvls; i++)
4827 		rcu_state.level[i] =
4828 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4829 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4830 
4831 	/* Initialize the elements themselves, starting from the leaves. */
4832 
4833 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4834 		cpustride *= levelspread[i];
4835 		rnp = rcu_state.level[i];
4836 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4837 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4838 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4839 						   &rcu_node_class[i], buf[i]);
4840 			raw_spin_lock_init(&rnp->fqslock);
4841 			lockdep_set_class_and_name(&rnp->fqslock,
4842 						   &rcu_fqs_class[i], fqs[i]);
4843 			rnp->gp_seq = rcu_state.gp_seq;
4844 			rnp->gp_seq_needed = rcu_state.gp_seq;
4845 			rnp->completedqs = rcu_state.gp_seq;
4846 			rnp->qsmask = 0;
4847 			rnp->qsmaskinit = 0;
4848 			rnp->grplo = j * cpustride;
4849 			rnp->grphi = (j + 1) * cpustride - 1;
4850 			if (rnp->grphi >= nr_cpu_ids)
4851 				rnp->grphi = nr_cpu_ids - 1;
4852 			if (i == 0) {
4853 				rnp->grpnum = 0;
4854 				rnp->grpmask = 0;
4855 				rnp->parent = NULL;
4856 			} else {
4857 				rnp->grpnum = j % levelspread[i - 1];
4858 				rnp->grpmask = BIT(rnp->grpnum);
4859 				rnp->parent = rcu_state.level[i - 1] +
4860 					      j / levelspread[i - 1];
4861 			}
4862 			rnp->level = i;
4863 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4864 			rcu_init_one_nocb(rnp);
4865 			init_waitqueue_head(&rnp->exp_wq[0]);
4866 			init_waitqueue_head(&rnp->exp_wq[1]);
4867 			init_waitqueue_head(&rnp->exp_wq[2]);
4868 			init_waitqueue_head(&rnp->exp_wq[3]);
4869 			spin_lock_init(&rnp->exp_lock);
4870 			mutex_init(&rnp->boost_kthread_mutex);
4871 			raw_spin_lock_init(&rnp->exp_poll_lock);
4872 			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4873 			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4874 		}
4875 	}
4876 
4877 	init_swait_queue_head(&rcu_state.gp_wq);
4878 	init_swait_queue_head(&rcu_state.expedited_wq);
4879 	rnp = rcu_first_leaf_node();
4880 	for_each_possible_cpu(i) {
4881 		while (i > rnp->grphi)
4882 			rnp++;
4883 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4884 		rcu_boot_init_percpu_data(i);
4885 	}
4886 }
4887 
4888 /*
4889  * Force priority from the kernel command-line into range.
4890  */
4891 static void __init sanitize_kthread_prio(void)
4892 {
4893 	int kthread_prio_in = kthread_prio;
4894 
4895 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4896 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4897 		kthread_prio = 2;
4898 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4899 		kthread_prio = 1;
4900 	else if (kthread_prio < 0)
4901 		kthread_prio = 0;
4902 	else if (kthread_prio > 99)
4903 		kthread_prio = 99;
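	/*
	 * For example, a boot-time rcutree.kthread_prio=150 is limited to 99
	 * here, while on a CONFIG_RCU_BOOST kernel a value of 0 is raised to 1
	 * (or to 2 if rcutorture is built in).
	 */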
4904 
4905 	if (kthread_prio != kthread_prio_in)
4906 		pr_alert("%s: Limited prio to %d from %d\n",
4907 			 __func__, kthread_prio, kthread_prio_in);
4908 }
4909 
4910 /*
4911  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4912  * replace the definitions in tree.h because those are needed to size
4913  * the ->node array in the rcu_state structure.
4914  */
4915 void rcu_init_geometry(void)
4916 {
4917 	ulong d;
4918 	int i;
4919 	static unsigned long old_nr_cpu_ids;
4920 	int rcu_capacity[RCU_NUM_LVLS];
4921 	static bool initialized;
4922 
4923 	if (initialized) {
4924 		/*
4925 		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4926 		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4927 		 */
4928 		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4929 		return;
4930 	}
4931 
4932 	old_nr_cpu_ids = nr_cpu_ids;
4933 	initialized = true;
4934 
4935 	/*
4936 	 * Initialize any unspecified boot parameters.
4937 	 * The default values of jiffies_till_first_fqs and
4938 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4939 	 * value, which is a function of HZ, plus one for each
4940 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4941 	 */
4942 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4943 	if (jiffies_till_first_fqs == ULONG_MAX)
4944 		jiffies_till_first_fqs = d;
4945 	if (jiffies_till_next_fqs == ULONG_MAX)
4946 		jiffies_till_next_fqs = d;
4947 	adjust_jiffies_till_sched_qs();
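	/*
	 * Illustration only, assuming rcu.h defines RCU_JIFFIES_TILL_FORCE_QS
	 * as 1 + (HZ > 250) + (HZ > 500) and RCU_JIFFIES_FQS_DIV as 256: an
	 * HZ=1000 kernel with 512 possible CPUs would default both values to
	 * d = 3 + 512/256 = 5 jiffies.
	 */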
4948 
4949 	/* If the compile-time values are accurate, just leave. */
4950 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4951 	    nr_cpu_ids == NR_CPUS)
4952 		return;
4953 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4954 		rcu_fanout_leaf, nr_cpu_ids);
4955 
4956 	/*
4957 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4958 	 * and cannot exceed the number of bits in the rcu_node masks.
4959 	 * Complain and fall back to the compile-time values if this
4960 	 * limit is exceeded.
4961 	 */
4962 	if (rcu_fanout_leaf < 2 ||
4963 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4964 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4965 		WARN_ON(1);
4966 		return;
4967 	}
4968 
4969 	/*
4970 	 * Compute the number of CPUs that can be handled by an rcu_node tree
4971 	 * with the given number of levels.
4972 	 */
4973 	rcu_capacity[0] = rcu_fanout_leaf;
4974 	for (i = 1; i < RCU_NUM_LVLS; i++)
4975 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4976 
4977 	/*
4978 	 * The tree must be able to accommodate the configured number of CPUs.
4979 	 * If this limit is exceeded, fall back to the compile-time values.
4980 	 */
4981 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4982 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4983 		WARN_ON(1);
4984 		return;
4985 	}
4986 
4987 	/* Calculate the number of levels in the tree. */
4988 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4989 	}
4990 	rcu_num_lvls = i + 1;
4991 
4992 	/* Calculate the number of rcu_nodes at each level of the tree. */
4993 	for (i = 0; i < rcu_num_lvls; i++) {
4994 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4995 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4996 	}
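	/*
	 * Worked example (illustrative, assuming the common 64-bit defaults of
	 * RCU_FANOUT == 64 and rcu_fanout_leaf == 16 on a kernel whose NR_CPUS
	 * exceeds the detected CPU count): with nr_cpu_ids == 100, rcu_capacity
	 * is { 16, 1024, ... }, so rcu_num_lvls == 2, num_rcu_lvl == { 1, 7 },
	 * and rcu_num_nodes (summed below) == 8: one root plus seven leaves.
	 */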
4997 
4998 	/* Calculate the total number of rcu_node structures. */
4999 	rcu_num_nodes = 0;
5000 	for (i = 0; i < rcu_num_lvls; i++)
5001 		rcu_num_nodes += num_rcu_lvl[i];
5002 }
5003 
5004 /*
5005  * Dump out the structure of the rcu_node combining tree associated
5006  * with the rcu_state structure.
5007  */
5008 static void __init rcu_dump_rcu_node_tree(void)
5009 {
5010 	int level = 0;
5011 	struct rcu_node *rnp;
5012 
5013 	pr_info("rcu_node tree layout dump\n");
5014 	pr_info(" ");
5015 	rcu_for_each_node_breadth_first(rnp) {
5016 		if (rnp->level != level) {
5017 			pr_cont("\n");
5018 			pr_info(" ");
5019 			level = rnp->level;
5020 		}
5021 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
5022 	}
5023 	pr_cont("\n");
5024 }
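/*
 * Illustration only: on a hypothetical two-level tree covering eight CPUs
 * with a leaf fanout of four, the dump above might look roughly like:
 *
 *	rcu: rcu_node tree layout dump
 *	rcu:  0:7 ^0
 *	rcu:  0:3 ^0  4:7 ^1
 *
 * that is, one root spanning CPUs 0-7 followed by two leaves spanning
 * CPUs 0-3 and 4-7 (the "rcu: " prefix comes from pr_fmt).
 */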
5025 
5026 struct workqueue_struct *rcu_gp_wq;
5027 
5028 static void __init kfree_rcu_batch_init(void)
5029 {
5030 	int cpu;
5031 	int i, j;
5032 
5033 	/* Clamp rcu_delay_page_cache_fill_msec to the [0, 100] second interval. */
5034 	if (rcu_delay_page_cache_fill_msec < 0 ||
5035 		rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
5036 
5037 		rcu_delay_page_cache_fill_msec =
5038 			clamp(rcu_delay_page_cache_fill_msec, 0,
5039 				(int) (100 * MSEC_PER_SEC));
5040 
5041 		pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
5042 			rcu_delay_page_cache_fill_msec);
5043 	}
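	/*
	 * For example, a boot-time rcutree.rcu_delay_page_cache_fill_msec=200000
	 * would have been clamped to 100000 ms by the code above.
	 */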
5044 
5045 	for_each_possible_cpu(cpu) {
5046 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
5047 
5048 		for (i = 0; i < KFREE_N_BATCHES; i++) {
5049 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
5050 			krcp->krw_arr[i].krcp = krcp;
5051 
5052 			for (j = 0; j < FREE_N_CHANNELS; j++)
5053 				INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]);
5054 		}
5055 
5056 		for (i = 0; i < FREE_N_CHANNELS; i++)
5057 			INIT_LIST_HEAD(&krcp->bulk_head[i]);
5058 
5059 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
5060 		INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
5061 		krcp->initialized = true;
5062 	}
5063 	if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
5064 		pr_err("Failed to register kfree_rcu() shrinker!\n");
5065 }
5066 
5067 void __init rcu_init(void)
5068 {
5069 	int cpu = smp_processor_id();
5070 
5071 	rcu_early_boot_tests();
5072 
5073 	kfree_rcu_batch_init();
5074 	rcu_bootup_announce();
5075 	sanitize_kthread_prio();
5076 	rcu_init_geometry();
5077 	rcu_init_one();
5078 	if (dump_tree)
5079 		rcu_dump_rcu_node_tree();
5080 	if (use_softirq)
5081 		open_softirq(RCU_SOFTIRQ, rcu_core_si);
5082 
5083 	/*
5084 	 * We don't need protection against CPU-hotplug here because
5085 	 * this is called early in boot, before either interrupts
5086 	 * or the scheduler are operational.
5087 	 */
5088 	pm_notifier(rcu_pm_notify, 0);
5089 	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
5090 	rcutree_prepare_cpu(cpu);
5091 	rcutree_report_cpu_starting(cpu);
5092 	rcutree_online_cpu(cpu);
5093 
5094 	/* Create workqueue for Tree SRCU and for expedited GPs. */
5095 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
5096 	WARN_ON(!rcu_gp_wq);
5097 	rcu_alloc_par_gp_wq();
5098 
5099 	/* Fill in default value for rcutree.qovld boot parameter. */
5100 	/* -After- the rcu_node ->lock fields are initialized! */
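	/*
	 * With qovld left negative (its boot default) and assuming the usual
	 * defaults of DEFAULT_RCU_QOVLD_MULT == 2 and qhimark == 10000, the
	 * qovld_calc computed below would be 20000 callbacks.
	 */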
5101 	if (qovld < 0)
5102 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
5103 	else
5104 		qovld_calc = qovld;
5105 
5106 	// Kick-start in case any polled grace periods started early.
5107 	(void)start_poll_synchronize_rcu_expedited();
5108 
5109 	rcu_test_sync_prims();
5110 }
5111 
5112 #include "tree_stall.h"
5113 #include "tree_exp.h"
5114 #include "tree_nocb.h"
5115 #include "tree_plugin.h"
5116