// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/kmemleak.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/panic_notifier.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <linux/context_tracking.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */
static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.gpwrap = true,
};

int rcu_get_gpwrap_count(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	return READ_ONCE(rdp->gpwrap_count);
}
EXPORT_SYMBOL_GPL(rcu_get_gpwrap_count);

static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
	.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
					       rcu_sr_normal_gp_cleanup_work),
	.srs_cleanups_pending = ATOMIC_INIT(0),
#ifdef CONFIG_RCU_NOCB_CPU
	.nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex),
#endif
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier(). When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods. This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking. Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks. So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one. We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
static bool rcu_init_invoked(void);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);

/*
 * rcuc/rcub/rcuop kthread real-time priority. Whether the "rcuop"
 * kthreads actually get real-time priority is additionally controlled
 * by the CONFIG_RCU_NOCB_CPU_CB_BOOST Kconfig option.
 */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
static int nohz_full_patience_delay;
module_param(nohz_full_patience_delay, int, 0444);
static int nohz_full_patience_delay_jiffies;

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay. The longer the delay, the more the grace periods between
 * each delay. The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay. This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
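
/*
 * A worked example of the normalization above (illustrative numbers,
 * not tied to any particular configuration): rcu_gp_slow() below sleeps
 * for "delay" jiffies once every rcu_num_nodes * PER_RCU_NODE_PERIOD *
 * delay grace periods. With rcu_num_nodes = 4 and delay = 20, that is
 * one 20-jiffy sleep per 240 grace periods, or 1/12 jiffy per grace
 * period; doubling the delay to 40 halves its frequency, so the average
 * slowdown remains 1 / (rcu_num_nodes * PER_RCU_NODE_PERIOD) jiffy per
 * grace period either way.
 */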

/*
 * Return true if an RCU grace period is in progress. The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

/**
 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
 *
 * Mark a quiescent state for RCU, Tasks RCU, and Tasks Trace RCU.
 * This is a special-purpose function to be used in the softirq
 * infrastructure and perhaps the occasional long-running softirq
 * handler.
 *
 * Note that from RCU's viewpoint, a call to rcu_softirq_qs() is
 * equivalent to momentarily completely enabling preemption. For
 * example, given this code::
 *
 *	local_bh_disable();
 *	do_something();
 *	rcu_softirq_qs();  // A
 *	do_something_else();
 *	local_bh_enable();  // B
 *
 * A call to synchronize_rcu() that began concurrently with the
 * call to do_something() would be guaranteed to wait only until
 * execution reached statement A. Without that rcu_softirq_qs(),
 * that same synchronize_rcu() would instead be guaranteed to wait
 * until execution reached statement B.
 */
void rcu_softirq_qs(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal rcu_softirq_qs() in RCU read-side critical section");
	rcu_qs();
	rcu_preempt_deferred_qs(current);
	rcu_tasks_qs(current, false);
}

/*
 * Reset the current CPU's RCU_WATCHING counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->state variable are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_watching_online(void)
{
	if (ct_rcu_watching() & CT_RCU_WATCHING)
		return;
	ct_state_inc(CT_RCU_WATCHING);
}

/*
 * Return true if the snapshot returned from ct_rcu_watching()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_watching_snap_in_eqs(int snap)
{
	return !(snap & CT_RCU_WATCHING);
}

/**
 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
 * since the specified @snap?
 *
 * @rdp: The rcu_data corresponding to the CPU for which to check EQS.
 * @snap: rcu_watching snapshot taken when the CPU wasn't in an EQS.
 *
 * Returns true if the CPU corresponding to @rdp has spent some time in an
 * extended quiescent state since @snap. Note that this doesn't check if it
 * /still/ is in an EQS, just that it went through one since @snap.
 *
 * This is meant to be used in a loop waiting for a CPU to go through an EQS.
 */
static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap)
{
	/*
	 * The first failing snapshot is already ordered against the accesses
	 * performed by the remote CPU after it exits idle.
	 *
	 * The second snapshot therefore only needs to order against accesses
	 * performed by the remote CPU prior to entering idle and therefore can
	 * rely solely on acquire semantics.
	 */
	if (WARN_ON_ONCE(rcu_watching_snap_in_eqs(snap)))
		return true;

	return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
}

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_watching_zero_in_eqs(int cpu, int *vp)
{
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
	smp_rmb(); // Order CT state and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure.
	smp_rmb(); // Order *vp read and CT state re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == ct_rcu_watching_cpu(cpu);
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state. This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_eqs(void)
{
	int seq;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	seq = ct_state_inc(2 * CT_RCU_WATCHING);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_eqs);
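
/*
 * A minimal usage sketch (hypothetical caller, not taken from this
 * file): a long-running loop that must keep interrupts disabled can
 * nevertheless let urgent grace periods make progress by reporting
 * momentary quiescent states by hand:
 *
 *	local_irq_save(flags);
 *	while (have_more_work()) {		// hypothetical helper
 *		process_one_item();		// hypothetical helper
 *		rcu_momentary_eqs();		// report a momentary QS
 *	}
 *	local_irq_restore(flags);
 */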

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or running directly from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nmi_nesting = ct_nmi_nesting();

	/*
	 * Usually called from the tick; but also used from smp_function_call()
	 * for expedited grace periods. This latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(ct_nesting() < 0,
			 "RCU nesting counter underflow!");

	/* Non-idle interrupt or nested idle interrupt */
	if (nmi_nesting > 1)
		return false;

	/*
	 * Non nested idle interrupt (interrupting section where RCU
	 * wasn't watching).
	 */
	if (nmi_nesting == 1)
		return true;

	/* Not in an interrupt */
	if (!nmi_nesting) {
		RCU_LOCKDEP_WARN(!in_task() || !is_idle_task(current),
				 "RCU nmi_nesting counter not in idle task!");
		return !rcu_is_watching_curr_cpu();
	}

	RCU_LOCKDEP_WARN(1, "RCU nmi_nesting counter underflow/zero!");

	return false;
}

#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
	    2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
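
/*
 * Worked example (illustrative values): with HZ = 1000,
 * jiffies_till_first_fqs = 1, and jiffies_till_next_fqs = 1, the
 * computed value is j = 1 + 2 * 1 = 3 jiffies, which is below the
 * floor of HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV, so
 * jiffies_to_sched_qs is instead raised to that floor (a bit more
 * than 100 jiffies here, growing with the CPU count).
 */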

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
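
/*
 * Because MODULE_PARAM_PREFIX is "rcutree.", these knobs appear on the
 * kernel command line under that prefix. For example (illustrative
 * values only):
 *
 *	rcutree.jiffies_till_first_fqs=50 rcutree.jiffies_till_next_fqs=10
 *
 * Values above HZ are clamped to HZ by the _set handlers above, and
 * jiffies_till_next_fqs is additionally forced to be at least 1.
 */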

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats. Odd numbers mean that a batch is in progress, even
 * numbers mean idle. The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = READ_ONCE(rcu_state.gp_flags);
	*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/* Gather grace-period sequence numbers for rcutorture diagnostics. */
unsigned long long rcutorture_gather_gp_seqs(void)
{
	return ((READ_ONCE(rcu_state.gp_seq) & 0xffffULL) << 40) |
	       ((READ_ONCE(rcu_state.expedited_sequence) & 0xffffffULL) << 16) |
	       (READ_ONCE(rcu_state.gp_seq_polled) & 0xffffULL);
}
EXPORT_SYMBOL_GPL(rcutorture_gather_gp_seqs);

/* Format grace-period sequence numbers for rcutorture diagnostics. */
void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len)
{
	unsigned int egp = (seqs >> 16) & 0xffffffULL;
	unsigned int ggp = (seqs >> 40) & 0xffffULL;
	unsigned int pgp = seqs & 0xffffULL;

	snprintf(cp, len, "g%04x:e%06x:p%04x", ggp, egp, pgp);
}
EXPORT_SYMBOL_GPL(rcutorture_format_gp_seqs);
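
/*
 * For example, with gp_seq = 0x12345, expedited_sequence = 0x6789, and
 * gp_seq_polled = 0xabc, rcutorture_gather_gp_seqs() packs the low 16,
 * 24, and 16 bits of those counters into bits 40-55, 16-39, and 0-15
 * of the result, respectively, and rcutorture_format_gp_seqs() then
 * renders that value as "g2345:e006789:p0abc".
 */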

#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
/*
 * An empty function that will trigger a reschedule on
 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
 */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

/*
 * If either:
 *
 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
 *
 * then the late RCU wake ups aren't supported in the resched loops and our
 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 * get re-enabled again.
 */
noinstr void rcu_irq_work_resched(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
		return;

	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
		return;

	instrumentation_begin();
	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
	}
	instrumentation_end();
}
#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(ct_nesting() <= 0,
			 "RCU nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(ct_nmi_nesting() !=
			 CT_NESTING_IRQ_NONIDLE,
			 "Bad RCU nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_NO_HZ_FULL
/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution. After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short. Except of course when it isn't. And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception. In that case, the RCU grace-period kthread
 * will eventually cause one to happen. However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs). Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock. So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state. Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
#endif /* CONFIG_NO_HZ_FULL */

/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so. This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API. This is used by
 * the idle-entry code to figure out whether it is safe to disable the
 * scheduler-clock interrupt.
 *
 * Just check whether or not this CPU has non-offloaded RCU callbacks
 * queued.
 */
int rcu_needs_cpu(void)
{
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
	       !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

/**
 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
 *
 * Return @true if RCU is watching the running CPU and @false otherwise.
 * A @true return means that this CPU can safely enter RCU read-side
 * critical sections.
 *
 * Although calls to rcu_is_watching() from most parts of the kernel
 * will return @true, there are important exceptions. For example, if the
 * current CPU is deep within its idle loop, in kernel entry/exit code,
 * or offline, rcu_is_watching() will return @false.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = rcu_is_watching_curr_cpu();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
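
/*
 * A typical usage sketch (hypothetical caller): code that might run
 * from the idle loop or other entry/exit paths should check before
 * entering an RCU read-side critical section:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		do_something_rcu_protected();	// hypothetical helper
 *		rcu_read_unlock();
 *	}
 */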

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU. This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU. Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

static unsigned long seq_gpwrap_lag = ULONG_MAX / 4;

/**
 * rcu_set_gpwrap_lag - Set RCU GP sequence overflow lag value.
 * @lag_gps: Set overflow lag to this many grace periods' worth of counters,
 * which is used by rcutorture to quickly force a gpwrap situation.
 * @lag_gps = 0 means we reset it back to the boot-time value.
 */
void rcu_set_gpwrap_lag(unsigned long lag_gps)
{
	unsigned long lag_seq_count;

	lag_seq_count = (lag_gps == 0)
			? ULONG_MAX / 4
			: lag_gps << RCU_SEQ_CTR_SHIFT;
	WRITE_ONCE(seq_gpwrap_lag, lag_seq_count);
}
EXPORT_SYMBOL_GPL(rcu_set_gpwrap_lag);
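
/*
 * For example, rcu_set_gpwrap_lag(4) sets the lag to four grace
 * periods' worth of sequence counts (4 << RCU_SEQ_CTR_SHIFT), so that
 * rcu_gpnum_ovf() below declares a wrap as soon as a CPU's rcu_data
 * ->gp_seq falls more than four grace periods behind its rcu_node's
 * ->gp_seq, while rcu_set_gpwrap_lag(0) restores the ULONG_MAX / 4
 * boot-time default.
 */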

/*
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + seq_gpwrap_lag,
			 rnp->gp_seq)) {
		WRITE_ONCE(rdp->gpwrap, true);
		WRITE_ONCE(rdp->gpwrap_count, READ_ONCE(rdp->gpwrap_count) + 1);
	}
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
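
/*
 * Note that ULONG_CMP_LT() is a wrap-safe modular comparison, roughly
 * (long)(a - b) < 0, so the tests above fire once rnp->gp_seq has run
 * more than seq_gpwrap_lag (or ULONG_MAX / 4) counts ahead of the
 * rcu_data counters, even if a counter has wrapped through zero in
 * the meantime.
 */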

/*
 * Snapshot the specified CPU's RCU_WATCHING counter so that we can later
 * credit them with an implicit quiescent state. Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int rcu_watching_snap_save(struct rcu_data *rdp)
{
	/*
	 * Full ordering between remote CPU's post idle accesses and updater's
	 * accesses prior to current GP (and also the started GP sequence number)
	 * is enforced by rcu_seq_start() implicit barrier and even further by
	 * smp_mb__after_unlock_lock() barriers chained all the way throughout the
	 * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
	 * locking.
	 *
	 * Ordering between remote CPU's pre idle accesses and post grace period
	 * updater's accesses is enforced by the below acquire semantic.
	 */
	rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
	if (rcu_watching_snap_in_eqs(rdp->watching_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif

/*
 * Returns positive if the specified CPU has passed through a quiescent state
 * by virtue of being in or having passed through a dynticks idle state since
 * the last call to rcu_watching_snap_save() for this same CPU, or by
 * virtue of having been offline.
 *
 * Returns negative if the specified CPU needs a force resched.
 *
 * Returns zero otherwise.
 */
static int rcu_watching_snap_recheck(struct rcu_data *rdp)
{
	unsigned long jtsq;
	int ret = 0;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state. Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state. After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline. This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state. This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(&rdp->rcu_urgent_qs, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		ret = -1;
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
			ret = -1;
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}

		if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
			int cpu = rdp->cpu;
			struct rcu_snap_record *rsrp;
			struct kernel_cpustat *kcsp;

			kcsp = &kcpustat_cpu(cpu);

			rsrp = &rdp->snap_record;
			rsrp->cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
			rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
			rsrp->cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
			rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu);
			rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu);
			rsrp->nr_csw = nr_context_switches_cpu(cpu);
			rsrp->jiffies = jiffies;
			rsrp->gp_seq = rdp->gp_seq;
		}
	}

	return ret;
}

/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks. The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field. Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP thread needs to be awakened else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started. If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking. Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}
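
/*
 * The funnel-locking pattern above generalizes as follows (sketch
 * only; the real loop also handles the leaf and root special cases):
 *
 *	for (rnp = leaf; rnp; rnp = rnp->parent) {
 *		// lock rnp (the leaf's lock is already held)
 *		if (request already recorded at rnp)
 *			break;		// someone beat us to it
 *		// record request at rnp
 *		// unlock rnp unless it is the leaf or the root
 *	}
 *
 * Each climber that finds its request already recorded drops out
 * early, so root-lock contention scales with the number of distinct
 * outstanding grace-period requests rather than the number of CPUs.
 */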

/*
 * Clean up any old requests for the just-ended grace period. Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, we lost), and finally don't try
 * to awaken a kthread that has not yet been created. If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context? Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition. In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}

/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned. Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure. This function is idempotent, so it does not hurt
 * to call it repeatedly. Returns a flag saying that we should awaken
 * the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
rcu_accelerate_cbs(struct rcu_node * rnp,struct rcu_data * rdp)1142 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1143 {
1144 unsigned long gp_seq_req;
1145 bool ret = false;
1146
1147 rcu_lockdep_assert_cblist_protected(rdp);
1148 raw_lockdep_assert_held_rcu_node(rnp);
1149
1150 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1151 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1152 return false;
1153
1154 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1155
1156 /*
1157 * Callbacks are often registered with incomplete grace-period
1158 * information. Something about the fact that getting exact
1159 * information requires acquiring a global lock... RCU therefore
1160 * makes a conservative estimate of the grace period number at which
1161 * a given callback will become ready to invoke. The following
1162 * code checks this estimate and improves it when possible, thus
1163 * accelerating callback invocation to an earlier grace-period
1164 * number.
1165 */
1166 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1167 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1168 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1169
1170 /* Trace depending on how much we were able to accelerate. */
1171 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1172 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1173 else
1174 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1175
1176 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1177
1178 return ret;
1179 }
1180
1181 /*
1182 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1183 * rcu_node structure's ->lock be held. It consults the cached value
1184 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1185 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1186 * while holding the leaf rcu_node structure's ->lock.
1187 */
rcu_accelerate_cbs_unlocked(struct rcu_node * rnp,struct rcu_data * rdp)1188 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1189 struct rcu_data *rdp)
1190 {
1191 unsigned long c;
1192 bool needwake;
1193
1194 rcu_lockdep_assert_cblist_protected(rdp);
1195 c = rcu_seq_snap(&rcu_state.gp_seq);
1196 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1197 /* Old request still live, so mark recent callbacks. */
1198 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1199 return;
1200 }
1201 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1202 needwake = rcu_accelerate_cbs(rnp, rdp);
1203 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1204 if (needwake)
1205 rcu_gp_kthread_wake();
1206 }
1207
1208 /*
1209 * Move any callbacks whose grace period has completed to the
1210 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1211 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1212 * sublist. This function is idempotent, so it does not hurt to
1213 * invoke it repeatedly. As long as it is not invoked -too- often...
1214 * Returns true if the RCU grace-period kthread needs to be awakened.
1215 *
1216 * The caller must hold rnp->lock with interrupts disabled.
1217 */
rcu_advance_cbs(struct rcu_node * rnp,struct rcu_data * rdp)1218 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1219 {
1220 rcu_lockdep_assert_cblist_protected(rdp);
1221 raw_lockdep_assert_held_rcu_node(rnp);
1222
1223 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1224 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1225 return false;
1226
1227 /*
1228 * Find all callbacks whose ->gp_seq numbers indicate that they
1229 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1230 */
1231 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1232
1233 /* Classify any remaining callbacks. */
1234 return rcu_accelerate_cbs(rnp, rdp);
1235 }
1236
1237 /*
1238 * Move and classify callbacks, but only if doing so won't require
1239 * that the RCU grace-period kthread be awakened.
1240 */
rcu_advance_cbs_nowake(struct rcu_node * rnp,struct rcu_data * rdp)1241 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1242 struct rcu_data *rdp)
1243 {
1244 rcu_lockdep_assert_cblist_protected(rdp);
1245 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1246 return;
1247 // The grace period cannot end while we hold the rcu_node lock.
1248 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1249 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1250 raw_spin_unlock_rcu_node(rnp);
1251 }
1252
1253 /*
1254 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1255 * quiescent state. This is intended to be invoked when the CPU notices
1256 * a new grace period.
1257 */
rcu_strict_gp_check_qs(void)1258 static void rcu_strict_gp_check_qs(void)
1259 {
1260 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1261 rcu_read_lock();
1262 rcu_read_unlock();
1263 }
1264 }
1265
1266 /*
1267 * Update CPU-local rcu_data state to record the beginnings and ends of
1268 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1269 * structure corresponding to the current CPU, and must have irqs disabled.
1270 * Returns true if the grace-period kthread needs to be awakened.
1271 */
__note_gp_changes(struct rcu_node * rnp,struct rcu_data * rdp)1272 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1273 {
1274 bool ret = false;
1275 bool need_qs;
1276 const bool offloaded = rcu_rdp_is_offloaded(rdp);
1277
1278 raw_lockdep_assert_held_rcu_node(rnp);
1279
1280 if (rdp->gp_seq == rnp->gp_seq)
1281 return false; /* Nothing to do. */
1282
1283 /* Handle the ends of any preceding grace periods first. */
1284 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1285 unlikely(rdp->gpwrap)) {
1286 if (!offloaded)
1287 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1288 rdp->core_needs_qs = false;
1289 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1290 } else {
1291 if (!offloaded)
1292 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1293 if (rdp->core_needs_qs)
1294 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1295 }
1296
1297 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1298 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1299 unlikely(rdp->gpwrap)) {
1300 /*
1301 * If the current grace period is waiting for this CPU,
1302 * set up to detect a quiescent state, otherwise don't
1303 * go looking for one.
1304 */
1305 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1306 need_qs = !!(rnp->qsmask & rdp->grpmask);
1307 rdp->cpu_no_qs.b.norm = need_qs;
1308 rdp->core_needs_qs = need_qs;
1309 zero_cpu_stall_ticks(rdp);
1310 }
1311 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1312 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1313 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1314 if (IS_ENABLED(CONFIG_PROVE_RCU) && rdp->gpwrap)
1315 WRITE_ONCE(rdp->last_sched_clock, jiffies);
1316 WRITE_ONCE(rdp->gpwrap, false);
1317 rcu_gpnum_ovf(rnp, rdp);
1318 return ret;
1319 }
1320
note_gp_changes(struct rcu_data * rdp)1321 static void note_gp_changes(struct rcu_data *rdp)
1322 {
1323 unsigned long flags;
1324 bool needwake;
1325 struct rcu_node *rnp;
1326
1327 local_irq_save(flags);
1328 rnp = rdp->mynode;
1329 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1330 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1331 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1332 local_irq_restore(flags);
1333 return;
1334 }
1335 needwake = __note_gp_changes(rnp, rdp);
1336 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1337 rcu_strict_gp_check_qs();
1338 if (needwake)
1339 rcu_gp_kthread_wake();
1340 }
1341
1342 static atomic_t *rcu_gp_slow_suppress;
1343
1344 /* Register a counter to suppress debugging grace-period delays. */
rcu_gp_slow_register(atomic_t * rgssp)1345 void rcu_gp_slow_register(atomic_t *rgssp)
1346 {
1347 WARN_ON_ONCE(rcu_gp_slow_suppress);
1348
1349 WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1350 }
1351 EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1352
1353 /* Unregister a counter, with NULL for not caring which. */
rcu_gp_slow_unregister(atomic_t * rgssp)1354 void rcu_gp_slow_unregister(atomic_t *rgssp)
1355 {
1356 WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);
1357
1358 WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1359 }
1360 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1361
rcu_gp_slow_is_suppressed(void)1362 static bool rcu_gp_slow_is_suppressed(void)
1363 {
1364 atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1365
1366 return rgssp && atomic_read(rgssp);
1367 }
1368
rcu_gp_slow(int delay)1369 static void rcu_gp_slow(int delay)
1370 {
1371 if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1372 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1373 schedule_timeout_idle(delay);
1374 }
1375
1376 static unsigned long sleep_duration;
1377
1378 /* Allow rcutorture to stall the grace-period kthread. */
rcu_gp_set_torture_wait(int duration)1379 void rcu_gp_set_torture_wait(int duration)
1380 {
1381 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1382 WRITE_ONCE(sleep_duration, duration);
1383 }
1384 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1385
1386 /* Actually implement the aforementioned wait. */
rcu_gp_torture_wait(void)1387 static void rcu_gp_torture_wait(void)
1388 {
1389 unsigned long duration;
1390
1391 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1392 return;
1393 duration = xchg(&sleep_duration, 0UL);
1394 if (duration > 0) {
1395 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1396 schedule_timeout_idle(duration);
1397 pr_alert("%s: Wait complete\n", __func__);
1398 }
1399 }
1400
1401 /*
1402 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1403 * processing.
1404 */
rcu_strict_gp_boundary(void * unused)1405 static void rcu_strict_gp_boundary(void *unused)
1406 {
1407 invoke_rcu_core();
1408 }
1409
1410 // Make the polled API aware of the beginning of a grace period.
rcu_poll_gp_seq_start(unsigned long * snap)1411 static void rcu_poll_gp_seq_start(unsigned long *snap)
1412 {
1413 struct rcu_node *rnp = rcu_get_root();
1414
1415 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1416 raw_lockdep_assert_held_rcu_node(rnp);
1417
1418 // If RCU was idle, note beginning of GP.
1419 if (!rcu_seq_state(rcu_state.gp_seq_polled))
1420 rcu_seq_start(&rcu_state.gp_seq_polled);
1421
1422 // Either way, record current state.
1423 *snap = rcu_state.gp_seq_polled;
1424 }
1425
1426 // Make the polled API aware of the end of a grace period.
rcu_poll_gp_seq_end(unsigned long * snap)1427 static void rcu_poll_gp_seq_end(unsigned long *snap)
1428 {
1429 struct rcu_node *rnp = rcu_get_root();
1430
1431 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1432 raw_lockdep_assert_held_rcu_node(rnp);
1433
1434 // If the previously noted GP is still in effect, record the
1435 // end of that GP. Either way, zero counter to avoid counter-wrap
1436 // problems.
1437 if (*snap && *snap == rcu_state.gp_seq_polled) {
1438 rcu_seq_end(&rcu_state.gp_seq_polled);
1439 rcu_state.gp_seq_polled_snap = 0;
1440 rcu_state.gp_seq_polled_exp_snap = 0;
1441 } else {
1442 *snap = 0;
1443 }
1444 }
1445
1446 // Make the polled API aware of the beginning of a grace period, but
1447 // where caller does not hold the root rcu_node structure's lock.
1448 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1449 {
1450 unsigned long flags;
1451 struct rcu_node *rnp = rcu_get_root();
1452
1453 if (rcu_init_invoked()) {
1454 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1455 lockdep_assert_irqs_enabled();
1456 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1457 }
1458 rcu_poll_gp_seq_start(snap);
1459 if (rcu_init_invoked())
1460 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1461 }
1462
1463 // Make the polled API aware of the end of a grace period, but where
1464 // caller does not hold the root rcu_node structure's lock.
1465 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1466 {
1467 unsigned long flags;
1468 struct rcu_node *rnp = rcu_get_root();
1469
1470 if (rcu_init_invoked()) {
1471 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1472 lockdep_assert_irqs_enabled();
1473 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1474 }
1475 rcu_poll_gp_seq_end(snap);
1476 if (rcu_init_invoked())
1477 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1478 }
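/*
 * A minimal sketch of how these helpers pair up, simplified from
 * rcu_gp_init() and rcu_gp_cleanup() later in this file:
 *
 *	unsigned long snap;
 *
 *	rcu_poll_gp_seq_start(&snap);	// Root rcu_node ->lock held.
 *	// ...grace period runs...
 *	rcu_poll_gp_seq_end(&snap);	// Root rcu_node ->lock held.
 *
 * The _unlocked() variants above acquire and release the root rcu_node
 * structure's ->lock themselves, for callers that do not already hold it.
 */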
1479
1480 /*
1481 * There is a single llist, which is used for handling
1482 * synchronize_rcu() users' enqueued rcu_synchronize nodes.
1483 * Within this llist, there are two tail pointers:
1484 *
1485  * wait tail: Tracks the set of nodes that need to
1486  * wait for the current GP to complete.
1487  * done tail: Tracks the set of nodes whose grace
1488  * period has elapsed. These nodes will be
1489  * processed as part of the cleanup work
1490  * executed by a kworker.
1491 *
1492  * At every grace period init, a new wait node is added
1493  * to the llist. This wait node is used as the wait tail
1494  * for this new grace period. Given that there is a fixed
1495  * number of wait nodes, it can happen that all of them
1496  * are in use (for example, when kworker callback processing
1497  * is delayed) while an additional grace period is requested.
1498  * This means that the system is slow in processing callbacks.
1499 *
1500  * TODO: If slow processing is detected, the first node
1501  * in the llist should be used as the wait tail for this
1502  * grace period, so that users delayed by the slow
1503  * processing are handled by _this_ grace period
1504  * and not the next one.
1505 *
1506 * Below is an illustration of how the done and wait
1507 * tail pointers move from one set of rcu_synchronize nodes
1508 * to the other, as grace periods start and finish and
1509 * nodes are processed by kworker.
1510 *
1511 *
1512 * a. Initial llist callbacks list:
1513 *
1514 * +----------+ +--------+ +-------+
1515 * | | | | | |
1516 * | head |---------> | cb2 |--------->| cb1 |
1517 * | | | | | |
1518 * +----------+ +--------+ +-------+
1519 *
1520 *
1521 *
1522 * b. New GP1 Start:
1523 *
1524 * WAIT TAIL
1525 * |
1526 * |
1527 * v
1528 * +----------+ +--------+ +--------+ +-------+
1529 * | | | | | | | |
1530 * | head ------> wait |------> cb2 |------> | cb1 |
1531 * | | | head1 | | | | |
1532 * +----------+ +--------+ +--------+ +-------+
1533 *
1534 *
1535 *
1536 * c. GP completion:
1537 *
1538 * WAIT_TAIL == DONE_TAIL
1539 *
1540 * DONE TAIL
1541 * |
1542 * |
1543 * v
1544 * +----------+ +--------+ +--------+ +-------+
1545 * | | | | | | | |
1546 * | head ------> wait |------> cb2 |------> | cb1 |
1547 * | | | head1 | | | | |
1548 * +----------+ +--------+ +--------+ +-------+
1549 *
1550 *
1551 *
1552 * d. New callbacks and GP2 start:
1553 *
1554 * WAIT TAIL DONE TAIL
1555 * | |
1556 * | |
1557 * v v
1558 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1559 * | | | | | | | | | | | | | |
1560 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1561 * | | | head2| | | | | |head1| | | | |
1562 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1563 *
1564 *
1565 *
1566 * e. GP2 completion:
1567 *
1568 * WAIT_TAIL == DONE_TAIL
1569 * DONE TAIL
1570 * |
1571 * |
1572 * v
1573 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1574 * | | | | | | | | | | | | | |
1575 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1576 * | | | head2| | | | | |head1| | | | |
1577 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1578 *
1579 *
1580 * While the llist state transitions from d to e, a kworker
1581 * can start executing rcu_sr_normal_gp_cleanup_work() and
1582 * can observe either the old done tail (@c) or the new
1583  * done tail (@e). So done-tail updates and reads need
1584  * to use release-acquire semantics. If the concurrent kworker
1585  * observes the old done tail, the newly queued work
1586  * execution will process the updated done tail. If the
1587  * concurrent kworker observes the new done tail, then
1588  * the newly queued work will skip processing the done
1589  * tail, as workqueue semantics guarantee that the new
1590  * work is executed only after the previous one completes.
1591 *
1592 * f. kworker callbacks processing complete:
1593 *
1594 *
1595 * DONE TAIL
1596 * |
1597 * |
1598 * v
1599 * +----------+ +--------+
1600 * | | | |
1601 * | head ------> wait |
1602 * | | | head2 |
1603 * +----------+ +--------+
1604 *
1605 */
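/*
 * The done-tail handoff described above is the classic release-acquire
 * pattern, shown here in isolation:
 *
 *	// GP kthread, in rcu_sr_normal_gp_cleanup():
 *	smp_store_release(&rcu_state.srs_done_tail, wait_tail);
 *
 *	// kworker, in rcu_sr_normal_gp_cleanup_work():
 *	done = smp_load_acquire(&rcu_state.srs_done_tail);
 *
 * The acquire load guarantees that the kworker observes all of the
 * list manipulations that preceded the release store.
 */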
1606 static bool rcu_sr_is_wait_head(struct llist_node *node)
1607 {
1608 return &(rcu_state.srs_wait_nodes)[0].node <= node &&
1609 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node;
1610 }
1611
1612 static struct llist_node *rcu_sr_get_wait_head(void)
1613 {
1614 struct sr_wait_node *sr_wn;
1615 int i;
1616
1617 for (i = 0; i < SR_NORMAL_GP_WAIT_HEAD_MAX; i++) {
1618 sr_wn = &(rcu_state.srs_wait_nodes)[i];
1619
1620 if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1))
1621 return &sr_wn->node;
1622 }
1623
1624 return NULL;
1625 }
1626
1627 static void rcu_sr_put_wait_head(struct llist_node *node)
1628 {
1629 struct sr_wait_node *sr_wn = container_of(node, struct sr_wait_node, node);
1630
1631 atomic_set_release(&sr_wn->inuse, 0);
1632 }
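/*
 * The ->inuse flag gives each wait head a simple try-acquire/release
 * lifecycle, roughly:
 *
 *	struct llist_node *wh = rcu_sr_get_wait_head();
 *
 *	if (wh) {
 *		// Exclusive owner of *wh from here...
 *		llist_add(wh, &rcu_state.srs_next);
 *		// ...until a later rcu_sr_put_wait_head(wh).
 *	}
 *
 * The _acquire cmpxchg and _release set order each owner's accesses to
 * the node against those of the next owner.
 */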
1633
1634 /* Enable rcu_normal_wake_from_gp automatically on small systems. */
1635 #define WAKE_FROM_GP_CPU_THRESHOLD 16
1636
1637 static int rcu_normal_wake_from_gp = -1;
1638 module_param(rcu_normal_wake_from_gp, int, 0644);
1639 static struct workqueue_struct *sync_wq;
1640
1641 static void rcu_sr_normal_complete(struct llist_node *node)
1642 {
1643 struct rcu_synchronize *rs = container_of(
1644 (struct rcu_head *) node, struct rcu_synchronize, head);
1645
1646 WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
1647 !poll_state_synchronize_rcu_full(&rs->oldstate),
1648 "A full grace period is not passed yet!\n");
1649
1650 /* Finally. */
1651 complete(&rs->completion);
1652 }
1653
1654 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
1655 {
1656 struct llist_node *done, *rcu, *next, *head;
1657
1658 /*
1659 	 * This work can execute while a new done tail is
1660 	 * being updated by the grace-period kthread in
1661 	 * rcu_sr_normal_gp_cleanup(). So reads and updates
1662 	 * of the done tail need to follow acquire-release
1663 	 * semantics.
1664 	 *
1665 	 * Given that workqueue semantics guarantee that a single
1666 	 * work item cannot be executed concurrently by multiple
1667 	 * kworkers, the done-tail list manipulations are protected here.
1668 */
1669 done = smp_load_acquire(&rcu_state.srs_done_tail);
1670 if (WARN_ON_ONCE(!done))
1671 return;
1672
1673 WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
1674 head = done->next;
1675 done->next = NULL;
1676
1677 /*
1678 	 * The dummy node, which is pointed to by the
1679 	 * acquire-loaded done tail above, is not removed
1680 	 * here. This allows lockless additions of new
1681 	 * rcu_synchronize nodes in rcu_sr_normal_add_req()
1682 	 * while the cleanup work executes. The dummy
1683 	 * node is removed in the next round of cleanup-work
1684 	 * execution.
1685 */
1686 llist_for_each_safe(rcu, next, head) {
1687 if (!rcu_sr_is_wait_head(rcu)) {
1688 rcu_sr_normal_complete(rcu);
1689 continue;
1690 }
1691
1692 rcu_sr_put_wait_head(rcu);
1693 }
1694
1695 /* Order list manipulations with atomic access. */
1696 atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
1697 }
1698
1699 /*
1700 * Helper function for rcu_gp_cleanup().
1701 */
1702 static void rcu_sr_normal_gp_cleanup(void)
1703 {
1704 struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
1705 int done = 0;
1706
1707 wait_tail = rcu_state.srs_wait_tail;
1708 if (wait_tail == NULL)
1709 return;
1710
1711 rcu_state.srs_wait_tail = NULL;
1712 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1713 WARN_ON_ONCE(!rcu_sr_is_wait_head(wait_tail));
1714
1715 /*
1716 * Process (a) and (d) cases. See an illustration.
1717 */
1718 llist_for_each_safe(rcu, next, wait_tail->next) {
1719 if (rcu_sr_is_wait_head(rcu))
1720 break;
1721
1722 rcu_sr_normal_complete(rcu);
1723 // This node can be the last one, so update wait_tail->next at each step.
1724 wait_tail->next = next;
1725
1726 if (++done == SR_MAX_USERS_WAKE_FROM_GP)
1727 break;
1728 }
1729
1730 /*
1731 	 * Fast path: there are no more users to process except for putting the
1732 	 * second-to-last wait head, and only if there are no in-flight workers.
1733 	 * If there are in-flight workers, they will remove the last wait head.
1734 *
1735 * Note that the ACQUIRE orders atomic access with list manipulation.
1736 */
1737 if (wait_tail->next && wait_tail->next->next == NULL &&
1738 rcu_sr_is_wait_head(wait_tail->next) &&
1739 !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
1740 rcu_sr_put_wait_head(wait_tail->next);
1741 wait_tail->next = NULL;
1742 }
1743
1744 /* Concurrent sr_normal_gp_cleanup work might observe this update. */
1745 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
1746 smp_store_release(&rcu_state.srs_done_tail, wait_tail);
1747
1748 /*
1749 	 * Schedule a work item in order to perform final processing
1750 	 * of any outstanding users (if any are left) and to release the
1751 	 * wait heads added by calls to rcu_sr_normal_gp_init().
1752 */
1753 if (wait_tail->next) {
1754 atomic_inc(&rcu_state.srs_cleanups_pending);
1755 if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
1756 atomic_dec(&rcu_state.srs_cleanups_pending);
1757 }
1758 }
1759
1760 /*
1761 * Helper function for rcu_gp_init().
1762 */
1763 static bool rcu_sr_normal_gp_init(void)
1764 {
1765 struct llist_node *first;
1766 struct llist_node *wait_head;
1767 bool start_new_poll = false;
1768
1769 first = READ_ONCE(rcu_state.srs_next.first);
1770 if (!first || rcu_sr_is_wait_head(first))
1771 return start_new_poll;
1772
1773 wait_head = rcu_sr_get_wait_head();
1774 if (!wait_head) {
1775 // Kick another GP to retry.
1776 start_new_poll = true;
1777 return start_new_poll;
1778 }
1779
1780 /* Inject a wait-dummy-node. */
1781 llist_add(wait_head, &rcu_state.srs_next);
1782
1783 /*
1784 	 * The waiting list of rcu_synchronize nodes should be empty at
1785 	 * this step, since the GP kthread, via rcu_gp_init() -> gp_cleanup(),
1786 	 * rolls it over. If not, it is a bug, so warn the user.
1787 */
1788 WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL);
1789 rcu_state.srs_wait_tail = wait_head;
1790 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1791
1792 return start_new_poll;
1793 }
1794
1795 static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
1796 {
1797 llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
1798 }
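/*
 * Caller-side sketch, simplified from synchronize_rcu()'s slow path
 * (which appears later in this file and differs in detail): queue a
 * request, kick a grace period, and sleep until rcu_sr_normal_complete()
 * signals the completion:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	get_state_synchronize_rcu_full(&rs.oldstate); // For debug checks.
 *	rcu_sr_normal_add_req(&rs);
 *	(void) start_poll_synchronize_rcu();
 *	wait_for_completion(&rs.completion);
 *	destroy_rcu_head_on_stack(&rs.head);
 */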
1799
1800 /*
1801 * Initialize a new grace period. Return false if no grace period required.
1802 */
1803 static noinline_for_stack bool rcu_gp_init(void)
1804 {
1805 unsigned long flags;
1806 unsigned long oldmask;
1807 unsigned long mask;
1808 struct rcu_data *rdp;
1809 struct rcu_node *rnp = rcu_get_root();
1810 bool start_new_poll;
1811 unsigned long old_gp_seq;
1812
1813 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1814 raw_spin_lock_irq_rcu_node(rnp);
1815 if (!rcu_state.gp_flags) {
1816 /* Spurious wakeup, tell caller to go back to sleep. */
1817 raw_spin_unlock_irq_rcu_node(rnp);
1818 return false;
1819 }
1820 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1821
1822 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1823 /*
1824 * Grace period already in progress, don't start another.
1825 * Not supposed to be able to happen.
1826 */
1827 raw_spin_unlock_irq_rcu_node(rnp);
1828 return false;
1829 }
1830
1831 /* Advance to a new grace period and initialize state. */
1832 record_gp_stall_check_time();
1833 /*
1834 * A new wait segment must be started before gp_seq advanced, so
1835 * that previous gp waiters won't observe the new gp_seq.
1836 */
1837 start_new_poll = rcu_sr_normal_gp_init();
1838 /* Record GP times before starting GP, hence rcu_seq_start(). */
1839 old_gp_seq = rcu_state.gp_seq;
1840 /*
1841 * Critical ordering: rcu_seq_start() must happen BEFORE the CPU hotplug
1842 * scan below. Otherwise we risk a race where a newly onlining CPU could
1843 * be missed by the current grace period, potentially leading to
1844 * use-after-free errors. For a detailed explanation of this race, see
1845 * Documentation/RCU/Design/Requirements/Requirements.rst in the
1846 * "Hotplug CPU" section.
1847 *
1848 * Also note that the root rnp's gp_seq is kept separate from, and lags,
1849 * the rcu_state's gp_seq, for a reason. See the Quick-Quiz on
1850 * Single-node systems for more details (in Data-Structures.rst).
1851 */
1852 rcu_seq_start(&rcu_state.gp_seq);
1853 /* Ensure that rcu_seq_done_exact() guardband doesn't give false positives. */
1854 WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
1855 rcu_seq_done_exact(&old_gp_seq, rcu_seq_snap(&rcu_state.gp_seq)));
1856
1857 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1858 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1859 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1860 raw_spin_unlock_irq_rcu_node(rnp);
1861
1862 /*
1863 	 * "start_new_poll" is set to true only when this GP is not able
1864 	 * to handle anything and there are outstanding users. This happens when
1865 	 * rcu_sr_normal_gp_init() was not able to insert a dummy
1866 	 * separator into the llist because no dummy nodes were left.
1867 	 *
1868 	 * The number of dummy nodes is fixed, so it is possible to run out of
1869 	 * them; if so, start a new poll request to retry. This is rare
1870 	 * and means that the system is slow in processing callbacks.
1871 */
1872 if (start_new_poll)
1873 (void) start_poll_synchronize_rcu();
1874
1875 /*
1876 * Apply per-leaf buffered online and offline operations to
1877 * the rcu_node tree. Note that this new grace period need not
1878 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1879 * offlining path, when combined with checks in this function,
1880 * will handle CPUs that are currently going offline or that will
1881 * go offline later. Please also refer to "Hotplug CPU" section
1882 * of RCU's Requirements documentation.
1883 */
1884 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1885 /* Exclude CPU hotplug operations. */
1886 rcu_for_each_leaf_node(rnp) {
1887 local_irq_disable();
1888 /*
1889 * Serialize with CPU offline. See Requirements.rst > Hotplug CPU >
1890 * Concurrent Quiescent State Reporting for Offline CPUs.
1891 */
1892 arch_spin_lock(&rcu_state.ofl_lock);
1893 raw_spin_lock_rcu_node(rnp);
1894 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1895 !rnp->wait_blkd_tasks) {
1896 /* Nothing to do on this leaf rcu_node structure. */
1897 raw_spin_unlock_rcu_node(rnp);
1898 arch_spin_unlock(&rcu_state.ofl_lock);
1899 local_irq_enable();
1900 continue;
1901 }
1902
1903 /* Record old state, apply changes to ->qsmaskinit field. */
1904 oldmask = rnp->qsmaskinit;
1905 rnp->qsmaskinit = rnp->qsmaskinitnext;
1906
1907 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1908 if (!oldmask != !rnp->qsmaskinit) {
1909 if (!oldmask) { /* First online CPU for rcu_node. */
1910 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1911 rcu_init_new_rnp(rnp);
1912 } else if (rcu_preempt_has_tasks(rnp)) {
1913 rnp->wait_blkd_tasks = true; /* blocked tasks */
1914 } else { /* Last offline CPU and can propagate. */
1915 rcu_cleanup_dead_rnp(rnp);
1916 }
1917 }
1918
1919 /*
1920 * If all waited-on tasks from prior grace period are
1921 * done, and if all this rcu_node structure's CPUs are
1922 * still offline, propagate up the rcu_node tree and
1923 * clear ->wait_blkd_tasks. Otherwise, if one of this
1924 * rcu_node structure's CPUs has since come back online,
1925 * simply clear ->wait_blkd_tasks.
1926 */
1927 if (rnp->wait_blkd_tasks &&
1928 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1929 rnp->wait_blkd_tasks = false;
1930 if (!rnp->qsmaskinit)
1931 rcu_cleanup_dead_rnp(rnp);
1932 }
1933
1934 raw_spin_unlock_rcu_node(rnp);
1935 arch_spin_unlock(&rcu_state.ofl_lock);
1936 local_irq_enable();
1937 }
1938 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1939
1940 /*
1941 * Set the quiescent-state-needed bits in all the rcu_node
1942 * structures for all currently online CPUs in breadth-first
1943 * order, starting from the root rcu_node structure, relying on the
1944 * layout of the tree within the rcu_state.node[] array. Note that
1945 * other CPUs will access only the leaves of the hierarchy, thus
1946 * seeing that no grace period is in progress, at least until the
1947 * corresponding leaf node has been initialized.
1948 *
1949 * The grace period cannot complete until the initialization
1950 * process finishes, because this kthread handles both.
1951 */
1952 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1953 rcu_for_each_node_breadth_first(rnp) {
1954 rcu_gp_slow(gp_init_delay);
1955 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1956 rdp = this_cpu_ptr(&rcu_data);
1957 rcu_preempt_check_blocked_tasks(rnp);
1958 rnp->qsmask = rnp->qsmaskinit;
1959 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1960 if (rnp == rdp->mynode)
1961 (void)__note_gp_changes(rnp, rdp);
1962 rcu_preempt_boost_start_gp(rnp);
1963 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1964 rnp->level, rnp->grplo,
1965 rnp->grphi, rnp->qsmask);
1966 /*
1967 * Quiescent states for tasks on any now-offline CPUs. Since we
1968 * released the ofl and rnp lock before this loop, CPUs might
1969 * have gone offline and we have to report QS on their behalf.
1970 * See Requirements.rst > Hotplug CPU > Concurrent QS Reporting.
1971 */
1972 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1973 rnp->rcu_gp_init_mask = mask;
1974 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1975 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1976 else
1977 raw_spin_unlock_irq_rcu_node(rnp);
1978 cond_resched_tasks_rcu_qs();
1979 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1980 }
1981
1982 // If strict, make all CPUs aware of new grace period.
1983 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1984 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1985
1986 return true;
1987 }
1988
1989 /*
1990 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1991 * time.
1992 */
1993 static bool rcu_gp_fqs_check_wake(int *gfp)
1994 {
1995 struct rcu_node *rnp = rcu_get_root();
1996
1997 // If under overload conditions, force an immediate FQS scan.
1998 if (*gfp & RCU_GP_FLAG_OVLD)
1999 return true;
2000
2001 // Someone like call_rcu() requested a force-quiescent-state scan.
2002 *gfp = READ_ONCE(rcu_state.gp_flags);
2003 if (*gfp & RCU_GP_FLAG_FQS)
2004 return true;
2005
2006 // The current grace period has completed.
2007 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
2008 return true;
2009
2010 return false;
2011 }
2012
2013 /*
2014 * Do one round of quiescent-state forcing.
2015 */
2016 static void rcu_gp_fqs(bool first_time)
2017 {
2018 int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
2019 struct rcu_node *rnp = rcu_get_root();
2020
2021 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2022 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
2023
2024 WARN_ON_ONCE(nr_fqs > 3);
2025 /* Only countdown nr_fqs for stall purposes if jiffies moves. */
2026 if (nr_fqs) {
2027 if (nr_fqs == 1) {
2028 WRITE_ONCE(rcu_state.jiffies_stall,
2029 jiffies + rcu_jiffies_till_stall_check());
2030 }
2031 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
2032 }
2033
2034 if (first_time) {
2035 /* Collect dyntick-idle snapshots. */
2036 force_qs_rnp(rcu_watching_snap_save);
2037 } else {
2038 /* Handle dyntick-idle and offline CPUs. */
2039 force_qs_rnp(rcu_watching_snap_recheck);
2040 }
2041 /* Clear flag to prevent immediate re-entry. */
2042 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2043 raw_spin_lock_irq_rcu_node(rnp);
2044 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & ~RCU_GP_FLAG_FQS);
2045 raw_spin_unlock_irq_rcu_node(rnp);
2046 }
2047 }
2048
2049 /*
2050 * Loop doing repeated quiescent-state forcing until the grace period ends.
2051 */
2052 static noinline_for_stack void rcu_gp_fqs_loop(void)
2053 {
2054 bool first_gp_fqs = true;
2055 int gf = 0;
2056 unsigned long j;
2057 int ret;
2058 struct rcu_node *rnp = rcu_get_root();
2059
2060 j = READ_ONCE(jiffies_till_first_fqs);
2061 if (rcu_state.cbovld)
2062 gf = RCU_GP_FLAG_OVLD;
2063 ret = 0;
2064 for (;;) {
2065 if (rcu_state.cbovld) {
2066 j = (j + 2) / 3;
2067 if (j <= 0)
2068 j = 1;
2069 }
2070 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
2071 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
2072 /*
2073 * jiffies_force_qs before RCU_GP_WAIT_FQS state
2074 * update; required for stall checks.
2075 */
2076 smp_wmb();
2077 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
2078 jiffies + (j ? 3 * j : 2));
2079 }
2080 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2081 TPS("fqswait"));
2082 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
2083 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
2084 rcu_gp_fqs_check_wake(&gf), j);
2085 rcu_gp_torture_wait();
2086 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
2087 /* Locking provides needed memory barriers. */
2088 /*
2089 		 * If the root rcu_node structure indicates that the grace period
2090 		 * has ended, exit the loop. The rcu_preempt_blocked_readers_cgp(rnp) check
2091 * is required only for single-node rcu_node trees because readers blocking
2092 * the current grace period are queued only on leaf rcu_node structures.
2093 * For multi-node trees, checking the root node's ->qsmask suffices, because a
2094 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
2095 * the corresponding leaf nodes have passed through their quiescent state.
2096 */
2097 if (!READ_ONCE(rnp->qsmask) &&
2098 !rcu_preempt_blocked_readers_cgp(rnp))
2099 break;
2100 /* If time for quiescent-state forcing, do it. */
2101 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
2102 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
2103 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2104 TPS("fqsstart"));
2105 rcu_gp_fqs(first_gp_fqs);
2106 gf = 0;
2107 if (first_gp_fqs) {
2108 first_gp_fqs = false;
2109 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
2110 }
2111 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2112 TPS("fqsend"));
2113 cond_resched_tasks_rcu_qs();
2114 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2115 ret = 0; /* Force full wait till next FQS. */
2116 j = READ_ONCE(jiffies_till_next_fqs);
2117 } else {
2118 /* Deal with stray signal. */
2119 cond_resched_tasks_rcu_qs();
2120 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2121 WARN_ON(signal_pending(current));
2122 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2123 TPS("fqswaitsig"));
2124 ret = 1; /* Keep old FQS timing. */
2125 j = jiffies;
2126 if (time_after(jiffies, rcu_state.jiffies_force_qs))
2127 j = 1;
2128 else
2129 j = rcu_state.jiffies_force_qs - j;
2130 gf = 0;
2131 }
2132 }
2133 }
2134
2135 /*
2136 * Clean up after the old grace period.
2137 */
2138 static noinline void rcu_gp_cleanup(void)
2139 {
2140 int cpu;
2141 bool needgp = false;
2142 unsigned long gp_duration;
2143 unsigned long new_gp_seq;
2144 bool offloaded;
2145 struct rcu_data *rdp;
2146 struct rcu_node *rnp = rcu_get_root();
2147 struct swait_queue_head *sq;
2148
2149 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2150 raw_spin_lock_irq_rcu_node(rnp);
2151 rcu_state.gp_end = jiffies;
2152 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2153 if (gp_duration > rcu_state.gp_max)
2154 rcu_state.gp_max = gp_duration;
2155
2156 /*
2157 * We know the grace period is complete, but to everyone else
2158 * it appears to still be ongoing. But it is also the case
2159 * that to everyone else it looks like there is nothing that
2160 * they can do to advance the grace period. It is therefore
2161 * safe for us to drop the lock in order to mark the grace
2162 * period as completed in all of the rcu_node structures.
2163 */
2164 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
2165 raw_spin_unlock_irq_rcu_node(rnp);
2166
2167 /*
2168 * Propagate new ->gp_seq value to rcu_node structures so that
2169 * other CPUs don't have to wait until the start of the next grace
2170 * period to process their callbacks. This also avoids some nasty
2171 * RCU grace-period initialization races by forcing the end of
2172 * the current grace period to be completely recorded in all of
2173 * the rcu_node structures before the beginning of the next grace
2174 * period is recorded in any of the rcu_node structures.
2175 */
2176 new_gp_seq = rcu_state.gp_seq;
2177 rcu_seq_end(&new_gp_seq);
2178 rcu_for_each_node_breadth_first(rnp) {
2179 raw_spin_lock_irq_rcu_node(rnp);
2180 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2181 dump_blkd_tasks(rnp, 10);
2182 WARN_ON_ONCE(rnp->qsmask);
2183 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2184 if (!rnp->parent)
2185 smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
2186 rdp = this_cpu_ptr(&rcu_data);
2187 if (rnp == rdp->mynode)
2188 needgp = __note_gp_changes(rnp, rdp) || needgp;
2189 /* smp_mb() provided by prior unlock-lock pair. */
2190 needgp = rcu_future_gp_cleanup(rnp) || needgp;
2191 // Reset overload indication for CPUs no longer overloaded
2192 if (rcu_is_leaf_node(rnp))
2193 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2194 rdp = per_cpu_ptr(&rcu_data, cpu);
2195 check_cb_ovld_locked(rdp, rnp);
2196 }
2197 sq = rcu_nocb_gp_get(rnp);
2198 raw_spin_unlock_irq_rcu_node(rnp);
2199 rcu_nocb_gp_cleanup(sq);
2200 cond_resched_tasks_rcu_qs();
2201 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2202 rcu_gp_slow(gp_cleanup_delay);
2203 }
2204 rnp = rcu_get_root();
2205 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2206
2207 /* Declare grace period done, trace first to use old GP number. */
2208 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2209 rcu_seq_end(&rcu_state.gp_seq);
2210 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2211 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2212 /* Check for GP requests since above loop. */
2213 rdp = this_cpu_ptr(&rcu_data);
2214 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2215 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2216 TPS("CleanupMore"));
2217 needgp = true;
2218 }
2219 /* Advance CBs to reduce false positives below. */
2220 offloaded = rcu_rdp_is_offloaded(rdp);
2221 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2222
2223 // We get here if a grace period was needed (“needgp”)
2224 // and the above call to rcu_accelerate_cbs() did not set
2225 // the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
2226 // the need for another grace period). The purpose
2227 // of the “offloaded” check is to avoid invoking
2228 // rcu_accelerate_cbs() on an offloaded CPU because we do not
2229 // hold the ->nocb_lock needed to safely access an offloaded
2230 // ->cblist. We do not want to acquire that lock because
2231 // it can be heavily contended during callback floods.
2232
2233 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2234 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2235 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
2236 } else {
2237
2238 // We get here either if there is no need for an
2239 // additional grace period or if rcu_accelerate_cbs() has
2240 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
2241 // So all we need to do is to clear all of the other
2242 // ->gp_flags bits.
2243
2244 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2245 }
2246 raw_spin_unlock_irq_rcu_node(rnp);
2247
2248 // Make synchronize_rcu() users aware of the end of old grace period.
2249 rcu_sr_normal_gp_cleanup();
2250
2251 // If strict, make all CPUs aware of the end of the old grace period.
2252 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2253 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2254 }
2255
2256 /*
2257 * Body of kthread that handles grace periods.
2258 */
2259 static int __noreturn rcu_gp_kthread(void *unused)
2260 {
2261 rcu_bind_gp_kthread();
2262 for (;;) {
2263
2264 /* Handle grace-period start. */
2265 for (;;) {
2266 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2267 TPS("reqwait"));
2268 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2269 swait_event_idle_exclusive(rcu_state.gp_wq,
2270 READ_ONCE(rcu_state.gp_flags) &
2271 RCU_GP_FLAG_INIT);
2272 rcu_gp_torture_wait();
2273 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2274 /* Locking provides needed memory barrier. */
2275 if (rcu_gp_init())
2276 break;
2277 cond_resched_tasks_rcu_qs();
2278 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2279 WARN_ON(signal_pending(current));
2280 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2281 TPS("reqwaitsig"));
2282 }
2283
2284 /* Handle quiescent-state forcing. */
2285 rcu_gp_fqs_loop();
2286
2287 /* Handle grace-period end. */
2288 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2289 rcu_gp_cleanup();
2290 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
2291 }
2292 }
2293
2294 /*
2295 * Report a full set of quiescent states to the rcu_state data structure.
2296 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2297 * another grace period is required. Whether we wake the grace-period
2298 * kthread or it awakens itself for the next round of quiescent-state
2299 * forcing, that kthread will clean up after the just-completed grace
2300 * period. Note that the caller must hold rnp->lock, which is released
2301 * before return.
2302 */
2303 static void rcu_report_qs_rsp(unsigned long flags)
2304 __releases(rcu_get_root()->lock)
2305 {
2306 raw_lockdep_assert_held_rcu_node(rcu_get_root());
2307 WARN_ON_ONCE(!rcu_gp_in_progress());
2308 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2309 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2310 rcu_gp_kthread_wake();
2311 }
2312
2313 /*
2314 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2315 * Allows quiescent states for a group of CPUs to be reported at one go
2316 * to the specified rcu_node structure, though all the CPUs in the group
2317 * must be represented by the same rcu_node structure (which need not be a
2318 * leaf rcu_node structure, though it often will be). The gps parameter
2319 * is the grace-period snapshot, which means that the quiescent states
2320 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2321 * must be held upon entry, and it is released before return.
2322 *
2323 * As a special case, if mask is zero, the bit-already-cleared check is
2324 * disabled. This allows propagating quiescent state due to resumed tasks
2325 * during grace-period initialization.
2326 */
2327 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2328 unsigned long gps, unsigned long flags)
2329 __releases(rnp->lock)
2330 {
2331 unsigned long oldmask = 0;
2332 struct rcu_node *rnp_c;
2333
2334 raw_lockdep_assert_held_rcu_node(rnp);
2335
2336 /* Walk up the rcu_node hierarchy. */
2337 for (;;) {
2338 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2339
2340 /*
2341 * Our bit has already been cleared, or the
2342 * relevant grace period is already over, so done.
2343 */
2344 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2345 return;
2346 }
2347 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2348 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2349 rcu_preempt_blocked_readers_cgp(rnp));
2350 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2351 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2352 mask, rnp->qsmask, rnp->level,
2353 rnp->grplo, rnp->grphi,
2354 !!rnp->gp_tasks);
2355 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2356
2357 /* Other bits still set at this level, so done. */
2358 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2359 return;
2360 }
2361 rnp->completedqs = rnp->gp_seq;
2362 mask = rnp->grpmask;
2363 if (rnp->parent == NULL) {
2364
2365 /* No more levels. Exit loop holding root lock. */
2366
2367 break;
2368 }
2369 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2370 rnp_c = rnp;
2371 rnp = rnp->parent;
2372 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2373 oldmask = READ_ONCE(rnp_c->qsmask);
2374 }
2375
2376 /*
2377 * Get here if we are the last CPU to pass through a quiescent
2378 * state for this grace period. Invoke rcu_report_qs_rsp()
2379 * to clean up and start the next grace period if one is needed.
2380 */
2381 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2382 }
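/*
 * Worked example with a two-level tree and illustrative masks: a leaf
 * with ->qsmask == 0x3 that receives a report with mask == 0x1 is left
 * with 0x2 set, so the loop above returns early. When the remaining CPU
 * reports with mask == 0x2, the leaf's ->qsmask reaches zero, so the
 * loop ascends with mask = rnp->grpmask and clears this leaf's bit in
 * the root's ->qsmask. Once the root's ->qsmask reaches zero as well,
 * rcu_report_qs_rsp() wakes the grace-period kthread to clean up after
 * the now-ended grace period.
 */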
2383
2384 /*
2385 * Record a quiescent state for all tasks that were previously queued
2386 * on the specified rcu_node structure and that were blocking the current
2387 * RCU grace period. The caller must hold the corresponding rnp->lock with
2388 * irqs disabled, and this lock is released upon return, but irqs remain
2389 * disabled.
2390 */
2391 static void __maybe_unused
2392 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2393 __releases(rnp->lock)
2394 {
2395 unsigned long gps;
2396 unsigned long mask;
2397 struct rcu_node *rnp_p;
2398
2399 raw_lockdep_assert_held_rcu_node(rnp);
2400 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2401 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2402 rnp->qsmask != 0) {
2403 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2404 return; /* Still need more quiescent states! */
2405 }
2406
2407 rnp->completedqs = rnp->gp_seq;
2408 rnp_p = rnp->parent;
2409 if (rnp_p == NULL) {
2410 /*
2411 * Only one rcu_node structure in the tree, so don't
2412 * try to report up to its nonexistent parent!
2413 */
2414 rcu_report_qs_rsp(flags);
2415 return;
2416 }
2417
2418 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2419 gps = rnp->gp_seq;
2420 mask = rnp->grpmask;
2421 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2422 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
2423 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2424 }
2425
2426 /*
2427 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2428 * structure. This must be called from the specified CPU.
2429 */
2430 static void
2431 rcu_report_qs_rdp(struct rcu_data *rdp)
2432 {
2433 unsigned long flags;
2434 unsigned long mask;
2435 struct rcu_node *rnp;
2436
2437 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2438 rnp = rdp->mynode;
2439 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2440 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2441 rdp->gpwrap) {
2442
2443 /*
2444 * The grace period in which this quiescent state was
2445 * recorded has ended, so don't report it upwards.
2446 * We will instead need a new quiescent state that lies
2447 * within the current grace period.
2448 */
2449 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2450 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2451 return;
2452 }
2453 mask = rdp->grpmask;
2454 rdp->core_needs_qs = false;
2455 if ((rnp->qsmask & mask) == 0) {
2456 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2457 } else {
2458 /*
2459 		 * This GP can't end until this CPU checks in, so all of our
2460 * callbacks can be processed during the next GP.
2461 *
2462 * NOCB kthreads have their own way to deal with that...
2463 */
2464 if (!rcu_rdp_is_offloaded(rdp)) {
2465 /*
2466 * The current GP has not yet ended, so it
2467 * should not be possible for rcu_accelerate_cbs()
2468 * to return true. So complain, but don't awaken.
2469 */
2470 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2471 }
2472
2473 rcu_disable_urgency_upon_qs(rdp);
2474 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2475 /* ^^^ Released rnp->lock */
2476 }
2477 }
2478
2479 /*
2480 * Check to see if there is a new grace period of which this CPU
2481 * is not yet aware, and if so, set up local rcu_data state for it.
2482 * Otherwise, see if this CPU has just passed through its first
2483 * quiescent state for this grace period, and record that fact if so.
2484 */
2485 static void
2486 rcu_check_quiescent_state(struct rcu_data *rdp)
2487 {
2488 /* Check for grace-period ends and beginnings. */
2489 note_gp_changes(rdp);
2490
2491 /*
2492 * Does this CPU still need to do its part for current grace period?
2493 * If no, return and let the other CPUs do their part as well.
2494 */
2495 if (!rdp->core_needs_qs)
2496 return;
2497
2498 /*
2499 * Was there a quiescent state since the beginning of the grace
2500 * period? If no, then exit and wait for the next call.
2501 */
2502 if (rdp->cpu_no_qs.b.norm)
2503 return;
2504
2505 /*
2506 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2507 * judge of that).
2508 */
2509 rcu_report_qs_rdp(rdp);
2510 }
2511
2512 /* Return true if callback-invocation time limit exceeded. */
2513 static bool rcu_do_batch_check_time(long count, long tlimit,
2514 bool jlimit_check, unsigned long jlimit)
2515 {
2516 // Invoke local_clock() only once per 32 consecutive callbacks.
2517 return unlikely(tlimit) &&
2518 (!likely(count & 31) ||
2519 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) &&
2520 jlimit_check && time_after(jiffies, jlimit))) &&
2521 local_clock() >= tlimit;
2522 }
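/*
 * For example, with tlimit set, the relatively expensive call to
 * local_clock() happens only when count is a multiple of 32, unless
 * CONFIG_RCU_DOUBLE_CHECK_CB_TIME additionally permits the cheap
 * jiffies-based check to trigger it in between. This keeps the common
 * per-callback fast path free of fine-grained clock reads.
 */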
2523
2524 /*
2525 * Invoke any RCU callbacks that have made it to the end of their grace
2526 * period. Throttle as specified by rdp->blimit.
2527 */
2528 static void rcu_do_batch(struct rcu_data *rdp)
2529 {
2530 long bl;
2531 long count = 0;
2532 int div;
2533 bool __maybe_unused empty;
2534 unsigned long flags;
2535 unsigned long jlimit;
2536 bool jlimit_check = false;
2537 long pending;
2538 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2539 struct rcu_head *rhp;
2540 long tlimit = 0;
2541
2542 /* If no callbacks are ready, just return. */
2543 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2544 trace_rcu_batch_start(rcu_state.name,
2545 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2546 trace_rcu_batch_end(rcu_state.name, 0,
2547 !rcu_segcblist_empty(&rdp->cblist),
2548 need_resched(), is_idle_task(current),
2549 rcu_is_callbacks_kthread(rdp));
2550 return;
2551 }
2552
2553 /*
2554 * Extract the list of ready callbacks, disabling IRQs to prevent
2555 * races with call_rcu() from interrupt handlers. Leave the
2556 * callback counts, as rcu_barrier() needs to be conservative.
2557 *
2558 * Callbacks execution is fully ordered against preceding grace period
2559 * completion (materialized by rnp->gp_seq update) thanks to the
2560 * smp_mb__after_unlock_lock() upon node locking required for callbacks
2561 * advancing. In NOCB mode this ordering is then further relayed through
2562 * the nocb locking that protects both callbacks advancing and extraction.
2563 */
2564 rcu_nocb_lock_irqsave(rdp, flags);
2565 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2566 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2567 div = READ_ONCE(rcu_divisor);
2568 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2569 bl = max(rdp->blimit, pending >> div);
2570 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2571 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) {
2572 const long npj = NSEC_PER_SEC / HZ;
2573 long rrn = READ_ONCE(rcu_resched_ns);
2574
2575 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2576 tlimit = local_clock() + rrn;
2577 jlimit = jiffies + (rrn + npj + 1) / npj;
2578 jlimit_check = true;
2579 }
2580 trace_rcu_batch_start(rcu_state.name,
2581 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2582 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2583 if (rcu_rdp_is_offloaded(rdp))
2584 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2585
2586 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2587 rcu_nocb_unlock_irqrestore(rdp, flags);
2588
2589 /* Invoke callbacks. */
2590 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2591 rhp = rcu_cblist_dequeue(&rcl);
2592
2593 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2594 rcu_callback_t f;
2595
2596 count++;
2597 debug_rcu_head_unqueue(rhp);
2598
2599 rcu_lock_acquire(&rcu_callback_map);
2600 trace_rcu_invoke_callback(rcu_state.name, rhp);
2601
2602 f = rhp->func;
2603 debug_rcu_head_callback(rhp);
2604 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2605 f(rhp);
2606
2607 rcu_lock_release(&rcu_callback_map);
2608
2609 /*
2610 * Stop only if limit reached and CPU has something to do.
2611 */
2612 if (in_serving_softirq()) {
2613 if (count >= bl && (need_resched() || !is_idle_task(current)))
2614 break;
2615 /*
2616 * Make sure we don't spend too much time here and deprive other
2617 * softirq vectors of CPU cycles.
2618 */
2619 if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit))
2620 break;
2621 } else {
2622 // In rcuc/rcuoc context, so no worries about
2623 // depriving other softirq vectors of CPU cycles.
2624 local_bh_enable();
2625 lockdep_assert_irqs_enabled();
2626 cond_resched_tasks_rcu_qs();
2627 lockdep_assert_irqs_enabled();
2628 local_bh_disable();
2629 // But rcuc kthreads can delay quiescent-state
2630 // reporting, so check time limits for them.
2631 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2632 rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) {
2633 rdp->rcu_cpu_has_work = 1;
2634 break;
2635 }
2636 }
2637 }
2638
2639 rcu_nocb_lock_irqsave(rdp, flags);
2640 rdp->n_cbs_invoked += count;
2641 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2642 is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2643
2644 /* Update counts and requeue any remaining callbacks. */
2645 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2646 rcu_segcblist_add_len(&rdp->cblist, -count);
2647
2648 /* Reinstate batch limit if we have worked down the excess. */
2649 count = rcu_segcblist_n_cbs(&rdp->cblist);
2650 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2651 rdp->blimit = blimit;
2652
2653 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2654 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2655 rdp->qlen_last_fqs_check = 0;
2656 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2657 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2658 rdp->qlen_last_fqs_check = count;
2659
2660 /*
2661 * The following usually indicates a double call_rcu(). To track
2662 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2663 */
2664 empty = rcu_segcblist_empty(&rdp->cblist);
2665 WARN_ON_ONCE(count == 0 && !empty);
2666 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2667 count != 0 && empty);
2668 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2669 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2670
2671 rcu_nocb_unlock_irqrestore(rdp, flags);
2672
2673 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2674 }
2675
2676 /*
2677 * This function is invoked from each scheduling-clock interrupt,
2678 * and checks to see if this CPU is in a non-context-switch quiescent
2679 * state, for example, user mode or idle loop. It also schedules RCU
2680 * core processing. If the current grace period has gone on too long,
2681 * it will ask the scheduler to manufacture a context switch for the sole
2682 * purpose of providing the needed quiescent state.
2683 */
2684 void rcu_sched_clock_irq(int user)
2685 {
2686 unsigned long j;
2687
2688 if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2689 j = jiffies;
2690 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2691 __this_cpu_write(rcu_data.last_sched_clock, j);
2692 }
2693 trace_rcu_utilization(TPS("Start scheduler-tick"));
2694 lockdep_assert_irqs_disabled();
2695 raw_cpu_inc(rcu_data.ticks_this_gp);
2696 /* The load-acquire pairs with the store-release setting to true. */
2697 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2698 /* Idle and userspace execution already are quiescent states. */
2699 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2700 set_tsk_need_resched(current);
2701 set_preempt_need_resched();
2702 }
2703 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2704 }
2705 rcu_flavor_sched_clock_irq(user);
2706 if (rcu_pending(user))
2707 invoke_rcu_core();
2708 if (user || rcu_is_cpu_rrupt_from_idle())
2709 rcu_note_voluntary_context_switch(current);
2710 lockdep_assert_irqs_disabled();
2711
2712 trace_rcu_utilization(TPS("End scheduler-tick"));
2713 }
2714
2715 /*
2716 * Scan the leaf rcu_node structures. For each structure on which all
2717 * CPUs have reported a quiescent state and on which there are tasks
2718 * blocking the current grace period, initiate RCU priority boosting.
2719 * Otherwise, invoke the specified function to check dyntick state for
2720 * each CPU that has not yet reported a quiescent state.
2721 */
2722 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2723 {
2724 int cpu;
2725 unsigned long flags;
2726 struct rcu_node *rnp;
2727
2728 rcu_state.cbovld = rcu_state.cbovldnext;
2729 rcu_state.cbovldnext = false;
2730 rcu_for_each_leaf_node(rnp) {
2731 unsigned long mask = 0;
2732 unsigned long rsmask = 0;
2733
2734 cond_resched_tasks_rcu_qs();
2735 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2736 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2737 if (rnp->qsmask == 0) {
2738 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2739 /*
2740 * No point in scanning bits because they
2741 * are all zero. But we might need to
2742 * priority-boost blocked readers.
2743 */
2744 rcu_initiate_boost(rnp, flags);
2745 /* rcu_initiate_boost() releases rnp->lock */
2746 continue;
2747 }
2748 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2749 continue;
2750 }
2751 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2752 struct rcu_data *rdp;
2753 int ret;
2754
2755 rdp = per_cpu_ptr(&rcu_data, cpu);
2756 ret = f(rdp);
2757 if (ret > 0) {
2758 mask |= rdp->grpmask;
2759 rcu_disable_urgency_upon_qs(rdp);
2760 }
2761 if (ret < 0)
2762 rsmask |= rdp->grpmask;
2763 }
2764 if (mask != 0) {
2765 /* Idle/offline CPUs, report (releases rnp->lock). */
2766 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2767 } else {
2768 /* Nothing to do here, so just drop the lock. */
2769 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2770 }
2771
2772 for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
2773 resched_cpu(cpu);
2774 }
2775 }
2776
2777 /*
2778 * Force quiescent states on reluctant CPUs, and also detect which
2779 * CPUs are in dyntick-idle mode.
2780 */
2781 void rcu_force_quiescent_state(void)
2782 {
2783 unsigned long flags;
2784 bool ret;
2785 struct rcu_node *rnp;
2786 struct rcu_node *rnp_old = NULL;
2787
2788 if (!rcu_gp_in_progress())
2789 return;
2790 /* Funnel through hierarchy to reduce memory contention. */
2791 rnp = raw_cpu_read(rcu_data.mynode);
2792 for (; rnp != NULL; rnp = rnp->parent) {
2793 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2794 !raw_spin_trylock(&rnp->fqslock);
2795 if (rnp_old != NULL)
2796 raw_spin_unlock(&rnp_old->fqslock);
2797 if (ret)
2798 return;
2799 rnp_old = rnp;
2800 }
2801 /* rnp_old == rcu_get_root(), rnp == NULL. */
2802
2803 /* Reached the root of the rcu_node tree, acquire lock. */
2804 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2805 raw_spin_unlock(&rnp_old->fqslock);
2806 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2807 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2808 return; /* Someone beat us to it. */
2809 }
2810 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2811 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2812 rcu_gp_kthread_wake();
2813 }
2814 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
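/*
 * The loop above implements funnel locking: each caller climbs from its
 * leaf rcu_node structure toward the root, at each level either bailing
 * out (because RCU_GP_FLAG_FQS is already set or the next level's
 * ->fqslock is contended) or acquiring the next level's ->fqslock and
 * releasing the previous one. At most one caller per subtree can
 * therefore reach the root, so a flood of concurrent
 * rcu_force_quiescent_state() calls produces one FQS request rather
 * than one per caller.
 */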
2815
2816 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2817 // grace periods.
2818 static void strict_work_handler(struct work_struct *work)
2819 {
2820 rcu_read_lock();
2821 rcu_read_unlock();
2822 }
2823
2824 /* Perform RCU core processing work for the current CPU. */
2825 static __latent_entropy void rcu_core(void)
2826 {
2827 unsigned long flags;
2828 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2829 struct rcu_node *rnp = rdp->mynode;
2830
2831 if (cpu_is_offline(smp_processor_id()))
2832 return;
2833 trace_rcu_utilization(TPS("Start RCU core"));
2834 WARN_ON_ONCE(!rdp->beenonline);
2835
2836 /* Report any deferred quiescent states if preemption enabled. */
2837 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2838 rcu_preempt_deferred_qs(current);
2839 } else if (rcu_preempt_need_deferred_qs(current)) {
2840 set_tsk_need_resched(current);
2841 set_preempt_need_resched();
2842 }
2843
2844 /* Update RCU state based on any recent quiescent states. */
2845 rcu_check_quiescent_state(rdp);
2846
2847 /* No grace period and unregistered callbacks? */
2848 if (!rcu_gp_in_progress() &&
2849 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
2850 local_irq_save(flags);
2851 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2852 rcu_accelerate_cbs_unlocked(rnp, rdp);
2853 local_irq_restore(flags);
2854 }
2855
2856 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2857
2858 /* If there are callbacks ready, invoke them. */
2859 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2860 likely(READ_ONCE(rcu_scheduler_fully_active))) {
2861 rcu_do_batch(rdp);
2862 /* Re-invoke RCU core processing if there are callbacks remaining. */
2863 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2864 invoke_rcu_core();
2865 }
2866
2867 /* Do any needed deferred wakeups of rcuo kthreads. */
2868 do_nocb_deferred_wakeup(rdp);
2869 trace_rcu_utilization(TPS("End RCU core"));
2870
2871 // If strict GPs, schedule an RCU reader in a clean environment.
2872 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2873 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2874 }
2875
2876 static void rcu_core_si(void)
2877 {
2878 rcu_core();
2879 }
2880
2881 static void rcu_wake_cond(struct task_struct *t, int status)
2882 {
2883 /*
2884 * If the thread is yielding, only wake it when this
2885 	 * is invoked from idle.
2886 */
2887 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2888 wake_up_process(t);
2889 }
2890
2891 static void invoke_rcu_core_kthread(void)
2892 {
2893 struct task_struct *t;
2894 unsigned long flags;
2895
2896 local_irq_save(flags);
2897 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2898 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2899 if (t != NULL && t != current)
2900 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2901 local_irq_restore(flags);
2902 }
2903
2904 /*
2905 * Wake up this CPU's rcuc kthread to do RCU core processing.
2906 */
2907 static void invoke_rcu_core(void)
2908 {
2909 if (!cpu_online(smp_processor_id()))
2910 return;
2911 if (use_softirq)
2912 raise_softirq(RCU_SOFTIRQ);
2913 else
2914 invoke_rcu_core_kthread();
2915 }
2916
2917 static void rcu_cpu_kthread_park(unsigned int cpu)
2918 {
2919 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2920 }
2921
2922 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2923 {
2924 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2925 }
2926
2927 /*
2928 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2929 * the RCU softirq used in configurations of RCU that do not support RCU
2930 * priority boosting.
2931 */
2932 static void rcu_cpu_kthread(unsigned int cpu)
2933 {
2934 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2935 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2936 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2937 int spincnt;
2938
2939 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2940 for (spincnt = 0; spincnt < 10; spincnt++) {
2941 WRITE_ONCE(*j, jiffies);
2942 local_bh_disable();
2943 *statusp = RCU_KTHREAD_RUNNING;
2944 local_irq_disable();
2945 work = *workp;
2946 WRITE_ONCE(*workp, 0);
2947 local_irq_enable();
2948 if (work)
2949 rcu_core();
2950 local_bh_enable();
2951 if (!READ_ONCE(*workp)) {
2952 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2953 *statusp = RCU_KTHREAD_WAITING;
2954 return;
2955 }
2956 }
2957 *statusp = RCU_KTHREAD_YIELDING;
2958 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2959 schedule_timeout_idle(2);
2960 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2961 *statusp = RCU_KTHREAD_WAITING;
2962 WRITE_ONCE(*j, jiffies);
2963 }
2964
2965 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2966 .store = &rcu_data.rcu_cpu_kthread_task,
2967 .thread_should_run = rcu_cpu_kthread_should_run,
2968 .thread_fn = rcu_cpu_kthread,
2969 .thread_comm = "rcuc/%u",
2970 .setup = rcu_cpu_kthread_setup,
2971 .park = rcu_cpu_kthread_park,
2972 };
2973
2974 /*
2975 * Spawn per-CPU RCU core processing kthreads.
2976 */
2977 static int __init rcu_spawn_core_kthreads(void)
2978 {
2979 int cpu;
2980
2981 for_each_possible_cpu(cpu)
2982 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2983 if (use_softirq)
2984 return 0;
2985 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2986 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2987 return 0;
2988 }
2989
2990 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func)
2991 {
2992 rcu_segcblist_enqueue(&rdp->cblist, head);
2993 trace_rcu_callback(rcu_state.name, head,
2994 rcu_segcblist_n_cbs(&rdp->cblist));
2995 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2996 }
2997
2998 /*
2999 * Handle any core-RCU processing required by a call_rcu() invocation.
3000 */
3001 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
3002 rcu_callback_t func, unsigned long flags)
3003 {
3004 rcutree_enqueue(rdp, head, func);
3005 /*
3006 * If called from an extended quiescent state, invoke the RCU
3007 * core in order to force a re-evaluation of RCU's idleness.
3008 */
3009 if (!rcu_is_watching())
3010 invoke_rcu_core();
3011
3012 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
3013 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
3014 return;
3015
3016 /*
3017 * Force the grace period if too many callbacks or too long waiting.
3018 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
3019 * if some other CPU has recently done so. Also, don't bother
3020 * invoking rcu_force_quiescent_state() if the newly enqueued callback
3021 * is the only one waiting for a grace period to complete.
3022 */
3023 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
3024 rdp->qlen_last_fqs_check + qhimark)) {
3025
3026 /* Are we ignoring a completed grace period? */
3027 note_gp_changes(rdp);
3028
3029 /* Start a new grace period if one not already started. */
3030 if (!rcu_gp_in_progress()) {
3031 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
3032 } else {
3033 /* Give the grace period a kick. */
3034 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
3035 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
3036 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
3037 rcu_force_quiescent_state();
3038 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
3039 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
3040 }
3041 }
3042 }
3043
3044 /*
3045 * RCU callback function to leak a callback.
3046 */
3047 static void rcu_leak_callback(struct rcu_head *rhp)
3048 {
3049 }
3050
3051 /*
3052 * Check and if necessary update the leaf rcu_node structure's
3053 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3054 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
3055 * structure's ->lock.
3056 */
3057 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
3058 {
3059 raw_lockdep_assert_held_rcu_node(rnp);
3060 if (qovld_calc <= 0)
3061 return; // Early boot and wildcard value set.
3062 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
3063 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
3064 else
3065 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
3066 }
3067
3068 /*
3069 * Check and if necessary update the leaf rcu_node structure's
3070 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3071 * number of queued RCU callbacks. No locks need be held, but the
3072 * caller must have disabled interrupts.
3073 *
3074 * Note that this function ignores the possibility that there are a lot
3075 * of callbacks all of which have already seen the end of their respective
3076 * grace periods. This omission is due to the need for no-CBs CPUs to
3077 * be holding ->nocb_lock to do this check, which is too heavy for a
3078 * common-case operation.
3079 */
3080 static void check_cb_ovld(struct rcu_data *rdp)
3081 {
3082 struct rcu_node *const rnp = rdp->mynode;
3083
3084 if (qovld_calc <= 0 ||
3085 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
3086 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3087 return; // Early boot wildcard value or already set correctly.
3088 raw_spin_lock_rcu_node(rnp);
3089 check_cb_ovld_locked(rdp, rnp);
3090 raw_spin_unlock_rcu_node(rnp);
3091 }
3092
3093 static void
3094 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
3095 {
3096 static atomic_t doublefrees;
3097 unsigned long flags;
3098 bool lazy;
3099 struct rcu_data *rdp;
3100
3101 /* Misaligned rcu_head! */
3102 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3103
3104 /* Avoid NULL dereference if callback is NULL. */
3105 if (WARN_ON_ONCE(!func))
3106 return;
3107
3108 if (debug_rcu_head_queue(head)) {
3109 /*
3110 * Probable double call_rcu(), so leak the callback.
3111 * Use rcu:rcu_callback trace event to find the previous
3112 * time callback was passed to call_rcu().
3113 */
3114 if (atomic_inc_return(&doublefrees) < 4) {
3115 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
3116 mem_dump_obj(head);
3117 }
3118 WRITE_ONCE(head->func, rcu_leak_callback);
3119 return;
3120 }
3121 head->func = func;
3122 head->next = NULL;
3123 kasan_record_aux_stack(head);
3124
3125 local_irq_save(flags);
3126 rdp = this_cpu_ptr(&rcu_data);
3127 RCU_LOCKDEP_WARN(!rcu_rdp_cpu_online(rdp), "Callback enqueued on offline CPU!");
3128
3129 lazy = lazy_in && !rcu_async_should_hurry();
3130
3131 /* Add the callback to our list. */
3132 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3133 // This can trigger due to call_rcu() from offline CPU:
3134 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
3135 WARN_ON_ONCE(!rcu_is_watching());
3136 // Very early boot, before rcu_init(). Initialize if needed
3137 // and then drop through to queue the callback.
3138 if (rcu_segcblist_empty(&rdp->cblist))
3139 rcu_segcblist_init(&rdp->cblist);
3140 }
3141
3142 check_cb_ovld(rdp);
3143
3144 if (unlikely(rcu_rdp_is_offloaded(rdp)))
3145 call_rcu_nocb(rdp, head, func, flags, lazy);
3146 else
3147 call_rcu_core(rdp, head, func, flags);
3148 local_irq_restore(flags);
3149 }
3150
3151 #ifdef CONFIG_RCU_LAZY
3152 static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF);
3153 module_param(enable_rcu_lazy, bool, 0444);
3154
3155 /**
3156 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
3157 * flush all lazy callbacks (including the new one) to the main ->cblist while
3158 * doing so.
3159 *
3160 * @head: structure to be used for queueing the RCU updates.
3161 * @func: actual callback function to be invoked after the grace period
3162 *
3163 * The callback function will be invoked some time after a full grace
3164 * period elapses, in other words after all pre-existing RCU read-side
3165 * critical sections have completed.
3166 *
3167 * Use this API instead of call_rcu() if you don't want the callback to be
3168 * delayed for very long periods of time, which can happen on systems without
3169 * memory pressure and on systems which are lightly loaded or mostly idle.
3170 * This function will cause callbacks to be invoked sooner than later at the
3171 * expense of extra power. Other than that, this function is identical to, and
3172 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
3173 * ordering and other functionality.
3174 */
3175 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
3176 {
3177 __call_rcu_common(head, func, false);
3178 }
3179 EXPORT_SYMBOL_GPL(call_rcu_hurry);
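
/*
 * Illustrative sketch, not kernel code: a caller that cannot tolerate
 * lazy-callback delay might use call_rcu_hurry() like this. The names
 * foo, foo_reclaim(), and foo_release() are hypothetical.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	static void foo_release(struct foo *fp)
 *	{
 *		call_rcu_hurry(&fp->rh, foo_reclaim); // Bypass laziness.
 *	}
 */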
3180 #else
3181 #define enable_rcu_lazy false
3182 #endif
3183
3184 /**
3185 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3186 * By default the callbacks are 'lazy' and are kept hidden from the main
3187 * ->cblist to prevent starting of grace periods too soon.
3188 * If you desire grace periods to start very soon, use call_rcu_hurry().
3189 *
3190 * @head: structure to be used for queueing the RCU updates.
3191 * @func: actual callback function to be invoked after the grace period
3192 *
3193 * The callback function will be invoked some time after a full grace
3194 * period elapses, in other words after all pre-existing RCU read-side
3195 * critical sections have completed. However, the callback function
3196 * might well execute concurrently with RCU read-side critical sections
3197 * that started after call_rcu() was invoked.
3198 *
3199 * It is perfectly legal to repost an RCU callback, potentially with
3200 * a different callback function, from within its callback function.
3201 * The specified function will be invoked after another full grace period
3202 * has elapsed. This use case is similar in form to the common practice
3203 * of reposting a timer from within its own handler.
3204 *
3205 * RCU read-side critical sections are delimited by rcu_read_lock()
3206 * and rcu_read_unlock(), and may be nested. In addition, but only in
3207 * v5.0 and later, regions of code across which interrupts, preemption,
3208 * or softirqs have been disabled also serve as RCU read-side critical
3209 * sections. This includes hardware interrupt handlers, softirq handlers,
3210 * and NMI handlers.
3211 *
3212 * Note that all CPUs must agree that the grace period extended beyond
3213 * all pre-existing RCU read-side critical sections. On systems with more
3214 * than one CPU, this means that when "func()" is invoked, each CPU is
3215 * guaranteed to have executed a full memory barrier since the end of its
3216 * last RCU read-side critical section whose beginning preceded the call
3217 * to call_rcu(). It also means that each CPU executing an RCU read-side
3218 * critical section that continues beyond the start of "func()" must have
3219 * executed a memory barrier after the call_rcu() but before the beginning
3220 * of that RCU read-side critical section. Note that these guarantees
3221 * include CPUs that are offline, idle, or executing in user mode, as
3222 * well as CPUs that are executing in the kernel.
3223 *
3224 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3225 * resulting RCU callback function "func()", then both CPU A and CPU B are
3226 * guaranteed to execute a full memory barrier during the time interval
3227 * between the call to call_rcu() and the invocation of "func()" -- even
3228 * if CPU A and CPU B are the same CPU (but again only if the system has
3229 * more than one CPU).
3230 *
3231 * Implementation of these memory-ordering guarantees is described here:
3232 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3233 *
3234 * Specific to call_rcu() (as opposed to the other call_rcu*() functions),
3235 * in kernels built with CONFIG_RCU_LAZY=y, call_rcu() might delay for many
3236 * seconds before starting the grace period needed by the corresponding
3237 * callback. This delay can significantly improve energy-efficiency
3238 * on low-utilization battery-powered devices. To avoid this delay,
3239 * in latency-sensitive kernel code, use call_rcu_hurry().
3240 */
3241 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3242 {
3243 __call_rcu_common(head, func, enable_rcu_lazy);
3244 }
3245 EXPORT_SYMBOL_GPL(call_rcu);
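
/*
 * Illustrative sketch, not kernel code: the classic update-side pattern,
 * unpublishing an element and deferring its free with call_rcu(). The
 * names foo, foo_free_rcu(), and foo_del() are hypothetical, and the
 * caller is assumed to hold the update-side lock.
 *
 *	static void foo_free_rcu(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	static void foo_del(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);	 // Unpublish the element...
 *		call_rcu(&fp->rh, foo_free_rcu); // ...then free it after a GP.
 *	}
 */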
3246
3247 /*
3248 * During early boot, any blocking grace-period wait automatically
3249 * implies a grace period.
3250 *
3251 * Later on, this could in theory be the case for kernels built with
3252 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3253 * is not a common case. Furthermore, this optimization would cause
3254 * the rcu_gp_oldstate structure to expand by 50%, so this potential
3255 * grace-period optimization is ignored once the scheduler is running.
3256 */
3257 static int rcu_blocking_is_gp(void)
3258 {
3259 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) {
3260 might_sleep();
3261 return false;
3262 }
3263 return true;
3264 }
3265
3266 /*
3267 * Helper function for the synchronize_rcu() API.
3268 */
3269 static void synchronize_rcu_normal(void)
3270 {
3271 struct rcu_synchronize rs;
3272
3273 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("request"));
3274
3275 if (READ_ONCE(rcu_normal_wake_from_gp) < 1) {
3276 wait_rcu_gp(call_rcu_hurry);
3277 goto trace_complete_out;
3278 }
3279
3280 init_rcu_head_on_stack(&rs.head);
3281 init_completion(&rs.completion);
3282
3283 /*
3284 * This code might be preempted, therefore take a GP
3285 * snapshot before adding a request.
3286 */
3287 if (IS_ENABLED(CONFIG_PROVE_RCU))
3288 get_state_synchronize_rcu_full(&rs.oldstate);
3289
3290 rcu_sr_normal_add_req(&rs);
3291
3292 /* Kick a GP and start waiting. */
3293 (void) start_poll_synchronize_rcu();
3294
3295 /* Now we can wait. */
3296 wait_for_completion(&rs.completion);
3297 destroy_rcu_head_on_stack(&rs.head);
3298
3299 trace_complete_out:
3300 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("complete"));
3301 }
3302
3303 /**
3304 * synchronize_rcu - wait until a grace period has elapsed.
3305 *
3306 * Control will return to the caller some time after a full grace
3307 * period has elapsed, in other words after all currently executing RCU
3308 * read-side critical sections have completed. Note, however, that
3309 * upon return from synchronize_rcu(), the caller might well be executing
3310 * concurrently with new RCU read-side critical sections that began while
3311 * synchronize_rcu() was waiting.
3312 *
3313 * RCU read-side critical sections are delimited by rcu_read_lock()
3314 * and rcu_read_unlock(), and may be nested. In addition, but only in
3315 * v5.0 and later, regions of code across which interrupts, preemption,
3316 * or softirqs have been disabled also serve as RCU read-side critical
3317 * sections. This includes hardware interrupt handlers, softirq handlers,
3318 * and NMI handlers.
3319 *
3320 * Note that this guarantee implies further memory-ordering guarantees.
3321 * On systems with more than one CPU, when synchronize_rcu() returns,
3322 * each CPU is guaranteed to have executed a full memory barrier since
3323 * the end of its last RCU read-side critical section whose beginning
3324 * preceded the call to synchronize_rcu(). In addition, each CPU having
3325 * an RCU read-side critical section that extends beyond the return from
3326 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3327 * after the beginning of synchronize_rcu() and before the beginning of
3328 * that RCU read-side critical section. Note that these guarantees include
3329 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3330 * that are executing in the kernel.
3331 *
3332 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3333 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3334 * to have executed a full memory barrier during the execution of
3335 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3336 * again only if the system has more than one CPU).
3337 *
3338 * Implementation of these memory-ordering guarantees is described here:
3339 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3340 */
3341 void synchronize_rcu(void)
3342 {
3343 unsigned long flags;
3344 struct rcu_node *rnp;
3345
3346 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3347 lock_is_held(&rcu_lock_map) ||
3348 lock_is_held(&rcu_sched_lock_map),
3349 "Illegal synchronize_rcu() in RCU read-side critical section");
3350 if (!rcu_blocking_is_gp()) {
3351 if (rcu_gp_is_expedited())
3352 synchronize_rcu_expedited();
3353 else
3354 synchronize_rcu_normal();
3355 return;
3356 }
3357
3358 // Context allows vacuous grace periods.
3359 // Note well that this code runs with !PREEMPT && !SMP.
3360 // In addition, all code that advances grace periods runs at
3361 // process level. Therefore, this normal GP overlaps with other
3362 // normal GPs only by being fully nested within them, which allows
3363 // reuse of ->gp_seq_polled_snap.
3364 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3365 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3366
3367 // Update the normal grace-period counters to record
3368 // this grace period, but only those used by the boot CPU.
3369 // The rcu_scheduler_starting() will take care of the rest of
3370 // these counters.
3371 local_irq_save(flags);
3372 WARN_ON_ONCE(num_online_cpus() > 1);
3373 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3374 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3375 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3376 local_irq_restore(flags);
3377 }
3378 EXPORT_SYMBOL_GPL(synchronize_rcu);
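
/*
 * Illustrative sketch, not kernel code: the blocking counterpart of the
 * call_rcu() pattern, for update-side contexts that are allowed to sleep.
 * The names foo and foo_del_sync() are hypothetical.
 *
 *	static void foo_del_sync(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);  // Unpublish the element.
 *		synchronize_rcu();	  // Wait out pre-existing readers.
 *		kfree(fp);		  // No reader can now hold a reference.
 *	}
 */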
3379
3380 /**
3381 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3382 * @rgosp: Place to put state cookie
3383 *
3384 * Stores into @rgosp a value that will always be treated by functions
3385 * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3386 * has already completed.
3387 */
3388 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3389 {
3390 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3391 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3392 }
3393 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
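
/*
 * Illustrative sketch, not kernel code: one use for a pre-completed
 * cookie is seeding per-object state so that the very first poll reports
 * completion, for example at (hypothetical) object-initialization time:
 *
 *	get_completed_synchronize_rcu_full(&obj->rgos);
 *	...
 *	if (poll_state_synchronize_rcu_full(&obj->rgos))
 *		obj_can_be_reclaimed(obj);	// True on the first poll.
 */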
3394
3395 /**
3396 * get_state_synchronize_rcu - Snapshot current RCU state
3397 *
3398 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3399 * or poll_state_synchronize_rcu() to determine whether or not a full
3400 * grace period has elapsed in the meantime.
3401 */
3402 unsigned long get_state_synchronize_rcu(void)
3403 {
3404 /*
3405 * Any prior manipulation of RCU-protected data must happen
3406 * before the load from ->gp_seq.
3407 */
3408 smp_mb(); /* ^^^ */
3409 return rcu_seq_snap(&rcu_state.gp_seq_polled);
3410 }
3411 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3412
3413 /**
3414 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3415 * @rgosp: location to place combined normal/expedited grace-period state
3416 *
3417 * Places the normal and expedited grace-period states in @rgosp. This
3418 * state value can be passed to a later call to cond_synchronize_rcu_full()
3419 * or poll_state_synchronize_rcu_full() to determine whether or not a
3420 * grace period (whether normal or expedited) has elapsed in the meantime.
3421 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3422 * long, but is guaranteed to see all grace periods. In contrast, the
3423 * combined state occupies less memory, but can sometimes fail to take
3424 * grace periods into account.
3425 *
3426 * This does not guarantee that the needed grace period will actually
3427 * start.
3428 */
3429 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3430 {
3431 /*
3432 * Any prior manipulation of RCU-protected data must happen
3433 * before the loads from ->gp_seq and ->expedited_sequence.
3434 */
3435 smp_mb(); /* ^^^ */
3436
3437 // Yes, rcu_state.gp_seq, not rnp_root->gp_seq, the latter's use
3438 // in poll_state_synchronize_rcu_full() notwithstanding. Use of
3439 // the latter here would result in too-short grace periods due to
3440 // interactions with newly onlined CPUs.
3441 rgosp->rgos_norm = rcu_seq_snap(&rcu_state.gp_seq);
3442 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3443 }
3444 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3445
3446 /*
3447 * Helper function for start_poll_synchronize_rcu() and
3448 * start_poll_synchronize_rcu_full().
3449 */
3450 static void start_poll_synchronize_rcu_common(void)
3451 {
3452 unsigned long flags;
3453 bool needwake;
3454 struct rcu_data *rdp;
3455 struct rcu_node *rnp;
3456
3457 local_irq_save(flags);
3458 rdp = this_cpu_ptr(&rcu_data);
3459 rnp = rdp->mynode;
3460 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3461 // Note it is possible for a grace period to have elapsed between
3462 // the above call to get_state_synchronize_rcu() and the below call
3463 // to rcu_seq_snap. This is OK, the worst that happens is that we
3464 // get a grace period that no one needed. These accesses are ordered
3465 // by smp_mb(), and we are accessing them in the opposite order
3466 // from which they are updated at grace-period start, as required.
3467 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3468 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3469 if (needwake)
3470 rcu_gp_kthread_wake();
3471 }
3472
3473 /**
3474 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3475 *
3476 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3477 * or poll_state_synchronize_rcu() to determine whether or not a full
3478 * grace period has elapsed in the meantime. If the needed grace period
3479 * is not already slated to start, notifies RCU core of the need for that
3480 * grace period.
3481 */
3482 unsigned long start_poll_synchronize_rcu(void)
3483 {
3484 unsigned long gp_seq = get_state_synchronize_rcu();
3485
3486 start_poll_synchronize_rcu_common();
3487 return gp_seq;
3488 }
3489 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3490
3491 /**
3492 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3493 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3494 *
3495 * Places the normal and expedited grace-period states in @rgosp. This
3496 * state value can be passed to a later call to cond_synchronize_rcu_full()
3497 * or poll_state_synchronize_rcu_full() to determine whether or not a
3498 * grace period (whether normal or expedited) has elapsed in the meantime.
3499 * If the needed grace period is not already slated to start, notifies
3500 * RCU core of the need for that grace period.
3501 */
3502 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3503 {
3504 get_state_synchronize_rcu_full(rgosp);
3505
3506 start_poll_synchronize_rcu_common();
3507 }
3508 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3509
3510 /**
3511 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3512 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3513 *
3514 * If a full RCU grace period has elapsed since the earlier call from
3515 * which @oldstate was obtained, return @true, otherwise return @false.
3516 * If @false is returned, it is the caller's responsibility to invoke this
3517 * function later on until it does return @true. Alternatively, the caller
3518 * can explicitly wait for a grace period, for example, by passing @oldstate
3519 * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3520 * on the one hand or by directly invoking either synchronize_rcu() or
3521 * synchronize_rcu_expedited() on the other.
3522 *
3523 * Yes, this function does not take counter wrap into account.
3524 * But counter wrap is harmless. If the counter wraps, we have waited for
3525 * more than a billion grace periods (and way more on a 64-bit system!).
3526 * Those needing to keep old state values for very long time periods
3527 * (many hours even on 32-bit systems) should check them occasionally and
3528 * either refresh them or set a flag indicating that the grace period has
3529 * completed. Alternatively, they can use get_completed_synchronize_rcu()
3530 * to get a guaranteed-completed grace-period state.
3531 *
3532 * In addition, because oldstate compresses the grace-period state for
3533 * both normal and expedited grace periods into a single unsigned long,
3534 * it can miss a grace period when synchronize_rcu() runs concurrently
3535 * with synchronize_rcu_expedited(). If this is unacceptable, please
3536 * instead use the _full() variant of these polling APIs.
3537 *
3538 * This function provides the same memory-ordering guarantees that
3539 * would be provided by a synchronize_rcu() that was invoked at the call
3540 * to the function that provided @oldstate, and that returned at the end
3541 * of this function.
3542 */
3543 bool poll_state_synchronize_rcu(unsigned long oldstate)
3544 {
3545 if (oldstate == RCU_GET_STATE_COMPLETED ||
3546 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3547 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3548 return true;
3549 }
3550 return false;
3551 }
3552 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
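
/*
 * Illustrative sketch, not kernel code: polled grace-period usage. The
 * element is unpublished first, the cookie is taken, and the caller
 * blocks only if the grace period has not elapsed by reclaim time. The
 * names fp and do_other_useful_work() are hypothetical.
 *
 *	unsigned long cookie;
 *
 *	list_del_rcu(&fp->list);		// Unpublish first.
 *	cookie = start_poll_synchronize_rcu();	// Snapshot and kick a GP.
 *	do_other_useful_work();
 *	if (!poll_state_synchronize_rcu(cookie))
 *		synchronize_rcu();	// Or simply cond_synchronize_rcu(cookie).
 *	kfree(fp);
 */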
3553
3554 /**
3555 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3556 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3557 *
3558 * If a full RCU grace period has elapsed since the earlier call from
3559 * which *rgosp was obtained, return @true, otherwise return @false.
3560 * If @false is returned, it is the caller's responsibility to invoke this
3561 * function later on until it does return @true. Alternatively, the caller
3562 * can explicitly wait for a grace period, for example, by passing @rgosp
3563 * to cond_synchronize_rcu_full() or by directly invoking synchronize_rcu().
3564 *
3565 * Yes, this function does not take counter wrap into account.
3566 * But counter wrap is harmless. If the counter wraps, we have waited
3567 * for more than a billion grace periods (and way more on a 64-bit
3568 * system!). Those needing to keep rcu_gp_oldstate values for very
3569 * long time periods (many hours even on 32-bit systems) should check
3570 * them occasionally and either refresh them or set a flag indicating
3571 * that the grace period has completed. Alternatively, they can use
3572 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3573 * grace-period state.
3574 *
3575 * This function provides the same memory-ordering guarantees that would
3576 * be provided by a synchronize_rcu() that was invoked at the call to
3577 * the function that provided @rgosp, and that returned at the end of this
3578 * function. And this guarantee requires that the root rcu_node structure's
3579 * ->gp_seq field be checked instead of that of the rcu_state structure.
3580 * The problem is that the just-ending grace-period's callbacks can be
3581 * invoked between the time that the root rcu_node structure's ->gp_seq
3582 * field is updated and the time that the rcu_state structure's ->gp_seq
3583 * field is updated. Therefore, if a single synchronize_rcu() is to
3584 * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3585 * then the root rcu_node structure is the one that needs to be polled.
3586 */
3587 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3588 {
3589 struct rcu_node *rnp = rcu_get_root();
3590
3591 smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3592 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3593 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3594 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3595 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3596 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3597 return true;
3598 }
3599 return false;
3600 }
3601 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
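
/*
 * Illustrative sketch, not kernel code: the _full() variants trade a
 * larger cookie for immunity to the normal/expedited aliasing described
 * above. The variable rgos is hypothetical.
 *
 *	struct rcu_gp_oldstate rgos;
 *
 *	start_poll_synchronize_rcu_full(&rgos);	// Snapshot and kick a GP.
 *	...
 *	if (!poll_state_synchronize_rcu_full(&rgos))
 *		cond_synchronize_rcu_full(&rgos); // Block if still too soon.
 */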
3602
3603 /**
3604 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3605 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3606 *
3607 * If a full RCU grace period has elapsed since the earlier call to
3608 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3609 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3610 *
3611 * Yes, this function does not take counter wrap into account.
3612 * But counter wrap is harmless. If the counter wraps, we have waited for
3613 * more than 2 billion grace periods (and way more on a 64-bit system!),
3614 * so waiting for a couple of additional grace periods should be just fine.
3615 *
3616 * This function provides the same memory-ordering guarantees that
3617 * would be provided by a synchronize_rcu() that was invoked at the call
3618 * to the function that provided @oldstate and that returned at the end
3619 * of this function.
3620 */
3621 void cond_synchronize_rcu(unsigned long oldstate)
3622 {
3623 if (!poll_state_synchronize_rcu(oldstate))
3624 synchronize_rcu();
3625 }
3626 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3627
3628 /**
3629 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3630 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3631 *
3632 * If a full RCU grace period has elapsed since the call to
3633 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3634 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3635 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait
3636 * for a full grace period.
3637 *
3638 * Yes, this function does not take counter wrap into account.
3639 * But counter wrap is harmless. If the counter wraps, we have waited for
3640 * more than 2 billion grace periods (and way more on a 64-bit system!),
3641 * so waiting for a couple of additional grace periods should be just fine.
3642 *
3643 * This function provides the same memory-ordering guarantees that
3644 * would be provided by a synchronize_rcu() that was invoked at the call
3645 * to the function that provided @rgosp and that returned at the end of
3646 * this function.
3647 */
3648 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3649 {
3650 if (!poll_state_synchronize_rcu_full(rgosp))
3651 synchronize_rcu();
3652 }
3653 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3654
3655 /*
3656 * Check to see if there is any immediate RCU-related work to be done by
3657 * the current CPU, returning 1 if so and zero otherwise. The checks are
3658 * in order of increasing expense: checks that can be carried out against
3659 * CPU-local state are performed first. However, we must check for CPU
3660 * stalls first, else we might not get a chance.
3661 */
3662 static int rcu_pending(int user)
3663 {
3664 bool gp_in_progress;
3665 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3666 struct rcu_node *rnp = rdp->mynode;
3667
3668 lockdep_assert_irqs_disabled();
3669
3670 /* Check for CPU stalls, if enabled. */
3671 check_cpu_stall(rdp);
3672
3673 /* Does this CPU need a deferred NOCB wakeup? */
3674 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3675 return 1;
3676
3677 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3678 gp_in_progress = rcu_gp_in_progress();
3679 if ((user || rcu_is_cpu_rrupt_from_idle() ||
3680 (gp_in_progress &&
3681 time_before(jiffies, READ_ONCE(rcu_state.gp_start) +
3682 nohz_full_patience_delay_jiffies))) &&
3683 rcu_nohz_full_cpu())
3684 return 0;
3685
3686 /* Is the RCU core waiting for a quiescent state from this CPU? */
3687 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3688 return 1;
3689
3690 /* Does this CPU have callbacks ready to invoke? */
3691 if (!rcu_rdp_is_offloaded(rdp) &&
3692 rcu_segcblist_ready_cbs(&rdp->cblist))
3693 return 1;
3694
3695 /* Has RCU gone idle with this CPU needing another grace period? */
3696 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3697 !rcu_rdp_is_offloaded(rdp) &&
3698 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3699 return 1;
3700
3701 /* Have RCU grace period completed or started? */
3702 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3703 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3704 return 1;
3705
3706 /* nothing to do */
3707 return 0;
3708 }
3709
3710 /*
3711 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3712 * the compiler is expected to optimize this away.
3713 */
3714 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3715 {
3716 trace_rcu_barrier(rcu_state.name, s, cpu,
3717 atomic_read(&rcu_state.barrier_cpu_count), done);
3718 }
3719
3720 /*
3721 * RCU callback function for rcu_barrier(). If we are last, wake
3722 * up the task executing rcu_barrier().
3723 *
3724 * Note that the value of rcu_state.barrier_sequence must be captured
3725 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3726 * other CPUs might count the value down to zero before this CPU gets
3727 * around to invoking rcu_barrier_trace(), which might result in bogus
3728 * data from the next instance of rcu_barrier().
3729 */
3730 static void rcu_barrier_callback(struct rcu_head *rhp)
3731 {
3732 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3733
3734 rhp->next = rhp; // Mark the callback as having been invoked.
3735 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3736 rcu_barrier_trace(TPS("LastCB"), -1, s);
3737 complete(&rcu_state.barrier_completion);
3738 } else {
3739 rcu_barrier_trace(TPS("CB"), -1, s);
3740 }
3741 }
3742
3743 /*
3744 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3745 */
3746 static void rcu_barrier_entrain(struct rcu_data *rdp)
3747 {
3748 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3749 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3750 bool wake_nocb = false;
3751 bool was_alldone = false;
3752
3753 lockdep_assert_held(&rcu_state.barrier_lock);
3754 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3755 return;
3756 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3757 rdp->barrier_head.func = rcu_barrier_callback;
3758 debug_rcu_head_queue(&rdp->barrier_head);
3759 rcu_nocb_lock(rdp);
3760 /*
3761 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular
3762 * queue. This way we don't wait for bypass timer that can reach seconds
3763 * if it's fully lazy.
3764 */
3765 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3766 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3767 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3768 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3769 atomic_inc(&rcu_state.barrier_cpu_count);
3770 } else {
3771 debug_rcu_head_unqueue(&rdp->barrier_head);
3772 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3773 }
3774 rcu_nocb_unlock(rdp);
3775 if (wake_nocb)
3776 wake_nocb_gp(rdp, false);
3777 smp_store_release(&rdp->barrier_seq_snap, gseq);
3778 }
3779
3780 /*
3781 * Called with preemption disabled, and from cross-cpu IRQ context.
3782 */
3783 static void rcu_barrier_handler(void *cpu_in)
3784 {
3785 uintptr_t cpu = (uintptr_t)cpu_in;
3786 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3787
3788 lockdep_assert_irqs_disabled();
3789 WARN_ON_ONCE(cpu != rdp->cpu);
3790 WARN_ON_ONCE(cpu != smp_processor_id());
3791 raw_spin_lock(&rcu_state.barrier_lock);
3792 rcu_barrier_entrain(rdp);
3793 raw_spin_unlock(&rcu_state.barrier_lock);
3794 }
3795
3796 /**
3797 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3798 *
3799 * Note that this primitive does not necessarily wait for an RCU grace period
3800 * to complete. For example, if there are no RCU callbacks queued anywhere
3801 * in the system, then rcu_barrier() is within its rights to return
3802 * immediately, without waiting for anything, much less an RCU grace period.
3803 */
3804 void rcu_barrier(void)
3805 {
3806 uintptr_t cpu;
3807 unsigned long flags;
3808 unsigned long gseq;
3809 struct rcu_data *rdp;
3810 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3811
3812 rcu_barrier_trace(TPS("Begin"), -1, s);
3813
3814 /* Take mutex to serialize concurrent rcu_barrier() requests. */
3815 mutex_lock(&rcu_state.barrier_mutex);
3816
3817 /* Did someone else do our work for us? */
3818 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3819 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
3820 smp_mb(); /* caller's subsequent code after above check. */
3821 mutex_unlock(&rcu_state.barrier_mutex);
3822 return;
3823 }
3824
3825 /* Mark the start of the barrier operation. */
3826 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3827 rcu_seq_start(&rcu_state.barrier_sequence);
3828 gseq = rcu_state.barrier_sequence;
3829 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3830
3831 /*
3832 * Initialize the count to two rather than to zero in order
3833 * to avoid a too-soon return to zero in case of an immediate
3834 * invocation of the just-enqueued callback (or preemption of
3835 * this task). Exclude CPU-hotplug operations to ensure that no
3836 * offline non-offloaded CPU has callbacks queued.
3837 */
3838 init_completion(&rcu_state.barrier_completion);
3839 atomic_set(&rcu_state.barrier_cpu_count, 2);
3840 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3841
3842 /*
3843 * Force each CPU with callbacks to register a new callback.
3844 * When that callback is invoked, we will know that all of the
3845 * corresponding CPU's preceding callbacks have been invoked.
3846 */
3847 for_each_possible_cpu(cpu) {
3848 rdp = per_cpu_ptr(&rcu_data, cpu);
3849 retry:
3850 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
3851 continue;
3852 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3853 if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
3854 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3855 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3856 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
3857 continue;
3858 }
3859 if (!rcu_rdp_cpu_online(rdp)) {
3860 rcu_barrier_entrain(rdp);
3861 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3862 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3863 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
3864 continue;
3865 }
3866 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3867 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
3868 schedule_timeout_uninterruptible(1);
3869 goto retry;
3870 }
3871 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3872 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
3873 }
3874
3875 /*
3876 * Now that we have an rcu_barrier_callback() callback on each
3877 * CPU, and thus each counted, remove the initial count.
3878 */
3879 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3880 complete(&rcu_state.barrier_completion);
3881
3882 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3883 wait_for_completion(&rcu_state.barrier_completion);
3884
3885 /* Mark the end of the barrier operation. */
3886 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3887 rcu_seq_end(&rcu_state.barrier_sequence);
3888 gseq = rcu_state.barrier_sequence;
3889 for_each_possible_cpu(cpu) {
3890 rdp = per_cpu_ptr(&rcu_data, cpu);
3891
3892 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3893 }
3894
3895 /* Other rcu_barrier() invocations can now safely proceed. */
3896 mutex_unlock(&rcu_state.barrier_mutex);
3897 }
3898 EXPORT_SYMBOL_GPL(rcu_barrier);
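
/*
 * Illustrative sketch, not kernel code: the canonical module-unload use,
 * ensuring that no callback posted by this (hypothetical) module runs
 * after its text and data are gone.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();	// No new call_rcu() after this.
 *		rcu_barrier();			// Wait for in-flight callbacks.
 *		kmem_cache_destroy(foo_cache);
 *	}
 */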
3899
3900 static unsigned long rcu_barrier_last_throttle;
3901
3902 /**
3903 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
3904 *
3905 * This can be thought of as guard rails around rcu_barrier() that
3906 * permits unrestricted userspace use, at least assuming the hardware's
3907 * try_cmpxchg() is robust. There will be at most one call per second to
3908 * rcu_barrier() system-wide from use of this function, which means that
3909 * callers might needlessly wait a second or three.
3910 *
3911 * This is intended for use by test suites to avoid OOM by flushing RCU
3912 * callbacks from the previous test before starting the next. See the
3913 * rcutree.do_rcu_barrier module parameter for more information.
3914 *
3915 * Why not simply make rcu_barrier() more scalable? That might be
3916 * the eventual endpoint, but let's keep it simple for the time being.
3917 * Note that the module parameter infrastructure serializes calls to a
3918 * given .set() function, but should concurrent .set() invocation ever be
3919 * possible, we are ready!
3920 */
3921 static void rcu_barrier_throttled(void)
3922 {
3923 unsigned long j = jiffies;
3924 unsigned long old = READ_ONCE(rcu_barrier_last_throttle);
3925 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3926
3927 while (time_in_range(j, old, old + HZ / 16) ||
3928 !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) {
3929 schedule_timeout_idle(HZ / 16);
3930 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3931 smp_mb(); /* caller's subsequent code after above check. */
3932 return;
3933 }
3934 j = jiffies;
3935 old = READ_ONCE(rcu_barrier_last_throttle);
3936 }
3937 rcu_barrier();
3938 }
3939
3940 /*
3941 * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier
3942 * request arrives. We insist on a true value to allow for possible
3943 * future expansion.
3944 */
3945 static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp)
3946 {
3947 bool b;
3948 int ret;
3949
3950 if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
3951 return -EAGAIN;
3952 ret = kstrtobool(val, &b);
3953 if (!ret && b) {
3954 atomic_inc((atomic_t *)kp->arg);
3955 rcu_barrier_throttled();
3956 atomic_dec((atomic_t *)kp->arg);
3957 }
3958 return ret;
3959 }
3960
3961 /*
3962 * Output the number of outstanding rcutree.do_rcu_barrier requests.
3963 */
3964 static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp)
3965 {
3966 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
3967 }
3968
3969 static const struct kernel_param_ops do_rcu_barrier_ops = {
3970 .set = param_set_do_rcu_barrier,
3971 .get = param_get_do_rcu_barrier,
3972 };
3973 static atomic_t do_rcu_barrier;
3974 module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644);
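
/*
 * Given the rcutree.do_rcu_barrier module-parameter name, a test script
 * would typically trigger a throttled barrier from userspace like this
 * (path assumed from the usual module-parameter sysfs layout):
 *
 *	echo 1 > /sys/module/rcutree/parameters/do_rcu_barrier
 *
 * Reading the same file reports the number of outstanding requests.
 */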
3975
3976 /*
3977 * Compute the mask of online CPUs for the specified rcu_node structure.
3978 * This will not be stable unless the rcu_node structure's ->lock is
3979 * held, but the bit corresponding to the current CPU will be stable
3980 * in most contexts.
3981 */
3982 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
3983 {
3984 return READ_ONCE(rnp->qsmaskinitnext);
3985 }
3986
3987 /*
3988 * Is the CPU corresponding to the specified rcu_data structure online
3989 * from RCU's perspective? This perspective is given by that structure's
3990 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
3991 */
3992 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
3993 {
3994 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
3995 }
3996
3997 bool rcu_cpu_online(int cpu)
3998 {
3999 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4000
4001 return rcu_rdp_cpu_online(rdp);
4002 }
4003
4004 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
4005
4006 /*
4007 * Is the current CPU online as far as RCU is concerned?
4008 *
4009 * Disable preemption to avoid false positives that could otherwise
4010 * happen due to the current CPU number being sampled, this task being
4011 * preempted, its old CPU being taken offline, resuming on some other CPU,
4012 * then determining that its old CPU is now offline.
4013 *
4014 * Disable checking if in an NMI handler because we cannot safely
4015 * report errors from NMI handlers anyway. In addition, it is OK to use
4016 * RCU on an offline processor during initial boot, hence the check for
4017 * rcu_scheduler_fully_active.
4018 */
4019 bool rcu_lockdep_current_cpu_online(void)
4020 {
4021 struct rcu_data *rdp;
4022 bool ret = false;
4023
4024 if (in_nmi() || !rcu_scheduler_fully_active)
4025 return true;
4026 preempt_disable_notrace();
4027 rdp = this_cpu_ptr(&rcu_data);
4028 /*
4029 * Strictly, we care here about the case where the current CPU is
4030 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
4031 * not being up to date. So arch_spin_is_locked() might have a
4032 * false positive if it's held by some *other* CPU, but that's
4033 * OK because that just means a false *negative* on the warning.
4034 */
4035 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
4036 ret = true;
4037 preempt_enable_notrace();
4038 return ret;
4039 }
4040 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
4041
4042 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
4043
4044 // Has rcu_init() been invoked? This is used (for example) to determine
4045 // whether spinlocks may be acquired safely.
4046 static bool rcu_init_invoked(void)
4047 {
4048 return !!READ_ONCE(rcu_state.n_online_cpus);
4049 }
4050
4051 /*
4052 * All CPUs for the specified rcu_node structure have gone offline,
4053 * and all tasks that were preempted within an RCU read-side critical
4054 * section while running on one of those CPUs have since exited their RCU
4055 * read-side critical section. Some other CPU is reporting this fact with
4056 * the specified rcu_node structure's ->lock held and interrupts disabled.
4057 * This function therefore goes up the tree of rcu_node structures,
4058 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
4059 * the leaf rcu_node structure's ->qsmaskinit field has already been
4060 * updated.
4061 *
4062 * This function does check that the specified rcu_node structure has
4063 * all CPUs offline and no blocked tasks, so it is OK to invoke it
4064 * prematurely. That said, invoking it after the fact will cost you
4065 * a needless lock acquisition. So once it has done its work, don't
4066 * invoke it again.
4067 */
4068 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
4069 {
4070 long mask;
4071 struct rcu_node *rnp = rnp_leaf;
4072
4073 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4074 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4075 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4076 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4077 return;
4078 for (;;) {
4079 mask = rnp->grpmask;
4080 rnp = rnp->parent;
4081 if (!rnp)
4082 break;
4083 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4084 rnp->qsmaskinit &= ~mask;
4085 /* Between grace periods, so better already be zero! */
4086 WARN_ON_ONCE(rnp->qsmask);
4087 if (rnp->qsmaskinit) {
4088 raw_spin_unlock_rcu_node(rnp);
4089 /* irqs remain disabled. */
4090 return;
4091 }
4092 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4093 }
4094 }
4095
4096 /*
4097 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4098 * first CPU in a given leaf rcu_node structure coming online. The caller
4099 * must hold the corresponding leaf rcu_node ->lock with interrupts
4100 * disabled.
4101 */
4102 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4103 {
4104 long mask;
4105 long oldmask;
4106 struct rcu_node *rnp = rnp_leaf;
4107
4108 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4109 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4110 for (;;) {
4111 mask = rnp->grpmask;
4112 rnp = rnp->parent;
4113 if (rnp == NULL)
4114 return;
4115 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4116 oldmask = rnp->qsmaskinit;
4117 rnp->qsmaskinit |= mask;
4118 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4119 if (oldmask)
4120 return;
4121 }
4122 }
4123
4124 /*
4125 * Do boot-time initialization of a CPU's per-CPU RCU data.
4126 */
4127 static void __init
4128 rcu_boot_init_percpu_data(int cpu)
4129 {
4130 struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4131 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4132
4133 /* Set up local state, ensuring consistent view of global state. */
4134 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4135 INIT_WORK(&rdp->strict_work, strict_work_handler);
4136 WARN_ON_ONCE(ct->nesting != 1);
4137 WARN_ON_ONCE(rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu)));
4138 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4139 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4140 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
4141 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4142 rdp->rcu_onl_gp_state = RCU_GP_CLEANED;
4143 rdp->last_sched_clock = jiffies;
4144 rdp->cpu = cpu;
4145 rcu_boot_init_nocb_percpu_data(rdp);
4146 }
4147
4148 static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
4149 {
4150 cpumask_var_t affinity;
4151 int cpu;
4152
4153 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
4154 return;
4155
4156 for_each_leaf_node_possible_cpu(rnp, cpu)
4157 cpumask_set_cpu(cpu, affinity);
4158
4159 kthread_affine_preferred(t, affinity);
4160
4161 free_cpumask_var(affinity);
4162 }
4163
4164 struct kthread_worker *rcu_exp_gp_kworker;
4165
4166 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
4167 {
4168 struct kthread_worker *kworker;
4169 const char *name = "rcu_exp_par_gp_kthread_worker/%d";
4170 struct sched_param param = { .sched_priority = kthread_prio };
4171 int rnp_index = rnp - rcu_get_root();
4172
4173 if (rnp->exp_kworker)
4174 return;
4175
4176 kworker = kthread_create_worker(0, name, rnp_index);
4177 if (IS_ERR_OR_NULL(kworker)) {
4178 pr_err("Failed to create par gp kworker on %d/%d\n",
4179 rnp->grplo, rnp->grphi);
4180 return;
4181 }
4182 WRITE_ONCE(rnp->exp_kworker, kworker);
4183
4184 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4185 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
4186
4187 rcu_thread_affine_rnp(kworker->task, rnp);
4188 wake_up_process(kworker->task);
4189 }
4190
4191 static void __init rcu_start_exp_gp_kworker(void)
4192 {
4193 const char *name = "rcu_exp_gp_kthread_worker";
4194 struct sched_param param = { .sched_priority = kthread_prio };
4195
4196 rcu_exp_gp_kworker = kthread_run_worker(0, name);
4197 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4198 pr_err("Failed to create %s!\n", name);
4199 rcu_exp_gp_kworker = NULL;
4200 return;
4201 }
4202
4203 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4204 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4205 }
4206
4207 static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
4208 {
4209 if (rcu_scheduler_fully_active) {
4210 mutex_lock(&rnp->kthread_mutex);
4211 rcu_spawn_one_boost_kthread(rnp);
4212 rcu_spawn_exp_par_gp_kworker(rnp);
4213 mutex_unlock(&rnp->kthread_mutex);
4214 }
4215 }
4216
4217 /*
4218 * Invoked early in the CPU-online process, when pretty much all services
4219 * are available. The incoming CPU is not present.
4220 *
4221 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4222 * offline event can be happening at a given time. Note also that we can
4223 * accept some slop in the rsp->gp_seq access due to the fact that this
4224 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4225 * And any offloaded callbacks are being numbered elsewhere.
4226 */
4227 int rcutree_prepare_cpu(unsigned int cpu)
4228 {
4229 unsigned long flags;
4230 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4231 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4232 struct rcu_node *rnp = rcu_get_root();
4233
4234 /* Set up local state, ensuring consistent view of global state. */
4235 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4236 rdp->qlen_last_fqs_check = 0;
4237 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4238 rdp->blimit = blimit;
4239 ct->nesting = 1; /* CPU not up, no tearing. */
4240 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4241
4242 /*
4243 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4244 * (re-)initialized.
4245 */
4246 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4247 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4248
4249 /*
4250 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4251 * propagation up the rcu_node tree will happen at the beginning
4252 * of the next grace period.
4253 */
4254 rnp = rdp->mynode;
4255 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4256 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4257 rdp->gp_seq_needed = rdp->gp_seq;
4258 rdp->cpu_no_qs.b.norm = true;
4259 rdp->core_needs_qs = false;
4260 rdp->rcu_iw_pending = false;
4261 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4262 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4263 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4264 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4265 rcu_spawn_rnp_kthreads(rnp);
4266 rcu_spawn_cpu_nocb_kthread(cpu);
4267 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
4268 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4269
4270 return 0;
4271 }
4272
4273 /*
4274 * Has the specified (known valid) CPU ever been fully online?
4275 */
4276 bool rcu_cpu_beenfullyonline(int cpu)
4277 {
4278 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4279
4280 return smp_load_acquire(&rdp->beenonline);
4281 }
4282
4283 /*
4284 * Near the end of the CPU-online process. Pretty much all services
4285 * enabled, and the CPU is now very much alive.
4286 */
4287 int rcutree_online_cpu(unsigned int cpu)
4288 {
4289 unsigned long flags;
4290 struct rcu_data *rdp;
4291 struct rcu_node *rnp;
4292
4293 rdp = per_cpu_ptr(&rcu_data, cpu);
4294 rnp = rdp->mynode;
4295 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4296 rnp->ffmask |= rdp->grpmask;
4297 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4298 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4299 return 0; /* Too early in boot for scheduler work. */
4300
4301 // Stop-machine done, so allow nohz_full to disable tick.
4302 tick_dep_clear(TICK_DEP_BIT_RCU);
4303 return 0;
4304 }
4305
4306 /*
4307 * Mark the specified CPU as being online so that subsequent grace periods
4308 * (both expedited and normal) will wait on it. Note that this means that
4309 * incoming CPUs are not allowed to use RCU read-side critical sections
4310 * until this function is called. Failing to observe this restriction
4311 * will result in lockdep splats.
4312 *
4313 * Note that this function is special in that it is invoked directly
4314 * from the incoming CPU rather than from the cpuhp_step mechanism.
4315 * This is because this function must be invoked at a precise location.
4316 * This incoming CPU must not have enabled interrupts yet.
4317 *
4318 * This mirrors the effects of rcutree_report_cpu_dead().
4319 */
4320 void rcutree_report_cpu_starting(unsigned int cpu)
4321 {
4322 unsigned long mask;
4323 struct rcu_data *rdp;
4324 struct rcu_node *rnp;
4325 bool newcpu;
4326
4327 lockdep_assert_irqs_disabled();
4328 rdp = per_cpu_ptr(&rcu_data, cpu);
4329 if (rdp->cpu_started)
4330 return;
4331 rdp->cpu_started = true;
4332
4333 rnp = rdp->mynode;
4334 mask = rdp->grpmask;
4335 arch_spin_lock(&rcu_state.ofl_lock);
4336 rcu_watching_online();
4337 raw_spin_lock(&rcu_state.barrier_lock);
4338 raw_spin_lock_rcu_node(rnp);
4339 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4340 raw_spin_unlock(&rcu_state.barrier_lock);
4341 newcpu = !(rnp->expmaskinitnext & mask);
4342 rnp->expmaskinitnext |= mask;
4343 /* Allow lockless access for expedited grace periods. */
4344 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4345 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4346 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4347 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4348 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state);
4349
4350 /* An incoming CPU should never be blocking a grace period. */
4351 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4352 /* rcu_report_qs_rnp() *really* wants some flags to restore */
4353 unsigned long flags;
4354
4355 local_irq_save(flags);
4356 rcu_disable_urgency_upon_qs(rdp);
4357 /* Report QS -after- changing ->qsmaskinitnext! */
4358 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4359 } else {
4360 raw_spin_unlock_rcu_node(rnp);
4361 }
4362 arch_spin_unlock(&rcu_state.ofl_lock);
4363 smp_store_release(&rdp->beenonline, true);
4364 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4365 }
4366
4367 /*
4368 * The outgoing function has no further need of RCU, so remove it from
4369 * the rcu_node tree's ->qsmaskinitnext bit masks.
4370 *
4371 * Note that this function is special in that it is invoked directly
4372 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4373 * This is because this function must be invoked at a precise location.
4374 *
4375 * This mirrors the effect of rcutree_report_cpu_starting().
4376 */
4377 void rcutree_report_cpu_dead(void)
4378 {
4379 unsigned long flags;
4380 unsigned long mask;
4381 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4382 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4383
4384 /*
4385 * IRQs must be disabled from now on and until the CPU dies, or an interrupt
4386 * may introduce a new read-side critical section while this CPU is off the QS masks.
4387 */
4388 lockdep_assert_irqs_disabled();
4389 /*
4390 * CPUHP_AP_SMPCFD_DYING was the last call for rcu_exp_handler() execution.
4391 * The requested QS must have been reported on the last context switch
4392 * from stop machine to idle.
4393 */
4394 WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
4395 // Do any dangling deferred wakeups.
4396 do_nocb_deferred_wakeup(rdp);
4397
4398 rcu_preempt_deferred_qs(current);
4399
4400 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4401 mask = rdp->grpmask;
4402
4403 /*
4404 * Hold the ofl_lock and rnp lock to avoid races between CPU going
4405 * offline and doing a QS report (as below), versus rcu_gp_init().
4406 * See Requirements.rst > Hotplug CPU > Concurrent QS Reporting section
4407 * for more details.
4408 */
4409 arch_spin_lock(&rcu_state.ofl_lock);
4410 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4411 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4412 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state);
4413 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4414 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4415 rcu_disable_urgency_upon_qs(rdp);
4416 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4417 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4418 }
4419 /* Clear from ->qsmaskinitnext to mark offline. */
4420 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4421 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4422 arch_spin_unlock(&rcu_state.ofl_lock);
4423 rdp->cpu_started = false;
4424 }
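
/*
 * Again for illustration only, a hedged sketch of the shape of an
 * architecture's dying path.  Interrupts stay off from the report until
 * the CPU is truly dead, so no new read-side critical section can slip
 * in after RCU stops waiting on this CPU.  Names other than
 * rcutree_report_cpu_dead() are hypothetical.
 *
 *	void hypothetical_arch_cpu_die(void)
 *	{
 *		local_irq_disable();
 *		rcutree_report_cpu_dead();	// GPs stop waiting on this CPU.
 *		// Neither interrupts nor rcu_read_lock() beyond this point.
 *		hypothetical_arch_halt_self();	// Never returns.
 *	}
 */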

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The outgoing CPU has just passed through the dying-idle state, and we
 * are being invoked from the CPU that was IPIed to continue the offline
 * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
 */
void rcutree_migrate_callbacks(int cpu)
{
	unsigned long flags;
	struct rcu_data *my_rdp;
	struct rcu_node *my_rnp;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	bool needwake;

	if (rcu_rdp_is_offloaded(rdp))
		return;

	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
	if (rcu_segcblist_empty(&rdp->cblist)) {
		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
		return; /* No callbacks to migrate. */
	}

	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
	rcu_barrier_entrain(rdp);
	my_rdp = this_cpu_ptr(&rcu_data);
	my_rnp = my_rdp->mynode;
	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
	/* Leverage recent GPs and set GP for new callbacks. */
	needwake = rcu_advance_cbs(my_rnp, rdp) ||
		   rcu_advance_cbs(my_rnp, my_rdp);
	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
	raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
	rcu_segcblist_disable(&rdp->cblist);
	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
	check_cb_ovld_locked(my_rdp, my_rnp);
	if (rcu_rdp_is_offloaded(my_rdp)) {
		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
		__call_rcu_nocb_wake(my_rdp, true, flags);
	} else {
		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
	}
	local_irq_restore(flags);
	if (needwake)
		rcu_gp_kthread_wake();
	lockdep_assert_irqs_enabled();
	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
		  !rcu_segcblist_empty(&rdp->cblist),
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
		  rcu_segcblist_first_cb(&rdp->cblist));
}
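
/*
 * Worked example with hypothetical numbers: if outgoing CPU 3 held 100
 * callbacks and this CPU held 10, then after rcutree_migrate_callbacks(3)
 * this CPU's ->cblist holds all of them (plus any rcu_barrier() callback
 * entrained above), CPU 3's ->cblist is empty and disabled, and the
 * WARN_ONCE() above stays silent.  Because rcu_barrier_entrain() runs
 * before the merge, a concurrent rcu_barrier() still waits for CPU 3's
 * callbacks even though they now execute on this CPU.
 */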

/*
 * The CPU has been completely removed, and some other CPU is reporting
 * this fact from process context.  Do the remainder of the cleanup.
 * There can only be one CPU hotplug operation at a time, so no need for
 * explicit locking.
 */
int rcutree_dead_cpu(unsigned int cpu)
{
	ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
	// Stop-machine done, so allow nohz_full to disable tick.
	tick_dep_clear(TICK_DEP_BIT_RCU);
	return 0;
}

/*
 * Near the end of the offline process.  Trace the fact that this CPU
 * is going offline.
 */
int rcutree_dying_cpu(unsigned int cpu)
{
	bool blkd;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rdp->mynode;

	blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
	return 0;
}

/*
 * Near the beginning of the offline process.  The CPU is still very much
 * alive with pretty much all services enabled.
 */
int rcutree_offline_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->ffmask &= ~rdp->grpmask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	// nohz_full CPUs need the tick for stop-machine to work quickly.
	tick_dep_set(TICK_DEP_BIT_RCU);
	return 0;
}
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * On non-huge systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */
static int rcu_pm_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		rcu_async_hurry();
		rcu_expedite_gp();
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		rcu_unexpedite_gp();
		rcu_async_relax();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
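
/*
 * The effect of the above, as a hedged sketch: between PM_SUSPEND_PREPARE
 * and PM_POST_SUSPEND, rcu_expedite_gp() makes synchronize_rcu() act like
 * synchronize_rcu_expedited(), and rcu_async_hurry() suppresses laziness
 * in callback processing.  Both facilities nest, so code doing its own
 * bracketing composes with this PM notifier:
 *
 *	rcu_expedite_gp();
 *	synchronize_rcu();	// Runs expedited.
 *	rcu_unexpedite_gp();	// Any PM-initiated expediting persists.
 */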

/*
 * Spawn the kthreads that handle RCU's grace periods.
 */
static int __init rcu_spawn_gp_kthread(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct sched_param sp;
	struct task_struct *t;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	rcu_scheduler_fully_active = 1;
	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
		return 0;
	if (kthread_prio) {
		sp.sched_priority = kthread_prio;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
	rnp = rcu_get_root();
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	wake_up_process(t);
	/* This is a pre-SMP initcall, so we expect a single CPU. */
	WARN_ON(num_online_cpus() > 1);
	/*
	 * These kthreads could not be created during rcu_init() ->
	 * rcutree_prepare_cpu() because rcu_scheduler_fully_active
	 * was not yet set.
	 */
	rcu_spawn_cpu_nocb_kthread(smp_processor_id());
	rcu_spawn_rnp_kthreads(rdp->mynode);
	rcu_spawn_core_kthreads();
	/* Create kthread worker for expedited GPs. */
	rcu_start_exp_gp_kworker();
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);

/*
 * This function is invoked towards the end of the scheduler's
 * initialization process.  Before it is called, the boot-time idle task
 * might invoke synchronous grace-period primitives, which are no-ops at
 * that point because the idle task is still booting the system.  After
 * this function is called, any synchronous grace-period primitives run
 * as expedited, with the requesting task driving the grace period
 * forward.  A later core_initcall() rcu_set_runtime_mode() will switch
 * to full runtime RCU functionality.
 */
void rcu_scheduler_starting(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_test_sync_prims();

	// Fix up the ->gp_seq counters.
	local_irq_save(flags);
	rcu_for_each_node_breadth_first(rnp)
		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
	local_irq_restore(flags);

	// Switch out of early boot mode.
	rcu_scheduler_active = RCU_SCHEDULER_INIT;
	rcu_test_sync_prims();
}
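
/*
 * For reference, rcu_scheduler_active takes on three values during boot:
 * RCU_SCHEDULER_INACTIVE (synchronous grace-period primitives are no-ops),
 * then RCU_SCHEDULER_INIT as set above (they run expedited, driven by the
 * requesting task), and finally RCU_SCHEDULER_RUNNING once the
 * core_initcall() rcu_set_runtime_mode() mentioned above switches RCU to
 * full runtime operation.
 */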

/*
 * Helper function for rcu_init() that initializes the rcu_state structure.
 */
static void __init rcu_init_one(void)
{
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

	int levelspread[RCU_NUM_LVLS];	/* kids/node in each level. */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < rcu_num_lvls; i++)
		rcu_state.level[i] =
			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		rnp = rcu_state.level[i];
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gp_seq = rcu_state.gp_seq;
			rnp->gp_seq_needed = rcu_state.gp_seq;
			rnp->completedqs = rcu_state.gp_seq;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % levelspread[i - 1];
				rnp->grpmask = BIT(rnp->grpnum);
				rnp->parent = rcu_state.level[i - 1] +
					      j / levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
			spin_lock_init(&rnp->exp_lock);
			mutex_init(&rnp->kthread_mutex);
			raw_spin_lock_init(&rnp->exp_poll_lock);
			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
		}
	}

	init_swait_queue_head(&rcu_state.gp_wq);
	init_swait_queue_head(&rcu_state.expedited_wq);
	rnp = rcu_first_leaf_node();
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
		per_cpu_ptr(&rcu_data, i)->barrier_head.next =
			&per_cpu_ptr(&rcu_data, i)->barrier_head;
		rcu_boot_init_percpu_data(i);
	}
}
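
/*
 * Worked example of the resulting layout, using a hypothetical two-level
 * geometry with rcu_num_lvls == 2 and num_rcu_lvl[] == { 1, 9 }:
 * rcu_state.level[0] points at node[0] (the root) and rcu_state.level[1]
 * at node[1]..node[9] (the leaves).  The outer loop above runs from the
 * leaf level upward (i counts down), and each non-root node's ->parent is
 * computed as an offset into the previous level's slice of the array.
 */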

/*
 * Force priority from the kernel command-line into range.
 */
static void __init sanitize_kthread_prio(void)
{
	int kthread_prio_in = kthread_prio;

	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
		kthread_prio = 2;
	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;

	if (kthread_prio != kthread_prio_in)
		pr_alert("%s: Limited prio to %d from %d\n",
			 __func__, kthread_prio, kthread_prio_in);
}

/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
void rcu_init_geometry(void)
{
	ulong d;
	int i;
	static unsigned long old_nr_cpu_ids;
	int rcu_capacity[RCU_NUM_LVLS];
	static bool initialized;

	if (initialized) {
		/*
		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
		 */
		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
		return;
	}

	old_nr_cpu_ids = nr_cpu_ids;
	initialized = true;

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, plus one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;
	adjust_jiffies_till_sched_qs();

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 || rcu_fanout_leaf > BITS_PER_LONG) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute the number of CPUs that can be handled by an rcu_node
	 * tree with the given number of levels.
	 */
	rcu_capacity[0] = rcu_fanout_leaf;
	for (i = 1; i < RCU_NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;

	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/* Calculate the number of levels in the tree. */
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
	}
	rcu_num_lvls = i + 1;

	/* Calculate the number of rcu_node structures at each level of the tree. */
	for (i = 0; i < rcu_num_lvls; i++) {
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];

		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i < rcu_num_lvls; i++)
		rcu_num_nodes += num_rcu_lvl[i];
}
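
/*
 * Worked example with hypothetical parameters: on a 64-bit build with
 * RCU_FANOUT == 64, rcu_fanout_leaf == 16, and nr_cpu_ids == 130, the
 * code above yields rcu_capacity[] == { 16, 1024, ... }.  Because
 * 130 > 16 but 130 <= 1024, two levels suffice (rcu_num_lvls == 2), and
 * num_rcu_lvl[] == { 1, 9 }: one root plus DIV_ROUND_UP(130, 16) == 9
 * leaves, for a total of rcu_num_nodes == 10.
 */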

/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure.
 */
static void __init rcu_dump_rcu_node_tree(void)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}
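
/*
 * For the hypothetical 130-CPU, two-level geometry described earlier,
 * the dump would look roughly like this, one line per level and one
 * "grplo:grphi ^grpnum" triple per rcu_node structure:
 *
 *	rcu_node tree layout dump
 *	 0:129 ^0
 *	 0:15 ^0 16:31 ^1 32:47 ^2 ... 112:127 ^7 128:129 ^8
 */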

struct workqueue_struct *rcu_gp_wq;

void __init rcu_init(void)
{
	int cpu = smp_processor_id();

	rcu_early_boot_tests();

	rcu_bootup_announce();
	sanitize_kthread_prio();
	rcu_init_geometry();
	rcu_init_one();
	if (dump_tree)
		rcu_dump_rcu_node_tree();
	if (use_softirq)
		open_softirq(RCU_SOFTIRQ, rcu_core_si);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
	rcutree_prepare_cpu(cpu);
	rcutree_report_cpu_starting(cpu);
	rcutree_online_cpu(cpu);

	/* Create workqueues for Tree SRCU and for expedited GPs. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_gp_wq);

	sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
	WARN_ON(!sync_wq);

	/*
	 * Default to waking normal-GP waiters from the grace period on
	 * smaller systems, unless this was explicitly set (or disabled)
	 * via a boot parameter.
	 */
	if (rcu_normal_wake_from_gp < 0) {
		if (num_possible_cpus() <= WAKE_FROM_GP_CPU_THRESHOLD)
			rcu_normal_wake_from_gp = 1;
	}

	/* Fill in default value for rcutree.qovld boot parameter. */
	/* -After- the rcu_node ->lock fields are initialized! */
	if (qovld < 0)
		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
	else
		qovld_calc = qovld;

	// Kick-start in case any polled grace periods started early.
	(void)start_poll_synchronize_rcu_expedited();

	rcu_test_sync_prims();

	tasks_cblist_init_generic();
}

#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_nocb.h"
#include "tree_plugin.h"