1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 *
5 * Copyright IBM Corporation, 2008
6 *
7 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8 * Manfred Spraul <manfred@colorfullife.com>
9 * Paul E. McKenney <paulmck@linux.ibm.com>
10 *
11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13 *
14 * For detailed explanation of Read-Copy Update mechanism see -
15 * Documentation/RCU
16 */
17
18 #define pr_fmt(fmt) "rcu: " fmt
19
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/kmemleak.h>
35 #include <linux/moduleparam.h>
36 #include <linux/panic.h>
37 #include <linux/panic_notifier.h>
38 #include <linux/percpu.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/mutex.h>
42 #include <linux/time.h>
43 #include <linux/kernel_stat.h>
44 #include <linux/wait.h>
45 #include <linux/kthread.h>
46 #include <uapi/linux/sched/types.h>
47 #include <linux/prefetch.h>
48 #include <linux/delay.h>
49 #include <linux/random.h>
50 #include <linux/trace_events.h>
51 #include <linux/suspend.h>
52 #include <linux/ftrace.h>
53 #include <linux/tick.h>
54 #include <linux/sysrq.h>
55 #include <linux/kprobes.h>
56 #include <linux/gfp.h>
57 #include <linux/oom.h>
58 #include <linux/smpboot.h>
59 #include <linux/jiffies.h>
60 #include <linux/slab.h>
61 #include <linux/sched/isolation.h>
62 #include <linux/sched/clock.h>
63 #include <linux/vmalloc.h>
64 #include <linux/mm.h>
65 #include <linux/kasan.h>
66 #include <linux/context_tracking.h>
67 #include "../time/tick-internal.h"
68
69 #include "tree.h"
70 #include "rcu.h"
71
72 #ifdef MODULE_PARAM_PREFIX
73 #undef MODULE_PARAM_PREFIX
74 #endif
75 #define MODULE_PARAM_PREFIX "rcutree."
76
77 /* Data structures. */
78 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);
79
80 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
81 .gpwrap = true,
82 };
83
84 int rcu_get_gpwrap_count(int cpu)
85 {
86 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
87
88 return READ_ONCE(rdp->gpwrap_count);
89 }
90 EXPORT_SYMBOL_GPL(rcu_get_gpwrap_count);
91
92 static struct rcu_state rcu_state = {
93 .level = { &rcu_state.node[0] },
94 .gp_state = RCU_GP_IDLE,
95 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
96 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
97 .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
98 .name = RCU_NAME,
99 .abbr = RCU_ABBR,
100 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
101 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
102 .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
103 .srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
104 rcu_sr_normal_gp_cleanup_work),
105 .srs_cleanups_pending = ATOMIC_INIT(0),
106 #ifdef CONFIG_RCU_NOCB_CPU
107 .nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex),
108 #endif
109 };
110
111 /* Dump rcu_node combining tree at boot to verify correct setup. */
112 static bool dump_tree;
113 module_param(dump_tree, bool, 0444);
114 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
115 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
116 #ifndef CONFIG_PREEMPT_RT
117 module_param(use_softirq, bool, 0444);
118 #endif
119 /* Control rcu_node-tree auto-balancing at boot time. */
120 static bool rcu_fanout_exact;
121 module_param(rcu_fanout_exact, bool, 0444);
122 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
123 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
124 module_param(rcu_fanout_leaf, int, 0444);
125 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
126 /* Number of rcu_nodes at specified level. */
127 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
128 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
129
130 /*
131 * The rcu_scheduler_active variable is initialized to the value
132 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
133 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
134 * RCU can assume that there is but one task, allowing RCU to (for example)
135 * optimize synchronize_rcu() to a simple barrier(). When this variable
136 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
137 * to detect real grace periods. This variable is also used to suppress
138 * boot-time false positives from lockdep-RCU error checking. Finally, it
139 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
140 * is fully initialized, including all of its kthreads having been spawned.
141 */
142 int rcu_scheduler_active __read_mostly;
143 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
144
145 /*
146 * The rcu_scheduler_fully_active variable transitions from zero to one
147 * during the early_initcall() processing, which is after the scheduler
148 * is capable of creating new tasks. So RCU processing (for example,
149 * creating tasks for RCU priority boosting) must be delayed until after
150 * rcu_scheduler_fully_active transitions from zero to one. We also
151 * currently delay invocation of any RCU callbacks until after this point.
152 *
153 * It might later prove better for people registering RCU callbacks during
154 * early boot to take responsibility for these callbacks, but one step at
155 * a time.
156 */
157 static int rcu_scheduler_fully_active __read_mostly;
158
159 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
160 unsigned long gps, unsigned long flags);
161 static void invoke_rcu_core(void);
162 static void rcu_report_exp_rdp(struct rcu_data *rdp);
163 static void sync_sched_exp_online_cleanup(int cpu);
164 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
165 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
166 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
167 static bool rcu_init_invoked(void);
168 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
169 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
170
171 /*
172 * rcuc/rcub/rcuop kthread realtime priority. The "rcuop"
173 * real-time priority (enabling/disabling) is controlled by
174 * the extra CONFIG_RCU_NOCB_CPU_CB_BOOST configuration.
175 */
176 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
177 module_param(kthread_prio, int, 0444);
178
179 /* Delay in jiffies for grace-period initialization delays, debug only. */
180
181 static int gp_preinit_delay;
182 module_param(gp_preinit_delay, int, 0444);
183 static int gp_init_delay;
184 module_param(gp_init_delay, int, 0444);
185 static int gp_cleanup_delay;
186 module_param(gp_cleanup_delay, int, 0444);
187 static int nohz_full_patience_delay;
188 module_param(nohz_full_patience_delay, int, 0444);
189 static int nohz_full_patience_delay_jiffies;
190
191 // Add delay to rcu_read_unlock() for strict grace periods.
192 static int rcu_unlock_delay;
193 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
194 module_param(rcu_unlock_delay, int, 0444);
195 #endif
196
197 /* Retrieve RCU kthreads priority for rcutorture */
198 int rcu_get_gp_kthreads_prio(void)
199 {
200 return kthread_prio;
201 }
202 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
203
204 /*
205 * Number of grace periods between delays, normalized by the duration of
206 * the delay. The longer the delay, the more the grace periods between
207 * each delay. The reason for this normalization is that it means that,
208 * for non-zero delays, the overall slowdown of grace periods is constant
209 * regardless of the duration of the delay. This arrangement balances
210 * the need for long delays to increase some race probabilities with the
211 * need for fast grace periods to increase other race probabilities.
212 */
213 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */
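/*
 * Illustrative note: rcu_gp_slow() below injects a delay only once every
 * rcu_num_nodes * PER_RCU_NODE_PERIOD * delay grace periods, so the average
 * slowdown works out to roughly 1 / (rcu_num_nodes * PER_RCU_NODE_PERIOD)
 * jiffies per grace period, independent of the configured delay.
 */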
214
215 /*
216 * Return true if an RCU grace period is in progress. The READ_ONCE()s
217 * permit this function to be invoked without holding the root rcu_node
218 * structure's ->lock, but of course results can be subject to change.
219 */
220 static int rcu_gp_in_progress(void)
221 {
222 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
223 }
224
225 /*
226 * Return the number of callbacks queued on the specified CPU.
227 * Handles both the nocbs and normal cases.
228 */
229 static long rcu_get_n_cbs_cpu(int cpu)
230 {
231 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
232
233 if (rcu_segcblist_is_enabled(&rdp->cblist))
234 return rcu_segcblist_n_cbs(&rdp->cblist);
235 return 0;
236 }
237
238 /**
239 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
240 *
241 * Mark a quiescent state for RCU, Tasks RCU, and Tasks Trace RCU.
242 * This is a special-purpose function to be used in the softirq
243 * infrastructure and perhaps the occasional long-running softirq
244 * handler.
245 *
246 * Note that from RCU's viewpoint, a call to rcu_softirq_qs() is
247 * equivalent to momentarily completely enabling preemption. For
248 * example, given this code::
249 *
250 * local_bh_disable();
251 * do_something();
252 * rcu_softirq_qs(); // A
253 * do_something_else();
254 * local_bh_enable(); // B
255 *
256 * A call to synchronize_rcu() that began concurrently with the
257 * call to do_something() would be guaranteed to wait only until
258 * execution reached statement A. Without that rcu_softirq_qs(),
259 * that same synchronize_rcu() would instead be guaranteed to wait
260 * until execution reached statement B.
261 */
262 void rcu_softirq_qs(void)
263 {
264 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
265 lock_is_held(&rcu_lock_map) ||
266 lock_is_held(&rcu_sched_lock_map),
267 "Illegal rcu_softirq_qs() in RCU read-side critical section");
268 rcu_qs();
269 rcu_preempt_deferred_qs(current);
270 rcu_tasks_qs(current, false);
271 }
272
273 /*
274 * Reset the current CPU's RCU_WATCHING counter to indicate that the
275 * newly onlined CPU is no longer in an extended quiescent state.
276 * This will either leave the counter unchanged, or increment it
277 * to the next non-quiescent value.
278 *
279 * The non-atomic test/increment sequence works because the upper bits
280 * of the ->state variable are manipulated only by the corresponding CPU,
281 * or when the corresponding CPU is offline.
282 */
283 static void rcu_watching_online(void)
284 {
285 if (ct_rcu_watching() & CT_RCU_WATCHING)
286 return;
287 ct_state_inc(CT_RCU_WATCHING);
288 }
289
290 /*
291 * Return true if the snapshot returned from ct_rcu_watching()
292 * indicates that RCU is in an extended quiescent state.
293 */
294 static bool rcu_watching_snap_in_eqs(int snap)
295 {
296 return !(snap & CT_RCU_WATCHING);
297 }
298
299 /**
300 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
301 * since the specified @snap?
302 *
303 * @rdp: The rcu_data corresponding to the CPU for which to check EQS.
304 * @snap: rcu_watching snapshot taken when the CPU wasn't in an EQS.
305 *
306 * Returns true if the CPU corresponding to @rdp has spent some time in an
307 * extended quiescent state since @snap. Note that this doesn't check if it
308 * /still/ is in an EQS, just that it went through one since @snap.
309 *
310 * This is meant to be used in a loop waiting for a CPU to go through an EQS.
311 */
312 static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap)
313 {
314 /*
315 * The first failing snapshot is already ordered against the accesses
316 * performed by the remote CPU after it exits idle.
317 *
318 * The second snapshot therefore only needs to order against accesses
319 * performed by the remote CPU prior to entering idle and therefore can
320 * rely solely on acquire semantics.
321 */
322 if (WARN_ON_ONCE(rcu_watching_snap_in_eqs(snap)))
323 return true;
324
325 return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
326 }
327
328 /*
329 * Return true if the referenced integer is zero while the specified
330 * CPU remains within a single extended quiescent state.
331 */
332 bool rcu_watching_zero_in_eqs(int cpu, int *vp)
333 {
334 int snap;
335
336 // If not quiescent, force back to earlier extended quiescent state.
337 snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
338 smp_rmb(); // Order CT state and *vp reads.
339 if (READ_ONCE(*vp))
340 return false; // Non-zero, so report failure;
341 smp_rmb(); // Order *vp read and CT state re-read.
342
343 // If still in the same extended quiescent state, we are good!
344 return snap == ct_rcu_watching_cpu(cpu);
345 }
346
347 /*
348 * Let the RCU core know that this CPU has gone through the scheduler,
349 * which is a quiescent state. This is called when the need for a
350 * quiescent state is urgent, so we burn an atomic operation and full
351 * memory barriers to let the RCU core know about it, regardless of what
352 * this CPU might (or might not) do in the near future.
353 *
354 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
355 *
356 * The caller must have disabled interrupts and must not be idle.
357 */
358 notrace void rcu_momentary_eqs(void)
359 {
360 int seq;
361
362 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
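	/*
	 * Note: adding 2 * CT_RCU_WATCHING advances the RCU-watching portion of
	 * the context-tracking counter by two while leaving the CT_RCU_WATCHING
	 * bit set, so any previously taken snapshot no longer matches and this
	 * CPU gets credited with the "zero-duration dyntick-idle period"
	 * described above.
	 */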
363 seq = ct_state_inc(2 * CT_RCU_WATCHING);
364 /* It is illegal to call this from idle state. */
365 WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
366 rcu_preempt_deferred_qs(current);
367 }
368 EXPORT_SYMBOL_GPL(rcu_momentary_eqs);
369
370 /**
371 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
372 *
373 * If the current CPU is idle and running at a first-level (not nested)
374 * interrupt, or directly from idle, return true.
375 *
376 * The caller must have at least disabled IRQs.
377 */
378 static int rcu_is_cpu_rrupt_from_idle(void)
379 {
380 long nesting;
381
382 /*
383 * Usually called from the tick; but also used from smp_function_call()
384 * for expedited grace periods. This latter can result in running from
385 * the idle task, instead of an actual IPI.
386 */
387 lockdep_assert_irqs_disabled();
388
389 /* Check for counter underflows */
390 RCU_LOCKDEP_WARN(ct_nesting() < 0,
391 "RCU nesting counter underflow!");
392 RCU_LOCKDEP_WARN(ct_nmi_nesting() <= 0,
393 "RCU nmi_nesting counter underflow/zero!");
394
395 /* Are we at first interrupt nesting level? */
396 nesting = ct_nmi_nesting();
397 if (nesting > 1)
398 return false;
399
400 /*
401 * If we're not in an interrupt, we must be in the idle task!
402 */
403 WARN_ON_ONCE(!nesting && !is_idle_task(current));
404
405 /* Does CPU appear to be idle from an RCU standpoint? */
406 return ct_nesting() == 0;
407 }
408
409 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
410 // Maximum callbacks per rcu_do_batch ...
411 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
412 static long blimit = DEFAULT_RCU_BLIMIT;
413 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
414 static long qhimark = DEFAULT_RCU_QHIMARK;
415 #define DEFAULT_RCU_QLOMARK 100 // Once only this many pending, use blimit.
416 static long qlowmark = DEFAULT_RCU_QLOMARK;
417 #define DEFAULT_RCU_QOVLD_MULT 2
418 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
419 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
420 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
421
422 module_param(blimit, long, 0444);
423 module_param(qhimark, long, 0444);
424 module_param(qlowmark, long, 0444);
425 module_param(qovld, long, 0444);
426
427 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
428 static ulong jiffies_till_next_fqs = ULONG_MAX;
429 static bool rcu_kick_kthreads;
430 static int rcu_divisor = 7;
431 module_param(rcu_divisor, int, 0644);
432
433 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
434 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
435 module_param(rcu_resched_ns, long, 0644);
436
437 /*
438 * How long the grace period must be before we start recruiting
439 * quiescent-state help from rcu_note_context_switch().
440 */
441 static ulong jiffies_till_sched_qs = ULONG_MAX;
442 module_param(jiffies_till_sched_qs, ulong, 0444);
443 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
444 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
445
446 /*
447 * Make sure that we give the grace-period kthread time to detect any
448 * idle CPUs before taking active measures to force quiescent states.
449 * However, don't go below 100 milliseconds, adjusted upwards for really
450 * large systems.
451 */
452 static void adjust_jiffies_till_sched_qs(void)
453 {
454 unsigned long j;
455
456 /* If jiffies_till_sched_qs was specified, respect the request. */
457 if (jiffies_till_sched_qs != ULONG_MAX) {
458 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
459 return;
460 }
461 /* Otherwise, set to third fqs scan, but bound below on large system. */
462 j = READ_ONCE(jiffies_till_first_fqs) +
463 2 * READ_ONCE(jiffies_till_next_fqs);
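	/*
	 * For example, if jiffies_till_first_fqs and jiffies_till_next_fqs
	 * were both 3, this would yield j == 3 + 2 * 3 == 9 jiffies, subject
	 * to the large-system lower bound applied just below.
	 */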
464 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
465 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
466 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
467 WRITE_ONCE(jiffies_to_sched_qs, j);
468 }
469
470 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
471 {
472 ulong j;
473 int ret = kstrtoul(val, 0, &j);
474
475 if (!ret) {
476 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
477 adjust_jiffies_till_sched_qs();
478 }
479 return ret;
480 }
481
482 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
483 {
484 ulong j;
485 int ret = kstrtoul(val, 0, &j);
486
487 if (!ret) {
488 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
489 adjust_jiffies_till_sched_qs();
490 }
491 return ret;
492 }
493
494 static const struct kernel_param_ops first_fqs_jiffies_ops = {
495 .set = param_set_first_fqs_jiffies,
496 .get = param_get_ulong,
497 };
498
499 static const struct kernel_param_ops next_fqs_jiffies_ops = {
500 .set = param_set_next_fqs_jiffies,
501 .get = param_get_ulong,
502 };
503
504 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
505 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
506 module_param(rcu_kick_kthreads, bool, 0644);
507
508 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
509 static int rcu_pending(int user);
510
511 /*
512 * Return the number of RCU GPs completed thus far for debug & stats.
513 */
514 unsigned long rcu_get_gp_seq(void)
515 {
516 return READ_ONCE(rcu_state.gp_seq);
517 }
518 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
519
520 /*
521 * Return the number of RCU expedited batches completed thus far for
522 * debug & stats. Odd numbers mean that a batch is in progress, even
523 * numbers mean idle. The value returned will thus be roughly double
524 * the cumulative batches since boot.
525 */
526 unsigned long rcu_exp_batches_completed(void)
527 {
528 return rcu_state.expedited_sequence;
529 }
530 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
531
532 /*
533 * Return the root node of the rcu_state structure.
534 */
535 static struct rcu_node *rcu_get_root(void)
536 {
537 return &rcu_state.node[0];
538 }
539
540 /*
541 * Send along grace-period-related data for rcutorture diagnostics.
542 */
543 void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq)
544 {
545 *flags = READ_ONCE(rcu_state.gp_flags);
546 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
547 }
548 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
549
550 /* Gather grace-period sequence numbers for rcutorture diagnostics. */
551 unsigned long long rcutorture_gather_gp_seqs(void)
552 {
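	/*
	 * Pack three low-order snippets of the sequence counters into one
	 * 64-bit value: bits 40-55 take 16 bits of ->gp_seq, bits 16-39 take
	 * 24 bits of ->expedited_sequence, and bits 0-15 take 16 bits of
	 * ->gp_seq_polled. rcutorture_format_gp_seqs() unpacks this layout.
	 */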
553 return ((READ_ONCE(rcu_state.gp_seq) & 0xffffULL) << 40) |
554 ((READ_ONCE(rcu_state.expedited_sequence) & 0xffffffULL) << 16) |
555 (READ_ONCE(rcu_state.gp_seq_polled) & 0xffffULL);
556 }
557 EXPORT_SYMBOL_GPL(rcutorture_gather_gp_seqs);
558
559 /* Format grace-period sequence numbers for rcutorture diagnostics. */
560 void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len)
561 {
562 unsigned int egp = (seqs >> 16) & 0xffffffULL;
563 unsigned int ggp = (seqs >> 40) & 0xffffULL;
564 unsigned int pgp = seqs & 0xffffULL;
565
566 snprintf(cp, len, "g%04x:e%06x:p%04x", ggp, egp, pgp);
567 }
568 EXPORT_SYMBOL_GPL(rcutorture_format_gp_seqs);
569
570 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
571 /*
572 * An empty function that will trigger a reschedule on
573 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
574 */
575 static void late_wakeup_func(struct irq_work *work)
576 {
577 }
578
579 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
580 IRQ_WORK_INIT(late_wakeup_func);
581
582 /*
583 * If either:
584 *
585 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
586 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
587 *
588 * In these cases the late RCU wake ups aren't supported in the resched loops and our
589 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
590 * get re-enabled again.
591 */
592 noinstr void rcu_irq_work_resched(void)
593 {
594 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
595
596 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
597 return;
598
599 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
600 return;
601
602 instrumentation_begin();
603 if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
604 irq_work_queue(this_cpu_ptr(&late_wakeup_work));
605 }
606 instrumentation_end();
607 }
608 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
609
610 #ifdef CONFIG_PROVE_RCU
611 /**
612 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
613 */
614 void rcu_irq_exit_check_preempt(void)
615 {
616 lockdep_assert_irqs_disabled();
617
618 RCU_LOCKDEP_WARN(ct_nesting() <= 0,
619 "RCU nesting counter underflow/zero!");
620 RCU_LOCKDEP_WARN(ct_nmi_nesting() !=
621 CT_NESTING_IRQ_NONIDLE,
622 "Bad RCU nmi_nesting counter\n");
623 RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
624 "RCU in extended quiescent state!");
625 }
626 #endif /* #ifdef CONFIG_PROVE_RCU */
627
628 #ifdef CONFIG_NO_HZ_FULL
629 /**
630 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
631 *
632 * The scheduler tick is not normally enabled when CPUs enter the kernel
633 * from nohz_full userspace execution. After all, nohz_full userspace
634 * execution is an RCU quiescent state and the time executing in the kernel
635 * is quite short. Except of course when it isn't. And it is not hard to
636 * cause a large system to spend tens of seconds or even minutes looping
637 * in the kernel, which can cause a number of problems, including RCU CPU
638 * stall warnings.
639 *
640 * Therefore, if a nohz_full CPU fails to report a quiescent state
641 * in a timely manner, the RCU grace-period kthread sets that CPU's
642 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
643 * exception will invoke this function, which will turn on the scheduler
644 * tick, which will enable RCU to detect that CPU's quiescent states,
645 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
646 * The tick will be disabled once a quiescent state is reported for
647 * this CPU.
648 *
649 * Of course, in carefully tuned systems, there might never be an
650 * interrupt or exception. In that case, the RCU grace-period kthread
651 * will eventually cause one to happen. However, in less carefully
652 * controlled environments, this function allows RCU to get what it
653 * needs without creating otherwise useless interruptions.
654 */
655 void __rcu_irq_enter_check_tick(void)
656 {
657 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
658
659 // If we're here from NMI there's nothing to do.
660 if (in_nmi())
661 return;
662
663 RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
664 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
665
666 if (!tick_nohz_full_cpu(rdp->cpu) ||
667 !READ_ONCE(rdp->rcu_urgent_qs) ||
668 READ_ONCE(rdp->rcu_forced_tick)) {
669 // RCU doesn't need nohz_full help from this CPU, or it is
670 // already getting that help.
671 return;
672 }
673
674 // We get here only when not in an extended quiescent state and
675 // from interrupts (as opposed to NMIs). Therefore, (1) RCU is
676 // already watching and (2) The fact that we are in an interrupt
677 // handler and that the rcu_node lock is an irq-disabled lock
678 // prevents self-deadlock. So we can safely recheck under the lock.
679 // Note that the nohz_full state currently cannot change.
680 raw_spin_lock_rcu_node(rdp->mynode);
681 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
682 // A nohz_full CPU is in the kernel and RCU needs a
683 // quiescent state. Turn on the tick!
684 WRITE_ONCE(rdp->rcu_forced_tick, true);
685 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
686 }
687 raw_spin_unlock_rcu_node(rdp->mynode);
688 }
689 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
690 #endif /* CONFIG_NO_HZ_FULL */
691
692 /*
693 * Check to see if any future non-offloaded RCU-related work will need
694 * to be done by the current CPU, even if none need be done immediately,
695 * returning 1 if so. This function is part of the RCU implementation;
696 * it is -not- an exported member of the RCU API. This is used by
697 * the idle-entry code to figure out whether it is safe to disable the
698 * scheduler-clock interrupt.
699 *
700 * Just check whether or not this CPU has non-offloaded RCU callbacks
701 * queued.
702 */
703 int rcu_needs_cpu(void)
704 {
705 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
706 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
707 }
708
709 /*
710 * If any sort of urgency was applied to the current CPU (for example,
711 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
712 * to get to a quiescent state, disable it.
713 */
714 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
715 {
716 raw_lockdep_assert_held_rcu_node(rdp->mynode);
717 WRITE_ONCE(rdp->rcu_urgent_qs, false);
718 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
719 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
720 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
721 WRITE_ONCE(rdp->rcu_forced_tick, false);
722 }
723 }
724
725 /**
726 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
727 *
728 * Return @true if RCU is watching the running CPU and @false otherwise.
729 * A @true return means that this CPU can safely enter RCU read-side
730 * critical sections.
731 *
732 * Although calls to rcu_is_watching() from most parts of the kernel
733 * will return @true, there are important exceptions. For example, if the
734 * current CPU is deep within its idle loop, in kernel entry/exit code,
735 * or offline, rcu_is_watching() will return @false.
736 *
737 * Make notrace because it can be called by the internal functions of
738 * ftrace, and making this notrace removes unnecessary recursion calls.
739 */
740 notrace bool rcu_is_watching(void)
741 {
742 bool ret;
743
744 preempt_disable_notrace();
745 ret = rcu_is_watching_curr_cpu();
746 preempt_enable_notrace();
747 return ret;
748 }
749 EXPORT_SYMBOL_GPL(rcu_is_watching);
750
751 /*
752 * If a holdout task is actually running, request an urgent quiescent
753 * state from its CPU. This is unsynchronized, so migrations can cause
754 * the request to go to the wrong CPU. Which is OK, all that will happen
755 * is that the CPU's next context switch will be a bit slower and next
756 * time around this task will generate another request.
757 */
758 void rcu_request_urgent_qs_task(struct task_struct *t)
759 {
760 int cpu;
761
762 barrier();
763 cpu = task_cpu(t);
764 if (!task_curr(t))
765 return; /* This task is not running on that CPU. */
766 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
767 }
768
769 static unsigned long seq_gpwrap_lag = ULONG_MAX / 4;
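/*
 * By default, a CPU is deemed to have wrapped (see rcu_gpnum_ovf()) once its
 * rcu_data ->gp_seq trails the rcu_node ->gp_seq by a quarter of the counter
 * space; rcutorture can shrink this lag via rcu_set_gpwrap_lag().
 */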
770
771 /**
772 * rcu_set_gpwrap_lag - Set RCU GP sequence overflow lag value.
773 * @lag_gps: Set the overflow lag to this many grace periods' worth of counters,
774 * which is used by rcutorture to quickly force a gpwrap situation.
775 * @lag_gps = 0 resets the lag back to its boot-time default.
776 */
777 void rcu_set_gpwrap_lag(unsigned long lag_gps)
778 {
779 unsigned long lag_seq_count;
780
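	/*
	 * Each grace period advances gp_seq by 1 << RCU_SEQ_CTR_SHIFT (the low
	 * bits hold grace-period state), so convert the requested number of
	 * grace periods into gp_seq units; zero restores the ULONG_MAX / 4
	 * boot-time default.
	 */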
781 lag_seq_count = (lag_gps == 0)
782 ? ULONG_MAX / 4
783 : lag_gps << RCU_SEQ_CTR_SHIFT;
784 WRITE_ONCE(seq_gpwrap_lag, lag_seq_count);
785 }
786 EXPORT_SYMBOL_GPL(rcu_set_gpwrap_lag);
787
788 /*
789 * When trying to report a quiescent state on behalf of some other CPU,
790 * it is our responsibility to check for and handle potential overflow
791 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
792 * After all, the CPU might be in deep idle state, and thus executing no
793 * code whatsoever.
794 */
795 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
796 {
797 raw_lockdep_assert_held_rcu_node(rnp);
798 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + seq_gpwrap_lag,
799 rnp->gp_seq)) {
800 WRITE_ONCE(rdp->gpwrap, true);
801 WRITE_ONCE(rdp->gpwrap_count, READ_ONCE(rdp->gpwrap_count) + 1);
802 }
803 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
804 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
805 }
806
807 /*
808 * Snapshot the specified CPU's RCU_WATCHING counter so that we can later
809 * credit them with an implicit quiescent state. Return 1 if this CPU
810 * is in dynticks idle mode, which is an extended quiescent state.
811 */
812 static int rcu_watching_snap_save(struct rcu_data *rdp)
813 {
814 /*
815 * Full ordering between remote CPU's post idle accesses and updater's
816 * accesses prior to current GP (and also the started GP sequence number)
817 * is enforced by rcu_seq_start() implicit barrier and even further by
818 * smp_mb__after_unlock_lock() barriers chained all the way throughout the
819 * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
820 * locking.
821 *
822 * Ordering between remote CPU's pre idle accesses and post grace period
823 * updater's accesses is enforced by the below acquire semantic.
824 */
825 rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
826 if (rcu_watching_snap_in_eqs(rdp->watching_snap)) {
827 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
828 rcu_gpnum_ovf(rdp->mynode, rdp);
829 return 1;
830 }
831 return 0;
832 }
833
834 #ifndef arch_irq_stat_cpu
835 #define arch_irq_stat_cpu(cpu) 0
836 #endif
837
838 /*
839 * Returns positive if the specified CPU has passed through a quiescent state
840 * by virtue of being in or having passed through a dynticks idle state since
841 * the last call to rcu_watching_snap_save() for this same CPU, or by
842 * virtue of having been offline.
843 *
844 * Returns negative if the specified CPU needs a force resched.
845 *
846 * Returns zero otherwise.
847 */
848 static int rcu_watching_snap_recheck(struct rcu_data *rdp)
849 {
850 unsigned long jtsq;
851 int ret = 0;
852 struct rcu_node *rnp = rdp->mynode;
853
854 /*
855 * If the CPU passed through or entered a dynticks idle phase with
856 * no active irq/NMI handlers, then we can safely pretend that the CPU
857 * already acknowledged the request to pass through a quiescent
858 * state. Either way, that CPU cannot possibly be in an RCU
859 * read-side critical section that started before the beginning
860 * of the current RCU grace period.
861 */
862 if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) {
863 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
864 rcu_gpnum_ovf(rnp, rdp);
865 return 1;
866 }
867
868 /*
869 * Complain if a CPU that is considered to be offline from RCU's
870 * perspective has not yet reported a quiescent state. After all,
871 * the offline CPU should have reported a quiescent state during
872 * the CPU-offline process, or, failing that, by rcu_gp_init()
873 * if it ran concurrently with either the CPU going offline or the
874 * last task on a leaf rcu_node structure exiting its RCU read-side
875 * critical section while all CPUs corresponding to that structure
876 * are offline. This added warning detects bugs in any of these
877 * code paths.
878 *
879 * The rcu_node structure's ->lock is held here, which excludes
880 * the relevant portions the CPU-hotplug code, the grace-period
881 * initialization code, and the rcu_read_unlock() code paths.
882 *
883 * For more detail, please refer to the "Hotplug CPU" section
884 * of RCU's Requirements documentation.
885 */
886 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
887 struct rcu_node *rnp1;
888
889 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
890 __func__, rnp->grplo, rnp->grphi, rnp->level,
891 (long)rnp->gp_seq, (long)rnp->completedqs);
892 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
893 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
894 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
895 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
896 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
897 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
898 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
899 return 1; /* Break things loose after complaining. */
900 }
901
902 /*
903 * A CPU running for an extended time within the kernel can
904 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
905 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
906 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
907 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
908 * variable are safe because the assignments are repeated if this
909 * CPU failed to pass through a quiescent state. This code
910 * also checks .jiffies_resched in case jiffies_to_sched_qs
911 * is set way high.
912 */
913 jtsq = READ_ONCE(jiffies_to_sched_qs);
914 if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
915 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
916 time_after(jiffies, rcu_state.jiffies_resched) ||
917 rcu_state.cbovld)) {
918 WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
919 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
920 smp_store_release(&rdp->rcu_urgent_qs, true);
921 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
922 WRITE_ONCE(rdp->rcu_urgent_qs, true);
923 }
924
925 /*
926 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
927 * The above code handles this, but only for straight cond_resched().
928 * And some in-kernel loops check need_resched() before calling
929 * cond_resched(), which defeats the above code for CPUs that are
930 * running in-kernel with scheduling-clock interrupts disabled.
931 * So hit them over the head with the resched_cpu() hammer!
932 */
933 if (tick_nohz_full_cpu(rdp->cpu) &&
934 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
935 rcu_state.cbovld)) {
936 WRITE_ONCE(rdp->rcu_urgent_qs, true);
937 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
938 ret = -1;
939 }
940
941 /*
942 * If more than halfway to RCU CPU stall-warning time, invoke
943 * resched_cpu() more frequently to try to loosen things up a bit.
944 * Also check to see if the CPU is getting hammered with interrupts,
945 * but only once per grace period, just to keep the IPIs down to
946 * a dull roar.
947 */
948 if (time_after(jiffies, rcu_state.jiffies_resched)) {
949 if (time_after(jiffies,
950 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
951 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
952 ret = -1;
953 }
954 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
955 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
956 (rnp->ffmask & rdp->grpmask)) {
957 rdp->rcu_iw_pending = true;
958 rdp->rcu_iw_gp_seq = rnp->gp_seq;
959 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
960 }
961
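		/*
		 * When the rcu_cpu_stall_cputime facility is enabled, take a
		 * once-per-grace-period snapshot of this CPU's irq, softirq,
		 * system-time, and context-switch counts so that a later RCU
		 * CPU stall warning can show where the stalled CPU's time went.
		 */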
962 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
963 int cpu = rdp->cpu;
964 struct rcu_snap_record *rsrp;
965 struct kernel_cpustat *kcsp;
966
967 kcsp = &kcpustat_cpu(cpu);
968
969 rsrp = &rdp->snap_record;
970 rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
971 rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
972 rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
973 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu);
974 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu);
975 rsrp->nr_csw = nr_context_switches_cpu(cpu);
976 rsrp->jiffies = jiffies;
977 rsrp->gp_seq = rdp->gp_seq;
978 }
979 }
980
981 return ret;
982 }
983
984 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
985 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
986 unsigned long gp_seq_req, const char *s)
987 {
988 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
989 gp_seq_req, rnp->level,
990 rnp->grplo, rnp->grphi, s);
991 }
992
993 /*
994 * rcu_start_this_gp - Request the start of a particular grace period
995 * @rnp_start: The leaf node of the CPU from which to start.
996 * @rdp: The rcu_data corresponding to the CPU from which to start.
997 * @gp_seq_req: The gp_seq of the grace period to start.
998 *
999 * Start the specified grace period, as needed to handle newly arrived
1000 * callbacks. The required future grace periods are recorded in each
1001 * rcu_node structure's ->gp_seq_needed field. Returns true if there
1002 * is reason to awaken the grace-period kthread.
1003 *
1004 * The caller must hold the specified rcu_node structure's ->lock, which
1005 * is why the caller is responsible for waking the grace-period kthread.
1006 *
1007 * Returns true if the GP thread needs to be awakened else false.
1008 */
1009 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1010 unsigned long gp_seq_req)
1011 {
1012 bool ret = false;
1013 struct rcu_node *rnp;
1014
1015 /*
1016 * Use funnel locking to either acquire the root rcu_node
1017 * structure's lock or bail out if the need for this grace period
1018 * has already been recorded -- or if that grace period has in
1019 * fact already started. If there is already a grace period in
1020 * progress in a non-leaf node, no recording is needed because the
1021 * end of the grace period will scan the leaf rcu_node structures.
1022 * Note that rnp_start->lock must not be released.
1023 */
1024 raw_lockdep_assert_held_rcu_node(rnp_start);
1025 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1026 for (rnp = rnp_start; 1; rnp = rnp->parent) {
1027 if (rnp != rnp_start)
1028 raw_spin_lock_rcu_node(rnp);
1029 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1030 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1031 (rnp != rnp_start &&
1032 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1033 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1034 TPS("Prestarted"));
1035 goto unlock_out;
1036 }
1037 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1038 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1039 /*
1040 * We just marked the leaf or internal node, and a
1041 * grace period is in progress, which means that
1042 * rcu_gp_cleanup() will see the marking. Bail to
1043 * reduce contention.
1044 */
1045 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1046 TPS("Startedleaf"));
1047 goto unlock_out;
1048 }
1049 if (rnp != rnp_start && rnp->parent != NULL)
1050 raw_spin_unlock_rcu_node(rnp);
1051 if (!rnp->parent)
1052 break; /* At root, and perhaps also leaf. */
1053 }
1054
1055 /* If GP already in progress, just leave, otherwise start one. */
1056 if (rcu_gp_in_progress()) {
1057 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1058 goto unlock_out;
1059 }
1060 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1061 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1062 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1063 if (!READ_ONCE(rcu_state.gp_kthread)) {
1064 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1065 goto unlock_out;
1066 }
1067 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1068 ret = true; /* Caller must wake GP kthread. */
1069 unlock_out:
1070 /* Push furthest requested GP to leaf node and rcu_data structure. */
1071 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1072 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1073 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1074 }
1075 if (rnp != rnp_start)
1076 raw_spin_unlock_rcu_node(rnp);
1077 return ret;
1078 }
1079
1080 /*
1081 * Clean up any old requests for the just-ended grace period. Also return
1082 * whether any additional grace periods have been requested.
1083 */
1084 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1085 {
1086 bool needmore;
1087 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1088
1089 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1090 if (!needmore)
1091 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1092 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1093 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1094 return needmore;
1095 }
1096
1097 /*
1098 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1099 * interrupt or softirq handler, in which case we just might immediately
1100 * sleep upon return, resulting in a grace-period hang), and don't bother
1101 * awakening when there is nothing for the grace-period kthread to do
1102 * (as in several CPUs raced to awaken, we lost), and finally don't try
1103 * to awaken a kthread that has not yet been created. If all those checks
1104 * are passed, track some debug information and awaken.
1105 *
1106 * So why do the self-wakeup when in an interrupt or softirq handler
1107 * in the grace-period kthread's context? Because the kthread might have
1108 * been interrupted just as it was going to sleep, and just after the final
1109 * pre-sleep check of the awaken condition. In this case, a wakeup really
1110 * is required, and is therefore supplied.
1111 */
1112 static void rcu_gp_kthread_wake(void)
1113 {
1114 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1115
1116 if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1117 !READ_ONCE(rcu_state.gp_flags) || !t)
1118 return;
1119 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1120 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1121 swake_up_one(&rcu_state.gp_wq);
1122 }
1123
1124 /*
1125 * If there is room, assign a ->gp_seq number to any callbacks on this
1126 * CPU that have not already been assigned. Also accelerate any callbacks
1127 * that were previously assigned a ->gp_seq number that has since proven
1128 * to be too conservative, which can happen if callbacks get assigned a
1129 * ->gp_seq number while RCU is idle, but with reference to a non-root
1130 * rcu_node structure. This function is idempotent, so it does not hurt
1131 * to call it repeatedly. Returns a flag saying that we should awaken
1132 * the RCU grace-period kthread.
1133 *
1134 * The caller must hold rnp->lock with interrupts disabled.
1135 */
1136 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1137 {
1138 unsigned long gp_seq_req;
1139 bool ret = false;
1140
1141 rcu_lockdep_assert_cblist_protected(rdp);
1142 raw_lockdep_assert_held_rcu_node(rnp);
1143
1144 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1145 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1146 return false;
1147
1148 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1149
1150 /*
1151 * Callbacks are often registered with incomplete grace-period
1152 * information. Something about the fact that getting exact
1153 * information requires acquiring a global lock... RCU therefore
1154 * makes a conservative estimate of the grace period number at which
1155 * a given callback will become ready to invoke. The following
1156 * code checks this estimate and improves it when possible, thus
1157 * accelerating callback invocation to an earlier grace-period
1158 * number.
1159 */
1160 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1161 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1162 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1163
1164 /* Trace depending on how much we were able to accelerate. */
1165 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1166 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1167 else
1168 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1169
1170 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1171
1172 return ret;
1173 }
1174
1175 /*
1176 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1177 * rcu_node structure's ->lock be held. It consults the cached value
1178 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1179 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1180 * while holding the leaf rcu_node structure's ->lock.
1181 */
1182 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1183 struct rcu_data *rdp)
1184 {
1185 unsigned long c;
1186 bool needwake;
1187
1188 rcu_lockdep_assert_cblist_protected(rdp);
1189 c = rcu_seq_snap(&rcu_state.gp_seq);
1190 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1191 /* Old request still live, so mark recent callbacks. */
1192 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1193 return;
1194 }
1195 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1196 needwake = rcu_accelerate_cbs(rnp, rdp);
1197 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1198 if (needwake)
1199 rcu_gp_kthread_wake();
1200 }
1201
1202 /*
1203 * Move any callbacks whose grace period has completed to the
1204 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1205 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1206 * sublist. This function is idempotent, so it does not hurt to
1207 * invoke it repeatedly. As long as it is not invoked -too- often...
1208 * Returns true if the RCU grace-period kthread needs to be awakened.
1209 *
1210 * The caller must hold rnp->lock with interrupts disabled.
1211 */
1212 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1213 {
1214 rcu_lockdep_assert_cblist_protected(rdp);
1215 raw_lockdep_assert_held_rcu_node(rnp);
1216
1217 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1218 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1219 return false;
1220
1221 /*
1222 * Find all callbacks whose ->gp_seq numbers indicate that they
1223 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1224 */
1225 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1226
1227 /* Classify any remaining callbacks. */
1228 return rcu_accelerate_cbs(rnp, rdp);
1229 }
1230
1231 /*
1232 * Move and classify callbacks, but only if doing so won't require
1233 * that the RCU grace-period kthread be awakened.
1234 */
1235 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1236 struct rcu_data *rdp)
1237 {
1238 rcu_lockdep_assert_cblist_protected(rdp);
1239 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1240 return;
1241 // The grace period cannot end while we hold the rcu_node lock.
1242 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1243 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1244 raw_spin_unlock_rcu_node(rnp);
1245 }
1246
1247 /*
1248 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1249 * quiescent state. This is intended to be invoked when the CPU notices
1250 * a new grace period.
1251 */
1252 static void rcu_strict_gp_check_qs(void)
1253 {
1254 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1255 rcu_read_lock();
1256 rcu_read_unlock();
1257 }
1258 }
1259
1260 /*
1261 * Update CPU-local rcu_data state to record the beginnings and ends of
1262 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1263 * structure corresponding to the current CPU, and must have irqs disabled.
1264 * Returns true if the grace-period kthread needs to be awakened.
1265 */
1266 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1267 {
1268 bool ret = false;
1269 bool need_qs;
1270 const bool offloaded = rcu_rdp_is_offloaded(rdp);
1271
1272 raw_lockdep_assert_held_rcu_node(rnp);
1273
1274 if (rdp->gp_seq == rnp->gp_seq)
1275 return false; /* Nothing to do. */
1276
1277 /* Handle the ends of any preceding grace periods first. */
1278 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1279 unlikely(rdp->gpwrap)) {
1280 if (!offloaded)
1281 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1282 rdp->core_needs_qs = false;
1283 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1284 } else {
1285 if (!offloaded)
1286 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1287 if (rdp->core_needs_qs)
1288 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1289 }
1290
1291 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1292 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1293 unlikely(rdp->gpwrap)) {
1294 /*
1295 * If the current grace period is waiting for this CPU,
1296 * set up to detect a quiescent state, otherwise don't
1297 * go looking for one.
1298 */
1299 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1300 need_qs = !!(rnp->qsmask & rdp->grpmask);
1301 rdp->cpu_no_qs.b.norm = need_qs;
1302 rdp->core_needs_qs = need_qs;
1303 zero_cpu_stall_ticks(rdp);
1304 }
1305 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1306 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1307 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1308 if (IS_ENABLED(CONFIG_PROVE_RCU) && rdp->gpwrap)
1309 WRITE_ONCE(rdp->last_sched_clock, jiffies);
1310 WRITE_ONCE(rdp->gpwrap, false);
1311 rcu_gpnum_ovf(rnp, rdp);
1312 return ret;
1313 }
1314
1315 static void note_gp_changes(struct rcu_data *rdp)
1316 {
1317 unsigned long flags;
1318 bool needwake;
1319 struct rcu_node *rnp;
1320
1321 local_irq_save(flags);
1322 rnp = rdp->mynode;
1323 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1324 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1325 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1326 local_irq_restore(flags);
1327 return;
1328 }
1329 needwake = __note_gp_changes(rnp, rdp);
1330 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1331 rcu_strict_gp_check_qs();
1332 if (needwake)
1333 rcu_gp_kthread_wake();
1334 }
1335
1336 static atomic_t *rcu_gp_slow_suppress;
1337
1338 /* Register a counter to suppress debugging grace-period delays. */
1339 void rcu_gp_slow_register(atomic_t *rgssp)
1340 {
1341 WARN_ON_ONCE(rcu_gp_slow_suppress);
1342
1343 WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1344 }
1345 EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1346
1347 /* Unregister a counter, with NULL for not caring which. */
1348 void rcu_gp_slow_unregister(atomic_t *rgssp)
1349 {
1350 WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);
1351
1352 WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1353 }
1354 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1355
1356 static bool rcu_gp_slow_is_suppressed(void)
1357 {
1358 atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1359
1360 return rgssp && atomic_read(rgssp);
1361 }
1362
1363 static void rcu_gp_slow(int delay)
1364 {
1365 if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1366 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1367 schedule_timeout_idle(delay);
1368 }
1369
1370 static unsigned long sleep_duration;
1371
1372 /* Allow rcutorture to stall the grace-period kthread. */
1373 void rcu_gp_set_torture_wait(int duration)
1374 {
1375 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1376 WRITE_ONCE(sleep_duration, duration);
1377 }
1378 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1379
1380 /* Actually implement the aforementioned wait. */
1381 static void rcu_gp_torture_wait(void)
1382 {
1383 unsigned long duration;
1384
1385 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1386 return;
1387 duration = xchg(&sleep_duration, 0UL);
1388 if (duration > 0) {
1389 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1390 schedule_timeout_idle(duration);
1391 pr_alert("%s: Wait complete\n", __func__);
1392 }
1393 }
1394
1395 /*
1396 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1397 * processing.
1398 */
1399 static void rcu_strict_gp_boundary(void *unused)
1400 {
1401 invoke_rcu_core();
1402 }
1403
1404 // Make the polled API aware of the beginning of a grace period.
1405 static void rcu_poll_gp_seq_start(unsigned long *snap)
1406 {
1407 struct rcu_node *rnp = rcu_get_root();
1408
1409 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1410 raw_lockdep_assert_held_rcu_node(rnp);
1411
1412 // If RCU was idle, note beginning of GP.
1413 if (!rcu_seq_state(rcu_state.gp_seq_polled))
1414 rcu_seq_start(&rcu_state.gp_seq_polled);
1415
1416 // Either way, record current state.
1417 *snap = rcu_state.gp_seq_polled;
1418 }
1419
1420 // Make the polled API aware of the end of a grace period.
1421 static void rcu_poll_gp_seq_end(unsigned long *snap)
1422 {
1423 struct rcu_node *rnp = rcu_get_root();
1424
1425 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1426 raw_lockdep_assert_held_rcu_node(rnp);
1427
1428 // If the previously noted GP is still in effect, record the
1429 // end of that GP. Either way, zero counter to avoid counter-wrap
1430 // problems.
1431 if (*snap && *snap == rcu_state.gp_seq_polled) {
1432 rcu_seq_end(&rcu_state.gp_seq_polled);
1433 rcu_state.gp_seq_polled_snap = 0;
1434 rcu_state.gp_seq_polled_exp_snap = 0;
1435 } else {
1436 *snap = 0;
1437 }
1438 }
1439
1440 // Make the polled API aware of the beginning of a grace period, but
1441 // where caller does not hold the root rcu_node structure's lock.
1442 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1443 {
1444 unsigned long flags;
1445 struct rcu_node *rnp = rcu_get_root();
1446
1447 if (rcu_init_invoked()) {
1448 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1449 lockdep_assert_irqs_enabled();
1450 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1451 }
1452 rcu_poll_gp_seq_start(snap);
1453 if (rcu_init_invoked())
1454 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1455 }
1456
1457 // Make the polled API aware of the end of a grace period, but where
1458 // caller does not hold the root rcu_node structure's lock.
1459 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1460 {
1461 unsigned long flags;
1462 struct rcu_node *rnp = rcu_get_root();
1463
1464 if (rcu_init_invoked()) {
1465 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1466 lockdep_assert_irqs_enabled();
1467 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1468 }
1469 rcu_poll_gp_seq_end(snap);
1470 if (rcu_init_invoked())
1471 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1472 }
1473
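/*
 * An illustrative sketch of the polled grace-period API whose bookkeeping
 * the rcu_poll_gp_seq_*() helpers above help keep consistent.  A caller
 * snapshots the grace-period state, does other work, and frees only after
 * a full grace period has elapsed.  "struct my_obj" and my_deferred_free()
 * are hypothetical; the three API calls are the real polled-GP interfaces.
 */
#if 0	/* Illustrative only. */
static void my_deferred_free(struct my_obj *p)
{
	unsigned long cookie = get_state_synchronize_rcu();

	/* ... other useful work while a grace period (maybe) elapses ... */

	if (!poll_state_synchronize_rcu(cookie))
		cond_synchronize_rcu(cookie);	/* Wait only if needed. */
	kfree(p);
}
#endif
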
1474 /*
1475 * There is a single llist, which is used for handling
1476 * synchronize_rcu() users' enqueued rcu_synchronize nodes.
1477 * Within this llist, there are two tail pointers:
1478 *
1479 * wait tail: Tracks the set of nodes that need to
1480 * wait for the current GP to complete.
1481 * done tail: Tracks the set of nodes for which the grace
1482 * period has elapsed. Processing of these nodes
1483 * will be done as part of the cleanup work
1484 * execution by a kworker.
1485 *
1486 * At every grace period init, a new wait node is added
1487 * to the llist. This wait node is used as the wait tail
1488 * for this new grace period. Given that there are a fixed
1489 * number of wait nodes, all of them may be in use
1490 * (which can happen when kworker callback processing
1491 * is delayed) when an additional grace period is requested.
1492 * This means that the system is slow in processing callbacks.
1493 *
1494 * TODO: If slow processing is detected, the first node
1495 * in the llist should be used as the wait tail for this
1496 * grace period, so that users who must wait due to the
1497 * slow processing are handled by _this_ grace period
1498 * and not the next one.
1499 *
1500 * Below is an illustration of how the done and wait
1501 * tail pointers move from one set of rcu_synchronize nodes
1502 * to the other, as grace periods start and finish and
1503 * nodes are processed by kworker.
1504 *
1505 *
1506 * a. Initial llist callbacks list:
1507 *
1508 * +----------+ +--------+ +-------+
1509 * | | | | | |
1510 * | head |---------> | cb2 |--------->| cb1 |
1511 * | | | | | |
1512 * +----------+ +--------+ +-------+
1513 *
1514 *
1515 *
1516 * b. New GP1 Start:
1517 *
1518 * WAIT TAIL
1519 * |
1520 * |
1521 * v
1522 * +----------+ +--------+ +--------+ +-------+
1523 * | | | | | | | |
1524 * | head ------> wait |------> cb2 |------> | cb1 |
1525 * | | | head1 | | | | |
1526 * +----------+ +--------+ +--------+ +-------+
1527 *
1528 *
1529 *
1530 * c. GP completion:
1531 *
1532 * WAIT_TAIL == DONE_TAIL
1533 *
1534 * DONE TAIL
1535 * |
1536 * |
1537 * v
1538 * +----------+ +--------+ +--------+ +-------+
1539 * | | | | | | | |
1540 * | head ------> wait |------> cb2 |------> | cb1 |
1541 * | | | head1 | | | | |
1542 * +----------+ +--------+ +--------+ +-------+
1543 *
1544 *
1545 *
1546 * d. New callbacks and GP2 start:
1547 *
1548 * WAIT TAIL DONE TAIL
1549 * | |
1550 * | |
1551 * v v
1552 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1553 * | | | | | | | | | | | | | |
1554 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1555 * | | | head2| | | | | |head1| | | | |
1556 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1557 *
1558 *
1559 *
1560 * e. GP2 completion:
1561 *
1562 * WAIT_TAIL == DONE_TAIL
1563 * DONE TAIL
1564 * |
1565 * |
1566 * v
1567 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1568 * | | | | | | | | | | | | | |
1569 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1570 * | | | head2| | | | | |head1| | | | |
1571 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1572 *
1573 *
1574 * While the llist state transitions from d to e, a kworker
1575 * can start executing rcu_sr_normal_gp_cleanup_work() and
1576 * can observe either the old done tail (@c) or the new
1577 * done tail (@e). So, done tail updates and reads need
1578 * to use the rel-acq semantics. If the concurrent kworker
1579 * observes the old done tail, the newly queued work
1580 * execution will process the updated done tail. If the
1581 * concurrent kworker observes the new done tail, then
1582 * the newly queued work will skip processing the done
1583 * tail, as workqueue semantics guarantees that the new
1584 * work is executed only after the previous one completes.
1585 *
1586 * f. kworker callbacks processing complete:
1587 *
1588 *
1589 * DONE TAIL
1590 * |
1591 * |
1592 * v
1593 * +----------+ +--------+
1594 * | | | |
1595 * | head ------> wait |
1596 * | | | head2 |
1597 * +----------+ +--------+
1598 *
1599 */
1600 static bool rcu_sr_is_wait_head(struct llist_node *node)
1601 {
1602 return &(rcu_state.srs_wait_nodes)[0].node <= node &&
1603 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node;
1604 }
1605
1606 static struct llist_node *rcu_sr_get_wait_head(void)
1607 {
1608 struct sr_wait_node *sr_wn;
1609 int i;
1610
1611 for (i = 0; i < SR_NORMAL_GP_WAIT_HEAD_MAX; i++) {
1612 sr_wn = &(rcu_state.srs_wait_nodes)[i];
1613
1614 if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1))
1615 return &sr_wn->node;
1616 }
1617
1618 return NULL;
1619 }
1620
1621 static void rcu_sr_put_wait_head(struct llist_node *node)
1622 {
1623 struct sr_wait_node *sr_wn = container_of(node, struct sr_wait_node, node);
1624
1625 atomic_set_release(&sr_wn->inuse, 0);
1626 }
1627
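/*
 * Note: the atomic_cmpxchg_acquire() in rcu_sr_get_wait_head() pairs with
 * the atomic_set_release() in rcu_sr_put_wait_head(), so a wait head can be
 * reused for a later grace period only after all prior accesses to it by
 * the releasing kworker have completed.
 */
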
1628 /* Disabled by default. */
1629 static int rcu_normal_wake_from_gp;
1630 module_param(rcu_normal_wake_from_gp, int, 0644);
1631 static struct workqueue_struct *sync_wq;
1632
1633 static void rcu_sr_normal_complete(struct llist_node *node)
1634 {
1635 struct rcu_synchronize *rs = container_of(
1636 (struct rcu_head *) node, struct rcu_synchronize, head);
1637
1638 WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
1639 !poll_state_synchronize_rcu_full(&rs->oldstate),
1640 "A full grace period is not passed yet!\n");
1641
1642 /* Finally. */
1643 complete(&rs->completion);
1644 }
1645
1646 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
1647 {
1648 struct llist_node *done, *rcu, *next, *head;
1649
1650 /*
1651 * This work item can potentially execute
1652 * while a new done tail is being updated by
1653 * the grace-period kthread in rcu_sr_normal_gp_cleanup().
1654 * So, reads and updates of the done tail need to
1655 * follow acq-rel semantics.
1656 *
1657 * Given that wq semantics guarantee that a single work item
1658 * cannot be executed concurrently by multiple kworkers,
1659 * the done-tail list manipulations are protected here.
1660 */
1661 done = smp_load_acquire(&rcu_state.srs_done_tail);
1662 if (WARN_ON_ONCE(!done))
1663 return;
1664
1665 WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
1666 head = done->next;
1667 done->next = NULL;
1668
1669 /*
1670 * The dummy node, which is pointed to by the
1671 * done tail acq-read above, is not removed
1672 * here. This allows lockless additions of new
1673 * rcu_synchronize nodes in rcu_sr_normal_add_req(),
1674 * while the cleanup work executes. The dummy
1675 * node is removed in the next round of cleanup
1676 * work execution.
1677 */
1678 llist_for_each_safe(rcu, next, head) {
1679 if (!rcu_sr_is_wait_head(rcu)) {
1680 rcu_sr_normal_complete(rcu);
1681 continue;
1682 }
1683
1684 rcu_sr_put_wait_head(rcu);
1685 }
1686
1687 /* Order list manipulations with atomic access. */
1688 atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
1689 }
1690
1691 /*
1692 * Helper function for rcu_gp_cleanup().
1693 */
1694 static void rcu_sr_normal_gp_cleanup(void)
1695 {
1696 struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
1697 int done = 0;
1698
1699 wait_tail = rcu_state.srs_wait_tail;
1700 if (wait_tail == NULL)
1701 return;
1702
1703 rcu_state.srs_wait_tail = NULL;
1704 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1705 WARN_ON_ONCE(!rcu_sr_is_wait_head(wait_tail));
1706
1707 /*
1708 * Process (a) and (d) cases. See an illustration.
1709 */
1710 llist_for_each_safe(rcu, next, wait_tail->next) {
1711 if (rcu_sr_is_wait_head(rcu))
1712 break;
1713
1714 rcu_sr_normal_complete(rcu);
1715 // This node can be the last one, so update wait_tail->next on each step.
1716 wait_tail->next = next;
1717
1718 if (++done == SR_MAX_USERS_WAKE_FROM_GP)
1719 break;
1720 }
1721
1722 /*
1723 * Fast path: no more users to process except putting the second-last
1724 * wait head if there are no in-flight workers. If there are in-flight
1725 * workers, they will remove the last wait head.
1726 *
1727 * Note that the ACQUIRE orders atomic access with list manipulation.
1728 */
1729 if (wait_tail->next && wait_tail->next->next == NULL &&
1730 rcu_sr_is_wait_head(wait_tail->next) &&
1731 !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
1732 rcu_sr_put_wait_head(wait_tail->next);
1733 wait_tail->next = NULL;
1734 }
1735
1736 /* Concurrent sr_normal_gp_cleanup work might observe this update. */
1737 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
1738 smp_store_release(&rcu_state.srs_done_tail, wait_tail);
1739
1740 /*
1741 * We schedule a work item in order to perform final processing
1742 * of outstanding users (if any are left) and to release wait heads
1743 * added by the rcu_sr_normal_gp_init() call.
1744 */
1745 if (wait_tail->next) {
1746 atomic_inc(&rcu_state.srs_cleanups_pending);
1747 if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
1748 atomic_dec(&rcu_state.srs_cleanups_pending);
1749 }
1750 }
1751
1752 /*
1753 * Helper function for rcu_gp_init().
1754 */
1755 static bool rcu_sr_normal_gp_init(void)
1756 {
1757 struct llist_node *first;
1758 struct llist_node *wait_head;
1759 bool start_new_poll = false;
1760
1761 first = READ_ONCE(rcu_state.srs_next.first);
1762 if (!first || rcu_sr_is_wait_head(first))
1763 return start_new_poll;
1764
1765 wait_head = rcu_sr_get_wait_head();
1766 if (!wait_head) {
1767 // Kick another GP to retry.
1768 start_new_poll = true;
1769 return start_new_poll;
1770 }
1771
1772 /* Inject a wait-dummy-node. */
1773 llist_add(wait_head, &rcu_state.srs_next);
1774
1775 /*
1776 * The waiting list of rcu_synchronize nodes should be empty at
1777 * this step, since the GP kthread, rcu_gp_init() -> gp_cleanup(),
1778 * rolls it over. If not, it is a BUG, so warn the user.
1779 */
1780 WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL);
1781 rcu_state.srs_wait_tail = wait_head;
1782 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1783
1784 return start_new_poll;
1785 }
1786
1787 static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
1788 {
1789 llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
1790 }
1791
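/*
 * An illustrative sketch of how a synchronize_rcu() caller is expected to
 * use rcu_sr_normal_add_req(): queue an on-stack rcu_synchronize node and
 * block until the grace-period kthread (or the cleanup kworker) completes
 * it.  This mirrors, approximately, the slow path elsewhere in this file;
 * exact details may differ.
 */
#if 0	/* Illustrative only. */
static void my_synchronize_rcu_normal(void)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	get_state_synchronize_rcu_full(&rs.oldstate);
	rcu_sr_normal_add_req(&rs);

	/* Kick a grace period if none is in flight, then wait. */
	(void) start_poll_synchronize_rcu();
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
#endif
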
1792 /*
1793 * Initialize a new grace period. Return false if no grace period required.
1794 */
1795 static noinline_for_stack bool rcu_gp_init(void)
1796 {
1797 unsigned long flags;
1798 unsigned long oldmask;
1799 unsigned long mask;
1800 struct rcu_data *rdp;
1801 struct rcu_node *rnp = rcu_get_root();
1802 bool start_new_poll;
1803 unsigned long old_gp_seq;
1804
1805 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1806 raw_spin_lock_irq_rcu_node(rnp);
1807 if (!rcu_state.gp_flags) {
1808 /* Spurious wakeup, tell caller to go back to sleep. */
1809 raw_spin_unlock_irq_rcu_node(rnp);
1810 return false;
1811 }
1812 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1813
1814 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1815 /*
1816 * Grace period already in progress, don't start another.
1817 * Not supposed to be able to happen.
1818 */
1819 raw_spin_unlock_irq_rcu_node(rnp);
1820 return false;
1821 }
1822
1823 /* Advance to a new grace period and initialize state. */
1824 record_gp_stall_check_time();
1825 /*
1826 * A new wait segment must be started before gp_seq advanced, so
1827 * that previous gp waiters won't observe the new gp_seq.
1828 */
1829 start_new_poll = rcu_sr_normal_gp_init();
1830 /* Record GP times before starting GP, hence rcu_seq_start(). */
1831 old_gp_seq = rcu_state.gp_seq;
1832 rcu_seq_start(&rcu_state.gp_seq);
1833 /* Ensure that rcu_seq_done_exact() guardband doesn't give false positives. */
1834 WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
1835 rcu_seq_done_exact(&old_gp_seq, rcu_seq_snap(&rcu_state.gp_seq)));
1836
1837 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1838 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1839 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1840 raw_spin_unlock_irq_rcu_node(rnp);
1841
1842 /*
1843 * The "start_new_poll" flag is set to true only when this GP is not able
1844 * to handle anything and there are outstanding users. That happens when
1845 * the rcu_sr_normal_gp_init() function was not able to insert a dummy
1846 * separator into the llist, because no dummy nodes were left.
1847 *
1848 * The number of dummy nodes is fixed, so it is possible to run out of
1849 * them, in which case we start a new poll request to retry. This is rare
1850 * and means that the system is slow in processing callbacks.
1851 */
1852 if (start_new_poll)
1853 (void) start_poll_synchronize_rcu();
1854
1855 /*
1856 * Apply per-leaf buffered online and offline operations to
1857 * the rcu_node tree. Note that this new grace period need not
1858 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1859 * offlining path, when combined with checks in this function,
1860 * will handle CPUs that are currently going offline or that will
1861 * go offline later. Please also refer to "Hotplug CPU" section
1862 * of RCU's Requirements documentation.
1863 */
1864 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1865 /* Exclude CPU hotplug operations. */
1866 rcu_for_each_leaf_node(rnp) {
1867 local_irq_disable();
1868 arch_spin_lock(&rcu_state.ofl_lock);
1869 raw_spin_lock_rcu_node(rnp);
1870 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1871 !rnp->wait_blkd_tasks) {
1872 /* Nothing to do on this leaf rcu_node structure. */
1873 raw_spin_unlock_rcu_node(rnp);
1874 arch_spin_unlock(&rcu_state.ofl_lock);
1875 local_irq_enable();
1876 continue;
1877 }
1878
1879 /* Record old state, apply changes to ->qsmaskinit field. */
1880 oldmask = rnp->qsmaskinit;
1881 rnp->qsmaskinit = rnp->qsmaskinitnext;
1882
1883 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1884 if (!oldmask != !rnp->qsmaskinit) {
1885 if (!oldmask) { /* First online CPU for rcu_node. */
1886 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1887 rcu_init_new_rnp(rnp);
1888 } else if (rcu_preempt_has_tasks(rnp)) {
1889 rnp->wait_blkd_tasks = true; /* blocked tasks */
1890 } else { /* Last offline CPU and can propagate. */
1891 rcu_cleanup_dead_rnp(rnp);
1892 }
1893 }
1894
1895 /*
1896 * If all waited-on tasks from prior grace period are
1897 * done, and if all this rcu_node structure's CPUs are
1898 * still offline, propagate up the rcu_node tree and
1899 * clear ->wait_blkd_tasks. Otherwise, if one of this
1900 * rcu_node structure's CPUs has since come back online,
1901 * simply clear ->wait_blkd_tasks.
1902 */
1903 if (rnp->wait_blkd_tasks &&
1904 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1905 rnp->wait_blkd_tasks = false;
1906 if (!rnp->qsmaskinit)
1907 rcu_cleanup_dead_rnp(rnp);
1908 }
1909
1910 raw_spin_unlock_rcu_node(rnp);
1911 arch_spin_unlock(&rcu_state.ofl_lock);
1912 local_irq_enable();
1913 }
1914 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1915
1916 /*
1917 * Set the quiescent-state-needed bits in all the rcu_node
1918 * structures for all currently online CPUs in breadth-first
1919 * order, starting from the root rcu_node structure, relying on the
1920 * layout of the tree within the rcu_state.node[] array. Note that
1921 * other CPUs will access only the leaves of the hierarchy, thus
1922 * seeing that no grace period is in progress, at least until the
1923 * corresponding leaf node has been initialized.
1924 *
1925 * The grace period cannot complete until the initialization
1926 * process finishes, because this kthread handles both.
1927 */
1928 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1929 rcu_for_each_node_breadth_first(rnp) {
1930 rcu_gp_slow(gp_init_delay);
1931 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1932 rdp = this_cpu_ptr(&rcu_data);
1933 rcu_preempt_check_blocked_tasks(rnp);
1934 rnp->qsmask = rnp->qsmaskinit;
1935 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1936 if (rnp == rdp->mynode)
1937 (void)__note_gp_changes(rnp, rdp);
1938 rcu_preempt_boost_start_gp(rnp);
1939 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1940 rnp->level, rnp->grplo,
1941 rnp->grphi, rnp->qsmask);
1942 /* Quiescent states for tasks on any now-offline CPUs. */
1943 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1944 rnp->rcu_gp_init_mask = mask;
1945 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1946 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1947 else
1948 raw_spin_unlock_irq_rcu_node(rnp);
1949 cond_resched_tasks_rcu_qs();
1950 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1951 }
1952
1953 // If strict, make all CPUs aware of new grace period.
1954 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1955 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1956
1957 return true;
1958 }
1959
1960 /*
1961 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1962 * time.
1963 */
1964 static bool rcu_gp_fqs_check_wake(int *gfp)
1965 {
1966 struct rcu_node *rnp = rcu_get_root();
1967
1968 // If under overload conditions, force an immediate FQS scan.
1969 if (*gfp & RCU_GP_FLAG_OVLD)
1970 return true;
1971
1972 // Someone like call_rcu() requested a force-quiescent-state scan.
1973 *gfp = READ_ONCE(rcu_state.gp_flags);
1974 if (*gfp & RCU_GP_FLAG_FQS)
1975 return true;
1976
1977 // The current grace period has completed.
1978 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1979 return true;
1980
1981 return false;
1982 }
1983
1984 /*
1985 * Do one round of quiescent-state forcing.
1986 */
1987 static void rcu_gp_fqs(bool first_time)
1988 {
1989 int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
1990 struct rcu_node *rnp = rcu_get_root();
1991
1992 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1993 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1994
1995 WARN_ON_ONCE(nr_fqs > 3);
1996 /* Only countdown nr_fqs for stall purposes if jiffies moves. */
1997 if (nr_fqs) {
1998 if (nr_fqs == 1) {
1999 WRITE_ONCE(rcu_state.jiffies_stall,
2000 jiffies + rcu_jiffies_till_stall_check());
2001 }
2002 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
2003 }
2004
2005 if (first_time) {
2006 /* Collect dyntick-idle snapshots. */
2007 force_qs_rnp(rcu_watching_snap_save);
2008 } else {
2009 /* Handle dyntick-idle and offline CPUs. */
2010 force_qs_rnp(rcu_watching_snap_recheck);
2011 }
2012 /* Clear flag to prevent immediate re-entry. */
2013 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2014 raw_spin_lock_irq_rcu_node(rnp);
2015 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & ~RCU_GP_FLAG_FQS);
2016 raw_spin_unlock_irq_rcu_node(rnp);
2017 }
2018 }
2019
2020 /*
2021 * Loop doing repeated quiescent-state forcing until the grace period ends.
2022 */
2023 static noinline_for_stack void rcu_gp_fqs_loop(void)
2024 {
2025 bool first_gp_fqs = true;
2026 int gf = 0;
2027 unsigned long j;
2028 int ret;
2029 struct rcu_node *rnp = rcu_get_root();
2030
2031 j = READ_ONCE(jiffies_till_first_fqs);
2032 if (rcu_state.cbovld)
2033 gf = RCU_GP_FLAG_OVLD;
2034 ret = 0;
2035 for (;;) {
2036 if (rcu_state.cbovld) {
2037 j = (j + 2) / 3;
2038 if (j <= 0)
2039 j = 1;
2040 }
2041 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
2042 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
2043 /*
2044 * jiffies_force_qs before RCU_GP_WAIT_FQS state
2045 * update; required for stall checks.
2046 */
2047 smp_wmb();
2048 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
2049 jiffies + (j ? 3 * j : 2));
2050 }
2051 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2052 TPS("fqswait"));
2053 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
2054 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
2055 rcu_gp_fqs_check_wake(&gf), j);
2056 rcu_gp_torture_wait();
2057 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
2058 /* Locking provides needed memory barriers. */
2059 /*
2060 * Exit the loop if the root rcu_node structure indicates that the grace period
2061 * has ended. The rcu_preempt_blocked_readers_cgp(rnp) check
2062 * is required only for single-node rcu_node trees because readers blocking
2063 * the current grace period are queued only on leaf rcu_node structures.
2064 * For multi-node trees, checking the root node's ->qsmask suffices, because a
2065 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
2066 * the corresponding leaf nodes have passed through their quiescent state.
2067 */
2068 if (!READ_ONCE(rnp->qsmask) &&
2069 !rcu_preempt_blocked_readers_cgp(rnp))
2070 break;
2071 /* If time for quiescent-state forcing, do it. */
2072 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
2073 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
2074 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2075 TPS("fqsstart"));
2076 rcu_gp_fqs(first_gp_fqs);
2077 gf = 0;
2078 if (first_gp_fqs) {
2079 first_gp_fqs = false;
2080 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
2081 }
2082 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2083 TPS("fqsend"));
2084 cond_resched_tasks_rcu_qs();
2085 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2086 ret = 0; /* Force full wait till next FQS. */
2087 j = READ_ONCE(jiffies_till_next_fqs);
2088 } else {
2089 /* Deal with stray signal. */
2090 cond_resched_tasks_rcu_qs();
2091 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2092 WARN_ON(signal_pending(current));
2093 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2094 TPS("fqswaitsig"));
2095 ret = 1; /* Keep old FQS timing. */
2096 j = jiffies;
2097 if (time_after(jiffies, rcu_state.jiffies_force_qs))
2098 j = 1;
2099 else
2100 j = rcu_state.jiffies_force_qs - j;
2101 gf = 0;
2102 }
2103 }
2104 }
2105
2106 /*
2107 * Clean up after the old grace period.
2108 */
2109 static noinline void rcu_gp_cleanup(void)
2110 {
2111 int cpu;
2112 bool needgp = false;
2113 unsigned long gp_duration;
2114 unsigned long new_gp_seq;
2115 bool offloaded;
2116 struct rcu_data *rdp;
2117 struct rcu_node *rnp = rcu_get_root();
2118 struct swait_queue_head *sq;
2119
2120 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2121 raw_spin_lock_irq_rcu_node(rnp);
2122 rcu_state.gp_end = jiffies;
2123 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2124 if (gp_duration > rcu_state.gp_max)
2125 rcu_state.gp_max = gp_duration;
2126
2127 /*
2128 * We know the grace period is complete, but to everyone else
2129 * it appears to still be ongoing. But it is also the case
2130 * that to everyone else it looks like there is nothing that
2131 * they can do to advance the grace period. It is therefore
2132 * safe for us to drop the lock in order to mark the grace
2133 * period as completed in all of the rcu_node structures.
2134 */
2135 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
2136 raw_spin_unlock_irq_rcu_node(rnp);
2137
2138 /*
2139 * Propagate new ->gp_seq value to rcu_node structures so that
2140 * other CPUs don't have to wait until the start of the next grace
2141 * period to process their callbacks. This also avoids some nasty
2142 * RCU grace-period initialization races by forcing the end of
2143 * the current grace period to be completely recorded in all of
2144 * the rcu_node structures before the beginning of the next grace
2145 * period is recorded in any of the rcu_node structures.
2146 */
2147 new_gp_seq = rcu_state.gp_seq;
2148 rcu_seq_end(&new_gp_seq);
2149 rcu_for_each_node_breadth_first(rnp) {
2150 raw_spin_lock_irq_rcu_node(rnp);
2151 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2152 dump_blkd_tasks(rnp, 10);
2153 WARN_ON_ONCE(rnp->qsmask);
2154 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2155 if (!rnp->parent)
2156 smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
2157 rdp = this_cpu_ptr(&rcu_data);
2158 if (rnp == rdp->mynode)
2159 needgp = __note_gp_changes(rnp, rdp) || needgp;
2160 /* smp_mb() provided by prior unlock-lock pair. */
2161 needgp = rcu_future_gp_cleanup(rnp) || needgp;
2162 // Reset overload indication for CPUs no longer overloaded
2163 if (rcu_is_leaf_node(rnp))
2164 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2165 rdp = per_cpu_ptr(&rcu_data, cpu);
2166 check_cb_ovld_locked(rdp, rnp);
2167 }
2168 sq = rcu_nocb_gp_get(rnp);
2169 raw_spin_unlock_irq_rcu_node(rnp);
2170 rcu_nocb_gp_cleanup(sq);
2171 cond_resched_tasks_rcu_qs();
2172 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2173 rcu_gp_slow(gp_cleanup_delay);
2174 }
2175 rnp = rcu_get_root();
2176 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2177
2178 /* Declare grace period done, trace first to use old GP number. */
2179 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2180 rcu_seq_end(&rcu_state.gp_seq);
2181 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2182 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2183 /* Check for GP requests since above loop. */
2184 rdp = this_cpu_ptr(&rcu_data);
2185 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2186 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2187 TPS("CleanupMore"));
2188 needgp = true;
2189 }
2190 /* Advance CBs to reduce false positives below. */
2191 offloaded = rcu_rdp_is_offloaded(rdp);
2192 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2193
2194 // We get here if a grace period was needed (“needgp”)
2195 // and the above call to rcu_accelerate_cbs() did not set
2196 // the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
2197 // the need for another grace period). The purpose
2198 // of the “offloaded” check is to avoid invoking
2199 // rcu_accelerate_cbs() on an offloaded CPU because we do not
2200 // hold the ->nocb_lock needed to safely access an offloaded
2201 // ->cblist. We do not want to acquire that lock because
2202 // it can be heavily contended during callback floods.
2203
2204 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2205 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2206 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
2207 } else {
2208
2209 // We get here either if there is no need for an
2210 // additional grace period or if rcu_accelerate_cbs() has
2211 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
2212 // So all we need to do is to clear all of the other
2213 // ->gp_flags bits.
2214
2215 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2216 }
2217 raw_spin_unlock_irq_rcu_node(rnp);
2218
2219 // Make synchronize_rcu() users aware of the end of old grace period.
2220 rcu_sr_normal_gp_cleanup();
2221
2222 // If strict, make all CPUs aware of the end of the old grace period.
2223 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2224 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2225 }
2226
2227 /*
2228 * Body of kthread that handles grace periods.
2229 */
2230 static int __noreturn rcu_gp_kthread(void *unused)
2231 {
2232 rcu_bind_gp_kthread();
2233 for (;;) {
2234
2235 /* Handle grace-period start. */
2236 for (;;) {
2237 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2238 TPS("reqwait"));
2239 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2240 swait_event_idle_exclusive(rcu_state.gp_wq,
2241 READ_ONCE(rcu_state.gp_flags) &
2242 RCU_GP_FLAG_INIT);
2243 rcu_gp_torture_wait();
2244 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2245 /* Locking provides needed memory barrier. */
2246 if (rcu_gp_init())
2247 break;
2248 cond_resched_tasks_rcu_qs();
2249 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2250 WARN_ON(signal_pending(current));
2251 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2252 TPS("reqwaitsig"));
2253 }
2254
2255 /* Handle quiescent-state forcing. */
2256 rcu_gp_fqs_loop();
2257
2258 /* Handle grace-period end. */
2259 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2260 rcu_gp_cleanup();
2261 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
2262 }
2263 }
2264
2265 /*
2266 * Report a full set of quiescent states to the rcu_state data structure.
2267 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2268 * another grace period is required. Whether we wake the grace-period
2269 * kthread or it awakens itself for the next round of quiescent-state
2270 * forcing, that kthread will clean up after the just-completed grace
2271 * period. Note that the caller must hold rnp->lock, which is released
2272 * before return.
2273 */
2274 static void rcu_report_qs_rsp(unsigned long flags)
2275 __releases(rcu_get_root()->lock)
2276 {
2277 raw_lockdep_assert_held_rcu_node(rcu_get_root());
2278 WARN_ON_ONCE(!rcu_gp_in_progress());
2279 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2280 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2281 rcu_gp_kthread_wake();
2282 }
2283
2284 /*
2285 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2286 * Allows quiescent states for a group of CPUs to be reported at one go
2287 * to the specified rcu_node structure, though all the CPUs in the group
2288 * must be represented by the same rcu_node structure (which need not be a
2289 * leaf rcu_node structure, though it often will be). The gps parameter
2290 * is the grace-period snapshot, which means that the quiescent states
2291 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2292 * must be held upon entry, and it is released before return.
2293 *
2294 * As a special case, if mask is zero, the bit-already-cleared check is
2295 * disabled. This allows propagating quiescent state due to resumed tasks
2296 * during grace-period initialization.
2297 */
2298 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2299 unsigned long gps, unsigned long flags)
2300 __releases(rnp->lock)
2301 {
2302 unsigned long oldmask = 0;
2303 struct rcu_node *rnp_c;
2304
2305 raw_lockdep_assert_held_rcu_node(rnp);
2306
2307 /* Walk up the rcu_node hierarchy. */
2308 for (;;) {
2309 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2310
2311 /*
2312 * Our bit has already been cleared, or the
2313 * relevant grace period is already over, so done.
2314 */
2315 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2316 return;
2317 }
2318 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2319 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2320 rcu_preempt_blocked_readers_cgp(rnp));
2321 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2322 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2323 mask, rnp->qsmask, rnp->level,
2324 rnp->grplo, rnp->grphi,
2325 !!rnp->gp_tasks);
2326 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2327
2328 /* Other bits still set at this level, so done. */
2329 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2330 return;
2331 }
2332 rnp->completedqs = rnp->gp_seq;
2333 mask = rnp->grpmask;
2334 if (rnp->parent == NULL) {
2335
2336 /* No more levels. Exit loop holding root lock. */
2337
2338 break;
2339 }
2340 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2341 rnp_c = rnp;
2342 rnp = rnp->parent;
2343 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2344 oldmask = READ_ONCE(rnp_c->qsmask);
2345 }
2346
2347 /*
2348 * Get here if we are the last CPU to pass through a quiescent
2349 * state for this grace period. Invoke rcu_report_qs_rsp()
2350 * to clean up and start the next grace period if one is needed.
2351 */
2352 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2353 }
2354
2355 /*
2356 * Record a quiescent state for all tasks that were previously queued
2357 * on the specified rcu_node structure and that were blocking the current
2358 * RCU grace period. The caller must hold the corresponding rnp->lock with
2359 * irqs disabled, and this lock is released upon return, but irqs remain
2360 * disabled.
2361 */
2362 static void __maybe_unused
2363 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2364 __releases(rnp->lock)
2365 {
2366 unsigned long gps;
2367 unsigned long mask;
2368 struct rcu_node *rnp_p;
2369
2370 raw_lockdep_assert_held_rcu_node(rnp);
2371 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2372 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2373 rnp->qsmask != 0) {
2374 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2375 return; /* Still need more quiescent states! */
2376 }
2377
2378 rnp->completedqs = rnp->gp_seq;
2379 rnp_p = rnp->parent;
2380 if (rnp_p == NULL) {
2381 /*
2382 * Only one rcu_node structure in the tree, so don't
2383 * try to report up to its nonexistent parent!
2384 */
2385 rcu_report_qs_rsp(flags);
2386 return;
2387 }
2388
2389 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2390 gps = rnp->gp_seq;
2391 mask = rnp->grpmask;
2392 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2393 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
2394 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2395 }
2396
2397 /*
2398 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2399 * structure. This must be called from the specified CPU.
2400 */
2401 static void
2402 rcu_report_qs_rdp(struct rcu_data *rdp)
2403 {
2404 unsigned long flags;
2405 unsigned long mask;
2406 struct rcu_node *rnp;
2407
2408 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2409 rnp = rdp->mynode;
2410 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2411 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2412 rdp->gpwrap) {
2413
2414 /*
2415 * The grace period in which this quiescent state was
2416 * recorded has ended, so don't report it upwards.
2417 * We will instead need a new quiescent state that lies
2418 * within the current grace period.
2419 */
2420 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2421 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2422 return;
2423 }
2424 mask = rdp->grpmask;
2425 rdp->core_needs_qs = false;
2426 if ((rnp->qsmask & mask) == 0) {
2427 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2428 } else {
2429 /*
2430 * This GP can't end until cpu checks in, so all of our
2431 * callbacks can be processed during the next GP.
2432 *
2433 * NOCB kthreads have their own way to deal with that...
2434 */
2435 if (!rcu_rdp_is_offloaded(rdp)) {
2436 /*
2437 * The current GP has not yet ended, so it
2438 * should not be possible for rcu_accelerate_cbs()
2439 * to return true. So complain, but don't awaken.
2440 */
2441 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2442 }
2443
2444 rcu_disable_urgency_upon_qs(rdp);
2445 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2446 /* ^^^ Released rnp->lock */
2447 }
2448 }
2449
2450 /*
2451 * Check to see if there is a new grace period of which this CPU
2452 * is not yet aware, and if so, set up local rcu_data state for it.
2453 * Otherwise, see if this CPU has just passed through its first
2454 * quiescent state for this grace period, and record that fact if so.
2455 */
2456 static void
2457 rcu_check_quiescent_state(struct rcu_data *rdp)
2458 {
2459 /* Check for grace-period ends and beginnings. */
2460 note_gp_changes(rdp);
2461
2462 /*
2463 * Does this CPU still need to do its part for current grace period?
2464 * If no, return and let the other CPUs do their part as well.
2465 */
2466 if (!rdp->core_needs_qs)
2467 return;
2468
2469 /*
2470 * Was there a quiescent state since the beginning of the grace
2471 * period? If no, then exit and wait for the next call.
2472 */
2473 if (rdp->cpu_no_qs.b.norm)
2474 return;
2475
2476 /*
2477 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2478 * judge of that).
2479 */
2480 rcu_report_qs_rdp(rdp);
2481 }
2482
2483 /* Return true if callback-invocation time limit exceeded. */
2484 static bool rcu_do_batch_check_time(long count, long tlimit,
2485 bool jlimit_check, unsigned long jlimit)
2486 {
2487 // Invoke local_clock() only once per 32 consecutive callbacks.
2488 return unlikely(tlimit) &&
2489 (!likely(count & 31) ||
2490 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) &&
2491 jlimit_check && time_after(jiffies, jlimit))) &&
2492 local_clock() >= tlimit;
2493 }
2494
2495 /*
2496 * Invoke any RCU callbacks that have made it to the end of their grace
2497 * period. Throttle as specified by rdp->blimit.
2498 */
2499 static void rcu_do_batch(struct rcu_data *rdp)
2500 {
2501 long bl;
2502 long count = 0;
2503 int div;
2504 bool __maybe_unused empty;
2505 unsigned long flags;
2506 unsigned long jlimit;
2507 bool jlimit_check = false;
2508 long pending;
2509 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2510 struct rcu_head *rhp;
2511 long tlimit = 0;
2512
2513 /* If no callbacks are ready, just return. */
2514 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2515 trace_rcu_batch_start(rcu_state.name,
2516 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2517 trace_rcu_batch_end(rcu_state.name, 0,
2518 !rcu_segcblist_empty(&rdp->cblist),
2519 need_resched(), is_idle_task(current),
2520 rcu_is_callbacks_kthread(rdp));
2521 return;
2522 }
2523
2524 /*
2525 * Extract the list of ready callbacks, disabling IRQs to prevent
2526 * races with call_rcu() from interrupt handlers. Leave the
2527 * callback counts, as rcu_barrier() needs to be conservative.
2528 *
2529 * Callbacks execution is fully ordered against preceding grace period
2530 * completion (materialized by rnp->gp_seq update) thanks to the
2531 * smp_mb__after_unlock_lock() upon node locking required for callbacks
2532 * advancing. In NOCB mode this ordering is then further relayed through
2533 * the nocb locking that protects both callbacks advancing and extraction.
2534 */
2535 rcu_nocb_lock_irqsave(rdp, flags);
2536 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2537 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2538 div = READ_ONCE(rcu_divisor);
2539 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2540 bl = max(rdp->blimit, pending >> div);
2541 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2542 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) {
2543 const long npj = NSEC_PER_SEC / HZ;
2544 long rrn = READ_ONCE(rcu_resched_ns);
2545
2546 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2547 tlimit = local_clock() + rrn;
2548 jlimit = jiffies + (rrn + npj + 1) / npj;
2549 jlimit_check = true;
2550 }
2551 trace_rcu_batch_start(rcu_state.name,
2552 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2553 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2554 if (rcu_rdp_is_offloaded(rdp))
2555 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2556
2557 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2558 rcu_nocb_unlock_irqrestore(rdp, flags);
2559
2560 /* Invoke callbacks. */
2561 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2562 rhp = rcu_cblist_dequeue(&rcl);
2563
2564 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2565 rcu_callback_t f;
2566
2567 count++;
2568 debug_rcu_head_unqueue(rhp);
2569
2570 rcu_lock_acquire(&rcu_callback_map);
2571 trace_rcu_invoke_callback(rcu_state.name, rhp);
2572
2573 f = rhp->func;
2574 debug_rcu_head_callback(rhp);
2575 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2576 f(rhp);
2577
2578 rcu_lock_release(&rcu_callback_map);
2579
2580 /*
2581 * Stop only if limit reached and CPU has something to do.
2582 */
2583 if (in_serving_softirq()) {
2584 if (count >= bl && (need_resched() || !is_idle_task(current)))
2585 break;
2586 /*
2587 * Make sure we don't spend too much time here and deprive other
2588 * softirq vectors of CPU cycles.
2589 */
2590 if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit))
2591 break;
2592 } else {
2593 // In rcuc/rcuoc context, so no worries about
2594 // depriving other softirq vectors of CPU cycles.
2595 local_bh_enable();
2596 lockdep_assert_irqs_enabled();
2597 cond_resched_tasks_rcu_qs();
2598 lockdep_assert_irqs_enabled();
2599 local_bh_disable();
2600 // But rcuc kthreads can delay quiescent-state
2601 // reporting, so check time limits for them.
2602 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2603 rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) {
2604 rdp->rcu_cpu_has_work = 1;
2605 break;
2606 }
2607 }
2608 }
2609
2610 rcu_nocb_lock_irqsave(rdp, flags);
2611 rdp->n_cbs_invoked += count;
2612 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2613 is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2614
2615 /* Update counts and requeue any remaining callbacks. */
2616 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2617 rcu_segcblist_add_len(&rdp->cblist, -count);
2618
2619 /* Reinstate batch limit if we have worked down the excess. */
2620 count = rcu_segcblist_n_cbs(&rdp->cblist);
2621 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2622 rdp->blimit = blimit;
2623
2624 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2625 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2626 rdp->qlen_last_fqs_check = 0;
2627 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2628 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2629 rdp->qlen_last_fqs_check = count;
2630
2631 /*
2632 * The following usually indicates a double call_rcu(). To track
2633 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2634 */
2635 empty = rcu_segcblist_empty(&rdp->cblist);
2636 WARN_ON_ONCE(count == 0 && !empty);
2637 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2638 count != 0 && empty);
2639 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2640 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2641
2642 rcu_nocb_unlock_irqrestore(rdp, flags);
2643
2644 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2645 }
2646
2647 /*
2648 * This function is invoked from each scheduling-clock interrupt,
2649 * and checks to see if this CPU is in a non-context-switch quiescent
2650 * state, for example, user mode or idle loop. It also schedules RCU
2651 * core processing. If the current grace period has gone on too long,
2652 * it will ask the scheduler to manufacture a context switch for the sole
2653 * purpose of providing the needed quiescent state.
2654 */
2655 void rcu_sched_clock_irq(int user)
2656 {
2657 unsigned long j;
2658
2659 if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2660 j = jiffies;
2661 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2662 __this_cpu_write(rcu_data.last_sched_clock, j);
2663 }
2664 trace_rcu_utilization(TPS("Start scheduler-tick"));
2665 lockdep_assert_irqs_disabled();
2666 raw_cpu_inc(rcu_data.ticks_this_gp);
2667 /* The load-acquire pairs with the store-release setting to true. */
2668 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2669 /* Idle and userspace execution already are quiescent states. */
2670 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2671 set_tsk_need_resched(current);
2672 set_preempt_need_resched();
2673 }
2674 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2675 }
2676 rcu_flavor_sched_clock_irq(user);
2677 if (rcu_pending(user))
2678 invoke_rcu_core();
2679 if (user || rcu_is_cpu_rrupt_from_idle())
2680 rcu_note_voluntary_context_switch(current);
2681 lockdep_assert_irqs_disabled();
2682
2683 trace_rcu_utilization(TPS("End scheduler-tick"));
2684 }
2685
2686 /*
2687 * Scan the leaf rcu_node structures. For each structure on which all
2688 * CPUs have reported a quiescent state and on which there are tasks
2689 * blocking the current grace period, initiate RCU priority boosting.
2690 * Otherwise, invoke the specified function to check dyntick state for
2691 * each CPU that has not yet reported a quiescent state.
2692 */
2693 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2694 {
2695 int cpu;
2696 unsigned long flags;
2697 struct rcu_node *rnp;
2698
2699 rcu_state.cbovld = rcu_state.cbovldnext;
2700 rcu_state.cbovldnext = false;
2701 rcu_for_each_leaf_node(rnp) {
2702 unsigned long mask = 0;
2703 unsigned long rsmask = 0;
2704
2705 cond_resched_tasks_rcu_qs();
2706 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2707 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2708 if (rnp->qsmask == 0) {
2709 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2710 /*
2711 * No point in scanning bits because they
2712 * are all zero. But we might need to
2713 * priority-boost blocked readers.
2714 */
2715 rcu_initiate_boost(rnp, flags);
2716 /* rcu_initiate_boost() releases rnp->lock */
2717 continue;
2718 }
2719 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2720 continue;
2721 }
2722 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2723 struct rcu_data *rdp;
2724 int ret;
2725
2726 rdp = per_cpu_ptr(&rcu_data, cpu);
2727 ret = f(rdp);
2728 if (ret > 0) {
2729 mask |= rdp->grpmask;
2730 rcu_disable_urgency_upon_qs(rdp);
2731 }
2732 if (ret < 0)
2733 rsmask |= rdp->grpmask;
2734 }
2735 if (mask != 0) {
2736 /* Idle/offline CPUs, report (releases rnp->lock). */
2737 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2738 } else {
2739 /* Nothing to do here, so just drop the lock. */
2740 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2741 }
2742
2743 for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
2744 resched_cpu(cpu);
2745 }
2746 }
2747
2748 /*
2749 * Force quiescent states on reluctant CPUs, and also detect which
2750 * CPUs are in dyntick-idle mode.
2751 */
2752 void rcu_force_quiescent_state(void)
2753 {
2754 unsigned long flags;
2755 bool ret;
2756 struct rcu_node *rnp;
2757 struct rcu_node *rnp_old = NULL;
2758
2759 if (!rcu_gp_in_progress())
2760 return;
2761 /* Funnel through hierarchy to reduce memory contention. */
2762 rnp = raw_cpu_read(rcu_data.mynode);
2763 for (; rnp != NULL; rnp = rnp->parent) {
2764 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2765 !raw_spin_trylock(&rnp->fqslock);
2766 if (rnp_old != NULL)
2767 raw_spin_unlock(&rnp_old->fqslock);
2768 if (ret)
2769 return;
2770 rnp_old = rnp;
2771 }
2772 /* rnp_old == rcu_get_root(), rnp == NULL. */
2773
2774 /* Reached the root of the rcu_node tree, acquire lock. */
2775 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2776 raw_spin_unlock(&rnp_old->fqslock);
2777 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2778 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2779 return; /* Someone beat us to it. */
2780 }
2781 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2782 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2783 rcu_gp_kthread_wake();
2784 }
2785 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2786
2787 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2788 // grace periods.
2789 static void strict_work_handler(struct work_struct *work)
2790 {
2791 rcu_read_lock();
2792 rcu_read_unlock();
2793 }
2794
2795 /* Perform RCU core processing work for the current CPU. */
2796 static __latent_entropy void rcu_core(void)
2797 {
2798 unsigned long flags;
2799 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2800 struct rcu_node *rnp = rdp->mynode;
2801
2802 if (cpu_is_offline(smp_processor_id()))
2803 return;
2804 trace_rcu_utilization(TPS("Start RCU core"));
2805 WARN_ON_ONCE(!rdp->beenonline);
2806
2807 /* Report any deferred quiescent states if preemption enabled. */
2808 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2809 rcu_preempt_deferred_qs(current);
2810 } else if (rcu_preempt_need_deferred_qs(current)) {
2811 set_tsk_need_resched(current);
2812 set_preempt_need_resched();
2813 }
2814
2815 /* Update RCU state based on any recent quiescent states. */
2816 rcu_check_quiescent_state(rdp);
2817
2818 /* No grace period and unregistered callbacks? */
2819 if (!rcu_gp_in_progress() &&
2820 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
2821 local_irq_save(flags);
2822 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2823 rcu_accelerate_cbs_unlocked(rnp, rdp);
2824 local_irq_restore(flags);
2825 }
2826
2827 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2828
2829 /* If there are callbacks ready, invoke them. */
2830 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2831 likely(READ_ONCE(rcu_scheduler_fully_active))) {
2832 rcu_do_batch(rdp);
2833 /* Re-invoke RCU core processing if there are callbacks remaining. */
2834 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2835 invoke_rcu_core();
2836 }
2837
2838 /* Do any needed deferred wakeups of rcuo kthreads. */
2839 do_nocb_deferred_wakeup(rdp);
2840 trace_rcu_utilization(TPS("End RCU core"));
2841
2842 // If strict GPs, schedule an RCU reader in a clean environment.
2843 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2844 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2845 }
2846
2847 static void rcu_core_si(void)
2848 {
2849 rcu_core();
2850 }
2851
2852 static void rcu_wake_cond(struct task_struct *t, int status)
2853 {
2854 /*
2855 * If the thread is yielding, only wake it when this
2856 * is invoked from idle.
2857 */
2858 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2859 wake_up_process(t);
2860 }
2861
2862 static void invoke_rcu_core_kthread(void)
2863 {
2864 struct task_struct *t;
2865 unsigned long flags;
2866
2867 local_irq_save(flags);
2868 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2869 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2870 if (t != NULL && t != current)
2871 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2872 local_irq_restore(flags);
2873 }
2874
2875 /*
2876 * Wake up this CPU's rcuc kthread to do RCU core processing.
2877 */
2878 static void invoke_rcu_core(void)
2879 {
2880 if (!cpu_online(smp_processor_id()))
2881 return;
2882 if (use_softirq)
2883 raise_softirq(RCU_SOFTIRQ);
2884 else
2885 invoke_rcu_core_kthread();
2886 }
2887
2888 static void rcu_cpu_kthread_park(unsigned int cpu)
2889 {
2890 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2891 }
2892
2893 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2894 {
2895 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2896 }
2897
2898 /*
2899 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2900 * the RCU softirq used in configurations of RCU that do not support RCU
2901 * priority boosting.
2902 */
2903 static void rcu_cpu_kthread(unsigned int cpu)
2904 {
2905 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2906 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2907 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2908 int spincnt;
2909
2910 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2911 for (spincnt = 0; spincnt < 10; spincnt++) {
2912 WRITE_ONCE(*j, jiffies);
2913 local_bh_disable();
2914 *statusp = RCU_KTHREAD_RUNNING;
2915 local_irq_disable();
2916 work = *workp;
2917 WRITE_ONCE(*workp, 0);
2918 local_irq_enable();
2919 if (work)
2920 rcu_core();
2921 local_bh_enable();
2922 if (!READ_ONCE(*workp)) {
2923 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2924 *statusp = RCU_KTHREAD_WAITING;
2925 return;
2926 }
2927 }
2928 *statusp = RCU_KTHREAD_YIELDING;
2929 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2930 schedule_timeout_idle(2);
2931 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2932 *statusp = RCU_KTHREAD_WAITING;
2933 WRITE_ONCE(*j, jiffies);
2934 }
2935
2936 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2937 .store = &rcu_data.rcu_cpu_kthread_task,
2938 .thread_should_run = rcu_cpu_kthread_should_run,
2939 .thread_fn = rcu_cpu_kthread,
2940 .thread_comm = "rcuc/%u",
2941 .setup = rcu_cpu_kthread_setup,
2942 .park = rcu_cpu_kthread_park,
2943 };
2944
2945 /*
2946 * Spawn per-CPU RCU core processing kthreads.
2947 */
2948 static int __init rcu_spawn_core_kthreads(void)
2949 {
2950 int cpu;
2951
2952 for_each_possible_cpu(cpu)
2953 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2954 if (use_softirq)
2955 return 0;
2956 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2957 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2958 return 0;
2959 }
2960
2961 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func)
2962 {
2963 rcu_segcblist_enqueue(&rdp->cblist, head);
2964 trace_rcu_callback(rcu_state.name, head,
2965 rcu_segcblist_n_cbs(&rdp->cblist));
2966 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2967 }
2968
2969 /*
2970 * Handle any core-RCU processing required by a call_rcu() invocation.
2971 */
2972 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2973 rcu_callback_t func, unsigned long flags)
2974 {
2975 rcutree_enqueue(rdp, head, func);
2976 /*
2977 * If called from an extended quiescent state, invoke the RCU
2978 * core in order to force a re-evaluation of RCU's idleness.
2979 */
2980 if (!rcu_is_watching())
2981 invoke_rcu_core();
2982
2983 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2984 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2985 return;
2986
2987 /*
2988 * Force the grace period if too many callbacks or too long waiting.
2989 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2990 * if some other CPU has recently done so. Also, don't bother
2991 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2992 * is the only one waiting for a grace period to complete.
2993 */
2994 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2995 rdp->qlen_last_fqs_check + qhimark)) {
2996
2997 /* Are we ignoring a completed grace period? */
2998 note_gp_changes(rdp);
2999
3000 /* Start a new grace period if one not already started. */
3001 if (!rcu_gp_in_progress()) {
3002 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
3003 } else {
3004 /* Give the grace period a kick. */
3005 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
3006 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
3007 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
3008 rcu_force_quiescent_state();
3009 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
3010 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
3011 }
3012 }
3013 }
3014
3015 /*
3016 * RCU callback function to leak a callback.
3017 */
3018 static void rcu_leak_callback(struct rcu_head *rhp)
3019 {
3020 }
3021
3022 /*
3023 * Check and if necessary update the leaf rcu_node structure's
3024 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3025 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
3026 * structure's ->lock.
3027 */
3028 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
3029 {
3030 raw_lockdep_assert_held_rcu_node(rnp);
3031 if (qovld_calc <= 0)
3032 return; // Early boot and wildcard value set.
3033 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
3034 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
3035 else
3036 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
3037 }
3038
3039 /*
3040 * Check and if necessary update the leaf rcu_node structure's
3041 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3042 * number of queued RCU callbacks. No locks need be held, but the
3043 * caller must have disabled interrupts.
3044 *
3045 * Note that this function ignores the possibility that there are a lot
3046 * of callbacks all of which have already seen the end of their respective
3047 * grace periods. This omission is due to the need for no-CBs CPUs to
3048 * be holding ->nocb_lock to do this check, which is too heavy for a
3049 * common-case operation.
3050 */
3051 static void check_cb_ovld(struct rcu_data *rdp)
3052 {
3053 struct rcu_node *const rnp = rdp->mynode;
3054
3055 if (qovld_calc <= 0 ||
3056 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
3057 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3058 return; // Early boot wildcard value or already set correctly.
3059 raw_spin_lock_rcu_node(rnp);
3060 check_cb_ovld_locked(rdp, rnp);
3061 raw_spin_unlock_rcu_node(rnp);
3062 }
3063
3064 static void
3065 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
3066 {
3067 static atomic_t doublefrees;
3068 unsigned long flags;
3069 bool lazy;
3070 struct rcu_data *rdp;
3071
3072 /* Misaligned rcu_head! */
3073 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3074
3075 /* Avoid NULL dereference if callback is NULL. */
3076 if (WARN_ON_ONCE(!func))
3077 return;
3078
3079 if (debug_rcu_head_queue(head)) {
3080 /*
3081 * Probable double call_rcu(), so leak the callback.
3082 * Use rcu:rcu_callback trace event to find the previous
3083 * time callback was passed to call_rcu().
3084 */
3085 if (atomic_inc_return(&doublefrees) < 4) {
3086 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
3087 mem_dump_obj(head);
3088 }
3089 WRITE_ONCE(head->func, rcu_leak_callback);
3090 return;
3091 }
3092 head->func = func;
3093 head->next = NULL;
3094 kasan_record_aux_stack(head);
3095
3096 local_irq_save(flags);
3097 rdp = this_cpu_ptr(&rcu_data);
3098 RCU_LOCKDEP_WARN(!rcu_rdp_cpu_online(rdp), "Callback enqueued on offline CPU!");
3099
3100 lazy = lazy_in && !rcu_async_should_hurry();
3101
3102 /* Add the callback to our list. */
3103 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3104 // This can trigger due to call_rcu() from offline CPU:
3105 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
3106 WARN_ON_ONCE(!rcu_is_watching());
3107 // Very early boot, before rcu_init(). Initialize if needed
3108 // and then drop through to queue the callback.
3109 if (rcu_segcblist_empty(&rdp->cblist))
3110 rcu_segcblist_init(&rdp->cblist);
3111 }
3112
3113 check_cb_ovld(rdp);
3114
3115 if (unlikely(rcu_rdp_is_offloaded(rdp)))
3116 call_rcu_nocb(rdp, head, func, flags, lazy);
3117 else
3118 call_rcu_core(rdp, head, func, flags);
3119 local_irq_restore(flags);
3120 }
3121
3122 #ifdef CONFIG_RCU_LAZY
3123 static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF);
3124 module_param(enable_rcu_lazy, bool, 0444);
3125
3126 /**
3127 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
3128 * flush all lazy callbacks (including the new one) to the main ->cblist while
3129 * doing so.
3130 *
3131 * @head: structure to be used for queueing the RCU updates.
3132 * @func: actual callback function to be invoked after the grace period
3133 *
3134 * The callback function will be invoked some time after a full grace
3135 * period elapses, in other words after all pre-existing RCU read-side
3136 * critical sections have completed.
3137 *
3138 * Use this API instead of call_rcu() if you don't want the callback to be
3139 * delayed for very long periods of time, which can happen on systems without
3140 * memory pressure and on systems which are lightly loaded or mostly idle.
3141 * This function will cause callbacks to be invoked sooner than later at the
3142 * expense of extra power. Other than that, this function is identical to, and
3143 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
3144 * ordering and other functionality.
3145 */
3146 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
3147 {
3148 __call_rcu_common(head, func, false);
3149 }
3150 EXPORT_SYMBOL_GPL(call_rcu_hurry);
3151 #else
3152 #define enable_rcu_lazy false
3153 #endif
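
/*
 * Illustrative sketch (hypothetical names, not part of the RCU core): a
 * caller that actively waits for its own callback should queue it with
 * call_rcu_hurry() so that a lazy ->cblist cannot add seconds of delay.
 * This mirrors what wait_rcu_gp() does via struct rcu_synchronize and
 * wakeme_after_rcu().
 */
static void __maybe_unused example_wait_for_gp(void)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_rcu_hurry(&rs.head, wakeme_after_rcu);	/* Not lazy. */
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}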
3154
3155 /**
3156 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3157 * By default the callbacks are 'lazy' and are kept hidden from the main
3158 * ->cblist to prevent starting of grace periods too soon.
3159 * If you desire grace periods to start very soon, use call_rcu_hurry().
3160 *
3161 * @head: structure to be used for queueing the RCU updates.
3162 * @func: actual callback function to be invoked after the grace period
3163 *
3164 * The callback function will be invoked some time after a full grace
3165 * period elapses, in other words after all pre-existing RCU read-side
3166 * critical sections have completed. However, the callback function
3167 * might well execute concurrently with RCU read-side critical sections
3168 * that started after call_rcu() was invoked.
3169 *
3170 * It is perfectly legal to repost an RCU callback, potentially with
3171 * a different callback function, from within its callback function.
3172 * The specified function will be invoked after another full grace period
3173 * has elapsed. This use case is similar in form to the common practice
3174 * of reposting a timer from within its own handler.
3175 *
3176 * RCU read-side critical sections are delimited by rcu_read_lock()
3177 * and rcu_read_unlock(), and may be nested. In addition, but only in
3178 * v5.0 and later, regions of code across which interrupts, preemption,
3179 * or softirqs have been disabled also serve as RCU read-side critical
3180 * sections. This includes hardware interrupt handlers, softirq handlers,
3181 * and NMI handlers.
3182 *
3183 * Note that all CPUs must agree that the grace period extended beyond
3184 * all pre-existing RCU read-side critical sections. On systems with more
3185 * than one CPU, this means that when "func()" is invoked, each CPU is
3186 * guaranteed to have executed a full memory barrier since the end of its
3187 * last RCU read-side critical section whose beginning preceded the call
3188 * to call_rcu(). It also means that each CPU executing an RCU read-side
3189 * critical section that continues beyond the start of "func()" must have
3190 * executed a memory barrier after the call_rcu() but before the beginning
3191 * of that RCU read-side critical section. Note that these guarantees
3192 * include CPUs that are offline, idle, or executing in user mode, as
3193 * well as CPUs that are executing in the kernel.
3194 *
3195 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3196 * resulting RCU callback function "func()", then both CPU A and CPU B are
3197 * guaranteed to execute a full memory barrier during the time interval
3198 * between the call to call_rcu() and the invocation of "func()" -- even
3199 * if CPU A and CPU B are the same CPU (but again only if the system has
3200 * more than one CPU).
3201 *
3202 * Implementation of these memory-ordering guarantees is described here:
3203 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3204 *
3205 * Specific to call_rcu() (as opposed to the other call_rcu*() functions),
3206 * in kernels built with CONFIG_RCU_LAZY=y, call_rcu() might delay for many
3207 * seconds before starting the grace period needed by the corresponding
3208 * callback. This delay can significantly improve energy-efficiency
3209 * on low-utilization battery-powered devices. To avoid this delay,
3210 * in latency-sensitive kernel code, use call_rcu_hurry().
3211 */
3212 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3213 {
3214 __call_rcu_common(head, func, enable_rcu_lazy);
3215 }
3216 EXPORT_SYMBOL_GPL(call_rcu);
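
/*
 * Illustrative sketch (hypothetical names): the common call_rcu() usage of
 * embedding an rcu_head in the protected structure and freeing it from the
 * callback once a grace period has elapsed. A latency- or memory-sensitive
 * caller would substitute call_rcu_hurry() for call_rcu() below.
 */
struct example_gadget {
	int id;
	struct rcu_head rh;
};

static void example_gadget_free_cb(struct rcu_head *rhp)
{
	/* Runs only after all pre-existing readers have finished. */
	kfree(container_of(rhp, struct example_gadget, rh));
}

static void __maybe_unused example_gadget_retire(struct example_gadget *g)
{
	/* Readers that already found *g may still be dereferencing it. */
	call_rcu(&g->rh, example_gadget_free_cb);
}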
3217
3218 /*
3219 * During early boot, any blocking grace-period wait automatically
3220 * implies a grace period.
3221 *
3222 * Later on, this could in theory be the case for kernels built with
3223 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3224 * is not a common case. Furthermore, this optimization would cause
3225 * the rcu_gp_oldstate structure to expand by 50%, so this potential
3226 * grace-period optimization is ignored once the scheduler is running.
3227 */
3228 static int rcu_blocking_is_gp(void)
3229 {
3230 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) {
3231 might_sleep();
3232 return false;
3233 }
3234 return true;
3235 }
3236
3237 /*
3238 * Helper function for the synchronize_rcu() API.
3239 */
3240 static void synchronize_rcu_normal(void)
3241 {
3242 struct rcu_synchronize rs;
3243
3244 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("request"));
3245
3246 if (!READ_ONCE(rcu_normal_wake_from_gp)) {
3247 wait_rcu_gp(call_rcu_hurry);
3248 goto trace_complete_out;
3249 }
3250
3251 init_rcu_head_on_stack(&rs.head);
3252 init_completion(&rs.completion);
3253
3254 /*
3255 * This code might be preempted, therefore take a GP
3256 * snapshot before adding a request.
3257 */
3258 if (IS_ENABLED(CONFIG_PROVE_RCU))
3259 get_state_synchronize_rcu_full(&rs.oldstate);
3260
3261 rcu_sr_normal_add_req(&rs);
3262
3263 /* Kick a GP and start waiting. */
3264 (void) start_poll_synchronize_rcu();
3265
3266 /* Now we can wait. */
3267 wait_for_completion(&rs.completion);
3268 destroy_rcu_head_on_stack(&rs.head);
3269
3270 trace_complete_out:
3271 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("complete"));
3272 }
3273
3274 /**
3275 * synchronize_rcu - wait until a grace period has elapsed.
3276 *
3277 * Control will return to the caller some time after a full grace
3278 * period has elapsed, in other words after all currently executing RCU
3279 * read-side critical sections have completed. Note, however, that
3280 * upon return from synchronize_rcu(), the caller might well be executing
3281 * concurrently with new RCU read-side critical sections that began while
3282 * synchronize_rcu() was waiting.
3283 *
3284 * RCU read-side critical sections are delimited by rcu_read_lock()
3285 * and rcu_read_unlock(), and may be nested. In addition, but only in
3286 * v5.0 and later, regions of code across which interrupts, preemption,
3287 * or softirqs have been disabled also serve as RCU read-side critical
3288 * sections. This includes hardware interrupt handlers, softirq handlers,
3289 * and NMI handlers.
3290 *
3291 * Note that this guarantee implies further memory-ordering guarantees.
3292 * On systems with more than one CPU, when synchronize_rcu() returns,
3293 * each CPU is guaranteed to have executed a full memory barrier since
3294 * the end of its last RCU read-side critical section whose beginning
3295 * preceded the call to synchronize_rcu(). In addition, each CPU having
3296 * an RCU read-side critical section that extends beyond the return from
3297 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3298 * after the beginning of synchronize_rcu() and before the beginning of
3299 * that RCU read-side critical section. Note that these guarantees include
3300 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3301 * that are executing in the kernel.
3302 *
3303 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3304 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3305 * to have executed a full memory barrier during the execution of
3306 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3307 * again only if the system has more than one CPU).
3308 *
3309 * Implementation of these memory-ordering guarantees is described here:
3310 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3311 */
3312 void synchronize_rcu(void)
3313 {
3314 unsigned long flags;
3315 struct rcu_node *rnp;
3316
3317 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3318 lock_is_held(&rcu_lock_map) ||
3319 lock_is_held(&rcu_sched_lock_map),
3320 "Illegal synchronize_rcu() in RCU read-side critical section");
3321 if (!rcu_blocking_is_gp()) {
3322 if (rcu_gp_is_expedited())
3323 synchronize_rcu_expedited();
3324 else
3325 synchronize_rcu_normal();
3326 return;
3327 }
3328
3329 // Context allows vacuous grace periods.
3330 // Note well that this code runs with !PREEMPT && !SMP.
3331 // In addition, all code that advances grace periods runs at
3332 // process level. Therefore, this normal GP overlaps with other
3333 // normal GPs only by being fully nested within them, which allows
3334 // reuse of ->gp_seq_polled_snap.
3335 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3336 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3337
3338 // Update the normal grace-period counters to record
3339 // this grace period, but only those used by the boot CPU.
3340 // The rcu_scheduler_starting() will take care of the rest of
3341 // these counters.
3342 local_irq_save(flags);
3343 WARN_ON_ONCE(num_online_cpus() > 1);
3344 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3345 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3346 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3347 local_irq_restore(flags);
3348 }
3349 EXPORT_SYMBOL_GPL(synchronize_rcu);
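
/*
 * Illustrative sketch (hypothetical names): the classic update-side
 * sequence of unpublishing an RCU-protected pointer, waiting for a grace
 * period with synchronize_rcu(), and only then freeing the old object.
 */
struct example_config {
	int value;
};

static struct example_config __rcu *example_config_ptr;
static DEFINE_MUTEX(example_config_mutex);	/* Serializes updaters. */

static void __maybe_unused example_config_replace(struct example_config *newc)
{
	struct example_config *oldc;

	mutex_lock(&example_config_mutex);
	oldc = rcu_dereference_protected(example_config_ptr,
					 lockdep_is_held(&example_config_mutex));
	rcu_assign_pointer(example_config_ptr, newc);	/* Publish new version. */
	mutex_unlock(&example_config_mutex);

	synchronize_rcu();	/* Wait out all pre-existing readers. */
	kfree(oldc);		/* No reader can still see the old version. */
}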
3350
3351 /**
3352 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3353 * @rgosp: Place to put state cookie
3354 *
3355 * Stores into @rgosp a value that will always be treated by functions
3356 * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3357 * has already completed.
3358 */
3359 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3360 {
3361 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3362 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3363 }
3364 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
3365
3366 /**
3367 * get_state_synchronize_rcu - Snapshot current RCU state
3368 *
3369 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3370 * or poll_state_synchronize_rcu() to determine whether or not a full
3371 * grace period has elapsed in the meantime.
3372 */
3373 unsigned long get_state_synchronize_rcu(void)
3374 {
3375 /*
3376 * Any prior manipulation of RCU-protected data must happen
3377 * before the load from ->gp_seq.
3378 */
3379 smp_mb(); /* ^^^ */
3380 return rcu_seq_snap(&rcu_state.gp_seq_polled);
3381 }
3382 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3383
3384 /**
3385 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3386 * @rgosp: location to place combined normal/expedited grace-period state
3387 *
3388 * Places the normal and expedited grace-period states in @rgosp. This
3389 * state value can be passed to a later call to cond_synchronize_rcu_full()
3390 * or poll_state_synchronize_rcu_full() to determine whether or not a
3391 * grace period (whether normal or expedited) has elapsed in the meantime.
3392 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3393 * long, but is guaranteed to see all grace periods. In contrast, the
3394 * combined state occupies less memory, but can sometimes fail to take
3395 * grace periods into account.
3396 *
3397 * This does not guarantee that the needed grace period will actually
3398 * start.
3399 */
3400 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3401 {
3402 /*
3403 * Any prior manipulation of RCU-protected data must happen
3404 * before the loads from ->gp_seq and ->expedited_sequence.
3405 */
3406 smp_mb(); /* ^^^ */
3407
3408 // Yes, rcu_state.gp_seq, not rnp_root->gp_seq, the latter's use
3409 // in poll_state_synchronize_rcu_full() notwithstanding. Use of
3410 // the latter here would result in too-short grace periods due to
3411 // interactions with newly onlined CPUs.
3412 rgosp->rgos_norm = rcu_seq_snap(&rcu_state.gp_seq);
3413 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3414 }
3415 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3416
3417 /*
3418 * Helper function for start_poll_synchronize_rcu() and
3419 * start_poll_synchronize_rcu_full().
3420 */
3421 static void start_poll_synchronize_rcu_common(void)
3422 {
3423 unsigned long flags;
3424 bool needwake;
3425 struct rcu_data *rdp;
3426 struct rcu_node *rnp;
3427
3428 local_irq_save(flags);
3429 rdp = this_cpu_ptr(&rcu_data);
3430 rnp = rdp->mynode;
3431 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3432 // Note it is possible for a grace period to have elapsed between
3433 // the above call to get_state_synchronize_rcu() and the below call
3434 // to rcu_seq_snap. This is OK, the worst that happens is that we
3435 // get a grace period that no one needed. These accesses are ordered
3436 // by smp_mb(), and we are accessing them in the opposite order
3437 // from which they are updated at grace-period start, as required.
3438 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3439 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3440 if (needwake)
3441 rcu_gp_kthread_wake();
3442 }
3443
3444 /**
3445 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3446 *
3447 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3448 * or poll_state_synchronize_rcu() to determine whether or not a full
3449 * grace period has elapsed in the meantime. If the needed grace period
3450 * is not already slated to start, notifies RCU core of the need for that
3451 * grace period.
3452 */
3453 unsigned long start_poll_synchronize_rcu(void)
3454 {
3455 unsigned long gp_seq = get_state_synchronize_rcu();
3456
3457 start_poll_synchronize_rcu_common();
3458 return gp_seq;
3459 }
3460 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3461
3462 /**
3463 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3464 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3465 *
3466 * Places the normal and expedited grace-period states in *@rgosp. This
3467 * state value can be passed to a later call to cond_synchronize_rcu_full()
3468 * or poll_state_synchronize_rcu_full() to determine whether or not a
3469 * grace period (whether normal or expedited) has elapsed in the meantime.
3470 * If the needed grace period is not already slated to start, notifies
3471 * RCU core of the need for that grace period.
3472 */
3473 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3474 {
3475 get_state_synchronize_rcu_full(rgosp);
3476
3477 start_poll_synchronize_rcu_common();
3478 }
3479 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3480
3481 /**
3482 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3483 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3484 *
3485 * If a full RCU grace period has elapsed since the earlier call from
3486 * which @oldstate was obtained, return @true, otherwise return @false.
3487 * If @false is returned, it is the caller's responsibility to invoke this
3488 * function later on until it does return @true. Alternatively, the caller
3489 * can explicitly wait for a grace period, for example, by passing @oldstate
3490 * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3491 * on the one hand or by directly invoking either synchronize_rcu() or
3492 * synchronize_rcu_expedited() on the other.
3493 *
3494 * Yes, this function does not take counter wrap into account.
3495 * But counter wrap is harmless. If the counter wraps, we have waited for
3496 * more than a billion grace periods (and way more on a 64-bit system!).
3497 * Those needing to keep old state values for very long time periods
3498 * (many hours even on 32-bit systems) should check them occasionally and
3499 * either refresh them or set a flag indicating that the grace period has
3500 * completed. Alternatively, they can use get_completed_synchronize_rcu()
3501 * to get a guaranteed-completed grace-period state.
3502 *
3503 * In addition, because oldstate compresses the grace-period state for
3504 * both normal and expedited grace periods into a single unsigned long,
3505 * it can miss a grace period when synchronize_rcu() runs concurrently
3506 * with synchronize_rcu_expedited(). If this is unacceptable, please
3507 * instead use the _full() variant of these polling APIs.
3508 *
3509 * This function provides the same memory-ordering guarantees that
3510 * would be provided by a synchronize_rcu() that was invoked at the call
3511 * to the function that provided @oldstate, and that returned at the end
3512 * of this function.
3513 */
3514 bool poll_state_synchronize_rcu(unsigned long oldstate)
3515 {
3516 if (oldstate == RCU_GET_STATE_COMPLETED ||
3517 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3518 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3519 return true;
3520 }
3521 return false;
3522 }
3523 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
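
/*
 * Illustrative sketch (hypothetical names): deferring a free without ever
 * blocking by pairing start_poll_synchronize_rcu() with later calls to
 * poll_state_synchronize_rcu(), for example from a periodic scan.
 */
struct example_deferred_free {
	void *obj;
	unsigned long gp_cookie;
};

static void __maybe_unused example_defer_free(struct example_deferred_free *df, void *obj)
{
	df->obj = obj;
	/* Snapshot the GP state and make sure a grace period gets going. */
	df->gp_cookie = start_poll_synchronize_rcu();
}

static bool __maybe_unused example_try_reclaim(struct example_deferred_free *df)
{
	/* Non-blocking: free only if the needed grace period has elapsed. */
	if (!df->obj || !poll_state_synchronize_rcu(df->gp_cookie))
		return false;
	kfree(df->obj);
	df->obj = NULL;
	return true;
}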
3524
3525 /**
3526 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3527 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3528 *
3529 * If a full RCU grace period has elapsed since the earlier call from
3530 * which *rgosp was obtained, return @true, otherwise return @false.
3531 * If @false is returned, it is the caller's responsibility to invoke this
3532 * function later on until it does return @true. Alternatively, the caller
3533 * can explicitly wait for a grace period, for example, by passing @rgosp
3534 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3535 *
3536 * Yes, this function does not take counter wrap into account.
3537 * But counter wrap is harmless. If the counter wraps, we have waited
3538 * for more than a billion grace periods (and way more on a 64-bit
3539 * system!). Those needing to keep rcu_gp_oldstate values for very
3540 * long time periods (many hours even on 32-bit systems) should check
3541 * them occasionally and either refresh them or set a flag indicating
3542 * that the grace period has completed. Alternatively, they can use
3543 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3544 * grace-period state.
3545 *
3546 * This function provides the same memory-ordering guarantees that would
3547 * be provided by a synchronize_rcu() that was invoked at the call to
3548 * the function that provided @rgosp, and that returned at the end of this
3549 * function. And this guarantee requires that the root rcu_node structure's
3550 * ->gp_seq field be checked instead of that of the rcu_state structure.
3551 * The problem is that the just-ending grace-period's callbacks can be
3552 * invoked between the time that the root rcu_node structure's ->gp_seq
3553 * field is updated and the time that the rcu_state structure's ->gp_seq
3554 * field is updated. Therefore, if a single synchronize_rcu() is to
3555 * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3556 * then the root rcu_node structure is the one that needs to be polled.
3557 */
3558 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3559 {
3560 struct rcu_node *rnp = rcu_get_root();
3561
3562 smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3563 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3564 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3565 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3566 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3567 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3568 return true;
3569 }
3570 return false;
3571 }
3572 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
3573
3574 /**
3575 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3576 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3577 *
3578 * If a full RCU grace period has elapsed since the earlier call to
3579 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3580 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3581 *
3582 * Yes, this function does not take counter wrap into account.
3583 * But counter wrap is harmless. If the counter wraps, we have waited for
3584 * more than 2 billion grace periods (and way more on a 64-bit system!),
3585 * so waiting for a couple of additional grace periods should be just fine.
3586 *
3587 * This function provides the same memory-ordering guarantees that
3588 * would be provided by a synchronize_rcu() that was invoked at the call
3589 * to the function that provided @oldstate and that returned at the end
3590 * of this function.
3591 */
3592 void cond_synchronize_rcu(unsigned long oldstate)
3593 {
3594 if (!poll_state_synchronize_rcu(oldstate))
3595 synchronize_rcu();
3596 }
3597 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
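
/*
 * Illustrative sketch (hypothetical names): take a cookie early with
 * get_state_synchronize_rcu(), do unrelated work, and then let
 * cond_synchronize_rcu() skip the grace-period wait entirely if one has
 * already elapsed in the meantime.
 */
static void __maybe_unused example_cheap_sync(void *stale, void (*other_work)(void))
{
	unsigned long cookie = get_state_synchronize_rcu();

	other_work();			/* Hopefully long enough for a GP. */
	cond_synchronize_rcu(cookie);	/* Often returns without blocking. */
	kfree(stale);
}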
3598
3599 /**
3600 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3601 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3602 *
3603 * If a full RCU grace period has elapsed since the call to
3604 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3605 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3606 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait
3607 * for a full grace period.
3608 *
3609 * Yes, this function does not take counter wrap into account.
3610 * But counter wrap is harmless. If the counter wraps, we have waited for
3611 * more than 2 billion grace periods (and way more on a 64-bit system!),
3612 * so waiting for a couple of additional grace periods should be just fine.
3613 *
3614 * This function provides the same memory-ordering guarantees that
3615 * would be provided by a synchronize_rcu() that was invoked at the call
3616 * to the function that provided @rgosp and that returned at the end of
3617 * this function.
3618 */
3619 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3620 {
3621 if (!poll_state_synchronize_rcu_full(rgosp))
3622 synchronize_rcu();
3623 }
3624 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
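
/*
 * Illustrative sketch (hypothetical names): the same cookie pattern using
 * the _full() APIs, whose struct rcu_gp_oldstate tracks the normal and
 * expedited sequences separately and therefore never misses a grace period.
 */
static void __maybe_unused example_full_cookie_sync(void *stale, void (*other_work)(void))
{
	struct rcu_gp_oldstate rgos;

	get_state_synchronize_rcu_full(&rgos);
	other_work();
	/* Cheap non-blocking check first; wait only if really necessary. */
	if (!poll_state_synchronize_rcu_full(&rgos))
		cond_synchronize_rcu_full(&rgos);
	kfree(stale);
}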
3625
3626 /*
3627 * Check to see if there is any immediate RCU-related work to be done by
3628 * the current CPU, returning 1 if so and zero otherwise. The checks are
3629 * in order of increasing expense: checks that can be carried out against
3630 * CPU-local state are performed first. However, we must check for CPU
3631 * stalls first, else we might not get a chance.
3632 */
3633 static int rcu_pending(int user)
3634 {
3635 bool gp_in_progress;
3636 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3637 struct rcu_node *rnp = rdp->mynode;
3638
3639 lockdep_assert_irqs_disabled();
3640
3641 /* Check for CPU stalls, if enabled. */
3642 check_cpu_stall(rdp);
3643
3644 /* Does this CPU need a deferred NOCB wakeup? */
3645 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3646 return 1;
3647
3648 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3649 gp_in_progress = rcu_gp_in_progress();
3650 if ((user || rcu_is_cpu_rrupt_from_idle() ||
3651 (gp_in_progress &&
3652 time_before(jiffies, READ_ONCE(rcu_state.gp_start) +
3653 nohz_full_patience_delay_jiffies))) &&
3654 rcu_nohz_full_cpu())
3655 return 0;
3656
3657 /* Is the RCU core waiting for a quiescent state from this CPU? */
3658 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3659 return 1;
3660
3661 /* Does this CPU have callbacks ready to invoke? */
3662 if (!rcu_rdp_is_offloaded(rdp) &&
3663 rcu_segcblist_ready_cbs(&rdp->cblist))
3664 return 1;
3665
3666 /* Has RCU gone idle with this CPU needing another grace period? */
3667 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3668 !rcu_rdp_is_offloaded(rdp) &&
3669 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3670 return 1;
3671
3672 /* Have RCU grace period completed or started? */
3673 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3674 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3675 return 1;
3676
3677 /* nothing to do */
3678 return 0;
3679 }
3680
3681 /*
3682 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3683 * the compiler is expected to optimize this away.
3684 */
3685 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3686 {
3687 trace_rcu_barrier(rcu_state.name, s, cpu,
3688 atomic_read(&rcu_state.barrier_cpu_count), done);
3689 }
3690
3691 /*
3692 * RCU callback function for rcu_barrier(). If we are last, wake
3693 * up the task executing rcu_barrier().
3694 *
3695 * Note that the value of rcu_state.barrier_sequence must be captured
3696 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3697 * other CPUs might count the value down to zero before this CPU gets
3698 * around to invoking rcu_barrier_trace(), which might result in bogus
3699 * data from the next instance of rcu_barrier().
3700 */
3701 static void rcu_barrier_callback(struct rcu_head *rhp)
3702 {
3703 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3704
3705 rhp->next = rhp; // Mark the callback as having been invoked.
3706 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3707 rcu_barrier_trace(TPS("LastCB"), -1, s);
3708 complete(&rcu_state.barrier_completion);
3709 } else {
3710 rcu_barrier_trace(TPS("CB"), -1, s);
3711 }
3712 }
3713
3714 /*
3715 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3716 */
3717 static void rcu_barrier_entrain(struct rcu_data *rdp)
3718 {
3719 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3720 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3721 bool wake_nocb = false;
3722 bool was_alldone = false;
3723
3724 lockdep_assert_held(&rcu_state.barrier_lock);
3725 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3726 return;
3727 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3728 rdp->barrier_head.func = rcu_barrier_callback;
3729 debug_rcu_head_queue(&rdp->barrier_head);
3730 rcu_nocb_lock(rdp);
3731 /*
3732 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular
3733 * queue. This way we don't wait for bypass timer that can reach seconds
3734 * if it's fully lazy.
3735 */
3736 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3737 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3738 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3739 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3740 atomic_inc(&rcu_state.barrier_cpu_count);
3741 } else {
3742 debug_rcu_head_unqueue(&rdp->barrier_head);
3743 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3744 }
3745 rcu_nocb_unlock(rdp);
3746 if (wake_nocb)
3747 wake_nocb_gp(rdp, false);
3748 smp_store_release(&rdp->barrier_seq_snap, gseq);
3749 }
3750
3751 /*
3752 * Called with preemption disabled, and from cross-cpu IRQ context.
3753 */
3754 static void rcu_barrier_handler(void *cpu_in)
3755 {
3756 uintptr_t cpu = (uintptr_t)cpu_in;
3757 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3758
3759 lockdep_assert_irqs_disabled();
3760 WARN_ON_ONCE(cpu != rdp->cpu);
3761 WARN_ON_ONCE(cpu != smp_processor_id());
3762 raw_spin_lock(&rcu_state.barrier_lock);
3763 rcu_barrier_entrain(rdp);
3764 raw_spin_unlock(&rcu_state.barrier_lock);
3765 }
3766
3767 /**
3768 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3769 *
3770 * Note that this primitive does not necessarily wait for an RCU grace period
3771 * to complete. For example, if there are no RCU callbacks queued anywhere
3772 * in the system, then rcu_barrier() is within its rights to return
3773 * immediately, without waiting for anything, much less an RCU grace period.
3774 */
3775 void rcu_barrier(void)
3776 {
3777 uintptr_t cpu;
3778 unsigned long flags;
3779 unsigned long gseq;
3780 struct rcu_data *rdp;
3781 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3782
3783 rcu_barrier_trace(TPS("Begin"), -1, s);
3784
3785 /* Take mutex to serialize concurrent rcu_barrier() requests. */
3786 mutex_lock(&rcu_state.barrier_mutex);
3787
3788 /* Did someone else do our work for us? */
3789 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3790 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
3791 smp_mb(); /* caller's subsequent code after above check. */
3792 mutex_unlock(&rcu_state.barrier_mutex);
3793 return;
3794 }
3795
3796 /* Mark the start of the barrier operation. */
3797 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3798 rcu_seq_start(&rcu_state.barrier_sequence);
3799 gseq = rcu_state.barrier_sequence;
3800 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3801
3802 /*
3803 * Initialize the count to two rather than to zero in order
3804 * to avoid a too-soon return to zero in case of an immediate
3805 * invocation of the just-enqueued callback (or preemption of
3806 * this task). Exclude CPU-hotplug operations to ensure that no
3807 * offline non-offloaded CPU has callbacks queued.
3808 */
3809 init_completion(&rcu_state.barrier_completion);
3810 atomic_set(&rcu_state.barrier_cpu_count, 2);
3811 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3812
3813 /*
3814 * Force each CPU with callbacks to register a new callback.
3815 * When that callback is invoked, we will know that all of the
3816 * corresponding CPU's preceding callbacks have been invoked.
3817 */
3818 for_each_possible_cpu(cpu) {
3819 rdp = per_cpu_ptr(&rcu_data, cpu);
3820 retry:
3821 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
3822 continue;
3823 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3824 if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
3825 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3826 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3827 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
3828 continue;
3829 }
3830 if (!rcu_rdp_cpu_online(rdp)) {
3831 rcu_barrier_entrain(rdp);
3832 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3833 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3834 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
3835 continue;
3836 }
3837 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3838 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
3839 schedule_timeout_uninterruptible(1);
3840 goto retry;
3841 }
3842 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3843 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
3844 }
3845
3846 /*
3847 * Now that we have an rcu_barrier_callback() callback on each
3848 * CPU, and thus each counted, remove the initial count.
3849 */
3850 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3851 complete(&rcu_state.barrier_completion);
3852
3853 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3854 wait_for_completion(&rcu_state.barrier_completion);
3855
3856 /* Mark the end of the barrier operation. */
3857 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3858 rcu_seq_end(&rcu_state.barrier_sequence);
3859 gseq = rcu_state.barrier_sequence;
3860 for_each_possible_cpu(cpu) {
3861 rdp = per_cpu_ptr(&rcu_data, cpu);
3862
3863 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3864 }
3865
3866 /* Other rcu_barrier() invocations can now safely proceed. */
3867 mutex_unlock(&rcu_state.barrier_mutex);
3868 }
3869 EXPORT_SYMBOL_GPL(rcu_barrier);
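
/*
 * Illustrative sketch (hypothetical names): a module-exit path must not
 * return while any of its call_rcu() callbacks are still queued, because
 * the callback code would otherwise be unloaded out from under RCU.
 */
static void __maybe_unused example_module_teardown(void)
{
	/* 1. Stop queueing new callbacks (unregister hooks, and so on). */
	/* 2. Wait for every callback already queued by this module. */
	rcu_barrier();
	/* 3. Only now is it safe to free shared state and unload the code. */
}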
3870
3871 static unsigned long rcu_barrier_last_throttle;
3872
3873 /**
3874 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
3875 *
3876 * This can be thought of as guard rails around rcu_barrier() that
3877 * permits unrestricted userspace use, at least assuming the hardware's
3878 * try_cmpxchg() is robust. There will be at most one call per second to
3879 * rcu_barrier() system-wide from use of this function, which means that
3880 * callers might needlessly wait a second or three.
3881 *
3882 * This is intended for use by test suites to avoid OOM by flushing RCU
3883 * callbacks from the previous test before starting the next. See the
3884 * rcutree.do_rcu_barrier module parameter for more information.
3885 *
3886 * Why not simply make rcu_barrier() more scalable? That might be
3887 * the eventual endpoint, but let's keep it simple for the time being.
3888 * Note that the module parameter infrastructure serializes calls to a
3889 * given .set() function, but should concurrent .set() invocation ever be
3890 * possible, we are ready!
3891 */
3892 static void rcu_barrier_throttled(void)
3893 {
3894 unsigned long j = jiffies;
3895 unsigned long old = READ_ONCE(rcu_barrier_last_throttle);
3896 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3897
3898 while (time_in_range(j, old, old + HZ / 16) ||
3899 !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) {
3900 schedule_timeout_idle(HZ / 16);
3901 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3902 smp_mb(); /* caller's subsequent code after above check. */
3903 return;
3904 }
3905 j = jiffies;
3906 old = READ_ONCE(rcu_barrier_last_throttle);
3907 }
3908 rcu_barrier();
3909 }
3910
3911 /*
3912 * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier
3913 * request arrives. We insist on a true value to allow for possible
3914 * future expansion.
3915 */
3916 static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp)
3917 {
3918 bool b;
3919 int ret;
3920
3921 if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
3922 return -EAGAIN;
3923 ret = kstrtobool(val, &b);
3924 if (!ret && b) {
3925 atomic_inc((atomic_t *)kp->arg);
3926 rcu_barrier_throttled();
3927 atomic_dec((atomic_t *)kp->arg);
3928 }
3929 return ret;
3930 }
3931
3932 /*
3933 * Output the number of outstanding rcutree.do_rcu_barrier requests.
3934 */
3935 static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp)
3936 {
3937 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
3938 }
3939
3940 static const struct kernel_param_ops do_rcu_barrier_ops = {
3941 .set = param_set_do_rcu_barrier,
3942 .get = param_get_do_rcu_barrier,
3943 };
3944 static atomic_t do_rcu_barrier;
3945 module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644);
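
/*
 * Note (path given for illustration): with the "rcutree." module-parameter
 * prefix, this parameter is normally reachable from userspace as
 * /sys/module/rcutree/parameters/do_rcu_barrier. Writing "1" to it invokes
 * rcu_barrier_throttled(), and reading it reports the number of requests
 * currently in flight.
 */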
3946
3947 /*
3948 * Compute the mask of online CPUs for the specified rcu_node structure.
3949 * This will not be stable unless the rcu_node structure's ->lock is
3950 * held, but the bit corresponding to the current CPU will be stable
3951 * in most contexts.
3952 */
3953 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
3954 {
3955 return READ_ONCE(rnp->qsmaskinitnext);
3956 }
3957
3958 /*
3959 * Is the CPU corresponding to the specified rcu_data structure online
3960 * from RCU's perspective? This perspective is given by that structure's
3961 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
3962 */
3963 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
3964 {
3965 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
3966 }
3967
3968 bool rcu_cpu_online(int cpu)
3969 {
3970 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3971
3972 return rcu_rdp_cpu_online(rdp);
3973 }
3974
3975 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
3976
3977 /*
3978 * Is the current CPU online as far as RCU is concerned?
3979 *
3980 * Disable preemption to avoid false positives that could otherwise
3981 * happen due to the current CPU number being sampled, this task being
3982 * preempted, its old CPU being taken offline, resuming on some other CPU,
3983 * then determining that its old CPU is now offline.
3984 *
3985 * Disable checking if in an NMI handler because we cannot safely
3986 * report errors from NMI handlers anyway. In addition, it is OK to use
3987 * RCU on an offline processor during initial boot, hence the check for
3988 * rcu_scheduler_fully_active.
3989 */
3990 bool rcu_lockdep_current_cpu_online(void)
3991 {
3992 struct rcu_data *rdp;
3993 bool ret = false;
3994
3995 if (in_nmi() || !rcu_scheduler_fully_active)
3996 return true;
3997 preempt_disable_notrace();
3998 rdp = this_cpu_ptr(&rcu_data);
3999 /*
4000 * Strictly, we care here about the case where the current CPU is
4001 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
4002 * not being up to date. So arch_spin_is_locked() might have a
4003 * false positive if it's held by some *other* CPU, but that's
4004 * OK because that just means a false *negative* on the warning.
4005 */
4006 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
4007 ret = true;
4008 preempt_enable_notrace();
4009 return ret;
4010 }
4011 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
4012
4013 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
4014
4015 // Has rcu_init() been invoked? This is used (for example) to determine
4016 // whether spinlocks may be acquired safely.
4017 static bool rcu_init_invoked(void)
4018 {
4019 return !!READ_ONCE(rcu_state.n_online_cpus);
4020 }
4021
4022 /*
4023 * All CPUs for the specified rcu_node structure have gone offline,
4024 * and all tasks that were preempted within an RCU read-side critical
4025 * section while running on one of those CPUs have since exited their RCU
4026 * read-side critical section. Some other CPU is reporting this fact with
4027 * the specified rcu_node structure's ->lock held and interrupts disabled.
4028 * This function therefore goes up the tree of rcu_node structures,
4029 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
4030 * the leaf rcu_node structure's ->qsmaskinit field has already been
4031 * updated.
4032 *
4033 * This function does check that the specified rcu_node structure has
4034 * all CPUs offline and no blocked tasks, so it is OK to invoke it
4035 * prematurely. That said, invoking it after the fact will cost you
4036 * a needless lock acquisition. So once it has done its work, don't
4037 * invoke it again.
4038 */
4039 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
4040 {
4041 long mask;
4042 struct rcu_node *rnp = rnp_leaf;
4043
4044 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4045 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4046 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4047 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4048 return;
4049 for (;;) {
4050 mask = rnp->grpmask;
4051 rnp = rnp->parent;
4052 if (!rnp)
4053 break;
4054 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4055 rnp->qsmaskinit &= ~mask;
4056 /* Between grace periods, so better already be zero! */
4057 WARN_ON_ONCE(rnp->qsmask);
4058 if (rnp->qsmaskinit) {
4059 raw_spin_unlock_rcu_node(rnp);
4060 /* irqs remain disabled. */
4061 return;
4062 }
4063 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4064 }
4065 }
4066
4067 /*
4068 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4069 * first CPU in a given leaf rcu_node structure coming online. The caller
4070 * must hold the corresponding leaf rcu_node ->lock with interrupts
4071 * disabled.
4072 */
4073 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4074 {
4075 long mask;
4076 long oldmask;
4077 struct rcu_node *rnp = rnp_leaf;
4078
4079 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4080 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4081 for (;;) {
4082 mask = rnp->grpmask;
4083 rnp = rnp->parent;
4084 if (rnp == NULL)
4085 return;
4086 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4087 oldmask = rnp->qsmaskinit;
4088 rnp->qsmaskinit |= mask;
4089 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4090 if (oldmask)
4091 return;
4092 }
4093 }
4094
4095 /*
4096 * Do boot-time initialization of a CPU's per-CPU RCU data.
4097 */
4098 static void __init
4099 rcu_boot_init_percpu_data(int cpu)
4100 {
4101 struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4102 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4103
4104 /* Set up local state, ensuring consistent view of global state. */
4105 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4106 INIT_WORK(&rdp->strict_work, strict_work_handler);
4107 WARN_ON_ONCE(ct->nesting != 1);
4108 WARN_ON_ONCE(rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu)));
4109 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4110 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4111 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
4112 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4113 rdp->rcu_onl_gp_state = RCU_GP_CLEANED;
4114 rdp->last_sched_clock = jiffies;
4115 rdp->cpu = cpu;
4116 rcu_boot_init_nocb_percpu_data(rdp);
4117 }
4118
4119 static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
4120 {
4121 cpumask_var_t affinity;
4122 int cpu;
4123
4124 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
4125 return;
4126
4127 for_each_leaf_node_possible_cpu(rnp, cpu)
4128 cpumask_set_cpu(cpu, affinity);
4129
4130 kthread_affine_preferred(t, affinity);
4131
4132 free_cpumask_var(affinity);
4133 }
4134
4135 struct kthread_worker *rcu_exp_gp_kworker;
4136
4137 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
4138 {
4139 struct kthread_worker *kworker;
4140 const char *name = "rcu_exp_par_gp_kthread_worker/%d";
4141 struct sched_param param = { .sched_priority = kthread_prio };
4142 int rnp_index = rnp - rcu_get_root();
4143
4144 if (rnp->exp_kworker)
4145 return;
4146
4147 kworker = kthread_create_worker(0, name, rnp_index);
4148 if (IS_ERR_OR_NULL(kworker)) {
4149 pr_err("Failed to create par gp kworker on %d/%d\n",
4150 rnp->grplo, rnp->grphi);
4151 return;
4152 }
4153 WRITE_ONCE(rnp->exp_kworker, kworker);
4154
4155 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4156 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
4157
4158 rcu_thread_affine_rnp(kworker->task, rnp);
4159 wake_up_process(kworker->task);
4160 }
4161
4162 static void __init rcu_start_exp_gp_kworker(void)
4163 {
4164 const char *name = "rcu_exp_gp_kthread_worker";
4165 struct sched_param param = { .sched_priority = kthread_prio };
4166
4167 rcu_exp_gp_kworker = kthread_run_worker(0, name);
4168 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4169 pr_err("Failed to create %s!\n", name);
4170 rcu_exp_gp_kworker = NULL;
4171 return;
4172 }
4173
4174 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4175 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4176 }
4177
4178 static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
4179 {
4180 if (rcu_scheduler_fully_active) {
4181 mutex_lock(&rnp->kthread_mutex);
4182 rcu_spawn_one_boost_kthread(rnp);
4183 rcu_spawn_exp_par_gp_kworker(rnp);
4184 mutex_unlock(&rnp->kthread_mutex);
4185 }
4186 }
4187
4188 /*
4189 * Invoked early in the CPU-online process, when pretty much all services
4190 * are available. The incoming CPU is not present.
4191 *
4192 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4193 * offline event can be happening at a given time. Note also that we can
4194 * accept some slop in the rsp->gp_seq access due to the fact that this
4195 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4196 * And any offloaded callbacks are being numbered elsewhere.
4197 */
4198 int rcutree_prepare_cpu(unsigned int cpu)
4199 {
4200 unsigned long flags;
4201 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4202 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4203 struct rcu_node *rnp = rcu_get_root();
4204
4205 /* Set up local state, ensuring consistent view of global state. */
4206 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4207 rdp->qlen_last_fqs_check = 0;
4208 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4209 rdp->blimit = blimit;
4210 ct->nesting = 1; /* CPU not up, no tearing. */
4211 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4212
4213 /*
4214 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4215 * (re-)initialized.
4216 */
4217 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4218 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4219
4220 /*
4221 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4222 * propagation up the rcu_node tree will happen at the beginning
4223 * of the next grace period.
4224 */
4225 rnp = rdp->mynode;
4226 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4227 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4228 rdp->gp_seq_needed = rdp->gp_seq;
4229 rdp->cpu_no_qs.b.norm = true;
4230 rdp->core_needs_qs = false;
4231 rdp->rcu_iw_pending = false;
4232 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4233 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4234 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4235 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4236 rcu_spawn_rnp_kthreads(rnp);
4237 rcu_spawn_cpu_nocb_kthread(cpu);
4238 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
4239 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4240
4241 return 0;
4242 }
4243
4244 /*
4245 * Has the specified (known valid) CPU ever been fully online?
4246 */
4247 bool rcu_cpu_beenfullyonline(int cpu)
4248 {
4249 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4250
4251 return smp_load_acquire(&rdp->beenonline);
4252 }
4253
4254 /*
4255 * Near the end of the CPU-online process. Pretty much all services
4256 * enabled, and the CPU is now very much alive.
4257 */
4258 int rcutree_online_cpu(unsigned int cpu)
4259 {
4260 unsigned long flags;
4261 struct rcu_data *rdp;
4262 struct rcu_node *rnp;
4263
4264 rdp = per_cpu_ptr(&rcu_data, cpu);
4265 rnp = rdp->mynode;
4266 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4267 rnp->ffmask |= rdp->grpmask;
4268 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4269 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4270 return 0; /* Too early in boot for scheduler work. */
4271 sync_sched_exp_online_cleanup(cpu);
4272
4273 // Stop-machine done, so allow nohz_full to disable tick.
4274 tick_dep_clear(TICK_DEP_BIT_RCU);
4275 return 0;
4276 }
4277
4278 /*
4279 * Mark the specified CPU as being online so that subsequent grace periods
4280 * (both expedited and normal) will wait on it. Note that this means that
4281 * incoming CPUs are not allowed to use RCU read-side critical sections
4282 * until this function is called. Failing to observe this restriction
4283 * will result in lockdep splats.
4284 *
4285 * Note that this function is special in that it is invoked directly
4286 * from the incoming CPU rather than from the cpuhp_step mechanism.
4287 * This is because this function must be invoked at a precise location.
4288 * This incoming CPU must not have enabled interrupts yet.
4289 *
4290 * This mirrors the effects of rcutree_report_cpu_dead().
4291 */
4292 void rcutree_report_cpu_starting(unsigned int cpu)
4293 {
4294 unsigned long mask;
4295 struct rcu_data *rdp;
4296 struct rcu_node *rnp;
4297 bool newcpu;
4298
4299 lockdep_assert_irqs_disabled();
4300 rdp = per_cpu_ptr(&rcu_data, cpu);
4301 if (rdp->cpu_started)
4302 return;
4303 rdp->cpu_started = true;
4304
4305 rnp = rdp->mynode;
4306 mask = rdp->grpmask;
4307 arch_spin_lock(&rcu_state.ofl_lock);
4308 rcu_watching_online();
4309 raw_spin_lock(&rcu_state.barrier_lock);
4310 raw_spin_lock_rcu_node(rnp);
4311 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4312 raw_spin_unlock(&rcu_state.barrier_lock);
4313 newcpu = !(rnp->expmaskinitnext & mask);
4314 rnp->expmaskinitnext |= mask;
4315 /* Allow lockless access for expedited grace periods. */
4316 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4317 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4318 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4319 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4320 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state);
4321
4322 /* An incoming CPU should never be blocking a grace period. */
4323 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4324 /* rcu_report_qs_rnp() *really* wants some flags to restore */
4325 unsigned long flags;
4326
4327 local_irq_save(flags);
4328 rcu_disable_urgency_upon_qs(rdp);
4329 /* Report QS -after- changing ->qsmaskinitnext! */
4330 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4331 } else {
4332 raw_spin_unlock_rcu_node(rnp);
4333 }
4334 arch_spin_unlock(&rcu_state.ofl_lock);
4335 smp_store_release(&rdp->beenonline, true);
4336 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4337 }
4338
4339 /*
4340 * The outgoing CPU has no further need of RCU, so remove it from
4341 * the rcu_node tree's ->qsmaskinitnext bit masks.
4342 *
4343 * Note that this function is special in that it is invoked directly
4344 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4345 * This is because this function must be invoked at a precise location.
4346 *
4347 * This mirrors the effect of rcutree_report_cpu_starting().
4348 */
4349 void rcutree_report_cpu_dead(void)
4350 {
4351 unsigned long flags;
4352 unsigned long mask;
4353 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4354 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4355
4356 /*
4357 * IRQS must be disabled from now on and until the CPU dies, or an interrupt
4358 * may introduce a new READ-side while it is actually off the QS masks.
4359 */
4360 lockdep_assert_irqs_disabled();
4361 // Do any dangling deferred wakeups.
4362 do_nocb_deferred_wakeup(rdp);
4363
4364 rcu_preempt_deferred_qs(current);
4365
4366 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4367 mask = rdp->grpmask;
4368 arch_spin_lock(&rcu_state.ofl_lock);
4369 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4370 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4371 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state);
4372 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4373 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4374 rcu_disable_urgency_upon_qs(rdp);
4375 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4376 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4377 }
4378 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4379 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4380 arch_spin_unlock(&rcu_state.ofl_lock);
4381 rdp->cpu_started = false;
4382 }
4383
4384 #ifdef CONFIG_HOTPLUG_CPU
4385 /*
4386 * The outgoing CPU has just passed through the dying-idle state, and we
4387 * are being invoked from the CPU that was IPIed to continue the offline
4388 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4389 */
4390 void rcutree_migrate_callbacks(int cpu)
4391 {
4392 unsigned long flags;
4393 struct rcu_data *my_rdp;
4394 struct rcu_node *my_rnp;
4395 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4396 bool needwake;
4397
4398 if (rcu_rdp_is_offloaded(rdp))
4399 return;
4400
4401 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4402 if (rcu_segcblist_empty(&rdp->cblist)) {
4403 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4404 return; /* No callbacks to migrate. */
4405 }
4406
4407 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4408 rcu_barrier_entrain(rdp);
4409 my_rdp = this_cpu_ptr(&rcu_data);
4410 my_rnp = my_rdp->mynode;
4411 rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4412 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4413 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4414 /* Leverage recent GPs and set GP for new callbacks. */
4415 needwake = rcu_advance_cbs(my_rnp, rdp) ||
4416 rcu_advance_cbs(my_rnp, my_rdp);
4417 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4418 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4419 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4420 rcu_segcblist_disable(&rdp->cblist);
4421 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4422 check_cb_ovld_locked(my_rdp, my_rnp);
4423 if (rcu_rdp_is_offloaded(my_rdp)) {
4424 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4425 __call_rcu_nocb_wake(my_rdp, true, flags);
4426 } else {
4427 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4428 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4429 }
4430 local_irq_restore(flags);
4431 if (needwake)
4432 rcu_gp_kthread_wake();
4433 lockdep_assert_irqs_enabled();
4434 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4435 !rcu_segcblist_empty(&rdp->cblist),
4436 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4437 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4438 rcu_segcblist_first_cb(&rdp->cblist));
4439 }
4440
4441 /*
4442 * The CPU has been completely removed, and some other CPU is reporting
4443 * this fact from process context. Do the remainder of the cleanup.
4444 * There can only be one CPU hotplug operation at a time, so no need for
4445 * explicit locking.
4446 */
4447 int rcutree_dead_cpu(unsigned int cpu)
4448 {
4449 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
4450 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4451 // Stop-machine done, so allow nohz_full to disable tick.
4452 tick_dep_clear(TICK_DEP_BIT_RCU);
4453 return 0;
4454 }
4455
4456 /*
4457 * Near the end of the offline process. Trace the fact that this CPU
4458 * is going offline.
4459 */
4460 int rcutree_dying_cpu(unsigned int cpu)
4461 {
4462 bool blkd;
4463 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4464 struct rcu_node *rnp = rdp->mynode;
4465
4466 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4467 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4468 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4469 return 0;
4470 }
4471
4472 /*
4473 * Near the beginning of the CPU-offline process. The CPU is still very much alive
4474 * with pretty much all services enabled.
4475 */
4476 int rcutree_offline_cpu(unsigned int cpu)
4477 {
4478 unsigned long flags;
4479 struct rcu_data *rdp;
4480 struct rcu_node *rnp;
4481
4482 rdp = per_cpu_ptr(&rcu_data, cpu);
4483 rnp = rdp->mynode;
4484 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4485 rnp->ffmask &= ~rdp->grpmask;
4486 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4487
4488 // nohz_full CPUs need the tick for stop-machine to work quickly
4489 tick_dep_set(TICK_DEP_BIT_RCU);
4490 return 0;
4491 }
4492 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
4493
4494 /*
4495 * Use expedited RCU grace periods to make suspend and hibernation run
4496 * faster.
4497 */
4498 static int rcu_pm_notify(struct notifier_block *self,
4499 unsigned long action, void *hcpu)
4500 {
4501 switch (action) {
4502 case PM_HIBERNATION_PREPARE:
4503 case PM_SUSPEND_PREPARE:
4504 rcu_async_hurry();
4505 rcu_expedite_gp();
4506 break;
4507 case PM_POST_HIBERNATION:
4508 case PM_POST_SUSPEND:
4509 rcu_unexpedite_gp();
4510 rcu_async_relax();
4511 break;
4512 default:
4513 break;
4514 }
4515 return NOTIFY_OK;
4516 }
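
/*
 * rcu_expedite_gp() and rcu_unexpedite_gp() are reference-counted and
 * therefore nest, so (as a hedged sketch, not a recommendation) any code
 * path that needs faster grace periods for a bounded window could bracket
 * it the same way the PM notifier above does:
 *
 *	rcu_expedite_gp();
 *	// ...latency-sensitive transition...
 *	rcu_unexpedite_gp();
 *
 * The rcu_async_hurry()/rcu_async_relax() pair plays the analogous role
 * for callbacks whose invocation would otherwise be lazily deferred.
 */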
4517
4518 /*
4519 * Spawn the kthreads that handle RCU's grace periods.
4520 */
4521 static int __init rcu_spawn_gp_kthread(void)
4522 {
4523 unsigned long flags;
4524 struct rcu_node *rnp;
4525 struct sched_param sp;
4526 struct task_struct *t;
4527 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4528
4529 rcu_scheduler_fully_active = 1;
4530 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4531 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4532 return 0;
4533 if (kthread_prio) {
4534 sp.sched_priority = kthread_prio;
4535 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4536 }
4537 rnp = rcu_get_root();
4538 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4539 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4540 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4541 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4542 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
4543 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4544 wake_up_process(t);
4545 /* This is a pre-SMP initcall, so we expect a single CPU. */
4546 WARN_ON(num_online_cpus() > 1);
4547 /*
4548 * These kthreads could not be created earlier, in rcu_init() -> rcutree_prepare_cpu(),
4549 * because rcu_scheduler_fully_active was not yet set.
4550 */
4551 rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4552 rcu_spawn_rnp_kthreads(rdp->mynode);
4553 rcu_spawn_core_kthreads();
4554 /* Create kthread worker for expedited GPs */
4555 rcu_start_exp_gp_kworker();
4556 return 0;
4557 }
4558 early_initcall(rcu_spawn_gp_kthread);
4559
4560 /*
4561 * This function is invoked towards the end of the scheduler's
4562 * initialization process. Before this is called, the idle task might
4563 * invoke synchronous grace-period primitives (during which time, this idle
4564 * task is booting the system, and such primitives are no-ops). After this
4565 * function is called, any synchronous grace-period primitives are run as
4566 * expedited, with the requesting task driving the grace period forward.
4567 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4568 * runtime RCU functionality.
4569 */
4570 void rcu_scheduler_starting(void)
4571 {
4572 unsigned long flags;
4573 struct rcu_node *rnp;
4574
4575 WARN_ON(num_online_cpus() != 1);
4576 WARN_ON(nr_context_switches() > 0);
4577 rcu_test_sync_prims();
4578
4579 // Fix up the ->gp_seq counters.
4580 local_irq_save(flags);
4581 rcu_for_each_node_breadth_first(rnp)
4582 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4583 local_irq_restore(flags);
4584
4585 // Switch out of early boot mode.
4586 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4587 rcu_test_sync_prims();
4588 }
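
/*
 * Illustrative timeline of what the comment above implies for a caller of
 * synchronize_rcu() (a behavioral sketch, not literal code):
 *
 *	synchronize_rcu();	// before rcu_scheduler_starting(): a no-op,
 *				// only the boot idle task can be running
 *	rcu_scheduler_starting();
 *	synchronize_rcu();	// now run as an expedited grace period,
 *				// driven by the requesting task
 *	// ...core_initcall(rcu_set_runtime_mode)...
 *	synchronize_rcu();	// full runtime grace-period machinery
 */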
4589
4590 /*
4591 * Helper function for rcu_init() that initializes the rcu_state structure.
4592 */
4593 static void __init rcu_init_one(void)
4594 {
4595 static const char * const buf[] = RCU_NODE_NAME_INIT;
4596 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4597 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4598 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4599
4600 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
4601 int cpustride = 1;
4602 int i;
4603 int j;
4604 struct rcu_node *rnp;
4605
4606 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
4607
4608 /* Silence gcc 4.8 false positive about array index out of range. */
4609 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4610 panic("rcu_init_one: rcu_num_lvls out of range");
4611
4612 /* Initialize the level-tracking arrays. */
4613
4614 for (i = 1; i < rcu_num_lvls; i++)
4615 rcu_state.level[i] =
4616 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4617 rcu_init_levelspread(levelspread, num_rcu_lvl);
4618
4619 /* Initialize the elements themselves, starting from the leaves. */
4620
4621 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4622 cpustride *= levelspread[i];
4623 rnp = rcu_state.level[i];
4624 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4625 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4626 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4627 &rcu_node_class[i], buf[i]);
4628 raw_spin_lock_init(&rnp->fqslock);
4629 lockdep_set_class_and_name(&rnp->fqslock,
4630 &rcu_fqs_class[i], fqs[i]);
4631 rnp->gp_seq = rcu_state.gp_seq;
4632 rnp->gp_seq_needed = rcu_state.gp_seq;
4633 rnp->completedqs = rcu_state.gp_seq;
4634 rnp->qsmask = 0;
4635 rnp->qsmaskinit = 0;
4636 rnp->grplo = j * cpustride;
4637 rnp->grphi = (j + 1) * cpustride - 1;
4638 if (rnp->grphi >= nr_cpu_ids)
4639 rnp->grphi = nr_cpu_ids - 1;
4640 if (i == 0) {
4641 rnp->grpnum = 0;
4642 rnp->grpmask = 0;
4643 rnp->parent = NULL;
4644 } else {
4645 rnp->grpnum = j % levelspread[i - 1];
4646 rnp->grpmask = BIT(rnp->grpnum);
4647 rnp->parent = rcu_state.level[i - 1] +
4648 j / levelspread[i - 1];
4649 }
4650 rnp->level = i;
4651 INIT_LIST_HEAD(&rnp->blkd_tasks);
4652 rcu_init_one_nocb(rnp);
4653 init_waitqueue_head(&rnp->exp_wq[0]);
4654 init_waitqueue_head(&rnp->exp_wq[1]);
4655 init_waitqueue_head(&rnp->exp_wq[2]);
4656 init_waitqueue_head(&rnp->exp_wq[3]);
4657 spin_lock_init(&rnp->exp_lock);
4658 mutex_init(&rnp->kthread_mutex);
4659 raw_spin_lock_init(&rnp->exp_poll_lock);
4660 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4661 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4662 }
4663 }
4664
4665 init_swait_queue_head(&rcu_state.gp_wq);
4666 init_swait_queue_head(&rcu_state.expedited_wq);
4667 rnp = rcu_first_leaf_node();
4668 for_each_possible_cpu(i) {
4669 while (i > rnp->grphi)
4670 rnp++;
4671 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4672 per_cpu_ptr(&rcu_data, i)->barrier_head.next =
4673 &per_cpu_ptr(&rcu_data, i)->barrier_head;
4674 rcu_boot_init_percpu_data(i);
4675 }
4676 }
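
/*
 * Worked example (illustrative numbers only): in a two-level tree whose
 * leaf stride works out to 16 CPUs, the loop above gives leaf node j = 3
 * grplo = 3 * 16 = 48 and grphi = 4 * 16 - 1 = 63, so that leaf covers
 * CPUs 48-63, and its ->grpmask is the bit corresponding to its position
 * among its parent's children (->grpnum = j % levelspread[i - 1]).
 */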
4677
4678 /*
4679 * Force priority from the kernel command-line into range.
4680 */
4681 static void __init sanitize_kthread_prio(void)
4682 {
4683 int kthread_prio_in = kthread_prio;
4684
4685 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4686 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4687 kthread_prio = 2;
4688 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4689 kthread_prio = 1;
4690 else if (kthread_prio < 0)
4691 kthread_prio = 0;
4692 else if (kthread_prio > 99)
4693 kthread_prio = 99;
4694
4695 if (kthread_prio != kthread_prio_in)
4696 pr_alert("%s: Limited prio to %d from %d\n",
4697 __func__, kthread_prio, kthread_prio_in);
4698 }
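
/*
 * For example (hypothetical boot parameters): rcutree.kthread_prio=150 is
 * clamped to 99, and rcutree.kthread_prio=0 on a CONFIG_RCU_BOOST=y kernel
 * is raised to 1, each change being reported via the pr_alert() above.
 */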
4699
4700 /*
4701 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4702 * replace the definitions in tree.h because those are needed to size
4703 * the ->node array in the rcu_state structure.
4704 */
4705 void rcu_init_geometry(void)
4706 {
4707 ulong d;
4708 int i;
4709 static unsigned long old_nr_cpu_ids;
4710 int rcu_capacity[RCU_NUM_LVLS];
4711 static bool initialized;
4712
4713 if (initialized) {
4714 /*
4715 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4716 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4717 */
4718 WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4719 return;
4720 }
4721
4722 old_nr_cpu_ids = nr_cpu_ids;
4723 initialized = true;
4724
4725 /*
4726 * Initialize any unspecified boot parameters.
4727 * The default values of jiffies_till_first_fqs and
4728 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4729 * value (which is a function of HZ), plus one for each
4730 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4731 */
4732 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4733 if (jiffies_till_first_fqs == ULONG_MAX)
4734 jiffies_till_first_fqs = d;
4735 if (jiffies_till_next_fqs == ULONG_MAX)
4736 jiffies_till_next_fqs = d;
4737 adjust_jiffies_till_sched_qs();
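
/*
 * Worked example (assuming the usual definitions, e.g. an
 * RCU_JIFFIES_TILL_FORCE_QS of 3 jiffies at HZ=1000 and an
 * RCU_JIFFIES_FQS_DIV of 256): with nr_cpu_ids = 1024,
 * d = 3 + 1024 / 256 = 7 jiffies between forced-quiescent-state scans.
 */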
4738
4739 /* If the compile-time values are accurate, just leave. */
4740 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4741 nr_cpu_ids == NR_CPUS)
4742 return;
4743 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4744 rcu_fanout_leaf, nr_cpu_ids);
4745
4746 /*
4747 * The boot-time rcu_fanout_leaf parameter must be at least two
4748 * and cannot exceed the number of bits in the rcu_node masks.
4749 * Complain and fall back to the compile-time values if this
4750 * limit is exceeded.
4751 */
4752 if (rcu_fanout_leaf < 2 || rcu_fanout_leaf > BITS_PER_LONG) {
4753 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4754 WARN_ON(1);
4755 return;
4756 }
4757
4758 /*
4759 * Compute the number of CPUs that can be handled by an rcu_node tree
4760 * with the given number of levels.
4761 */
4762 rcu_capacity[0] = rcu_fanout_leaf;
4763 for (i = 1; i < RCU_NUM_LVLS; i++)
4764 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4765
4766 /*
4767 * The tree must be able to accommodate the configured number of CPUs.
4768 * If this limit is exceeded, fall back to the compile-time values.
4769 */
4770 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4771 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4772 WARN_ON(1);
4773 return;
4774 }
4775
4776 /* Calculate the number of levels in the tree. */
4777 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4778 }
4779 rcu_num_lvls = i + 1;
4780
4781 /* Calculate the number of rcu_nodes at each level of the tree. */
4782 for (i = 0; i < rcu_num_lvls; i++) {
4783 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4784 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4785 }
4786
4787 /* Calculate the total number of rcu_node structures. */
4788 rcu_num_nodes = 0;
4789 for (i = 0; i < rcu_num_lvls; i++)
4790 rcu_num_nodes += num_rcu_lvl[i];
4791 }
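
/*
 * Worked example (hypothetical configuration, assuming the common 64-bit
 * defaults rcu_fanout_leaf = 16 and RCU_FANOUT = 64) for nr_cpu_ids = 96:
 *
 *	rcu_capacity[]	= { 16, 1024, ... }	// CPUs per number of levels
 *	rcu_num_lvls	= 2			// 96 > 16 but 96 <= 1024
 *	num_rcu_lvl[]	= { DIV_ROUND_UP(96, 1024) = 1,		// root
 *			    DIV_ROUND_UP(96,   16) = 6 }	// leaves
 *	rcu_num_nodes	= 7
 */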
4792
4793 /*
4794 * Dump out the structure of the rcu_node combining tree associated
4795 * with the rcu_state structure.
4796 */
4797 static void __init rcu_dump_rcu_node_tree(void)
4798 {
4799 int level = 0;
4800 struct rcu_node *rnp;
4801
4802 pr_info("rcu_node tree layout dump\n");
4803 pr_info(" ");
4804 rcu_for_each_node_breadth_first(rnp) {
4805 if (rnp->level != level) {
4806 pr_cont("\n");
4807 pr_info(" ");
4808 level = rnp->level;
4809 }
4810 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4811 }
4812 pr_cont("\n");
4813 }
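
/*
 * Sample output for the hypothetical 96-CPU, two-level geometry used in the
 * examples above (the exact layout depends on the configured fanouts and on
 * rcu_init_levelspread()):
 *
 *	rcu_node tree layout dump
 *	 0:95 ^0
 *	 0:15 ^0 16:31 ^1 32:47 ^2 48:63 ^3 64:79 ^4 80:95 ^5
 */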
4814
4815 struct workqueue_struct *rcu_gp_wq;
4816
4817 void __init rcu_init(void)
4818 {
4819 int cpu = smp_processor_id();
4820
4821 rcu_early_boot_tests();
4822
4823 rcu_bootup_announce();
4824 sanitize_kthread_prio();
4825 rcu_init_geometry();
4826 rcu_init_one();
4827 if (dump_tree)
4828 rcu_dump_rcu_node_tree();
4829 if (use_softirq)
4830 open_softirq(RCU_SOFTIRQ, rcu_core_si);
4831
4832 /*
4833 * We don't need protection against CPU-hotplug here because
4834 * this is called early in boot, before interrupts are enabled and before
4835 * the scheduler is operational.
4836 */
4837 pm_notifier(rcu_pm_notify, 0);
4838 WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
4839 rcutree_prepare_cpu(cpu);
4840 rcutree_report_cpu_starting(cpu);
4841 rcutree_online_cpu(cpu);
4842
4843 /* Create workqueue for Tree SRCU and for expedited GPs. */
4844 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4845 WARN_ON(!rcu_gp_wq);
4846
4847 sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
4848 WARN_ON(!sync_wq);
4849
4850 /* Fill in default value for rcutree.qovld boot parameter. */
4851 /* -After- the rcu_node ->lock fields are initialized! */
4852 if (qovld < 0)
4853 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4854 else
4855 qovld_calc = qovld;
4856
4857 // Kick-start in case any polled grace periods started early.
4858 (void)start_poll_synchronize_rcu_expedited();
4859
4860 rcu_test_sync_prims();
4861
4862 tasks_cblist_init_generic();
4863 }
4864
4865 #include "tree_stall.h"
4866 #include "tree_exp.h"
4867 #include "tree_nocb.h"
4868 #include "tree_plugin.h"
4869