1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 4 * 5 * Copyright IBM Corporation, 2008 6 * 7 * Authors: Dipankar Sarma <dipankar@in.ibm.com> 8 * Manfred Spraul <manfred@colorfullife.com> 9 * Paul E. McKenney <paulmck@linux.ibm.com> 10 * 11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com> 12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 13 * 14 * For detailed explanation of Read-Copy Update mechanism see - 15 * Documentation/RCU 16 */ 17 18 #define pr_fmt(fmt) "rcu: " fmt 19 20 #include <linux/types.h> 21 #include <linux/kernel.h> 22 #include <linux/init.h> 23 #include <linux/spinlock.h> 24 #include <linux/smp.h> 25 #include <linux/rcupdate_wait.h> 26 #include <linux/interrupt.h> 27 #include <linux/sched.h> 28 #include <linux/sched/debug.h> 29 #include <linux/nmi.h> 30 #include <linux/atomic.h> 31 #include <linux/bitops.h> 32 #include <linux/export.h> 33 #include <linux/completion.h> 34 #include <linux/kmemleak.h> 35 #include <linux/moduleparam.h> 36 #include <linux/panic.h> 37 #include <linux/panic_notifier.h> 38 #include <linux/percpu.h> 39 #include <linux/notifier.h> 40 #include <linux/cpu.h> 41 #include <linux/mutex.h> 42 #include <linux/time.h> 43 #include <linux/kernel_stat.h> 44 #include <linux/wait.h> 45 #include <linux/kthread.h> 46 #include <uapi/linux/sched/types.h> 47 #include <linux/prefetch.h> 48 #include <linux/delay.h> 49 #include <linux/random.h> 50 #include <linux/trace_events.h> 51 #include <linux/suspend.h> 52 #include <linux/ftrace.h> 53 #include <linux/tick.h> 54 #include <linux/sysrq.h> 55 #include <linux/kprobes.h> 56 #include <linux/gfp.h> 57 #include <linux/oom.h> 58 #include <linux/smpboot.h> 59 #include <linux/jiffies.h> 60 #include <linux/slab.h> 61 #include <linux/sched/isolation.h> 62 #include <linux/sched/clock.h> 63 #include <linux/vmalloc.h> 64 #include <linux/mm.h> 65 #include <linux/kasan.h> 66 #include <linux/context_tracking.h> 67 #include "../time/tick-internal.h" 68 69 #include "tree.h" 70 #include "rcu.h" 71 72 #ifdef MODULE_PARAM_PREFIX 73 #undef MODULE_PARAM_PREFIX 74 #endif 75 #define MODULE_PARAM_PREFIX "rcutree." 76 77 /* Data structures. */ 78 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *); 79 80 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { 81 .gpwrap = true, 82 }; 83 84 int rcu_get_gpwrap_count(int cpu) 85 { 86 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 87 88 return READ_ONCE(rdp->gpwrap_count); 89 } 90 EXPORT_SYMBOL_GPL(rcu_get_gpwrap_count); 91 92 static struct rcu_state rcu_state = { 93 .level = { &rcu_state.node[0] }, 94 .gp_state = RCU_GP_IDLE, 95 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, 96 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), 97 .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock), 98 .name = RCU_NAME, 99 .abbr = RCU_ABBR, 100 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex), 101 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex), 102 .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED, 103 .srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work, 104 rcu_sr_normal_gp_cleanup_work), 105 .srs_cleanups_pending = ATOMIC_INIT(0), 106 #ifdef CONFIG_RCU_NOCB_CPU 107 .nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex), 108 #endif 109 }; 110 111 /* Dump rcu_node combining tree at boot to verify correct setup. 
*/ 112 static bool dump_tree; 113 module_param(dump_tree, bool, 0444); 114 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */ 115 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT); 116 #ifndef CONFIG_PREEMPT_RT 117 module_param(use_softirq, bool, 0444); 118 #endif 119 /* Control rcu_node-tree auto-balancing at boot time. */ 120 static bool rcu_fanout_exact; 121 module_param(rcu_fanout_exact, bool, 0444); 122 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */ 123 static int rcu_fanout_leaf = RCU_FANOUT_LEAF; 124 module_param(rcu_fanout_leaf, int, 0444); 125 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; 126 /* Number of rcu_nodes at specified level. */ 127 int num_rcu_lvl[] = NUM_RCU_LVL_INIT; 128 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ 129 130 /* 131 * The rcu_scheduler_active variable is initialized to the value 132 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the 133 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE, 134 * RCU can assume that there is but one task, allowing RCU to (for example) 135 * optimize synchronize_rcu() to a simple barrier(). When this variable 136 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required 137 * to detect real grace periods. This variable is also used to suppress 138 * boot-time false positives from lockdep-RCU error checking. Finally, it 139 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU 140 * is fully initialized, including all of its kthreads having been spawned. 141 */ 142 int rcu_scheduler_active __read_mostly; 143 EXPORT_SYMBOL_GPL(rcu_scheduler_active); 144 145 /* 146 * The rcu_scheduler_fully_active variable transitions from zero to one 147 * during the early_initcall() processing, which is after the scheduler 148 * is capable of creating new tasks. So RCU processing (for example, 149 * creating tasks for RCU priority boosting) must be delayed until after 150 * rcu_scheduler_fully_active transitions from zero to one. We also 151 * currently delay invocation of any RCU callbacks until after this point. 152 * 153 * It might later prove better for people registering RCU callbacks during 154 * early boot to take responsibility for these callbacks, but one step at 155 * a time. 156 */ 157 static int rcu_scheduler_fully_active __read_mostly; 158 159 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 160 unsigned long gps, unsigned long flags); 161 static void invoke_rcu_core(void); 162 static void rcu_report_exp_rdp(struct rcu_data *rdp); 163 static void rcu_report_qs_rdp(struct rcu_data *rdp); 164 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp); 165 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp); 166 static bool rcu_rdp_cpu_online(struct rcu_data *rdp); 167 static bool rcu_init_invoked(void); 168 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); 169 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf); 170 171 /* 172 * rcuc/rcub/rcuop kthread realtime priority. The "rcuop" 173 * real-time priority(enabling/disabling) is controlled by 174 * the extra CONFIG_RCU_NOCB_CPU_CB_BOOST configuration. 175 */ 176 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0; 177 module_param(kthread_prio, int, 0444); 178 179 /* Delay in jiffies for grace-period initialization delays, debug only. 
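 *
 * (Aside on rcu_scheduler_active, defined above: the RCU_SCHEDULER_INACTIVE
 * state is what lets synchronize_rcu() be optimized down to a simple
 * barrier() before the first task is spawned.  A minimal conceptual sketch
 * of that early-boot shortcut, not the exact in-tree code:
 *
 *	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 *		return;	// only one task exists, so no reader can run
 *			// concurrently and a grace period is trivial
 *	// otherwise fall through to the real grace-period machinery
 *
 * Once the state reaches RCU_SCHEDULER_INIT, real grace periods are
 * required, as described in the comment above.)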
*/ 180 181 static int gp_preinit_delay; 182 module_param(gp_preinit_delay, int, 0444); 183 static int gp_init_delay; 184 module_param(gp_init_delay, int, 0444); 185 static int gp_cleanup_delay; 186 module_param(gp_cleanup_delay, int, 0444); 187 static int nohz_full_patience_delay; 188 module_param(nohz_full_patience_delay, int, 0444); 189 static int nohz_full_patience_delay_jiffies; 190 191 // Add delay to rcu_read_unlock() for strict grace periods. 192 static int rcu_unlock_delay; 193 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD 194 module_param(rcu_unlock_delay, int, 0444); 195 #endif 196 197 /* Retrieve RCU kthreads priority for rcutorture */ 198 int rcu_get_gp_kthreads_prio(void) 199 { 200 return kthread_prio; 201 } 202 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio); 203 204 /* 205 * Number of grace periods between delays, normalized by the duration of 206 * the delay. The longer the delay, the more the grace periods between 207 * each delay. The reason for this normalization is that it means that, 208 * for non-zero delays, the overall slowdown of grace periods is constant 209 * regardless of the duration of the delay. This arrangement balances 210 * the need for long delays to increase some race probabilities with the 211 * need for fast grace periods to increase other race probabilities. 212 */ 213 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */ 214 215 /* 216 * Return true if an RCU grace period is in progress. The READ_ONCE()s 217 * permit this function to be invoked without holding the root rcu_node 218 * structure's ->lock, but of course results can be subject to change. 219 */ 220 static int rcu_gp_in_progress(void) 221 { 222 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); 223 } 224 225 /* 226 * Return the number of callbacks queued on the specified CPU. 227 * Handles both the nocbs and normal cases. 228 */ 229 static long rcu_get_n_cbs_cpu(int cpu) 230 { 231 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 232 233 if (rcu_segcblist_is_enabled(&rdp->cblist)) 234 return rcu_segcblist_n_cbs(&rdp->cblist); 235 return 0; 236 } 237 238 /** 239 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing 240 * 241 * Mark a quiescent state for RCU, Tasks RCU, and Tasks Trace RCU. 242 * This is a special-purpose function to be used in the softirq 243 * infrastructure and perhaps the occasional long-running softirq 244 * handler. 245 * 246 * Note that from RCU's viewpoint, a call to rcu_softirq_qs() is 247 * equivalent to momentarily completely enabling preemption. For 248 * example, given this code:: 249 * 250 * local_bh_disable(); 251 * do_something(); 252 * rcu_softirq_qs(); // A 253 * do_something_else(); 254 * local_bh_enable(); // B 255 * 256 * A call to synchronize_rcu() that began concurrently with the 257 * call to do_something() would be guaranteed to wait only until 258 * execution reached statement A. Without that rcu_softirq_qs(), 259 * that same synchronize_rcu() would instead be guaranteed to wait 260 * until execution reached statement B. 
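 *
 * As a further illustration (a hypothetical sketch, not code from this
 * file), a long-running softirq-style loop might report quiescent states
 * periodically so that it does not hold up grace periods::
 *
 *	while (have_more_work()) {		// hypothetical helpers
 *		process_one_item();
 *		if (++n % 256 == 0)		// every so often...
 *			rcu_softirq_qs();	// ...report quiescent states
 *	}
 *
 * Only rcu_softirq_qs() above is real; the helpers and the batch size are
 * placeholders.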
261 */ 262 void rcu_softirq_qs(void) 263 { 264 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 265 lock_is_held(&rcu_lock_map) || 266 lock_is_held(&rcu_sched_lock_map), 267 "Illegal rcu_softirq_qs() in RCU read-side critical section"); 268 rcu_qs(); 269 rcu_preempt_deferred_qs(current); 270 rcu_tasks_qs(current, false); 271 } 272 273 /* 274 * Reset the current CPU's RCU_WATCHING counter to indicate that the 275 * newly onlined CPU is no longer in an extended quiescent state. 276 * This will either leave the counter unchanged, or increment it 277 * to the next non-quiescent value. 278 * 279 * The non-atomic test/increment sequence works because the upper bits 280 * of the ->state variable are manipulated only by the corresponding CPU, 281 * or when the corresponding CPU is offline. 282 */ 283 static void rcu_watching_online(void) 284 { 285 if (ct_rcu_watching() & CT_RCU_WATCHING) 286 return; 287 ct_state_inc(CT_RCU_WATCHING); 288 } 289 290 /* 291 * Return true if the snapshot returned from ct_rcu_watching() 292 * indicates that RCU is in an extended quiescent state. 293 */ 294 static bool rcu_watching_snap_in_eqs(int snap) 295 { 296 return !(snap & CT_RCU_WATCHING); 297 } 298 299 /** 300 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU 301 * since the specified @snap? 302 * 303 * @rdp: The rcu_data corresponding to the CPU for which to check EQS. 304 * @snap: rcu_watching snapshot taken when the CPU wasn't in an EQS. 305 * 306 * Returns true if the CPU corresponding to @rdp has spent some time in an 307 * extended quiescent state since @snap. Note that this doesn't check if it 308 * /still/ is in an EQS, just that it went through one since @snap. 309 * 310 * This is meant to be used in a loop waiting for a CPU to go through an EQS. 311 */ 312 static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap) 313 { 314 /* 315 * The first failing snapshot is already ordered against the accesses 316 * performed by the remote CPU after it exits idle. 317 * 318 * The second snapshot therefore only needs to order against accesses 319 * performed by the remote CPU prior to entering idle and therefore can 320 * rely solely on acquire semantics. 321 */ 322 if (WARN_ON_ONCE(rcu_watching_snap_in_eqs(snap))) 323 return true; 324 325 return snap != ct_rcu_watching_cpu_acquire(rdp->cpu); 326 } 327 328 /* 329 * Return true if the referenced integer is zero while the specified 330 * CPU remains within a single extended quiescent state. 331 */ 332 bool rcu_watching_zero_in_eqs(int cpu, int *vp) 333 { 334 int snap; 335 336 // If not quiescent, force back to earlier extended quiescent state. 337 snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING; 338 smp_rmb(); // Order CT state and *vp reads. 339 if (READ_ONCE(*vp)) 340 return false; // Non-zero, so report failure; 341 smp_rmb(); // Order *vp read and CT state re-read. 342 343 // If still in the same extended quiescent state, we are good! 344 return snap == ct_rcu_watching_cpu(cpu); 345 } 346 347 /* 348 * Let the RCU core know that this CPU has gone through the scheduler, 349 * which is a quiescent state. This is called when the need for a 350 * quiescent state is urgent, so we burn an atomic operation and full 351 * memory barriers to let the RCU core know about it, regardless of what 352 * this CPU might (or might not) do in the near future. 353 * 354 * We inform the RCU core by emulating a zero-duration dyntick-idle period. 355 * 356 * The caller must have disabled interrupts and must not be idle. 
357 */ 358 notrace void rcu_momentary_eqs(void) 359 { 360 int seq; 361 362 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); 363 seq = ct_state_inc(2 * CT_RCU_WATCHING); 364 /* It is illegal to call this from idle state. */ 365 WARN_ON_ONCE(!(seq & CT_RCU_WATCHING)); 366 rcu_preempt_deferred_qs(current); 367 } 368 EXPORT_SYMBOL_GPL(rcu_momentary_eqs); 369 370 /** 371 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle 372 * 373 * If the current CPU is idle and running at a first-level (not nested) 374 * interrupt, or directly, from idle, return true. 375 * 376 * The caller must have at least disabled IRQs. 377 */ 378 static int rcu_is_cpu_rrupt_from_idle(void) 379 { 380 long nmi_nesting = ct_nmi_nesting(); 381 382 /* 383 * Usually called from the tick; but also used from smp_function_call() 384 * for expedited grace periods. This latter can result in running from 385 * the idle task, instead of an actual IPI. 386 */ 387 lockdep_assert_irqs_disabled(); 388 389 /* Check for counter underflows */ 390 RCU_LOCKDEP_WARN(ct_nesting() < 0, 391 "RCU nesting counter underflow!"); 392 393 /* Non-idle interrupt or nested idle interrupt */ 394 if (nmi_nesting > 1) 395 return false; 396 397 /* 398 * Non nested idle interrupt (interrupting section where RCU 399 * wasn't watching). 400 */ 401 if (nmi_nesting == 1) 402 return true; 403 404 /* Not in an interrupt */ 405 if (!nmi_nesting) { 406 RCU_LOCKDEP_WARN(!in_task() || !is_idle_task(current), 407 "RCU nmi_nesting counter not in idle task!"); 408 return !rcu_is_watching_curr_cpu(); 409 } 410 411 RCU_LOCKDEP_WARN(1, "RCU nmi_nesting counter underflow/zero!"); 412 413 return false; 414 } 415 416 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10) 417 // Maximum callbacks per rcu_do_batch ... 418 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood. 419 static long blimit = DEFAULT_RCU_BLIMIT; 420 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit. 421 static long qhimark = DEFAULT_RCU_QHIMARK; 422 #define DEFAULT_RCU_QLOMARK 100 // Once only this many pending, use blimit. 423 static long qlowmark = DEFAULT_RCU_QLOMARK; 424 #define DEFAULT_RCU_QOVLD_MULT 2 425 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK) 426 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS. 427 static long qovld_calc = -1; // No pre-initialization lock acquisitions! 428 429 module_param(blimit, long, 0444); 430 module_param(qhimark, long, 0444); 431 module_param(qlowmark, long, 0444); 432 module_param(qovld, long, 0444); 433 434 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX; 435 static ulong jiffies_till_next_fqs = ULONG_MAX; 436 static bool rcu_kick_kthreads; 437 static int rcu_divisor = 7; 438 module_param(rcu_divisor, int, 0644); 439 440 /* Force an exit from rcu_do_batch() after 3 milliseconds. */ 441 static long rcu_resched_ns = 3 * NSEC_PER_MSEC; 442 module_param(rcu_resched_ns, long, 0644); 443 444 /* 445 * How long the grace period must be before we start recruiting 446 * quiescent-state help from rcu_note_context_switch(). 447 */ 448 static ulong jiffies_till_sched_qs = ULONG_MAX; 449 module_param(jiffies_till_sched_qs, ulong, 0444); 450 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */ 451 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! 
*/ 452 453 /* 454 * Make sure that we give the grace-period kthread time to detect any 455 * idle CPUs before taking active measures to force quiescent states. 456 * However, don't go below 100 milliseconds, adjusted upwards for really 457 * large systems. 458 */ 459 static void adjust_jiffies_till_sched_qs(void) 460 { 461 unsigned long j; 462 463 /* If jiffies_till_sched_qs was specified, respect the request. */ 464 if (jiffies_till_sched_qs != ULONG_MAX) { 465 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs); 466 return; 467 } 468 /* Otherwise, set to third fqs scan, but bound below on large system. */ 469 j = READ_ONCE(jiffies_till_first_fqs) + 470 2 * READ_ONCE(jiffies_till_next_fqs); 471 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV) 472 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 473 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); 474 WRITE_ONCE(jiffies_to_sched_qs, j); 475 } 476 477 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp) 478 { 479 ulong j; 480 int ret = kstrtoul(val, 0, &j); 481 482 if (!ret) { 483 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); 484 adjust_jiffies_till_sched_qs(); 485 } 486 return ret; 487 } 488 489 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp) 490 { 491 ulong j; 492 int ret = kstrtoul(val, 0, &j); 493 494 if (!ret) { 495 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1)); 496 adjust_jiffies_till_sched_qs(); 497 } 498 return ret; 499 } 500 501 static const struct kernel_param_ops first_fqs_jiffies_ops = { 502 .set = param_set_first_fqs_jiffies, 503 .get = param_get_ulong, 504 }; 505 506 static const struct kernel_param_ops next_fqs_jiffies_ops = { 507 .set = param_set_next_fqs_jiffies, 508 .get = param_get_ulong, 509 }; 510 511 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644); 512 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644); 513 module_param(rcu_kick_kthreads, bool, 0644); 514 515 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); 516 static int rcu_pending(int user); 517 518 /* 519 * Return the number of RCU GPs completed thus far for debug & stats. 520 */ 521 unsigned long rcu_get_gp_seq(void) 522 { 523 return READ_ONCE(rcu_state.gp_seq); 524 } 525 EXPORT_SYMBOL_GPL(rcu_get_gp_seq); 526 527 /* 528 * Return the number of RCU expedited batches completed thus far for 529 * debug & stats. Odd numbers mean that a batch is in progress, even 530 * numbers mean idle. The value returned will thus be roughly double 531 * the cumulative batches since boot. 532 */ 533 unsigned long rcu_exp_batches_completed(void) 534 { 535 return rcu_state.expedited_sequence; 536 } 537 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); 538 539 /* 540 * Return the root node of the rcu_state structure. 541 */ 542 static struct rcu_node *rcu_get_root(void) 543 { 544 return &rcu_state.node[0]; 545 } 546 547 /* 548 * Send along grace-period-related data for rcutorture diagnostics. 549 */ 550 void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq) 551 { 552 *flags = READ_ONCE(rcu_state.gp_flags); 553 *gp_seq = rcu_seq_current(&rcu_state.gp_seq); 554 } 555 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); 556 557 /* Gather grace-period sequence numbers for rcutorture diagnostics. 
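 *
 * Informal layout of the packed 64-bit value built below (read off the
 * shifts and masks, not a separate ABI): bits 40-55 carry the low 16 bits
 * of rcu_state.gp_seq, bits 16-39 the low 24 bits of
 * rcu_state.expedited_sequence, and bits 0-15 the low 16 bits of
 * rcu_state.gp_seq_polled.  rcutorture_format_gp_seqs() renders such a
 * value as, for example, "g0012:e000034:p0056".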
*/ 558 unsigned long long rcutorture_gather_gp_seqs(void) 559 { 560 return ((READ_ONCE(rcu_state.gp_seq) & 0xffffULL) << 40) | 561 ((READ_ONCE(rcu_state.expedited_sequence) & 0xffffffULL) << 16) | 562 (READ_ONCE(rcu_state.gp_seq_polled) & 0xffffULL); 563 } 564 EXPORT_SYMBOL_GPL(rcutorture_gather_gp_seqs); 565 566 /* Format grace-period sequence numbers for rcutorture diagnostics. */ 567 void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len) 568 { 569 unsigned int egp = (seqs >> 16) & 0xffffffULL; 570 unsigned int ggp = (seqs >> 40) & 0xffffULL; 571 unsigned int pgp = seqs & 0xffffULL; 572 573 snprintf(cp, len, "g%04x:e%06x:p%04x", ggp, egp, pgp); 574 } 575 EXPORT_SYMBOL_GPL(rcutorture_format_gp_seqs); 576 577 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK)) 578 /* 579 * An empty function that will trigger a reschedule on 580 * IRQ tail once IRQs get re-enabled on userspace/guest resume. 581 */ 582 static void late_wakeup_func(struct irq_work *work) 583 { 584 } 585 586 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) = 587 IRQ_WORK_INIT(late_wakeup_func); 588 589 /* 590 * If either: 591 * 592 * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work 593 * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry. 594 * 595 * In these cases the late RCU wake ups aren't supported in the resched loops and our 596 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs 597 * get re-enabled again. 598 */ 599 noinstr void rcu_irq_work_resched(void) 600 { 601 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 602 603 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU)) 604 return; 605 606 if (IS_ENABLED(CONFIG_VIRT_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU)) 607 return; 608 609 instrumentation_begin(); 610 if (do_nocb_deferred_wakeup(rdp) && need_resched()) { 611 irq_work_queue(this_cpu_ptr(&late_wakeup_work)); 612 } 613 instrumentation_end(); 614 } 615 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK)) */ 616 617 #ifdef CONFIG_PROVE_RCU 618 /** 619 * rcu_irq_exit_check_preempt - Validate that scheduling is possible 620 */ 621 void rcu_irq_exit_check_preempt(void) 622 { 623 lockdep_assert_irqs_disabled(); 624 625 RCU_LOCKDEP_WARN(ct_nesting() <= 0, 626 "RCU nesting counter underflow/zero!"); 627 RCU_LOCKDEP_WARN(ct_nmi_nesting() != 628 CT_NESTING_IRQ_NONIDLE, 629 "Bad RCU nmi_nesting counter\n"); 630 RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(), 631 "RCU in extended quiescent state!"); 632 } 633 #endif /* #ifdef CONFIG_PROVE_RCU */ 634 635 #ifdef CONFIG_NO_HZ_FULL 636 /** 637 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it. 638 * 639 * The scheduler tick is not normally enabled when CPUs enter the kernel 640 * from nohz_full userspace execution. After all, nohz_full userspace 641 * execution is an RCU quiescent state and the time executing in the kernel 642 * is quite short. Except of course when it isn't. And it is not hard to 643 * cause a large system to spend tens of seconds or even minutes looping 644 * in the kernel, which can cause a number of problems, include RCU CPU 645 * stall warnings. 
646 * 647 * Therefore, if a nohz_full CPU fails to report a quiescent state 648 * in a timely manner, the RCU grace-period kthread sets that CPU's 649 * ->rcu_urgent_qs flag with the expectation that the next interrupt or 650 * exception will invoke this function, which will turn on the scheduler 651 * tick, which will enable RCU to detect that CPU's quiescent states, 652 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels. 653 * The tick will be disabled once a quiescent state is reported for 654 * this CPU. 655 * 656 * Of course, in carefully tuned systems, there might never be an 657 * interrupt or exception. In that case, the RCU grace-period kthread 658 * will eventually cause one to happen. However, in less carefully 659 * controlled environments, this function allows RCU to get what it 660 * needs without creating otherwise useless interruptions. 661 */ 662 void __rcu_irq_enter_check_tick(void) 663 { 664 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 665 666 // If we're here from NMI there's nothing to do. 667 if (in_nmi()) 668 return; 669 670 RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(), 671 "Illegal rcu_irq_enter_check_tick() from extended quiescent state"); 672 673 if (!tick_nohz_full_cpu(rdp->cpu) || 674 !READ_ONCE(rdp->rcu_urgent_qs) || 675 READ_ONCE(rdp->rcu_forced_tick)) { 676 // RCU doesn't need nohz_full help from this CPU, or it is 677 // already getting that help. 678 return; 679 } 680 681 // We get here only when not in an extended quiescent state and 682 // from interrupts (as opposed to NMIs). Therefore, (1) RCU is 683 // already watching and (2) The fact that we are in an interrupt 684 // handler and that the rcu_node lock is an irq-disabled lock 685 // prevents self-deadlock. So we can safely recheck under the lock. 686 // Note that the nohz_full state currently cannot change. 687 raw_spin_lock_rcu_node(rdp->mynode); 688 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) { 689 // A nohz_full CPU is in the kernel and RCU needs a 690 // quiescent state. Turn on the tick! 691 WRITE_ONCE(rdp->rcu_forced_tick, true); 692 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 693 } 694 raw_spin_unlock_rcu_node(rdp->mynode); 695 } 696 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick); 697 #endif /* CONFIG_NO_HZ_FULL */ 698 699 /* 700 * Check to see if any future non-offloaded RCU-related work will need 701 * to be done by the current CPU, even if none need be done immediately, 702 * returning 1 if so. This function is part of the RCU implementation; 703 * it is -not- an exported member of the RCU API. This is used by 704 * the idle-entry code to figure out whether it is safe to disable the 705 * scheduler-clock interrupt. 706 * 707 * Just check whether or not this CPU has non-offloaded RCU callbacks 708 * queued. 709 */ 710 int rcu_needs_cpu(void) 711 { 712 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && 713 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data)); 714 } 715 716 /* 717 * If any sort of urgency was applied to the current CPU (for example, 718 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order 719 * to get to a quiescent state, disable it. 
720 */ 721 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) 722 { 723 raw_lockdep_assert_held_rcu_node(rdp->mynode); 724 WRITE_ONCE(rdp->rcu_urgent_qs, false); 725 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); 726 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { 727 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 728 WRITE_ONCE(rdp->rcu_forced_tick, false); 729 } 730 } 731 732 /** 733 * rcu_is_watching - RCU read-side critical sections permitted on current CPU? 734 * 735 * Return @true if RCU is watching the running CPU and @false otherwise. 736 * An @true return means that this CPU can safely enter RCU read-side 737 * critical sections. 738 * 739 * Although calls to rcu_is_watching() from most parts of the kernel 740 * will return @true, there are important exceptions. For example, if the 741 * current CPU is deep within its idle loop, in kernel entry/exit code, 742 * or offline, rcu_is_watching() will return @false. 743 * 744 * Make notrace because it can be called by the internal functions of 745 * ftrace, and making this notrace removes unnecessary recursion calls. 746 */ 747 notrace bool rcu_is_watching(void) 748 { 749 bool ret; 750 751 preempt_disable_notrace(); 752 ret = rcu_is_watching_curr_cpu(); 753 preempt_enable_notrace(); 754 return ret; 755 } 756 EXPORT_SYMBOL_GPL(rcu_is_watching); 757 758 /* 759 * If a holdout task is actually running, request an urgent quiescent 760 * state from its CPU. This is unsynchronized, so migrations can cause 761 * the request to go to the wrong CPU. Which is OK, all that will happen 762 * is that the CPU's next context switch will be a bit slower and next 763 * time around this task will generate another request. 764 */ 765 void rcu_request_urgent_qs_task(struct task_struct *t) 766 { 767 int cpu; 768 769 barrier(); 770 cpu = task_cpu(t); 771 if (!task_curr(t)) 772 return; /* This task is not running on that CPU. */ 773 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); 774 } 775 776 static unsigned long seq_gpwrap_lag = ULONG_MAX / 4; 777 778 /** 779 * rcu_set_gpwrap_lag - Set RCU GP sequence overflow lag value. 780 * @lag_gps: Set overflow lag to this many grace period worth of counters 781 * which is used by rcutorture to quickly force a gpwrap situation. 782 * @lag_gps = 0 means we reset it back to the boot-time value. 783 */ 784 void rcu_set_gpwrap_lag(unsigned long lag_gps) 785 { 786 unsigned long lag_seq_count; 787 788 lag_seq_count = (lag_gps == 0) 789 ? ULONG_MAX / 4 790 : lag_gps << RCU_SEQ_CTR_SHIFT; 791 WRITE_ONCE(seq_gpwrap_lag, lag_seq_count); 792 } 793 EXPORT_SYMBOL_GPL(rcu_set_gpwrap_lag); 794 795 /* 796 * When trying to report a quiescent state on behalf of some other CPU, 797 * it is our responsibility to check for and handle potential overflow 798 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters. 799 * After all, the CPU might be in deep idle state, and thus executing no 800 * code whatsoever. 
801 */ 802 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) 803 { 804 raw_lockdep_assert_held_rcu_node(rnp); 805 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + seq_gpwrap_lag, 806 rnp->gp_seq)) { 807 WRITE_ONCE(rdp->gpwrap, true); 808 WRITE_ONCE(rdp->gpwrap_count, READ_ONCE(rdp->gpwrap_count) + 1); 809 } 810 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) 811 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; 812 } 813 814 /* 815 * Snapshot the specified CPU's RCU_WATCHING counter so that we can later 816 * credit them with an implicit quiescent state. Return 1 if this CPU 817 * is in dynticks idle mode, which is an extended quiescent state. 818 */ 819 static int rcu_watching_snap_save(struct rcu_data *rdp) 820 { 821 /* 822 * Full ordering between remote CPU's post idle accesses and updater's 823 * accesses prior to current GP (and also the started GP sequence number) 824 * is enforced by rcu_seq_start() implicit barrier and even further by 825 * smp_mb__after_unlock_lock() barriers chained all the way throughout the 826 * rnp locking tree since rcu_gp_init() and up to the current leaf rnp 827 * locking. 828 * 829 * Ordering between remote CPU's pre idle accesses and post grace period 830 * updater's accesses is enforced by the below acquire semantic. 831 */ 832 rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu); 833 if (rcu_watching_snap_in_eqs(rdp->watching_snap)) { 834 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 835 rcu_gpnum_ovf(rdp->mynode, rdp); 836 return 1; 837 } 838 return 0; 839 } 840 841 #ifndef arch_irq_stat_cpu 842 #define arch_irq_stat_cpu(cpu) 0 843 #endif 844 845 /* 846 * Returns positive if the specified CPU has passed through a quiescent state 847 * by virtue of being in or having passed through an dynticks idle state since 848 * the last call to rcu_watching_snap_save() for this same CPU, or by 849 * virtue of having been offline. 850 * 851 * Returns negative if the specified CPU needs a force resched. 852 * 853 * Returns zero otherwise. 854 */ 855 static int rcu_watching_snap_recheck(struct rcu_data *rdp) 856 { 857 unsigned long jtsq; 858 int ret = 0; 859 struct rcu_node *rnp = rdp->mynode; 860 861 /* 862 * If the CPU passed through or entered a dynticks idle phase with 863 * no active irq/NMI handlers, then we can safely pretend that the CPU 864 * already acknowledged the request to pass through a quiescent 865 * state. Either way, that CPU cannot possibly be in an RCU 866 * read-side critical section that started before the beginning 867 * of the current RCU grace period. 868 */ 869 if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) { 870 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 871 rcu_gpnum_ovf(rnp, rdp); 872 return 1; 873 } 874 875 /* 876 * Complain if a CPU that is considered to be offline from RCU's 877 * perspective has not yet reported a quiescent state. After all, 878 * the offline CPU should have reported a quiescent state during 879 * the CPU-offline process, or, failing that, by rcu_gp_init() 880 * if it ran concurrently with either the CPU going offline or the 881 * last task on a leaf rcu_node structure exiting its RCU read-side 882 * critical section while all CPUs corresponding to that structure 883 * are offline. This added warning detects bugs in any of these 884 * code paths. 
885 * 886 * The rcu_node structure's ->lock is held here, which excludes 887 * the relevant portions the CPU-hotplug code, the grace-period 888 * initialization code, and the rcu_read_unlock() code paths. 889 * 890 * For more detail, please refer to the "Hotplug CPU" section 891 * of RCU's Requirements documentation. 892 */ 893 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) { 894 struct rcu_node *rnp1; 895 896 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", 897 __func__, rnp->grplo, rnp->grphi, rnp->level, 898 (long)rnp->gp_seq, (long)rnp->completedqs); 899 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) 900 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n", 901 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask); 902 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n", 903 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)], 904 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state, 905 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state); 906 return 1; /* Break things loose after complaining. */ 907 } 908 909 /* 910 * A CPU running for an extended time within the kernel can 911 * delay RCU grace periods: (1) At age jiffies_to_sched_qs, 912 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set 913 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the 914 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs 915 * variable are safe because the assignments are repeated if this 916 * CPU failed to pass through a quiescent state. This code 917 * also checks .jiffies_resched in case jiffies_to_sched_qs 918 * is set way high. 919 */ 920 jtsq = READ_ONCE(jiffies_to_sched_qs); 921 if (!READ_ONCE(rdp->rcu_need_heavy_qs) && 922 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || 923 time_after(jiffies, rcu_state.jiffies_resched) || 924 rcu_state.cbovld)) { 925 WRITE_ONCE(rdp->rcu_need_heavy_qs, true); 926 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ 927 smp_store_release(&rdp->rcu_urgent_qs, true); 928 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { 929 WRITE_ONCE(rdp->rcu_urgent_qs, true); 930 } 931 932 /* 933 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! 934 * The above code handles this, but only for straight cond_resched(). 935 * And some in-kernel loops check need_resched() before calling 936 * cond_resched(), which defeats the above code for CPUs that are 937 * running in-kernel with scheduling-clock interrupts disabled. 938 * So hit them over the head with the resched_cpu() hammer! 939 */ 940 if (tick_nohz_full_cpu(rdp->cpu) && 941 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || 942 rcu_state.cbovld)) { 943 WRITE_ONCE(rdp->rcu_urgent_qs, true); 944 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 945 ret = -1; 946 } 947 948 /* 949 * If more than halfway to RCU CPU stall-warning time, invoke 950 * resched_cpu() more frequently to try to loosen things up a bit. 951 * Also check to see if the CPU is getting hammered with interrupts, 952 * but only once per grace period, just to keep the IPIs down to 953 * a dull roar. 
954 */ 955 if (time_after(jiffies, rcu_state.jiffies_resched)) { 956 if (time_after(jiffies, 957 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { 958 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 959 ret = -1; 960 } 961 if (IS_ENABLED(CONFIG_IRQ_WORK) && 962 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && 963 (rnp->ffmask & rdp->grpmask)) { 964 rdp->rcu_iw_pending = true; 965 rdp->rcu_iw_gp_seq = rnp->gp_seq; 966 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); 967 } 968 969 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) { 970 int cpu = rdp->cpu; 971 struct rcu_snap_record *rsrp; 972 struct kernel_cpustat *kcsp; 973 974 kcsp = &kcpustat_cpu(cpu); 975 976 rsrp = &rdp->snap_record; 977 rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu); 978 rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu); 979 rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu); 980 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu); 981 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu); 982 rsrp->nr_csw = nr_context_switches_cpu(cpu); 983 rsrp->jiffies = jiffies; 984 rsrp->gp_seq = rdp->gp_seq; 985 } 986 } 987 988 return ret; 989 } 990 991 /* Trace-event wrapper function for trace_rcu_future_grace_period. */ 992 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, 993 unsigned long gp_seq_req, const char *s) 994 { 995 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 996 gp_seq_req, rnp->level, 997 rnp->grplo, rnp->grphi, s); 998 } 999 1000 /* 1001 * rcu_start_this_gp - Request the start of a particular grace period 1002 * @rnp_start: The leaf node of the CPU from which to start. 1003 * @rdp: The rcu_data corresponding to the CPU from which to start. 1004 * @gp_seq_req: The gp_seq of the grace period to start. 1005 * 1006 * Start the specified grace period, as needed to handle newly arrived 1007 * callbacks. The required future grace periods are recorded in each 1008 * rcu_node structure's ->gp_seq_needed field. Returns true if there 1009 * is reason to awaken the grace-period kthread. 1010 * 1011 * The caller must hold the specified rcu_node structure's ->lock, which 1012 * is why the caller is responsible for waking the grace-period kthread. 1013 * 1014 * Returns true if the GP thread needs to be awakened else false. 1015 */ 1016 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, 1017 unsigned long gp_seq_req) 1018 { 1019 bool ret = false; 1020 struct rcu_node *rnp; 1021 1022 /* 1023 * Use funnel locking to either acquire the root rcu_node 1024 * structure's lock or bail out if the need for this grace period 1025 * has already been recorded -- or if that grace period has in 1026 * fact already started. If there is already a grace period in 1027 * progress in a non-leaf node, no recording is needed because the 1028 * end of the grace period will scan the leaf rcu_node structures. 1029 * Note that rnp_start->lock must not be released. 
1030 */ 1031 raw_lockdep_assert_held_rcu_node(rnp_start); 1032 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); 1033 for (rnp = rnp_start; 1; rnp = rnp->parent) { 1034 if (rnp != rnp_start) 1035 raw_spin_lock_rcu_node(rnp); 1036 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || 1037 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || 1038 (rnp != rnp_start && 1039 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { 1040 trace_rcu_this_gp(rnp, rdp, gp_seq_req, 1041 TPS("Prestarted")); 1042 goto unlock_out; 1043 } 1044 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); 1045 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { 1046 /* 1047 * We just marked the leaf or internal node, and a 1048 * grace period is in progress, which means that 1049 * rcu_gp_cleanup() will see the marking. Bail to 1050 * reduce contention. 1051 */ 1052 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, 1053 TPS("Startedleaf")); 1054 goto unlock_out; 1055 } 1056 if (rnp != rnp_start && rnp->parent != NULL) 1057 raw_spin_unlock_rcu_node(rnp); 1058 if (!rnp->parent) 1059 break; /* At root, and perhaps also leaf. */ 1060 } 1061 1062 /* If GP already in progress, just leave, otherwise start one. */ 1063 if (rcu_gp_in_progress()) { 1064 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); 1065 goto unlock_out; 1066 } 1067 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); 1068 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT); 1069 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 1070 if (!READ_ONCE(rcu_state.gp_kthread)) { 1071 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); 1072 goto unlock_out; 1073 } 1074 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq")); 1075 ret = true; /* Caller must wake GP kthread. */ 1076 unlock_out: 1077 /* Push furthest requested GP to leaf node and rcu_data structure. */ 1078 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { 1079 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed); 1080 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); 1081 } 1082 if (rnp != rnp_start) 1083 raw_spin_unlock_rcu_node(rnp); 1084 return ret; 1085 } 1086 1087 /* 1088 * Clean up any old requests for the just-ended grace period. Also return 1089 * whether any additional grace periods have been requested. 1090 */ 1091 static bool rcu_future_gp_cleanup(struct rcu_node *rnp) 1092 { 1093 bool needmore; 1094 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1095 1096 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); 1097 if (!needmore) 1098 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ 1099 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, 1100 needmore ? TPS("CleanupMore") : TPS("Cleanup")); 1101 return needmore; 1102 } 1103 1104 /* 1105 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an 1106 * interrupt or softirq handler, in which case we just might immediately 1107 * sleep upon return, resulting in a grace-period hang), and don't bother 1108 * awakening when there is nothing for the grace-period kthread to do 1109 * (as in several CPUs raced to awaken, we lost), and finally don't try 1110 * to awaken a kthread that has not yet been created. If all those checks 1111 * are passed, track some debug information and awaken. 1112 * 1113 * So why do the self-wakeup when in an interrupt or softirq handler 1114 * in the grace-period kthread's context? 
Because the kthread might have 1115 * been interrupted just as it was going to sleep, and just after the final 1116 * pre-sleep check of the awaken condition. In this case, a wakeup really 1117 * is required, and is therefore supplied. 1118 */ 1119 static void rcu_gp_kthread_wake(void) 1120 { 1121 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread); 1122 1123 if ((current == t && !in_hardirq() && !in_serving_softirq()) || 1124 !READ_ONCE(rcu_state.gp_flags) || !t) 1125 return; 1126 WRITE_ONCE(rcu_state.gp_wake_time, jiffies); 1127 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); 1128 swake_up_one(&rcu_state.gp_wq); 1129 } 1130 1131 /* 1132 * If there is room, assign a ->gp_seq number to any callbacks on this 1133 * CPU that have not already been assigned. Also accelerate any callbacks 1134 * that were previously assigned a ->gp_seq number that has since proven 1135 * to be too conservative, which can happen if callbacks get assigned a 1136 * ->gp_seq number while RCU is idle, but with reference to a non-root 1137 * rcu_node structure. This function is idempotent, so it does not hurt 1138 * to call it repeatedly. Returns an flag saying that we should awaken 1139 * the RCU grace-period kthread. 1140 * 1141 * The caller must hold rnp->lock with interrupts disabled. 1142 */ 1143 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1144 { 1145 unsigned long gp_seq_req; 1146 bool ret = false; 1147 1148 rcu_lockdep_assert_cblist_protected(rdp); 1149 raw_lockdep_assert_held_rcu_node(rnp); 1150 1151 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1152 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1153 return false; 1154 1155 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); 1156 1157 /* 1158 * Callbacks are often registered with incomplete grace-period 1159 * information. Something about the fact that getting exact 1160 * information requires acquiring a global lock... RCU therefore 1161 * makes a conservative estimate of the grace period number at which 1162 * a given callback will become ready to invoke. The following 1163 * code checks this estimate and improves it when possible, thus 1164 * accelerating callback invocation to an earlier grace-period 1165 * number. 1166 */ 1167 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq); 1168 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) 1169 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); 1170 1171 /* Trace depending on how much we were able to accelerate. */ 1172 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) 1173 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB")); 1174 else 1175 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB")); 1176 1177 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); 1178 1179 return ret; 1180 } 1181 1182 /* 1183 * Similar to rcu_accelerate_cbs(), but does not require that the leaf 1184 * rcu_node structure's ->lock be held. It consults the cached value 1185 * of ->gp_seq_needed in the rcu_data structure, and if that indicates 1186 * that a new grace-period request be made, invokes rcu_accelerate_cbs() 1187 * while holding the leaf rcu_node structure's ->lock. 
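 *
 * For orientation, the ->cblist manipulated here is a segmented list:
 *
 *	[RCU_DONE_TAIL] -> [RCU_WAIT_TAIL] -> [RCU_NEXT_READY_TAIL] -> [RCU_NEXT_TAIL]
 *
 * Callbacks in RCU_DONE_TAIL are ready to invoke, those in the two middle
 * segments are tagged with the ->gp_seq they are waiting for, and those in
 * RCU_NEXT_TAIL are not yet associated with any grace period.  "Accelerating"
 * means tagging not-yet-associated callbacks with the earliest safe ->gp_seq,
 * here rcu_seq_snap(&rcu_state.gp_seq).  (This is a summary of rcu_segcblist
 * behavior, included for reference.)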
1188 */ 1189 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, 1190 struct rcu_data *rdp) 1191 { 1192 unsigned long c; 1193 bool needwake; 1194 1195 rcu_lockdep_assert_cblist_protected(rdp); 1196 c = rcu_seq_snap(&rcu_state.gp_seq); 1197 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { 1198 /* Old request still live, so mark recent callbacks. */ 1199 (void)rcu_segcblist_accelerate(&rdp->cblist, c); 1200 return; 1201 } 1202 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1203 needwake = rcu_accelerate_cbs(rnp, rdp); 1204 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1205 if (needwake) 1206 rcu_gp_kthread_wake(); 1207 } 1208 1209 /* 1210 * Move any callbacks whose grace period has completed to the 1211 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1212 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL 1213 * sublist. This function is idempotent, so it does not hurt to 1214 * invoke it repeatedly. As long as it is not invoked -too- often... 1215 * Returns true if the RCU grace-period kthread needs to be awakened. 1216 * 1217 * The caller must hold rnp->lock with interrupts disabled. 1218 */ 1219 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1220 { 1221 rcu_lockdep_assert_cblist_protected(rdp); 1222 raw_lockdep_assert_held_rcu_node(rnp); 1223 1224 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1225 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1226 return false; 1227 1228 /* 1229 * Find all callbacks whose ->gp_seq numbers indicate that they 1230 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 1231 */ 1232 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); 1233 1234 /* Classify any remaining callbacks. */ 1235 return rcu_accelerate_cbs(rnp, rdp); 1236 } 1237 1238 /* 1239 * Move and classify callbacks, but only if doing so won't require 1240 * that the RCU grace-period kthread be awakened. 1241 */ 1242 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, 1243 struct rcu_data *rdp) 1244 { 1245 rcu_lockdep_assert_cblist_protected(rdp); 1246 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp)) 1247 return; 1248 // The grace period cannot end while we hold the rcu_node lock. 1249 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) 1250 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); 1251 raw_spin_unlock_rcu_node(rnp); 1252 } 1253 1254 /* 1255 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a 1256 * quiescent state. This is intended to be invoked when the CPU notices 1257 * a new grace period. 1258 */ 1259 static void rcu_strict_gp_check_qs(void) 1260 { 1261 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) { 1262 rcu_read_lock(); 1263 rcu_read_unlock(); 1264 } 1265 } 1266 1267 /* 1268 * Update CPU-local rcu_data state to record the beginnings and ends of 1269 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1270 * structure corresponding to the current CPU, and must have irqs disabled. 1271 * Returns true if the grace-period kthread needs to be awakened. 1272 */ 1273 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) 1274 { 1275 bool ret = false; 1276 bool need_qs; 1277 const bool offloaded = rcu_rdp_is_offloaded(rdp); 1278 1279 raw_lockdep_assert_held_rcu_node(rnp); 1280 1281 if (rdp->gp_seq == rnp->gp_seq) 1282 return false; /* Nothing to do. */ 1283 1284 /* Handle the ends of any preceding grace periods first. 
*/ 1285 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || 1286 unlikely(rdp->gpwrap)) { 1287 if (!offloaded) 1288 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ 1289 rdp->core_needs_qs = false; 1290 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); 1291 } else { 1292 if (!offloaded) 1293 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ 1294 if (rdp->core_needs_qs) 1295 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); 1296 } 1297 1298 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ 1299 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || 1300 unlikely(rdp->gpwrap)) { 1301 /* 1302 * If the current grace period is waiting for this CPU, 1303 * set up to detect a quiescent state, otherwise don't 1304 * go looking for one. 1305 */ 1306 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); 1307 need_qs = !!(rnp->qsmask & rdp->grpmask); 1308 rdp->cpu_no_qs.b.norm = need_qs; 1309 rdp->core_needs_qs = need_qs; 1310 zero_cpu_stall_ticks(rdp); 1311 } 1312 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ 1313 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) 1314 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); 1315 if (IS_ENABLED(CONFIG_PROVE_RCU) && rdp->gpwrap) 1316 WRITE_ONCE(rdp->last_sched_clock, jiffies); 1317 WRITE_ONCE(rdp->gpwrap, false); 1318 rcu_gpnum_ovf(rnp, rdp); 1319 return ret; 1320 } 1321 1322 static void note_gp_changes(struct rcu_data *rdp) 1323 { 1324 unsigned long flags; 1325 bool needwake; 1326 struct rcu_node *rnp; 1327 1328 local_irq_save(flags); 1329 rnp = rdp->mynode; 1330 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && 1331 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1332 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ 1333 local_irq_restore(flags); 1334 return; 1335 } 1336 needwake = __note_gp_changes(rnp, rdp); 1337 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1338 rcu_strict_gp_check_qs(); 1339 if (needwake) 1340 rcu_gp_kthread_wake(); 1341 } 1342 1343 static atomic_t *rcu_gp_slow_suppress; 1344 1345 /* Register a counter to suppress debugging grace-period delays. */ 1346 void rcu_gp_slow_register(atomic_t *rgssp) 1347 { 1348 WARN_ON_ONCE(rcu_gp_slow_suppress); 1349 1350 WRITE_ONCE(rcu_gp_slow_suppress, rgssp); 1351 } 1352 EXPORT_SYMBOL_GPL(rcu_gp_slow_register); 1353 1354 /* Unregister a counter, with NULL for not caring which. */ 1355 void rcu_gp_slow_unregister(atomic_t *rgssp) 1356 { 1357 WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL); 1358 1359 WRITE_ONCE(rcu_gp_slow_suppress, NULL); 1360 } 1361 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister); 1362 1363 static bool rcu_gp_slow_is_suppressed(void) 1364 { 1365 atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress); 1366 1367 return rgssp && atomic_read(rgssp); 1368 } 1369 1370 static void rcu_gp_slow(int delay) 1371 { 1372 if (!rcu_gp_slow_is_suppressed() && delay > 0 && 1373 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1374 schedule_timeout_idle(delay); 1375 } 1376 1377 static unsigned long sleep_duration; 1378 1379 /* Allow rcutorture to stall the grace-period kthread. */ 1380 void rcu_gp_set_torture_wait(int duration) 1381 { 1382 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0) 1383 WRITE_ONCE(sleep_duration, duration); 1384 } 1385 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait); 1386 1387 /* Actually implement the aforementioned wait. 
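 *
 * (Worked example for rcu_gp_slow() just above: with the PER_RCU_NODE_PERIOD
 * normalization, a configured delay of D jiffies is taken only once every
 * rcu_num_nodes * 3 * D grace periods, so the average added latency per grace
 * period is D / (rcu_num_nodes * 3 * D) = 1 / (3 * rcu_num_nodes) jiffies,
 * independent of D.  For instance, 16 rcu_node structures and gp_init_delay = 4
 * means one 4-jiffy sleep every 192 grace periods.)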
*/ 1388 static void rcu_gp_torture_wait(void) 1389 { 1390 unsigned long duration; 1391 1392 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST)) 1393 return; 1394 duration = xchg(&sleep_duration, 0UL); 1395 if (duration > 0) { 1396 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration); 1397 schedule_timeout_idle(duration); 1398 pr_alert("%s: Wait complete\n", __func__); 1399 } 1400 } 1401 1402 /* 1403 * Handler for on_each_cpu() to invoke the target CPU's RCU core 1404 * processing. 1405 */ 1406 static void rcu_strict_gp_boundary(void *unused) 1407 { 1408 invoke_rcu_core(); 1409 } 1410 1411 // Make the polled API aware of the beginning of a grace period. 1412 static void rcu_poll_gp_seq_start(unsigned long *snap) 1413 { 1414 struct rcu_node *rnp = rcu_get_root(); 1415 1416 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) 1417 raw_lockdep_assert_held_rcu_node(rnp); 1418 1419 // If RCU was idle, note beginning of GP. 1420 if (!rcu_seq_state(rcu_state.gp_seq_polled)) 1421 rcu_seq_start(&rcu_state.gp_seq_polled); 1422 1423 // Either way, record current state. 1424 *snap = rcu_state.gp_seq_polled; 1425 } 1426 1427 // Make the polled API aware of the end of a grace period. 1428 static void rcu_poll_gp_seq_end(unsigned long *snap) 1429 { 1430 struct rcu_node *rnp = rcu_get_root(); 1431 1432 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) 1433 raw_lockdep_assert_held_rcu_node(rnp); 1434 1435 // If the previously noted GP is still in effect, record the 1436 // end of that GP. Either way, zero counter to avoid counter-wrap 1437 // problems. 1438 if (*snap && *snap == rcu_state.gp_seq_polled) { 1439 rcu_seq_end(&rcu_state.gp_seq_polled); 1440 rcu_state.gp_seq_polled_snap = 0; 1441 rcu_state.gp_seq_polled_exp_snap = 0; 1442 } else { 1443 *snap = 0; 1444 } 1445 } 1446 1447 // Make the polled API aware of the beginning of a grace period, but 1448 // where caller does not hold the root rcu_node structure's lock. 1449 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap) 1450 { 1451 unsigned long flags; 1452 struct rcu_node *rnp = rcu_get_root(); 1453 1454 if (rcu_init_invoked()) { 1455 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) 1456 lockdep_assert_irqs_enabled(); 1457 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1458 } 1459 rcu_poll_gp_seq_start(snap); 1460 if (rcu_init_invoked()) 1461 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1462 } 1463 1464 // Make the polled API aware of the end of a grace period, but where 1465 // caller does not hold the root rcu_node structure's lock. 1466 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap) 1467 { 1468 unsigned long flags; 1469 struct rcu_node *rnp = rcu_get_root(); 1470 1471 if (rcu_init_invoked()) { 1472 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) 1473 lockdep_assert_irqs_enabled(); 1474 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1475 } 1476 rcu_poll_gp_seq_end(snap); 1477 if (rcu_init_invoked()) 1478 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1479 } 1480 1481 /* 1482 * There is a single llist, which is used for handling 1483 * synchronize_rcu() users' enqueued rcu_synchronize nodes. 1484 * Within this llist, there are two tail pointers: 1485 * 1486 * wait tail: Tracks the set of nodes, which need to 1487 * wait for the current GP to complete. 1488 * done tail: Tracks the set of nodes, for which grace 1489 * period has elapsed. These nodes processing 1490 * will be done as part of the cleanup work 1491 * execution by a kworker. 
1492 * 1493 * At every grace period init, a new wait node is added 1494 * to the llist. This wait node is used as wait tail 1495 * for this new grace period. Given that there are a fixed 1496 * number of wait nodes, if all wait nodes are in use 1497 * (which can happen when kworker callback processing 1498 * is delayed) and additional grace period is requested. 1499 * This means, a system is slow in processing callbacks. 1500 * 1501 * TODO: If a slow processing is detected, a first node 1502 * in the llist should be used as a wait-tail for this 1503 * grace period, therefore users which should wait due 1504 * to a slow process are handled by _this_ grace period 1505 * and not next. 1506 * 1507 * Below is an illustration of how the done and wait 1508 * tail pointers move from one set of rcu_synchronize nodes 1509 * to the other, as grace periods start and finish and 1510 * nodes are processed by kworker. 1511 * 1512 * 1513 * a. Initial llist callbacks list: 1514 * 1515 * +----------+ +--------+ +-------+ 1516 * | | | | | | 1517 * | head |---------> | cb2 |--------->| cb1 | 1518 * | | | | | | 1519 * +----------+ +--------+ +-------+ 1520 * 1521 * 1522 * 1523 * b. New GP1 Start: 1524 * 1525 * WAIT TAIL 1526 * | 1527 * | 1528 * v 1529 * +----------+ +--------+ +--------+ +-------+ 1530 * | | | | | | | | 1531 * | head ------> wait |------> cb2 |------> | cb1 | 1532 * | | | head1 | | | | | 1533 * +----------+ +--------+ +--------+ +-------+ 1534 * 1535 * 1536 * 1537 * c. GP completion: 1538 * 1539 * WAIT_TAIL == DONE_TAIL 1540 * 1541 * DONE TAIL 1542 * | 1543 * | 1544 * v 1545 * +----------+ +--------+ +--------+ +-------+ 1546 * | | | | | | | | 1547 * | head ------> wait |------> cb2 |------> | cb1 | 1548 * | | | head1 | | | | | 1549 * +----------+ +--------+ +--------+ +-------+ 1550 * 1551 * 1552 * 1553 * d. New callbacks and GP2 start: 1554 * 1555 * WAIT TAIL DONE TAIL 1556 * | | 1557 * | | 1558 * v v 1559 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ 1560 * | | | | | | | | | | | | | | 1561 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 | 1562 * | | | head2| | | | | |head1| | | | | 1563 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ 1564 * 1565 * 1566 * 1567 * e. GP2 completion: 1568 * 1569 * WAIT_TAIL == DONE_TAIL 1570 * DONE TAIL 1571 * | 1572 * | 1573 * v 1574 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ 1575 * | | | | | | | | | | | | | | 1576 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 | 1577 * | | | head2| | | | | |head1| | | | | 1578 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ 1579 * 1580 * 1581 * While the llist state transitions from d to e, a kworker 1582 * can start executing rcu_sr_normal_gp_cleanup_work() and 1583 * can observe either the old done tail (@c) or the new 1584 * done tail (@e). So, done tail updates and reads need 1585 * to use the rel-acq semantics. If the concurrent kworker 1586 * observes the old done tail, the newly queued work 1587 * execution will process the updated done tail. If the 1588 * concurrent kworker observes the new done tail, then 1589 * the newly queued work will skip processing the done 1590 * tail, as workqueue semantics guarantees that the new 1591 * work is executed only after the previous one completes. 1592 * 1593 * f. 
kworker callbacks processing complete: 1594 * 1595 * 1596 * DONE TAIL 1597 * | 1598 * | 1599 * v 1600 * +----------+ +--------+ 1601 * | | | | 1602 * | head ------> wait | 1603 * | | | head2 | 1604 * +----------+ +--------+ 1605 * 1606 */ 1607 static bool rcu_sr_is_wait_head(struct llist_node *node) 1608 { 1609 return &(rcu_state.srs_wait_nodes)[0].node <= node && 1610 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node; 1611 } 1612 1613 static struct llist_node *rcu_sr_get_wait_head(void) 1614 { 1615 struct sr_wait_node *sr_wn; 1616 int i; 1617 1618 for (i = 0; i < SR_NORMAL_GP_WAIT_HEAD_MAX; i++) { 1619 sr_wn = &(rcu_state.srs_wait_nodes)[i]; 1620 1621 if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1)) 1622 return &sr_wn->node; 1623 } 1624 1625 return NULL; 1626 } 1627 1628 static void rcu_sr_put_wait_head(struct llist_node *node) 1629 { 1630 struct sr_wait_node *sr_wn = container_of(node, struct sr_wait_node, node); 1631 1632 atomic_set_release(&sr_wn->inuse, 0); 1633 } 1634 1635 /* Enable rcu_normal_wake_from_gp automatically on small systems. */ 1636 #define WAKE_FROM_GP_CPU_THRESHOLD 16 1637 1638 static int rcu_normal_wake_from_gp = -1; 1639 module_param(rcu_normal_wake_from_gp, int, 0644); 1640 static struct workqueue_struct *sync_wq; 1641 1642 static void rcu_sr_normal_complete(struct llist_node *node) 1643 { 1644 struct rcu_synchronize *rs = container_of( 1645 (struct rcu_head *) node, struct rcu_synchronize, head); 1646 1647 WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && 1648 !poll_state_synchronize_rcu_full(&rs->oldstate), 1649 "A full grace period is not passed yet!\n"); 1650 1651 /* Finally. */ 1652 complete(&rs->completion); 1653 } 1654 1655 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work) 1656 { 1657 struct llist_node *done, *rcu, *next, *head; 1658 1659 /* 1660 * This work execution can potentially execute 1661 * while a new done tail is being updated by 1662 * grace period kthread in rcu_sr_normal_gp_cleanup(). 1663 * So, read and updates of done tail need to 1664 * follow acq-rel semantics. 1665 * 1666 * Given that wq semantics guarantees that a single work 1667 * cannot execute concurrently by multiple kworkers, 1668 * the done tail list manipulations are protected here. 1669 */ 1670 done = smp_load_acquire(&rcu_state.srs_done_tail); 1671 if (WARN_ON_ONCE(!done)) 1672 return; 1673 1674 WARN_ON_ONCE(!rcu_sr_is_wait_head(done)); 1675 head = done->next; 1676 done->next = NULL; 1677 1678 /* 1679 * The dummy node, which is pointed to by the 1680 * done tail which is acq-read above is not removed 1681 * here. This allows lockless additions of new 1682 * rcu_synchronize nodes in rcu_sr_normal_add_req(), 1683 * while the cleanup work executes. The dummy 1684 * nodes is removed, in next round of cleanup 1685 * work execution. 1686 */ 1687 llist_for_each_safe(rcu, next, head) { 1688 if (!rcu_sr_is_wait_head(rcu)) { 1689 rcu_sr_normal_complete(rcu); 1690 continue; 1691 } 1692 1693 rcu_sr_put_wait_head(rcu); 1694 } 1695 1696 /* Order list manipulations with atomic access. */ 1697 atomic_dec_return_release(&rcu_state.srs_cleanups_pending); 1698 } 1699 1700 /* 1701 * Helper function for rcu_gp_cleanup(). 
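 *
 * The hand-off to the cleanup kworker relies on release-acquire
 * ordering of ->srs_done_tail. A minimal sketch of the pairing used
 * by the two functions involved:
 *
 *    // GP kthread, rcu_sr_normal_gp_cleanup():
 *    smp_store_release(&rcu_state.srs_done_tail, wait_tail);
 *
 *    // kworker, rcu_sr_normal_gp_cleanup_work():
 *    done = smp_load_acquire(&rcu_state.srs_done_tail);
 *
 * so a kworker that observes the new done tail also observes all of
 * the list updates performed before it was published.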
1702 */
1703 static void rcu_sr_normal_gp_cleanup(void)
1704 {
1705 	struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
1706 	int done = 0;
1707 
1708 	wait_tail = rcu_state.srs_wait_tail;
1709 	if (wait_tail == NULL)
1710 		return;
1711 
1712 	rcu_state.srs_wait_tail = NULL;
1713 	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1714 	WARN_ON_ONCE(!rcu_sr_is_wait_head(wait_tail));
1715 
1716 	/*
1717 	 * Process cases (a) and (d); see the illustration above.
1718 	 */
1719 	llist_for_each_safe(rcu, next, wait_tail->next) {
1720 		if (rcu_sr_is_wait_head(rcu))
1721 			break;
1722 
1723 		rcu_sr_normal_complete(rcu);
1724 		// This might be the last node, so keep wait_tail->next current.
1725 		wait_tail->next = next;
1726 
1727 		if (++done == SR_MAX_USERS_WAKE_FROM_GP)
1728 			break;
1729 	}
1730 
1731 	/*
1732 	 * Fast path: no more users to process except putting the second-to-last
1733 	 * wait head, and then only if there are no workers in flight. If there
1734 	 * are in-flight workers, they will remove the last wait head instead.
1735 	 *
1736 	 * Note that the ACQUIRE orders atomic access with list manipulation.
1737 	 */
1738 	if (wait_tail->next && wait_tail->next->next == NULL &&
1739 	    rcu_sr_is_wait_head(wait_tail->next) &&
1740 	    !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
1741 		rcu_sr_put_wait_head(wait_tail->next);
1742 		wait_tail->next = NULL;
1743 	}
1744 
1745 	/* Concurrent sr_normal_gp_cleanup work might observe this update. */
1746 	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
1747 	smp_store_release(&rcu_state.srs_done_tail, wait_tail);
1748 
1749 	/*
1750 	 * Schedule a work item to perform final processing of any remaining
1751 	 * users and to release the wait heads that were added by the
1752 	 * rcu_sr_normal_gp_init() call.
1753 	 */
1754 	if (wait_tail->next) {
1755 		atomic_inc(&rcu_state.srs_cleanups_pending);
1756 		if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
1757 			atomic_dec(&rcu_state.srs_cleanups_pending);
1758 	}
1759 }
1760 
1761 /*
1762  * Helper function for rcu_gp_init().
1763  */
1764 static bool rcu_sr_normal_gp_init(void)
1765 {
1766 	struct llist_node *first;
1767 	struct llist_node *wait_head;
1768 	bool start_new_poll = false;
1769 
1770 	first = READ_ONCE(rcu_state.srs_next.first);
1771 	if (!first || rcu_sr_is_wait_head(first))
1772 		return start_new_poll;
1773 
1774 	wait_head = rcu_sr_get_wait_head();
1775 	if (!wait_head) {
1776 		// Kick another GP to retry.
1777 		start_new_poll = true;
1778 		return start_new_poll;
1779 	}
1780 
1781 	/* Inject a wait dummy node. */
1782 	llist_add(wait_head, &rcu_state.srs_next);
1783 
1784 	/*
1785 	 * The wait list of rcu_synchronize nodes should be empty at this
1786 	 * point, because the GP kthread's rcu_gp_init() -> rcu_gp_cleanup()
1787 	 * sequence rolls it over. If it is not empty, that is a bug: warn.
1788 	 */
1789 	WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL);
1790 	rcu_state.srs_wait_tail = wait_head;
1791 	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1792 
1793 	return start_new_poll;
1794 }
1795 
1796 static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
1797 {
1798 	llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
1799 }
1800 
1801 /*
1802  * Initialize a new grace period. Return false if no grace period required.
1803  */
1804 static noinline_for_stack bool rcu_gp_init(void)
1805 {
1806 	unsigned long flags;
1807 	unsigned long oldmask;
1808 	unsigned long mask;
1809 	struct rcu_data *rdp;
1810 	struct rcu_node *rnp = rcu_get_root();
1811 	bool start_new_poll;
1812 	unsigned long old_gp_seq;
1813 
1814 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1815 	raw_spin_lock_irq_rcu_node(rnp);
1816 	if (!rcu_state.gp_flags) {
1817 		/* Spurious wakeup, tell caller to go back to sleep. */
1818 		raw_spin_unlock_irq_rcu_node(rnp);
1819 		return false;
1820 	}
1821 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1822 
1823 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1824 		/*
1825 		 * Grace period already in progress, don't start another.
1826 		 * Not supposed to be able to happen.
1827 		 */
1828 		raw_spin_unlock_irq_rcu_node(rnp);
1829 		return false;
1830 	}
1831 
1832 	/* Advance to a new grace period and initialize state. */
1833 	record_gp_stall_check_time();
1834 	/*
1835 	 * A new wait segment must be started before gp_seq is advanced, so
1836 	 * that previous-GP waiters won't observe the new gp_seq.
1837 	 */
1838 	start_new_poll = rcu_sr_normal_gp_init();
1839 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1840 	old_gp_seq = rcu_state.gp_seq;
1841 	/*
1842 	 * Critical ordering: rcu_seq_start() must happen BEFORE the CPU hotplug
1843 	 * scan below. Otherwise we risk a race where a newly onlining CPU could
1844 	 * be missed by the current grace period, potentially leading to
1845 	 * use-after-free errors. For a detailed explanation of this race, see
1846 	 * Documentation/RCU/Design/Requirements/Requirements.rst in the
1847 	 * "Hotplug CPU" section.
1848 	 *
1849 	 * Also note that the root rnp's gp_seq is kept separate from, and lags,
1850 	 * the rcu_state's gp_seq, for a reason. See the Quick-Quiz on
1851 	 * Single-node systems for more details (in Data-Structures.rst).
1852 	 */
1853 	rcu_seq_start(&rcu_state.gp_seq);
1854 	/* Ensure that rcu_seq_done_exact() guardband doesn't give false positives. */
1855 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
1856 		     rcu_seq_done_exact(&old_gp_seq, rcu_seq_snap(&rcu_state.gp_seq)));
1857 
1858 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1859 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1860 	rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1861 	raw_spin_unlock_irq_rcu_node(rnp);
1862 
1863 	/*
1864 	 * "start_new_poll" is true only when this GP was unable to handle
1865 	 * anything even though there are outstanding users. That happens when
1866 	 * rcu_sr_normal_gp_init() could not insert a dummy separator into the
1867 	 * llist because no dummy nodes were left.
1868 	 *
1869 	 * The number of dummy nodes is fixed, so it is possible to run out of
1870 	 * them; if so, start a new poll request in order to retry. This is rare
1871 	 * and means that the system is slow in processing callbacks.
1872 	 */
1873 	if (start_new_poll)
1874 		(void) start_poll_synchronize_rcu();
1875 
1876 	/*
1877 	 * Apply per-leaf buffered online and offline operations to
1878 	 * the rcu_node tree. Note that this new grace period need not
1879 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1880 	 * offlining path, when combined with checks in this function,
1881 	 * will handle CPUs that are currently going offline or that will
1882 	 * go offline later. Please also refer to the "Hotplug CPU" section
1883 	 * of RCU's Requirements documentation.
1884 	 */
1885 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1886 	/* Exclude CPU hotplug operations.
*/ 1887 rcu_for_each_leaf_node(rnp) { 1888 local_irq_disable(); 1889 /* 1890 * Serialize with CPU offline. See Requirements.rst > Hotplug CPU > 1891 * Concurrent Quiescent State Reporting for Offline CPUs. 1892 */ 1893 arch_spin_lock(&rcu_state.ofl_lock); 1894 raw_spin_lock_rcu_node(rnp); 1895 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1896 !rnp->wait_blkd_tasks) { 1897 /* Nothing to do on this leaf rcu_node structure. */ 1898 raw_spin_unlock_rcu_node(rnp); 1899 arch_spin_unlock(&rcu_state.ofl_lock); 1900 local_irq_enable(); 1901 continue; 1902 } 1903 1904 /* Record old state, apply changes to ->qsmaskinit field. */ 1905 oldmask = rnp->qsmaskinit; 1906 rnp->qsmaskinit = rnp->qsmaskinitnext; 1907 1908 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1909 if (!oldmask != !rnp->qsmaskinit) { 1910 if (!oldmask) { /* First online CPU for rcu_node. */ 1911 if (!rnp->wait_blkd_tasks) /* Ever offline? */ 1912 rcu_init_new_rnp(rnp); 1913 } else if (rcu_preempt_has_tasks(rnp)) { 1914 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1915 } else { /* Last offline CPU and can propagate. */ 1916 rcu_cleanup_dead_rnp(rnp); 1917 } 1918 } 1919 1920 /* 1921 * If all waited-on tasks from prior grace period are 1922 * done, and if all this rcu_node structure's CPUs are 1923 * still offline, propagate up the rcu_node tree and 1924 * clear ->wait_blkd_tasks. Otherwise, if one of this 1925 * rcu_node structure's CPUs has since come back online, 1926 * simply clear ->wait_blkd_tasks. 1927 */ 1928 if (rnp->wait_blkd_tasks && 1929 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1930 rnp->wait_blkd_tasks = false; 1931 if (!rnp->qsmaskinit) 1932 rcu_cleanup_dead_rnp(rnp); 1933 } 1934 1935 raw_spin_unlock_rcu_node(rnp); 1936 arch_spin_unlock(&rcu_state.ofl_lock); 1937 local_irq_enable(); 1938 } 1939 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ 1940 1941 /* 1942 * Set the quiescent-state-needed bits in all the rcu_node 1943 * structures for all currently online CPUs in breadth-first 1944 * order, starting from the root rcu_node structure, relying on the 1945 * layout of the tree within the rcu_state.node[] array. Note that 1946 * other CPUs will access only the leaves of the hierarchy, thus 1947 * seeing that no grace period is in progress, at least until the 1948 * corresponding leaf node has been initialized. 1949 * 1950 * The grace period cannot complete until the initialization 1951 * process finishes, because this kthread handles both. 1952 */ 1953 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT); 1954 rcu_for_each_node_breadth_first(rnp) { 1955 rcu_gp_slow(gp_init_delay); 1956 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1957 rdp = this_cpu_ptr(&rcu_data); 1958 rcu_preempt_check_blocked_tasks(rnp); 1959 rnp->qsmask = rnp->qsmaskinit; 1960 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1961 if (rnp == rdp->mynode) 1962 (void)__note_gp_changes(rnp, rdp); 1963 rcu_preempt_boost_start_gp(rnp); 1964 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1965 rnp->level, rnp->grplo, 1966 rnp->grphi, rnp->qsmask); 1967 /* 1968 * Quiescent states for tasks on any now-offline CPUs. Since we 1969 * released the ofl and rnp lock before this loop, CPUs might 1970 * have gone offline and we have to report QS on their behalf. 1971 * See Requirements.rst > Hotplug CPU > Concurrent QS Reporting. 
1972 */ 1973 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1974 rnp->rcu_gp_init_mask = mask; 1975 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1976 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1977 else 1978 raw_spin_unlock_irq_rcu_node(rnp); 1979 cond_resched_tasks_rcu_qs(); 1980 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1981 } 1982 1983 // If strict, make all CPUs aware of new grace period. 1984 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 1985 on_each_cpu(rcu_strict_gp_boundary, NULL, 0); 1986 1987 /* 1988 * Immediately report QS for the GP kthread's CPU. The GP kthread 1989 * cannot be in an RCU read-side critical section while running 1990 * the FQS scan. This eliminates the need for a second FQS wait 1991 * when all CPUs are idle. 1992 */ 1993 preempt_disable(); 1994 rcu_qs(); 1995 rcu_report_qs_rdp(this_cpu_ptr(&rcu_data)); 1996 preempt_enable(); 1997 1998 return true; 1999 } 2000 2001 /* 2002 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 2003 * time. 2004 */ 2005 static bool rcu_gp_fqs_check_wake(int *gfp) 2006 { 2007 struct rcu_node *rnp = rcu_get_root(); 2008 2009 // If under overload conditions, force an immediate FQS scan. 2010 if (*gfp & RCU_GP_FLAG_OVLD) 2011 return true; 2012 2013 // Someone like call_rcu() requested a force-quiescent-state scan. 2014 *gfp = READ_ONCE(rcu_state.gp_flags); 2015 if (*gfp & RCU_GP_FLAG_FQS) 2016 return true; 2017 2018 // The current grace period has completed. 2019 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 2020 return true; 2021 2022 return false; 2023 } 2024 2025 /* 2026 * Do one round of quiescent-state forcing. 2027 */ 2028 static void rcu_gp_fqs(bool first_time) 2029 { 2030 int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall); 2031 struct rcu_node *rnp = rcu_get_root(); 2032 2033 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2034 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1); 2035 2036 WARN_ON_ONCE(nr_fqs > 3); 2037 /* Only countdown nr_fqs for stall purposes if jiffies moves. */ 2038 if (nr_fqs) { 2039 if (nr_fqs == 1) { 2040 WRITE_ONCE(rcu_state.jiffies_stall, 2041 jiffies + rcu_jiffies_till_stall_check()); 2042 } 2043 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs); 2044 } 2045 2046 if (first_time) { 2047 /* Collect dyntick-idle snapshots. */ 2048 force_qs_rnp(rcu_watching_snap_save); 2049 } else { 2050 /* Handle dyntick-idle and offline CPUs. */ 2051 force_qs_rnp(rcu_watching_snap_recheck); 2052 } 2053 /* Clear flag to prevent immediate re-entry. */ 2054 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2055 raw_spin_lock_irq_rcu_node(rnp); 2056 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & ~RCU_GP_FLAG_FQS); 2057 raw_spin_unlock_irq_rcu_node(rnp); 2058 } 2059 } 2060 2061 /* 2062 * Loop doing repeated quiescent-state forcing until the grace period ends. 2063 */ 2064 static noinline_for_stack void rcu_gp_fqs_loop(void) 2065 { 2066 bool first_gp_fqs = true; 2067 int gf = 0; 2068 unsigned long j; 2069 int ret; 2070 struct rcu_node *rnp = rcu_get_root(); 2071 2072 j = READ_ONCE(jiffies_till_first_fqs); 2073 if (rcu_state.cbovld) 2074 gf = RCU_GP_FLAG_OVLD; 2075 ret = 0; 2076 for (;;) { 2077 if (rcu_state.cbovld) { 2078 j = (j + 2) / 3; 2079 if (j <= 0) 2080 j = 1; 2081 } 2082 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) { 2083 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j); 2084 /* 2085 * jiffies_force_qs before RCU_GP_WAIT_FQS state 2086 * update; required for stall checks. 
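 *
 * That is, the intent is the following store ordering (a sketch only;
 * the matching reads are in the stall-warning code):
 *
 *    WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
 *    smp_wmb();
 *    WRITE_ONCE(rcu_state.jiffies_kick_kthreads, jiffies + (j ? 3 * j : 2));
 *    ...
 *    WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
 *
 * so that code observing RCU_GP_WAIT_FQS does not act on a stale
 * ->jiffies_force_qs value from a previous wait.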
2087 */ 2088 smp_wmb(); 2089 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 2090 jiffies + (j ? 3 * j : 2)); 2091 } 2092 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2093 TPS("fqswait")); 2094 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS); 2095 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq, 2096 rcu_gp_fqs_check_wake(&gf), j); 2097 rcu_gp_torture_wait(); 2098 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS); 2099 /* Locking provides needed memory barriers. */ 2100 /* 2101 * Exit the loop if the root rcu_node structure indicates that the grace period 2102 * has ended, leave the loop. The rcu_preempt_blocked_readers_cgp(rnp) check 2103 * is required only for single-node rcu_node trees because readers blocking 2104 * the current grace period are queued only on leaf rcu_node structures. 2105 * For multi-node trees, checking the root node's ->qsmask suffices, because a 2106 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from 2107 * the corresponding leaf nodes have passed through their quiescent state. 2108 */ 2109 if (!READ_ONCE(rnp->qsmask) && 2110 !rcu_preempt_blocked_readers_cgp(rnp)) 2111 break; 2112 /* If time for quiescent-state forcing, do it. */ 2113 if (!time_after(rcu_state.jiffies_force_qs, jiffies) || 2114 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) { 2115 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2116 TPS("fqsstart")); 2117 rcu_gp_fqs(first_gp_fqs); 2118 gf = 0; 2119 if (first_gp_fqs) { 2120 first_gp_fqs = false; 2121 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0; 2122 } 2123 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2124 TPS("fqsend")); 2125 cond_resched_tasks_rcu_qs(); 2126 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2127 ret = 0; /* Force full wait till next FQS. */ 2128 j = READ_ONCE(jiffies_till_next_fqs); 2129 } else { 2130 /* Deal with stray signal. */ 2131 cond_resched_tasks_rcu_qs(); 2132 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2133 WARN_ON(signal_pending(current)); 2134 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2135 TPS("fqswaitsig")); 2136 ret = 1; /* Keep old FQS timing. */ 2137 j = jiffies; 2138 if (time_after(jiffies, rcu_state.jiffies_force_qs)) 2139 j = 1; 2140 else 2141 j = rcu_state.jiffies_force_qs - j; 2142 gf = 0; 2143 } 2144 } 2145 } 2146 2147 /* 2148 * Clean up after the old grace period. 2149 */ 2150 static noinline void rcu_gp_cleanup(void) 2151 { 2152 int cpu; 2153 bool needgp = false; 2154 unsigned long gp_duration; 2155 unsigned long new_gp_seq; 2156 bool offloaded; 2157 struct rcu_data *rdp; 2158 struct rcu_node *rnp = rcu_get_root(); 2159 struct swait_queue_head *sq; 2160 2161 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2162 raw_spin_lock_irq_rcu_node(rnp); 2163 rcu_state.gp_end = jiffies; 2164 gp_duration = rcu_state.gp_end - rcu_state.gp_start; 2165 if (gp_duration > rcu_state.gp_max) 2166 rcu_state.gp_max = gp_duration; 2167 2168 /* 2169 * We know the grace period is complete, but to everyone else 2170 * it appears to still be ongoing. But it is also the case 2171 * that to everyone else it looks like there is nothing that 2172 * they can do to advance the grace period. It is therefore 2173 * safe for us to drop the lock in order to mark the grace 2174 * period as completed in all of the rcu_node structures. 
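 *
 * Roughly, the code below does (sketch only):
 *
 *    new_gp_seq = rcu_state.gp_seq;
 *    rcu_seq_end(&new_gp_seq);
 *    rcu_for_each_node_breadth_first(rnp)
 *        WRITE_ONCE(rnp->gp_seq, new_gp_seq);
 *
 * visiting parents before children, so that no leaf rcu_node structure
 * ever advertises a newer ->gp_seq than its ancestors.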
2175 */ 2176 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap); 2177 raw_spin_unlock_irq_rcu_node(rnp); 2178 2179 /* 2180 * Propagate new ->gp_seq value to rcu_node structures so that 2181 * other CPUs don't have to wait until the start of the next grace 2182 * period to process their callbacks. This also avoids some nasty 2183 * RCU grace-period initialization races by forcing the end of 2184 * the current grace period to be completely recorded in all of 2185 * the rcu_node structures before the beginning of the next grace 2186 * period is recorded in any of the rcu_node structures. 2187 */ 2188 new_gp_seq = rcu_state.gp_seq; 2189 rcu_seq_end(&new_gp_seq); 2190 rcu_for_each_node_breadth_first(rnp) { 2191 raw_spin_lock_irq_rcu_node(rnp); 2192 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) 2193 dump_blkd_tasks(rnp, 10); 2194 WARN_ON_ONCE(rnp->qsmask); 2195 WRITE_ONCE(rnp->gp_seq, new_gp_seq); 2196 if (!rnp->parent) 2197 smp_mb(); // Order against failing poll_state_synchronize_rcu_full(). 2198 rdp = this_cpu_ptr(&rcu_data); 2199 if (rnp == rdp->mynode) 2200 needgp = __note_gp_changes(rnp, rdp) || needgp; 2201 /* smp_mb() provided by prior unlock-lock pair. */ 2202 needgp = rcu_future_gp_cleanup(rnp) || needgp; 2203 // Reset overload indication for CPUs no longer overloaded 2204 if (rcu_is_leaf_node(rnp)) 2205 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { 2206 rdp = per_cpu_ptr(&rcu_data, cpu); 2207 check_cb_ovld_locked(rdp, rnp); 2208 } 2209 sq = rcu_nocb_gp_get(rnp); 2210 raw_spin_unlock_irq_rcu_node(rnp); 2211 rcu_nocb_gp_cleanup(sq); 2212 cond_resched_tasks_rcu_qs(); 2213 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2214 rcu_gp_slow(gp_cleanup_delay); 2215 } 2216 rnp = rcu_get_root(); 2217 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ 2218 2219 /* Declare grace period done, trace first to use old GP number. */ 2220 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); 2221 rcu_seq_end(&rcu_state.gp_seq); 2222 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 2223 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE); 2224 /* Check for GP requests since above loop. */ 2225 rdp = this_cpu_ptr(&rcu_data); 2226 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { 2227 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, 2228 TPS("CleanupMore")); 2229 needgp = true; 2230 } 2231 /* Advance CBs to reduce false positives below. */ 2232 offloaded = rcu_rdp_is_offloaded(rdp); 2233 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { 2234 2235 // We get here if a grace period was needed (“needgp”) 2236 // and the above call to rcu_accelerate_cbs() did not set 2237 // the RCU_GP_FLAG_INIT bit in ->gp_state (which records 2238 // the need for another grace period). The purpose 2239 // of the “offloaded” check is to avoid invoking 2240 // rcu_accelerate_cbs() on an offloaded CPU because we do not 2241 // hold the ->nocb_lock needed to safely access an offloaded 2242 // ->cblist. We do not want to acquire that lock because 2243 // it can be heavily contended during callback floods. 2244 2245 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); 2246 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 2247 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq")); 2248 } else { 2249 2250 // We get here either if there is no need for an 2251 // additional grace period or if rcu_accelerate_cbs() has 2252 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 2253 // So all we need to do is to clear all of the other 2254 // ->gp_flags bits. 
2255 2256 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT); 2257 } 2258 raw_spin_unlock_irq_rcu_node(rnp); 2259 2260 // Make synchronize_rcu() users aware of the end of old grace period. 2261 rcu_sr_normal_gp_cleanup(); 2262 2263 // If strict, make all CPUs aware of the end of the old grace period. 2264 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 2265 on_each_cpu(rcu_strict_gp_boundary, NULL, 0); 2266 } 2267 2268 /* 2269 * Body of kthread that handles grace periods. 2270 */ 2271 static int __noreturn rcu_gp_kthread(void *unused) 2272 { 2273 rcu_bind_gp_kthread(); 2274 for (;;) { 2275 2276 /* Handle grace-period start. */ 2277 for (;;) { 2278 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2279 TPS("reqwait")); 2280 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS); 2281 swait_event_idle_exclusive(rcu_state.gp_wq, 2282 READ_ONCE(rcu_state.gp_flags) & 2283 RCU_GP_FLAG_INIT); 2284 rcu_gp_torture_wait(); 2285 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS); 2286 /* Locking provides needed memory barrier. */ 2287 if (rcu_gp_init()) 2288 break; 2289 cond_resched_tasks_rcu_qs(); 2290 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2291 WARN_ON(signal_pending(current)); 2292 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2293 TPS("reqwaitsig")); 2294 } 2295 2296 /* Handle quiescent-state forcing. */ 2297 rcu_gp_fqs_loop(); 2298 2299 /* Handle grace-period end. */ 2300 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP); 2301 rcu_gp_cleanup(); 2302 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED); 2303 } 2304 } 2305 2306 /* 2307 * Report a full set of quiescent states to the rcu_state data structure. 2308 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 2309 * another grace period is required. Whether we wake the grace-period 2310 * kthread or it awakens itself for the next round of quiescent-state 2311 * forcing, that kthread will clean up after the just-completed grace 2312 * period. Note that the caller must hold rnp->lock, which is released 2313 * before return. 2314 */ 2315 static void rcu_report_qs_rsp(unsigned long flags) 2316 __releases(rcu_get_root()->lock) 2317 { 2318 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 2319 WARN_ON_ONCE(!rcu_gp_in_progress()); 2320 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS); 2321 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 2322 rcu_gp_kthread_wake(); 2323 } 2324 2325 /* 2326 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 2327 * Allows quiescent states for a group of CPUs to be reported at one go 2328 * to the specified rcu_node structure, though all the CPUs in the group 2329 * must be represented by the same rcu_node structure (which need not be a 2330 * leaf rcu_node structure, though it often will be). The gps parameter 2331 * is the grace-period snapshot, which means that the quiescent states 2332 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 2333 * must be held upon entry, and it is released before return. 2334 * 2335 * As a special case, if mask is zero, the bit-already-cleared check is 2336 * disabled. This allows propagating quiescent state due to resumed tasks 2337 * during grace-period initialization. 
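 *
 * Conceptually, the walk below does the following at each level
 * (illustration only, not the exact code):
 *
 *    WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); // these are now done
 *    if (rnp->qsmask || rcu_preempt_blocked_readers_cgp(rnp))
 *        return;                                   // this level still busy
 *    mask = rnp->grpmask;                          // report whole node upward
 *    rnp = rnp->parent;
 *
 * terminating with rcu_report_qs_rsp() once the root rcu_node
 * structure has no remaining blockers.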
2338 */ 2339 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 2340 unsigned long gps, unsigned long flags) 2341 __releases(rnp->lock) 2342 { 2343 unsigned long oldmask = 0; 2344 struct rcu_node *rnp_c; 2345 2346 raw_lockdep_assert_held_rcu_node(rnp); 2347 2348 /* Walk up the rcu_node hierarchy. */ 2349 for (;;) { 2350 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 2351 2352 /* 2353 * Our bit has already been cleared, or the 2354 * relevant grace period is already over, so done. 2355 */ 2356 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2357 return; 2358 } 2359 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 2360 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 2361 rcu_preempt_blocked_readers_cgp(rnp)); 2362 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); 2363 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 2364 mask, rnp->qsmask, rnp->level, 2365 rnp->grplo, rnp->grphi, 2366 !!rnp->gp_tasks); 2367 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2368 2369 /* Other bits still set at this level, so done. */ 2370 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2371 return; 2372 } 2373 rnp->completedqs = rnp->gp_seq; 2374 mask = rnp->grpmask; 2375 if (rnp->parent == NULL) { 2376 2377 /* No more levels. Exit loop holding root lock. */ 2378 2379 break; 2380 } 2381 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2382 rnp_c = rnp; 2383 rnp = rnp->parent; 2384 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2385 oldmask = READ_ONCE(rnp_c->qsmask); 2386 } 2387 2388 /* 2389 * Get here if we are the last CPU to pass through a quiescent 2390 * state for this grace period. Invoke rcu_report_qs_rsp() 2391 * to clean up and start the next grace period if one is needed. 2392 */ 2393 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ 2394 } 2395 2396 /* 2397 * Record a quiescent state for all tasks that were previously queued 2398 * on the specified rcu_node structure and that were blocking the current 2399 * RCU grace period. The caller must hold the corresponding rnp->lock with 2400 * irqs disabled, and this lock is released upon return, but irqs remain 2401 * disabled. 2402 */ 2403 static void __maybe_unused 2404 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 2405 __releases(rnp->lock) 2406 { 2407 unsigned long gps; 2408 unsigned long mask; 2409 struct rcu_node *rnp_p; 2410 2411 raw_lockdep_assert_held_rcu_node(rnp); 2412 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) || 2413 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 2414 rnp->qsmask != 0) { 2415 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2416 return; /* Still need more quiescent states! */ 2417 } 2418 2419 rnp->completedqs = rnp->gp_seq; 2420 rnp_p = rnp->parent; 2421 if (rnp_p == NULL) { 2422 /* 2423 * Only one rcu_node structure in the tree, so don't 2424 * try to report up to its nonexistent parent! 2425 */ 2426 rcu_report_qs_rsp(flags); 2427 return; 2428 } 2429 2430 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 2431 gps = rnp->gp_seq; 2432 mask = rnp->grpmask; 2433 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2434 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2435 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 2436 } 2437 2438 /* 2439 * Record a quiescent state for the specified CPU to that CPU's rcu_data 2440 * structure. This must be called from the specified CPU. 
2441 */ 2442 static void 2443 rcu_report_qs_rdp(struct rcu_data *rdp) 2444 { 2445 unsigned long flags; 2446 unsigned long mask; 2447 struct rcu_node *rnp; 2448 2449 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); 2450 rnp = rdp->mynode; 2451 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2452 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 2453 rdp->gpwrap) { 2454 2455 /* 2456 * The grace period in which this quiescent state was 2457 * recorded has ended, so don't report it upwards. 2458 * We will instead need a new quiescent state that lies 2459 * within the current grace period. 2460 */ 2461 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 2462 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2463 return; 2464 } 2465 mask = rdp->grpmask; 2466 rdp->core_needs_qs = false; 2467 if ((rnp->qsmask & mask) == 0) { 2468 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2469 } else { 2470 /* 2471 * This GP can't end until cpu checks in, so all of our 2472 * callbacks can be processed during the next GP. 2473 * 2474 * NOCB kthreads have their own way to deal with that... 2475 */ 2476 if (!rcu_rdp_is_offloaded(rdp)) { 2477 /* 2478 * The current GP has not yet ended, so it 2479 * should not be possible for rcu_accelerate_cbs() 2480 * to return true. So complain, but don't awaken. 2481 */ 2482 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp)); 2483 } 2484 2485 rcu_disable_urgency_upon_qs(rdp); 2486 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2487 /* ^^^ Released rnp->lock */ 2488 } 2489 } 2490 2491 /* 2492 * Check to see if there is a new grace period of which this CPU 2493 * is not yet aware, and if so, set up local rcu_data state for it. 2494 * Otherwise, see if this CPU has just passed through its first 2495 * quiescent state for this grace period, and record that fact if so. 2496 */ 2497 static void 2498 rcu_check_quiescent_state(struct rcu_data *rdp) 2499 { 2500 /* Check for grace-period ends and beginnings. */ 2501 note_gp_changes(rdp); 2502 2503 /* 2504 * Does this CPU still need to do its part for current grace period? 2505 * If no, return and let the other CPUs do their part as well. 2506 */ 2507 if (!rdp->core_needs_qs) 2508 return; 2509 2510 /* 2511 * Was there a quiescent state since the beginning of the grace 2512 * period? If no, then exit and wait for the next call. 2513 */ 2514 if (rdp->cpu_no_qs.b.norm) 2515 return; 2516 2517 /* 2518 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2519 * judge of that). 2520 */ 2521 rcu_report_qs_rdp(rdp); 2522 } 2523 2524 /* Return true if callback-invocation time limit exceeded. */ 2525 static bool rcu_do_batch_check_time(long count, long tlimit, 2526 bool jlimit_check, unsigned long jlimit) 2527 { 2528 // Invoke local_clock() only once per 32 consecutive callbacks. 2529 return unlikely(tlimit) && 2530 (!likely(count & 31) || 2531 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) && 2532 jlimit_check && time_after(jiffies, jlimit))) && 2533 local_clock() >= tlimit; 2534 } 2535 2536 /* 2537 * Invoke any RCU callbacks that have made it to the end of their grace 2538 * period. Throttle as specified by rdp->blimit. 2539 */ 2540 static void rcu_do_batch(struct rcu_data *rdp) 2541 { 2542 long bl; 2543 long count = 0; 2544 int div; 2545 bool __maybe_unused empty; 2546 unsigned long flags; 2547 unsigned long jlimit; 2548 bool jlimit_check = false; 2549 long pending; 2550 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2551 struct rcu_head *rhp; 2552 long tlimit = 0; 2553 2554 /* If no callbacks are ready, just return. 
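 *
 * For orientation, the overall shape of this function is (sketch only):
 *
 *    rcu_nocb_lock_irqsave(rdp, flags);
 *    rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
 *    rcu_nocb_unlock_irqrestore(rdp, flags);
 *    while ((rhp = rcu_cblist_dequeue(&rcl)))
 *        rhp->func(rhp);                    // throttled by bl/tlimit below
 *    rcu_nocb_lock_irqsave(rdp, flags);
 *    rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); // requeue leftovers
 *    rcu_nocb_unlock_irqrestore(rdp, flags);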
*/ 2555 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2556 trace_rcu_batch_start(rcu_state.name, 2557 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2558 trace_rcu_batch_end(rcu_state.name, 0, 2559 !rcu_segcblist_empty(&rdp->cblist), 2560 need_resched(), is_idle_task(current), 2561 rcu_is_callbacks_kthread(rdp)); 2562 return; 2563 } 2564 2565 /* 2566 * Extract the list of ready callbacks, disabling IRQs to prevent 2567 * races with call_rcu() from interrupt handlers. Leave the 2568 * callback counts, as rcu_barrier() needs to be conservative. 2569 * 2570 * Callbacks execution is fully ordered against preceding grace period 2571 * completion (materialized by rnp->gp_seq update) thanks to the 2572 * smp_mb__after_unlock_lock() upon node locking required for callbacks 2573 * advancing. In NOCB mode this ordering is then further relayed through 2574 * the nocb locking that protects both callbacks advancing and extraction. 2575 */ 2576 rcu_nocb_lock_irqsave(rdp, flags); 2577 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2578 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL); 2579 div = READ_ONCE(rcu_divisor); 2580 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; 2581 bl = max(rdp->blimit, pending >> div); 2582 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) && 2583 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) { 2584 const long npj = NSEC_PER_SEC / HZ; 2585 long rrn = READ_ONCE(rcu_resched_ns); 2586 2587 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn; 2588 tlimit = local_clock() + rrn; 2589 jlimit = jiffies + (rrn + npj + 1) / npj; 2590 jlimit_check = true; 2591 } 2592 trace_rcu_batch_start(rcu_state.name, 2593 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2594 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2595 if (rcu_rdp_is_offloaded(rdp)) 2596 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2597 2598 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); 2599 rcu_nocb_unlock_irqrestore(rdp, flags); 2600 2601 /* Invoke callbacks. */ 2602 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2603 rhp = rcu_cblist_dequeue(&rcl); 2604 2605 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2606 rcu_callback_t f; 2607 2608 count++; 2609 debug_rcu_head_unqueue(rhp); 2610 2611 rcu_lock_acquire(&rcu_callback_map); 2612 trace_rcu_invoke_callback(rcu_state.name, rhp); 2613 2614 f = rhp->func; 2615 debug_rcu_head_callback(rhp); 2616 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); 2617 f(rhp); 2618 2619 rcu_lock_release(&rcu_callback_map); 2620 2621 /* 2622 * Stop only if limit reached and CPU has something to do. 2623 */ 2624 if (in_serving_softirq()) { 2625 if (count >= bl && (need_resched() || !is_idle_task(current))) 2626 break; 2627 /* 2628 * Make sure we don't spend too much time here and deprive other 2629 * softirq vectors of CPU cycles. 2630 */ 2631 if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) 2632 break; 2633 } else { 2634 // In rcuc/rcuoc context, so no worries about 2635 // depriving other softirq vectors of CPU cycles. 2636 local_bh_enable(); 2637 lockdep_assert_irqs_enabled(); 2638 cond_resched_tasks_rcu_qs(); 2639 lockdep_assert_irqs_enabled(); 2640 local_bh_disable(); 2641 // But rcuc kthreads can delay quiescent-state 2642 // reporting, so check time limits for them. 
2643 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING && 2644 rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) { 2645 rdp->rcu_cpu_has_work = 1; 2646 break; 2647 } 2648 } 2649 } 2650 2651 rcu_nocb_lock_irqsave(rdp, flags); 2652 rdp->n_cbs_invoked += count; 2653 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), 2654 is_idle_task(current), rcu_is_callbacks_kthread(rdp)); 2655 2656 /* Update counts and requeue any remaining callbacks. */ 2657 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2658 rcu_segcblist_add_len(&rdp->cblist, -count); 2659 2660 /* Reinstate batch limit if we have worked down the excess. */ 2661 count = rcu_segcblist_n_cbs(&rdp->cblist); 2662 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) 2663 rdp->blimit = blimit; 2664 2665 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2666 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2667 rdp->qlen_last_fqs_check = 0; 2668 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 2669 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2670 rdp->qlen_last_fqs_check = count; 2671 2672 /* 2673 * The following usually indicates a double call_rcu(). To track 2674 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2675 */ 2676 empty = rcu_segcblist_empty(&rdp->cblist); 2677 WARN_ON_ONCE(count == 0 && !empty); 2678 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2679 count != 0 && empty); 2680 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); 2681 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); 2682 2683 rcu_nocb_unlock_irqrestore(rdp, flags); 2684 2685 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2686 } 2687 2688 /* 2689 * This function is invoked from each scheduling-clock interrupt, 2690 * and checks to see if this CPU is in a non-context-switch quiescent 2691 * state, for example, user mode or idle loop. It also schedules RCU 2692 * core processing. If the current grace period has gone on too long, 2693 * it will ask the scheduler to manufacture a context switch for the sole 2694 * purpose of providing the needed quiescent state. 2695 */ 2696 void rcu_sched_clock_irq(int user) 2697 { 2698 unsigned long j; 2699 2700 if (IS_ENABLED(CONFIG_PROVE_RCU)) { 2701 j = jiffies; 2702 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock))); 2703 __this_cpu_write(rcu_data.last_sched_clock, j); 2704 } 2705 trace_rcu_utilization(TPS("Start scheduler-tick")); 2706 lockdep_assert_irqs_disabled(); 2707 raw_cpu_inc(rcu_data.ticks_this_gp); 2708 /* The load-acquire pairs with the store-release setting to true. */ 2709 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 2710 /* Idle and userspace execution already are quiescent states. */ 2711 if (!rcu_is_cpu_rrupt_from_idle() && !user) 2712 set_need_resched_current(); 2713 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2714 } 2715 rcu_flavor_sched_clock_irq(user); 2716 if (rcu_pending(user)) 2717 invoke_rcu_core(); 2718 if (user || rcu_is_cpu_rrupt_from_idle()) 2719 rcu_note_voluntary_context_switch(current); 2720 lockdep_assert_irqs_disabled(); 2721 2722 trace_rcu_utilization(TPS("End scheduler-tick")); 2723 } 2724 2725 /* 2726 * Scan the leaf rcu_node structures. For each structure on which all 2727 * CPUs have reported a quiescent state and on which there are tasks 2728 * blocking the current grace period, initiate RCU priority boosting. 
2729 * Otherwise, invoke the specified function to check dyntick state for 2730 * each CPU that has not yet reported a quiescent state. 2731 */ 2732 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) 2733 { 2734 int cpu; 2735 unsigned long flags; 2736 struct rcu_node *rnp; 2737 2738 rcu_state.cbovld = rcu_state.cbovldnext; 2739 rcu_state.cbovldnext = false; 2740 rcu_for_each_leaf_node(rnp) { 2741 unsigned long mask = 0; 2742 unsigned long rsmask = 0; 2743 2744 cond_resched_tasks_rcu_qs(); 2745 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2746 rcu_state.cbovldnext |= !!rnp->cbovldmask; 2747 if (rnp->qsmask == 0) { 2748 if (rcu_preempt_blocked_readers_cgp(rnp)) { 2749 /* 2750 * No point in scanning bits because they 2751 * are all zero. But we might need to 2752 * priority-boost blocked readers. 2753 */ 2754 rcu_initiate_boost(rnp, flags); 2755 /* rcu_initiate_boost() releases rnp->lock */ 2756 continue; 2757 } 2758 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2759 continue; 2760 } 2761 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { 2762 struct rcu_data *rdp; 2763 int ret; 2764 2765 rdp = per_cpu_ptr(&rcu_data, cpu); 2766 ret = f(rdp); 2767 if (ret > 0) { 2768 mask |= rdp->grpmask; 2769 rcu_disable_urgency_upon_qs(rdp); 2770 } 2771 if (ret < 0) 2772 rsmask |= rdp->grpmask; 2773 } 2774 if (mask != 0) { 2775 /* Idle/offline CPUs, report (releases rnp->lock). */ 2776 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2777 } else { 2778 /* Nothing to do here, so just drop the lock. */ 2779 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2780 } 2781 2782 for_each_leaf_node_cpu_mask(rnp, cpu, rsmask) 2783 resched_cpu(cpu); 2784 } 2785 } 2786 2787 /* 2788 * Force quiescent states on reluctant CPUs, and also detect which 2789 * CPUs are in dyntick-idle mode. 2790 */ 2791 void rcu_force_quiescent_state(void) 2792 { 2793 unsigned long flags; 2794 bool ret; 2795 struct rcu_node *rnp; 2796 struct rcu_node *rnp_old = NULL; 2797 2798 if (!rcu_gp_in_progress()) 2799 return; 2800 /* Funnel through hierarchy to reduce memory contention. */ 2801 rnp = raw_cpu_read(rcu_data.mynode); 2802 for (; rnp != NULL; rnp = rnp->parent) { 2803 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || 2804 !raw_spin_trylock(&rnp->fqslock); 2805 if (rnp_old != NULL) 2806 raw_spin_unlock(&rnp_old->fqslock); 2807 if (ret) 2808 return; 2809 rnp_old = rnp; 2810 } 2811 /* rnp_old == rcu_get_root(), rnp == NULL. */ 2812 2813 /* Reached the root of the rcu_node tree, acquire lock. */ 2814 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2815 raw_spin_unlock(&rnp_old->fqslock); 2816 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2817 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2818 return; /* Someone beat us to it. */ 2819 } 2820 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS); 2821 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2822 rcu_gp_kthread_wake(); 2823 } 2824 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 2825 2826 // Workqueue handler for an RCU reader for kernels enforcing struct RCU 2827 // grace periods. 2828 static void strict_work_handler(struct work_struct *work) 2829 { 2830 rcu_read_lock(); 2831 rcu_read_unlock(); 2832 } 2833 2834 /* Perform RCU core processing work for the current CPU. 
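 *
 * rcu_core() runs either from the RCU_SOFTIRQ handler or from this
 * CPU's rcuc kthread, normally selected at boot by the
 * rcutree.use_softirq parameter; invoke_rcu_core() below chooses
 * between them roughly as follows:
 *
 *    if (use_softirq)
 *        raise_softirq(RCU_SOFTIRQ);   // -> rcu_core_si() -> rcu_core()
 *    else
 *        invoke_rcu_core_kthread();    // wakes rcu_cpu_kthread() -> rcu_core()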
*/ 2835 static __latent_entropy void rcu_core(void) 2836 { 2837 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2838 struct rcu_node *rnp = rdp->mynode; 2839 2840 if (cpu_is_offline(smp_processor_id())) 2841 return; 2842 trace_rcu_utilization(TPS("Start RCU core")); 2843 WARN_ON_ONCE(!rdp->beenonline); 2844 2845 /* Report any deferred quiescent states if preemption enabled. */ 2846 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) { 2847 rcu_preempt_deferred_qs(current); 2848 } else if (rcu_preempt_need_deferred_qs(current)) { 2849 guard(irqsave)(); 2850 set_need_resched_current(); 2851 } 2852 2853 /* Update RCU state based on any recent quiescent states. */ 2854 rcu_check_quiescent_state(rdp); 2855 2856 /* No grace period and unregistered callbacks? */ 2857 if (!rcu_gp_in_progress() && 2858 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) { 2859 guard(irqsave)(); 2860 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2861 rcu_accelerate_cbs_unlocked(rnp, rdp); 2862 } 2863 2864 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2865 2866 /* If there are callbacks ready, invoke them. */ 2867 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) && 2868 likely(READ_ONCE(rcu_scheduler_fully_active))) { 2869 rcu_do_batch(rdp); 2870 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2871 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2872 invoke_rcu_core(); 2873 } 2874 2875 /* Do any needed deferred wakeups of rcuo kthreads. */ 2876 do_nocb_deferred_wakeup(rdp); 2877 trace_rcu_utilization(TPS("End RCU core")); 2878 2879 // If strict GPs, schedule an RCU reader in a clean environment. 2880 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 2881 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); 2882 } 2883 2884 static void rcu_core_si(void) 2885 { 2886 rcu_core(); 2887 } 2888 2889 static void rcu_wake_cond(struct task_struct *t, int status) 2890 { 2891 /* 2892 * If the thread is yielding, only wake it when this 2893 * is invoked from idle 2894 */ 2895 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) 2896 wake_up_process(t); 2897 } 2898 2899 static void invoke_rcu_core_kthread(void) 2900 { 2901 struct task_struct *t; 2902 unsigned long flags; 2903 2904 local_irq_save(flags); 2905 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); 2906 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); 2907 if (t != NULL && t != current) 2908 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); 2909 local_irq_restore(flags); 2910 } 2911 2912 /* 2913 * Wake up this CPU's rcuc kthread to do RCU core processing. 2914 */ 2915 static void invoke_rcu_core(void) 2916 { 2917 if (!cpu_online(smp_processor_id())) 2918 return; 2919 if (use_softirq) 2920 raise_softirq(RCU_SOFTIRQ); 2921 else 2922 invoke_rcu_core_kthread(); 2923 } 2924 2925 static void rcu_cpu_kthread_park(unsigned int cpu) 2926 { 2927 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 2928 } 2929 2930 static int rcu_cpu_kthread_should_run(unsigned int cpu) 2931 { 2932 return __this_cpu_read(rcu_data.rcu_cpu_has_work); 2933 } 2934 2935 /* 2936 * Per-CPU kernel thread that invokes RCU callbacks. This replaces 2937 * the RCU softirq used in configurations of RCU that do not support RCU 2938 * priority boosting. 
2939 */ 2940 static void rcu_cpu_kthread(unsigned int cpu) 2941 { 2942 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); 2943 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); 2944 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity); 2945 int spincnt; 2946 2947 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run")); 2948 for (spincnt = 0; spincnt < 10; spincnt++) { 2949 WRITE_ONCE(*j, jiffies); 2950 local_bh_disable(); 2951 *statusp = RCU_KTHREAD_RUNNING; 2952 local_irq_disable(); 2953 work = *workp; 2954 WRITE_ONCE(*workp, 0); 2955 local_irq_enable(); 2956 if (work) 2957 rcu_core(); 2958 local_bh_enable(); 2959 if (!READ_ONCE(*workp)) { 2960 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 2961 *statusp = RCU_KTHREAD_WAITING; 2962 return; 2963 } 2964 } 2965 *statusp = RCU_KTHREAD_YIELDING; 2966 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 2967 schedule_timeout_idle(2); 2968 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 2969 *statusp = RCU_KTHREAD_WAITING; 2970 WRITE_ONCE(*j, jiffies); 2971 } 2972 2973 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 2974 .store = &rcu_data.rcu_cpu_kthread_task, 2975 .thread_should_run = rcu_cpu_kthread_should_run, 2976 .thread_fn = rcu_cpu_kthread, 2977 .thread_comm = "rcuc/%u", 2978 .setup = rcu_cpu_kthread_setup, 2979 .park = rcu_cpu_kthread_park, 2980 }; 2981 2982 /* 2983 * Spawn per-CPU RCU core processing kthreads. 2984 */ 2985 static int __init rcu_spawn_core_kthreads(void) 2986 { 2987 int cpu; 2988 2989 for_each_possible_cpu(cpu) 2990 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; 2991 if (use_softirq) 2992 return 0; 2993 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), 2994 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); 2995 return 0; 2996 } 2997 2998 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func) 2999 { 3000 rcu_segcblist_enqueue(&rdp->cblist, head); 3001 trace_rcu_callback(rcu_state.name, head, 3002 rcu_segcblist_n_cbs(&rdp->cblist)); 3003 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); 3004 } 3005 3006 /* 3007 * Handle any core-RCU processing required by a call_rcu() invocation. 3008 */ 3009 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 3010 rcu_callback_t func, unsigned long flags) 3011 { 3012 rcutree_enqueue(rdp, head, func); 3013 /* 3014 * If called from an extended quiescent state, invoke the RCU 3015 * core in order to force a re-evaluation of RCU's idleness. 3016 */ 3017 if (!rcu_is_watching()) 3018 invoke_rcu_core(); 3019 3020 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 3021 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 3022 return; 3023 3024 /* 3025 * Force the grace period if too many callbacks or too long waiting. 3026 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 3027 * if some other CPU has recently done so. Also, don't bother 3028 * invoking rcu_force_quiescent_state() if the newly enqueued callback 3029 * is the only one waiting for a grace period to complete. 3030 */ 3031 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 3032 rdp->qlen_last_fqs_check + qhimark)) { 3033 3034 /* Are we ignoring a completed grace period? */ 3035 note_gp_changes(rdp); 3036 3037 /* Start a new grace period if one not already started. */ 3038 if (!rcu_gp_in_progress()) { 3039 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 3040 } else { 3041 /* Give the grace period a kick. 
*/ 3042 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; 3043 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && 3044 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 3045 rcu_force_quiescent_state(); 3046 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 3047 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 3048 } 3049 } 3050 } 3051 3052 /* 3053 * RCU callback function to leak a callback. 3054 */ 3055 static void rcu_leak_callback(struct rcu_head *rhp) 3056 { 3057 } 3058 3059 /* 3060 * Check and if necessary update the leaf rcu_node structure's 3061 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 3062 * number of queued RCU callbacks. The caller must hold the leaf rcu_node 3063 * structure's ->lock. 3064 */ 3065 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) 3066 { 3067 raw_lockdep_assert_held_rcu_node(rnp); 3068 if (qovld_calc <= 0) 3069 return; // Early boot and wildcard value set. 3070 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) 3071 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); 3072 else 3073 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); 3074 } 3075 3076 /* 3077 * Check and if necessary update the leaf rcu_node structure's 3078 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 3079 * number of queued RCU callbacks. No locks need be held, but the 3080 * caller must have disabled interrupts. 3081 * 3082 * Note that this function ignores the possibility that there are a lot 3083 * of callbacks all of which have already seen the end of their respective 3084 * grace periods. This omission is due to the need for no-CBs CPUs to 3085 * be holding ->nocb_lock to do this check, which is too heavy for a 3086 * common-case operation. 3087 */ 3088 static void check_cb_ovld(struct rcu_data *rdp) 3089 { 3090 struct rcu_node *const rnp = rdp->mynode; 3091 3092 if (qovld_calc <= 0 || 3093 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == 3094 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) 3095 return; // Early boot wildcard value or already set correctly. 3096 raw_spin_lock_rcu_node(rnp); 3097 check_cb_ovld_locked(rdp, rnp); 3098 raw_spin_unlock_rcu_node(rnp); 3099 } 3100 3101 static void 3102 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) 3103 { 3104 static atomic_t doublefrees; 3105 unsigned long flags; 3106 bool lazy; 3107 struct rcu_data *rdp; 3108 3109 /* Misaligned rcu_head! */ 3110 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 3111 3112 /* Avoid NULL dereference if callback is NULL. */ 3113 if (WARN_ON_ONCE(!func)) 3114 return; 3115 3116 if (debug_rcu_head_queue(head)) { 3117 /* 3118 * Probable double call_rcu(), so leak the callback. 3119 * Use rcu:rcu_callback trace event to find the previous 3120 * time callback was passed to call_rcu(). 3121 */ 3122 if (atomic_inc_return(&doublefrees) < 4) { 3123 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func); 3124 mem_dump_obj(head); 3125 } 3126 WRITE_ONCE(head->func, rcu_leak_callback); 3127 return; 3128 } 3129 head->func = func; 3130 head->next = NULL; 3131 kasan_record_aux_stack(head); 3132 3133 local_irq_save(flags); 3134 rdp = this_cpu_ptr(&rcu_data); 3135 RCU_LOCKDEP_WARN(!rcu_rdp_cpu_online(rdp), "Callback enqueued on offline CPU!"); 3136 3137 lazy = lazy_in && !rcu_async_should_hurry(); 3138 3139 /* Add the callback to our list. 
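 *
 * (Aside, for reference: a typical external caller of this path embeds
 * the rcu_head in its own structure and frees that structure from the
 * callback, for example:
 *
 *    struct foo { int data; struct rcu_head rcu; };
 *
 *    static void foo_reclaim(struct rcu_head *rhp)
 *    {
 *        kfree(container_of(rhp, struct foo, rcu));
 *    }
 *
 *    call_rcu(&old_foo->rcu, foo_reclaim);
 *
 * where "struct foo" and friends are, of course, made-up names.)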
*/ 3140 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { 3141 // This can trigger due to call_rcu() from offline CPU: 3142 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE); 3143 WARN_ON_ONCE(!rcu_is_watching()); 3144 // Very early boot, before rcu_init(). Initialize if needed 3145 // and then drop through to queue the callback. 3146 if (rcu_segcblist_empty(&rdp->cblist)) 3147 rcu_segcblist_init(&rdp->cblist); 3148 } 3149 3150 check_cb_ovld(rdp); 3151 3152 if (unlikely(rcu_rdp_is_offloaded(rdp))) 3153 call_rcu_nocb(rdp, head, func, flags, lazy); 3154 else 3155 call_rcu_core(rdp, head, func, flags); 3156 local_irq_restore(flags); 3157 } 3158 3159 #ifdef CONFIG_RCU_LAZY 3160 static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF); 3161 module_param(enable_rcu_lazy, bool, 0444); 3162 3163 /** 3164 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and 3165 * flush all lazy callbacks (including the new one) to the main ->cblist while 3166 * doing so. 3167 * 3168 * @head: structure to be used for queueing the RCU updates. 3169 * @func: actual callback function to be invoked after the grace period 3170 * 3171 * The callback function will be invoked some time after a full grace 3172 * period elapses, in other words after all pre-existing RCU read-side 3173 * critical sections have completed. 3174 * 3175 * Use this API instead of call_rcu() if you don't want the callback to be 3176 * delayed for very long periods of time, which can happen on systems without 3177 * memory pressure and on systems which are lightly loaded or mostly idle. 3178 * This function will cause callbacks to be invoked sooner than later at the 3179 * expense of extra power. Other than that, this function is identical to, and 3180 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory 3181 * ordering and other functionality. 3182 */ 3183 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) 3184 { 3185 __call_rcu_common(head, func, false); 3186 } 3187 EXPORT_SYMBOL_GPL(call_rcu_hurry); 3188 #else 3189 #define enable_rcu_lazy false 3190 #endif 3191 3192 /** 3193 * call_rcu() - Queue an RCU callback for invocation after a grace period. 3194 * By default the callbacks are 'lazy' and are kept hidden from the main 3195 * ->cblist to prevent starting of grace periods too soon. 3196 * If you desire grace periods to start very soon, use call_rcu_hurry(). 3197 * 3198 * @head: structure to be used for queueing the RCU updates. 3199 * @func: actual callback function to be invoked after the grace period 3200 * 3201 * The callback function will be invoked some time after a full grace 3202 * period elapses, in other words after all pre-existing RCU read-side 3203 * critical sections have completed. However, the callback function 3204 * might well execute concurrently with RCU read-side critical sections 3205 * that started after call_rcu() was invoked. 3206 * 3207 * It is perfectly legal to repost an RCU callback, potentially with 3208 * a different callback function, from within its callback function. 3209 * The specified function will be invoked after another full grace period 3210 * has elapsed. This use case is similar in form to the common practice 3211 * of reposting a timer from within its own handler. 3212 * 3213 * RCU read-side critical sections are delimited by rcu_read_lock() 3214 * and rcu_read_unlock(), and may be nested. 
In addition, but only in 3215 * v5.0 and later, regions of code across which interrupts, preemption, 3216 * or softirqs have been disabled also serve as RCU read-side critical 3217 * sections. This includes hardware interrupt handlers, softirq handlers, 3218 * and NMI handlers. 3219 * 3220 * Note that all CPUs must agree that the grace period extended beyond 3221 * all pre-existing RCU read-side critical section. On systems with more 3222 * than one CPU, this means that when "func()" is invoked, each CPU is 3223 * guaranteed to have executed a full memory barrier since the end of its 3224 * last RCU read-side critical section whose beginning preceded the call 3225 * to call_rcu(). It also means that each CPU executing an RCU read-side 3226 * critical section that continues beyond the start of "func()" must have 3227 * executed a memory barrier after the call_rcu() but before the beginning 3228 * of that RCU read-side critical section. Note that these guarantees 3229 * include CPUs that are offline, idle, or executing in user mode, as 3230 * well as CPUs that are executing in the kernel. 3231 * 3232 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 3233 * resulting RCU callback function "func()", then both CPU A and CPU B are 3234 * guaranteed to execute a full memory barrier during the time interval 3235 * between the call to call_rcu() and the invocation of "func()" -- even 3236 * if CPU A and CPU B are the same CPU (but again only if the system has 3237 * more than one CPU). 3238 * 3239 * Implementation of these memory-ordering guarantees is described here: 3240 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. 3241 * 3242 * Specific to call_rcu() (as opposed to the other call_rcu*() functions), 3243 * in kernels built with CONFIG_RCU_LAZY=y, call_rcu() might delay for many 3244 * seconds before starting the grace period needed by the corresponding 3245 * callback. This delay can significantly improve energy-efficiency 3246 * on low-utilization battery-powered devices. To avoid this delay, 3247 * in latency-sensitive kernel code, use call_rcu_hurry(). 3248 */ 3249 void call_rcu(struct rcu_head *head, rcu_callback_t func) 3250 { 3251 __call_rcu_common(head, func, enable_rcu_lazy); 3252 } 3253 EXPORT_SYMBOL_GPL(call_rcu); 3254 3255 /* 3256 * During early boot, any blocking grace-period wait automatically 3257 * implies a grace period. 3258 * 3259 * Later on, this could in theory be the case for kernels built with 3260 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this 3261 * is not a common case. Furthermore, this optimization would cause 3262 * the rcu_gp_oldstate structure to expand by 50%, so this potential 3263 * grace-period optimization is ignored once the scheduler is running. 3264 */ 3265 static int rcu_blocking_is_gp(void) 3266 { 3267 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) { 3268 might_sleep(); 3269 return false; 3270 } 3271 return true; 3272 } 3273 3274 /* 3275 * Helper function for the synchronize_rcu() API. 3276 */ 3277 static void synchronize_rcu_normal(void) 3278 { 3279 struct rcu_synchronize rs; 3280 3281 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("request")); 3282 3283 if (READ_ONCE(rcu_normal_wake_from_gp) < 1) { 3284 wait_rcu_gp(call_rcu_hurry); 3285 goto trace_complete_out; 3286 } 3287 3288 init_rcu_head_on_stack(&rs.head); 3289 init_completion(&rs.completion); 3290 3291 /* 3292 * This code might be preempted, therefore take a GP 3293 * snapshot before adding a request. 
3294 */ 3295 if (IS_ENABLED(CONFIG_PROVE_RCU)) 3296 get_state_synchronize_rcu_full(&rs.oldstate); 3297 3298 rcu_sr_normal_add_req(&rs); 3299 3300 /* Kick a GP and start waiting. */ 3301 (void) start_poll_synchronize_rcu(); 3302 3303 /* Now we can wait. */ 3304 wait_for_completion(&rs.completion); 3305 destroy_rcu_head_on_stack(&rs.head); 3306 3307 trace_complete_out: 3308 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("complete")); 3309 } 3310 3311 /** 3312 * synchronize_rcu - wait until a grace period has elapsed. 3313 * 3314 * Control will return to the caller some time after a full grace 3315 * period has elapsed, in other words after all currently executing RCU 3316 * read-side critical sections have completed. Note, however, that 3317 * upon return from synchronize_rcu(), the caller might well be executing 3318 * concurrently with new RCU read-side critical sections that began while 3319 * synchronize_rcu() was waiting. 3320 * 3321 * RCU read-side critical sections are delimited by rcu_read_lock() 3322 * and rcu_read_unlock(), and may be nested. In addition, but only in 3323 * v5.0 and later, regions of code across which interrupts, preemption, 3324 * or softirqs have been disabled also serve as RCU read-side critical 3325 * sections. This includes hardware interrupt handlers, softirq handlers, 3326 * and NMI handlers. 3327 * 3328 * Note that this guarantee implies further memory-ordering guarantees. 3329 * On systems with more than one CPU, when synchronize_rcu() returns, 3330 * each CPU is guaranteed to have executed a full memory barrier since 3331 * the end of its last RCU read-side critical section whose beginning 3332 * preceded the call to synchronize_rcu(). In addition, each CPU having 3333 * an RCU read-side critical section that extends beyond the return from 3334 * synchronize_rcu() is guaranteed to have executed a full memory barrier 3335 * after the beginning of synchronize_rcu() and before the beginning of 3336 * that RCU read-side critical section. Note that these guarantees include 3337 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 3338 * that are executing in the kernel. 3339 * 3340 * Furthermore, if CPU A invoked synchronize_rcu(), which returned 3341 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 3342 * to have executed a full memory barrier during the execution of 3343 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but 3344 * again only if the system has more than one CPU). 3345 * 3346 * Implementation of these memory-ordering guarantees is described here: 3347 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. 3348 */ 3349 void synchronize_rcu(void) 3350 { 3351 unsigned long flags; 3352 struct rcu_node *rnp; 3353 3354 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3355 lock_is_held(&rcu_lock_map) || 3356 lock_is_held(&rcu_sched_lock_map), 3357 "Illegal synchronize_rcu() in RCU read-side critical section"); 3358 if (!rcu_blocking_is_gp()) { 3359 if (rcu_gp_is_expedited()) 3360 synchronize_rcu_expedited(); 3361 else 3362 synchronize_rcu_normal(); 3363 return; 3364 } 3365 3366 // Context allows vacuous grace periods. 3367 // Note well that this code runs with !PREEMPT && !SMP. 3368 // In addition, all code that advances grace periods runs at 3369 // process level. Therefore, this normal GP overlaps with other 3370 // normal GPs only by being fully nested within them, which allows 3371 // reuse of ->gp_seq_polled_snap. 
3372 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap); 3373 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap); 3374 3375 // Update the normal grace-period counters to record 3376 // this grace period, but only those used by the boot CPU. 3377 // The rcu_scheduler_starting() will take care of the rest of 3378 // these counters. 3379 local_irq_save(flags); 3380 WARN_ON_ONCE(num_online_cpus() > 1); 3381 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT); 3382 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent) 3383 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; 3384 local_irq_restore(flags); 3385 } 3386 EXPORT_SYMBOL_GPL(synchronize_rcu); 3387 3388 /** 3389 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie 3390 * @rgosp: Place to put state cookie 3391 * 3392 * Stores into @rgosp a value that will always be treated by functions 3393 * like poll_state_synchronize_rcu_full() as a cookie whose grace period 3394 * has already completed. 3395 */ 3396 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3397 { 3398 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED; 3399 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED; 3400 } 3401 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full); 3402 3403 /** 3404 * get_state_synchronize_rcu - Snapshot current RCU state 3405 * 3406 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3407 * or poll_state_synchronize_rcu() to determine whether or not a full 3408 * grace period has elapsed in the meantime. 3409 */ 3410 unsigned long get_state_synchronize_rcu(void) 3411 { 3412 /* 3413 * Any prior manipulation of RCU-protected data must happen 3414 * before the load from ->gp_seq. 3415 */ 3416 smp_mb(); /* ^^^ */ 3417 return rcu_seq_snap(&rcu_state.gp_seq_polled); 3418 } 3419 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 3420 3421 /** 3422 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited 3423 * @rgosp: location to place combined normal/expedited grace-period state 3424 * 3425 * Places the normal and expedited grace-period states in @rgosp. This 3426 * state value can be passed to a later call to cond_synchronize_rcu_full() 3427 * or poll_state_synchronize_rcu_full() to determine whether or not a 3428 * grace period (whether normal or expedited) has elapsed in the meantime. 3429 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned 3430 * long, but is guaranteed to see all grace periods. In contrast, the 3431 * combined state occupies less memory, but can sometimes fail to take 3432 * grace periods into account. 3433 * 3434 * This does not guarantee that the needed grace period will actually 3435 * start. 3436 */ 3437 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3438 { 3439 /* 3440 * Any prior manipulation of RCU-protected data must happen 3441 * before the loads from ->gp_seq and ->expedited_sequence. 3442 */ 3443 smp_mb(); /* ^^^ */ 3444 3445 // Yes, rcu_state.gp_seq, not rnp_root->gp_seq, the latter's use 3446 // in poll_state_synchronize_rcu_full() notwithstanding. Use of 3447 // the latter here would result in too-short grace periods due to 3448 // interactions with newly onlined CPUs. 3449 rgosp->rgos_norm = rcu_seq_snap(&rcu_state.gp_seq); 3450 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence); 3451 } 3452 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full); 3453 3454 /* 3455 * Helper function for start_poll_synchronize_rcu() and 3456 * start_poll_synchronize_rcu_full(). 
3457 */ 3458 static void start_poll_synchronize_rcu_common(void) 3459 { 3460 unsigned long flags; 3461 bool needwake; 3462 struct rcu_data *rdp; 3463 struct rcu_node *rnp; 3464 3465 local_irq_save(flags); 3466 rdp = this_cpu_ptr(&rcu_data); 3467 rnp = rdp->mynode; 3468 raw_spin_lock_rcu_node(rnp); // irqs already disabled. 3469 // Note it is possible for a grace period to have elapsed between 3470 // the above call to get_state_synchronize_rcu() and the below call 3471 // to rcu_seq_snap. This is OK, the worst that happens is that we 3472 // get a grace period that no one needed. These accesses are ordered 3473 // by smp_mb(), and we are accessing them in the opposite order 3474 // from which they are updated at grace-period start, as required. 3475 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq)); 3476 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3477 if (needwake) 3478 rcu_gp_kthread_wake(); 3479 } 3480 3481 /** 3482 * start_poll_synchronize_rcu - Snapshot and start RCU grace period 3483 * 3484 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3485 * or poll_state_synchronize_rcu() to determine whether or not a full 3486 * grace period has elapsed in the meantime. If the needed grace period 3487 * is not already slated to start, notifies RCU core of the need for that 3488 * grace period. 3489 */ 3490 unsigned long start_poll_synchronize_rcu(void) 3491 { 3492 unsigned long gp_seq = get_state_synchronize_rcu(); 3493 3494 start_poll_synchronize_rcu_common(); 3495 return gp_seq; 3496 } 3497 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu); 3498 3499 /** 3500 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period 3501 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full() 3502 * 3503 * Places the normal and expedited grace-period states in *@rgos. This 3504 * state value can be passed to a later call to cond_synchronize_rcu_full() 3505 * or poll_state_synchronize_rcu_full() to determine whether or not a 3506 * grace period (whether normal or expedited) has elapsed in the meantime. 3507 * If the needed grace period is not already slated to start, notifies 3508 * RCU core of the need for that grace period. 3509 */ 3510 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3511 { 3512 get_state_synchronize_rcu_full(rgosp); 3513 3514 start_poll_synchronize_rcu_common(); 3515 } 3516 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full); 3517 3518 /** 3519 * poll_state_synchronize_rcu - Has the specified RCU grace period completed? 3520 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu() 3521 * 3522 * If a full RCU grace period has elapsed since the earlier call from 3523 * which @oldstate was obtained, return @true, otherwise return @false. 3524 * If @false is returned, it is the caller's responsibility to invoke this 3525 * function later on until it does return @true. Alternatively, the caller 3526 * can explicitly wait for a grace period, for example, by passing @oldstate 3527 * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited() 3528 * on the one hand or by directly invoking either synchronize_rcu() or 3529 * synchronize_rcu_expedited() on the other. 3530 * 3531 * Yes, this function does not take counter wrap into account. 3532 * But counter wrap is harmless. If the counter wraps, we have waited for 3533 * more than a billion grace periods (and way more on a 64-bit system!). 
3534 * Those needing to keep old state values for very long time periods 3535 * (many hours even on 32-bit systems) should check them occasionally and 3536 * either refresh them or set a flag indicating that the grace period has 3537 * completed. Alternatively, they can use get_completed_synchronize_rcu() 3538 * to get a guaranteed-completed grace-period state. 3539 * 3540 * In addition, because oldstate compresses the grace-period state for 3541 * both normal and expedited grace periods into a single unsigned long, 3542 * it can miss a grace period when synchronize_rcu() runs concurrently 3543 * with synchronize_rcu_expedited(). If this is unacceptable, please 3544 * instead use the _full() variant of these polling APIs. 3545 * 3546 * This function provides the same memory-ordering guarantees that 3547 * would be provided by a synchronize_rcu() that was invoked at the call 3548 * to the function that provided @oldstate, and that returned at the end 3549 * of this function. 3550 */ 3551 bool poll_state_synchronize_rcu(unsigned long oldstate) 3552 { 3553 if (oldstate == RCU_GET_STATE_COMPLETED || 3554 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) { 3555 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 3556 return true; 3557 } 3558 return false; 3559 } 3560 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu); 3561 3562 /** 3563 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed? 3564 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full() 3565 * 3566 * If a full RCU grace period has elapsed since the earlier call from 3567 * which *rgosp was obtained, return @true, otherwise return @false. 3568 * If @false is returned, it is the caller's responsibility to invoke this 3569 * function later on until it does return @true. Alternatively, the caller 3570 * can explicitly wait for a grace period, for example, by passing @rgosp 3571 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu(). 3572 * 3573 * Yes, this function does not take counter wrap into account. 3574 * But counter wrap is harmless. If the counter wraps, we have waited 3575 * for more than a billion grace periods (and way more on a 64-bit 3576 * system!). Those needing to keep rcu_gp_oldstate values for very 3577 * long time periods (many hours even on 32-bit systems) should check 3578 * them occasionally and either refresh them or set a flag indicating 3579 * that the grace period has completed. Alternatively, they can use 3580 * get_completed_synchronize_rcu_full() to get a guaranteed-completed 3581 * grace-period state. 3582 * 3583 * This function provides the same memory-ordering guarantees that would 3584 * be provided by a synchronize_rcu() that was invoked at the call to 3585 * the function that provided @rgosp, and that returned at the end of this 3586 * function. And this guarantee requires that the root rcu_node structure's 3587 * ->gp_seq field be checked instead of that of the rcu_state structure. 3588 * The problem is that the just-ending grace-period's callbacks can be 3589 * invoked between the time that the root rcu_node structure's ->gp_seq 3590 * field is updated and the time that the rcu_state structure's ->gp_seq 3591 * field is updated. Therefore, if a single synchronize_rcu() is to 3592 * cause a subsequent poll_state_synchronize_rcu_full() to return @true, 3593 * then the root rcu_node structure is the one that needs to be polled. 
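 *
 * A minimal usage sketch follows; the names remove_element(), do_other_work(),
 * @p, and @gos are hypothetical and serve illustration only:
 *
 *	struct rcu_gp_oldstate gos;
 *
 *	remove_element(p);                      // Unlink from the RCU-protected structure.
 *	get_state_synchronize_rcu_full(&gos);
 *	do_other_work();                        // Give a grace period time to elapse.
 *	if (!poll_state_synchronize_rcu_full(&gos))
 *		synchronize_rcu();              // Or keep polling and retry later.
 *	kfree(p);                               // All pre-existing readers have finished.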
3594 */ 3595 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3596 { 3597 struct rcu_node *rnp = rcu_get_root(); 3598 3599 smp_mb(); // Order against root rcu_node structure grace-period cleanup. 3600 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED || 3601 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) || 3602 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED || 3603 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) { 3604 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 3605 return true; 3606 } 3607 return false; 3608 } 3609 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full); 3610 3611 /** 3612 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 3613 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited() 3614 * 3615 * If a full RCU grace period has elapsed since the earlier call to 3616 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return. 3617 * Otherwise, invoke synchronize_rcu() to wait for a full grace period. 3618 * 3619 * Yes, this function does not take counter wrap into account. 3620 * But counter wrap is harmless. If the counter wraps, we have waited for 3621 * more than 2 billion grace periods (and way more on a 64-bit system!), 3622 * so waiting for a couple of additional grace periods should be just fine. 3623 * 3624 * This function provides the same memory-ordering guarantees that 3625 * would be provided by a synchronize_rcu() that was invoked at the call 3626 * to the function that provided @oldstate and that returned at the end 3627 * of this function. 3628 */ 3629 void cond_synchronize_rcu(unsigned long oldstate) 3630 { 3631 if (!poll_state_synchronize_rcu(oldstate)) 3632 synchronize_rcu(); 3633 } 3634 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 3635 3636 /** 3637 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period 3638 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full() 3639 * 3640 * If a full RCU grace period has elapsed since the call to 3641 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), 3642 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was 3643 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait 3644 * for a full grace period. 3645 * 3646 * Yes, this function does not take counter wrap into account. 3647 * But counter wrap is harmless. If the counter wraps, we have waited for 3648 * more than 2 billion grace periods (and way more on a 64-bit system!), 3649 * so waiting for a couple of additional grace periods should be just fine. 3650 * 3651 * This function provides the same memory-ordering guarantees that 3652 * would be provided by a synchronize_rcu() that was invoked at the call 3653 * to the function that provided @rgosp and that returned at the end of 3654 * this function. 3655 */ 3656 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3657 { 3658 if (!poll_state_synchronize_rcu_full(rgosp)) 3659 synchronize_rcu(); 3660 } 3661 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full); 3662 3663 /* 3664 * Check to see if there is any immediate RCU-related work to be done by 3665 * the current CPU, returning 1 if so and zero otherwise. The checks are 3666 * in order of increasing expense: checks that can be carried out against 3667 * CPU-local state are performed first. However, we must check for CPU 3668 * stalls first, else we might not get a chance. 
3669 */ 3670 static int rcu_pending(int user) 3671 { 3672 bool gp_in_progress; 3673 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 3674 struct rcu_node *rnp = rdp->mynode; 3675 3676 lockdep_assert_irqs_disabled(); 3677 3678 /* Check for CPU stalls, if enabled. */ 3679 check_cpu_stall(rdp); 3680 3681 /* Does this CPU need a deferred NOCB wakeup? */ 3682 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) 3683 return 1; 3684 3685 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ 3686 gp_in_progress = rcu_gp_in_progress(); 3687 if ((user || rcu_is_cpu_rrupt_from_idle() || 3688 (gp_in_progress && 3689 time_before(jiffies, READ_ONCE(rcu_state.gp_start) + 3690 nohz_full_patience_delay_jiffies))) && 3691 rcu_nohz_full_cpu()) 3692 return 0; 3693 3694 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3695 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) 3696 return 1; 3697 3698 /* Does this CPU have callbacks ready to invoke? */ 3699 if (!rcu_rdp_is_offloaded(rdp) && 3700 rcu_segcblist_ready_cbs(&rdp->cblist)) 3701 return 1; 3702 3703 /* Has RCU gone idle with this CPU needing another grace period? */ 3704 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && 3705 !rcu_rdp_is_offloaded(rdp) && 3706 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 3707 return 1; 3708 3709 /* Have RCU grace period completed or started? */ 3710 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 3711 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 3712 return 1; 3713 3714 /* nothing to do */ 3715 return 0; 3716 } 3717 3718 /* 3719 * Helper function for rcu_barrier() tracing. If tracing is disabled, 3720 * the compiler is expected to optimize this away. 3721 */ 3722 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 3723 { 3724 trace_rcu_barrier(rcu_state.name, s, cpu, 3725 atomic_read(&rcu_state.barrier_cpu_count), done); 3726 } 3727 3728 /* 3729 * RCU callback function for rcu_barrier(). If we are last, wake 3730 * up the task executing rcu_barrier(). 3731 * 3732 * Note that the value of rcu_state.barrier_sequence must be captured 3733 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last, 3734 * other CPUs might count the value down to zero before this CPU gets 3735 * around to invoking rcu_barrier_trace(), which might result in bogus 3736 * data from the next instance of rcu_barrier(). 3737 */ 3738 static void rcu_barrier_callback(struct rcu_head *rhp) 3739 { 3740 unsigned long __maybe_unused s = rcu_state.barrier_sequence; 3741 3742 rhp->next = rhp; // Mark the callback as having been invoked. 3743 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 3744 rcu_barrier_trace(TPS("LastCB"), -1, s); 3745 complete(&rcu_state.barrier_completion); 3746 } else { 3747 rcu_barrier_trace(TPS("CB"), -1, s); 3748 } 3749 } 3750 3751 /* 3752 * If needed, entrain an rcu_barrier() callback on rdp->cblist. 
3753 */ 3754 static void rcu_barrier_entrain(struct rcu_data *rdp) 3755 { 3756 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence); 3757 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); 3758 bool wake_nocb = false; 3759 bool was_alldone = false; 3760 3761 lockdep_assert_held(&rcu_state.barrier_lock); 3762 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq)) 3763 return; 3764 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 3765 rdp->barrier_head.func = rcu_barrier_callback; 3766 debug_rcu_head_queue(&rdp->barrier_head); 3767 rcu_nocb_lock(rdp); 3768 /* 3769 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular 3770 * queue. This way we don't wait for bypass timer that can reach seconds 3771 * if it's fully lazy. 3772 */ 3773 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); 3774 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false)); 3775 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); 3776 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { 3777 atomic_inc(&rcu_state.barrier_cpu_count); 3778 } else { 3779 debug_rcu_head_unqueue(&rdp->barrier_head); 3780 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence); 3781 } 3782 rcu_nocb_unlock(rdp); 3783 if (wake_nocb) 3784 wake_nocb_gp(rdp); 3785 smp_store_release(&rdp->barrier_seq_snap, gseq); 3786 } 3787 3788 /* 3789 * Called with preemption disabled, and from cross-cpu IRQ context. 3790 */ 3791 static void rcu_barrier_handler(void *cpu_in) 3792 { 3793 uintptr_t cpu = (uintptr_t)cpu_in; 3794 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3795 3796 lockdep_assert_irqs_disabled(); 3797 WARN_ON_ONCE(cpu != rdp->cpu); 3798 WARN_ON_ONCE(cpu != smp_processor_id()); 3799 raw_spin_lock(&rcu_state.barrier_lock); 3800 rcu_barrier_entrain(rdp); 3801 raw_spin_unlock(&rcu_state.barrier_lock); 3802 } 3803 3804 /** 3805 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 3806 * 3807 * Note that this primitive does not necessarily wait for an RCU grace period 3808 * to complete. For example, if there are no RCU callbacks queued anywhere 3809 * in the system, then rcu_barrier() is within its rights to return 3810 * immediately, without waiting for anything, much less an RCU grace period. 3811 * In fact, rcu_barrier() will normally not result in any RCU grace periods 3812 * beyond those that were already destined to be executed. 3813 * 3814 * In kernels built with CONFIG_RCU_LAZY=y, this function also hurries all 3815 * pending lazy RCU callbacks. 3816 */ 3817 void rcu_barrier(void) 3818 { 3819 uintptr_t cpu; 3820 unsigned long flags; 3821 unsigned long gseq; 3822 struct rcu_data *rdp; 3823 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 3824 3825 rcu_barrier_trace(TPS("Begin"), -1, s); 3826 3827 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 3828 mutex_lock(&rcu_state.barrier_mutex); 3829 3830 /* Did someone else do our work for us? */ 3831 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 3832 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence); 3833 smp_mb(); /* caller's subsequent code after above check. */ 3834 mutex_unlock(&rcu_state.barrier_mutex); 3835 return; 3836 } 3837 3838 /* Mark the start of the barrier operation. 
*/ 3839 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 3840 rcu_seq_start(&rcu_state.barrier_sequence); 3841 gseq = rcu_state.barrier_sequence; 3842 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); 3843 3844 /* 3845 * Initialize the count to two rather than to zero in order 3846 * to avoid a too-soon return to zero in case of an immediate 3847 * invocation of the just-enqueued callback (or preemption of 3848 * this task). Exclude CPU-hotplug operations to ensure that no 3849 * offline non-offloaded CPU has callbacks queued. 3850 */ 3851 init_completion(&rcu_state.barrier_completion); 3852 atomic_set(&rcu_state.barrier_cpu_count, 2); 3853 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 3854 3855 /* 3856 * Force each CPU with callbacks to register a new callback. 3857 * When that callback is invoked, we will know that all of the 3858 * corresponding CPU's preceding callbacks have been invoked. 3859 */ 3860 for_each_possible_cpu(cpu) { 3861 rdp = per_cpu_ptr(&rcu_data, cpu); 3862 retry: 3863 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) 3864 continue; 3865 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 3866 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { 3867 WRITE_ONCE(rdp->barrier_seq_snap, gseq); 3868 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 3869 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence); 3870 continue; 3871 } 3872 if (!rcu_rdp_cpu_online(rdp)) { 3873 rcu_barrier_entrain(rdp); 3874 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); 3875 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 3876 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence); 3877 continue; 3878 } 3879 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 3880 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) { 3881 schedule_timeout_uninterruptible(1); 3882 goto retry; 3883 } 3884 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); 3885 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence); 3886 } 3887 3888 /* 3889 * Now that we have an rcu_barrier_callback() callback on each 3890 * CPU, and thus each counted, remove the initial count. 3891 */ 3892 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count)) 3893 complete(&rcu_state.barrier_completion); 3894 3895 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 3896 wait_for_completion(&rcu_state.barrier_completion); 3897 3898 /* Mark the end of the barrier operation. */ 3899 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); 3900 rcu_seq_end(&rcu_state.barrier_sequence); 3901 gseq = rcu_state.barrier_sequence; 3902 for_each_possible_cpu(cpu) { 3903 rdp = per_cpu_ptr(&rcu_data, cpu); 3904 3905 WRITE_ONCE(rdp->barrier_seq_snap, gseq); 3906 } 3907 3908 /* Other rcu_barrier() invocations can now safely proceed. */ 3909 mutex_unlock(&rcu_state.barrier_mutex); 3910 } 3911 EXPORT_SYMBOL_GPL(rcu_barrier); 3912 3913 static unsigned long rcu_barrier_last_throttle; 3914 3915 /** 3916 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second 3917 * 3918 * This can be thought of as guard rails around rcu_barrier() that 3919 * permits unrestricted userspace use, at least assuming the hardware's 3920 * try_cmpxchg() is robust. There will be at most one call per second to 3921 * rcu_barrier() system-wide from use of this function, which means that 3922 * callers might needlessly wait a second or three. 
3923 * 3924 * This is intended for use by test suites to avoid OOM by flushing RCU 3925 * callbacks from the previous test before starting the next. See the 3926 * rcutree.do_rcu_barrier module parameter for more information. 3927 * 3928 * Why not simply make rcu_barrier() more scalable? That might be 3929 * the eventual endpoint, but let's keep it simple for the time being. 3930 * Note that the module parameter infrastructure serializes calls to a 3931 * given .set() function, but should concurrent .set() invocation ever be 3932 * possible, we are ready! 3933 */ 3934 static void rcu_barrier_throttled(void) 3935 { 3936 unsigned long j = jiffies; 3937 unsigned long old = READ_ONCE(rcu_barrier_last_throttle); 3938 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 3939 3940 while (time_in_range(j, old, old + HZ / 16) || 3941 !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) { 3942 schedule_timeout_idle(HZ / 16); 3943 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 3944 smp_mb(); /* caller's subsequent code after above check. */ 3945 return; 3946 } 3947 j = jiffies; 3948 old = READ_ONCE(rcu_barrier_last_throttle); 3949 } 3950 rcu_barrier(); 3951 } 3952 3953 /* 3954 * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier 3955 * request arrives. We insist on a true value to allow for possible 3956 * future expansion. 3957 */ 3958 static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp) 3959 { 3960 bool b; 3961 int ret; 3962 3963 if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) 3964 return -EAGAIN; 3965 ret = kstrtobool(val, &b); 3966 if (!ret && b) { 3967 atomic_inc((atomic_t *)kp->arg); 3968 rcu_barrier_throttled(); 3969 atomic_dec((atomic_t *)kp->arg); 3970 } 3971 return ret; 3972 } 3973 3974 /* 3975 * Output the number of outstanding rcutree.do_rcu_barrier requests. 3976 */ 3977 static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp) 3978 { 3979 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg)); 3980 } 3981 3982 static const struct kernel_param_ops do_rcu_barrier_ops = { 3983 .set = param_set_do_rcu_barrier, 3984 .get = param_get_do_rcu_barrier, 3985 }; 3986 static atomic_t do_rcu_barrier; 3987 module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644); 3988 3989 /* 3990 * Compute the mask of online CPUs for the specified rcu_node structure. 3991 * This will not be stable unless the rcu_node structure's ->lock is 3992 * held, but the bit corresponding to the current CPU will be stable 3993 * in most contexts. 3994 */ 3995 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) 3996 { 3997 return READ_ONCE(rnp->qsmaskinitnext); 3998 } 3999 4000 /* 4001 * Is the CPU corresponding to the specified rcu_data structure online 4002 * from RCU's perspective? This perspective is given by that structure's 4003 * ->qsmaskinitnext field rather than by the global cpu_online_mask. 4004 */ 4005 static bool rcu_rdp_cpu_online(struct rcu_data *rdp) 4006 { 4007 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); 4008 } 4009 4010 bool rcu_cpu_online(int cpu) 4011 { 4012 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4013 4014 return rcu_rdp_cpu_online(rdp); 4015 } 4016 4017 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 4018 4019 /* 4020 * Is the current CPU online as far as RCU is concerned? 
4021 * 4022 * Disable preemption to avoid false positives that could otherwise 4023 * happen due to the current CPU number being sampled, this task being 4024 * preempted, its old CPU being taken offline, resuming on some other CPU, 4025 * then determining that its old CPU is now offline. 4026 * 4027 * Disable checking if in an NMI handler because we cannot safely 4028 * report errors from NMI handlers anyway. In addition, it is OK to use 4029 * RCU on an offline processor during initial boot, hence the check for 4030 * rcu_scheduler_fully_active. 4031 */ 4032 bool notrace rcu_lockdep_current_cpu_online(void) 4033 { 4034 struct rcu_data *rdp; 4035 bool ret = false; 4036 4037 if (in_nmi() || !rcu_scheduler_fully_active) 4038 return true; 4039 preempt_disable_notrace(); 4040 rdp = this_cpu_ptr(&rcu_data); 4041 /* 4042 * Strictly, we care here about the case where the current CPU is 4043 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask 4044 * not being up to date. So arch_spin_is_locked() might have a 4045 * false positive if it's held by some *other* CPU, but that's 4046 * OK because that just means a false *negative* on the warning. 4047 */ 4048 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock)) 4049 ret = true; 4050 preempt_enable_notrace(); 4051 return ret; 4052 } 4053 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); 4054 4055 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ 4056 4057 // Has rcu_init() been invoked? This is used (for example) to determine 4058 // whether spinlocks may be acquired safely. 4059 static bool rcu_init_invoked(void) 4060 { 4061 return !!READ_ONCE(rcu_state.n_online_cpus); 4062 } 4063 4064 /* 4065 * All CPUs for the specified rcu_node structure have gone offline, 4066 * and all tasks that were preempted within an RCU read-side critical 4067 * section while running on one of those CPUs have since exited their RCU 4068 * read-side critical section. Some other CPU is reporting this fact with 4069 * the specified rcu_node structure's ->lock held and interrupts disabled. 4070 * This function therefore goes up the tree of rcu_node structures, 4071 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 4072 * the leaf rcu_node structure's ->qsmaskinit field has already been 4073 * updated. 4074 * 4075 * This function does check that the specified rcu_node structure has 4076 * all CPUs offline and no blocked tasks, so it is OK to invoke it 4077 * prematurely. That said, invoking it after the fact will cost you 4078 * a needless lock acquisition. So once it has done its work, don't 4079 * invoke it again. 4080 */ 4081 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 4082 { 4083 long mask; 4084 struct rcu_node *rnp = rnp_leaf; 4085 4086 raw_lockdep_assert_held_rcu_node(rnp_leaf); 4087 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 4088 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 4089 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 4090 return; 4091 for (;;) { 4092 mask = rnp->grpmask; 4093 rnp = rnp->parent; 4094 if (!rnp) 4095 break; 4096 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 4097 rnp->qsmaskinit &= ~mask; 4098 /* Between grace periods, so better already be zero! */ 4099 WARN_ON_ONCE(rnp->qsmask); 4100 if (rnp->qsmaskinit) { 4101 raw_spin_unlock_rcu_node(rnp); 4102 /* irqs remain disabled. */ 4103 return; 4104 } 4105 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. 
*/ 4106 } 4107 } 4108 4109 /* 4110 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the 4111 * first CPU in a given leaf rcu_node structure coming online. The caller 4112 * must hold the corresponding leaf rcu_node ->lock with interrupts 4113 * disabled. 4114 */ 4115 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) 4116 { 4117 long mask; 4118 long oldmask; 4119 struct rcu_node *rnp = rnp_leaf; 4120 4121 raw_lockdep_assert_held_rcu_node(rnp_leaf); 4122 WARN_ON_ONCE(rnp->wait_blkd_tasks); 4123 for (;;) { 4124 mask = rnp->grpmask; 4125 rnp = rnp->parent; 4126 if (rnp == NULL) 4127 return; 4128 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ 4129 oldmask = rnp->qsmaskinit; 4130 rnp->qsmaskinit |= mask; 4131 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ 4132 if (oldmask) 4133 return; 4134 } 4135 } 4136 4137 /* 4138 * Do boot-time initialization of a CPU's per-CPU RCU data. 4139 */ 4140 static void __init 4141 rcu_boot_init_percpu_data(int cpu) 4142 { 4143 struct context_tracking *ct = this_cpu_ptr(&context_tracking); 4144 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4145 4146 /* Set up local state, ensuring consistent view of global state. */ 4147 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); 4148 INIT_WORK(&rdp->strict_work, strict_work_handler); 4149 WARN_ON_ONCE(ct->nesting != 1); 4150 WARN_ON_ONCE(rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu))); 4151 rdp->barrier_seq_snap = rcu_state.barrier_sequence; 4152 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; 4153 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED; 4154 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; 4155 rdp->rcu_onl_gp_state = RCU_GP_CLEANED; 4156 rdp->last_sched_clock = jiffies; 4157 rdp->cpu = cpu; 4158 rcu_boot_init_nocb_percpu_data(rdp); 4159 } 4160 4161 static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp) 4162 { 4163 cpumask_var_t affinity; 4164 int cpu; 4165 4166 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) 4167 return; 4168 4169 for_each_leaf_node_possible_cpu(rnp, cpu) 4170 cpumask_set_cpu(cpu, affinity); 4171 4172 kthread_affine_preferred(t, affinity); 4173 4174 free_cpumask_var(affinity); 4175 } 4176 4177 struct kthread_worker *rcu_exp_gp_kworker; 4178 4179 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp) 4180 { 4181 struct kthread_worker *kworker; 4182 const char *name = "rcu_exp_par_gp_kthread_worker/%d"; 4183 struct sched_param param = { .sched_priority = kthread_prio }; 4184 int rnp_index = rnp - rcu_get_root(); 4185 4186 if (rnp->exp_kworker) 4187 return; 4188 4189 kworker = kthread_create_worker(0, name, rnp_index); 4190 if (IS_ERR_OR_NULL(kworker)) { 4191 pr_err("Failed to create par gp kworker on %d/%d\n", 4192 rnp->grplo, rnp->grphi); 4193 return; 4194 } 4195 WRITE_ONCE(rnp->exp_kworker, kworker); 4196 4197 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD)) 4198 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param); 4199 4200 rcu_thread_affine_rnp(kworker->task, rnp); 4201 wake_up_process(kworker->task); 4202 } 4203 4204 static void __init rcu_start_exp_gp_kworker(void) 4205 { 4206 const char *name = "rcu_exp_gp_kthread_worker"; 4207 struct sched_param param = { .sched_priority = kthread_prio }; 4208 4209 rcu_exp_gp_kworker = kthread_run_worker(0, name); 4210 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) { 4211 pr_err("Failed to create %s!\n", name); 4212 rcu_exp_gp_kworker = NULL; 4213 return; 4214 } 4215 4216 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD)) 4217 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); 4218 }
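/*
 * A minimal, self-contained sketch of the kthread_worker bring-up pattern
 * used just above by rcu_spawn_exp_par_gp_kworker() and
 * rcu_start_exp_gp_kworker(): create a worker, optionally promote it to
 * SCHED_FIFO, give it a preferred CPU affinity, and wake it.  The function
 * name example_spawn_pinned_worker() and the "example_worker" thread name
 * are hypothetical and illustrative only; nothing in RCU invokes this.
 */
static struct kthread_worker * __maybe_unused
example_spawn_pinned_worker(int prio, int cpu)
{
	struct kthread_worker *kworker;
	struct sched_param param = { .sched_priority = prio };
	cpumask_var_t affinity;

	/* Create the worker thread (flags == 0, as in the RCU code above). */
	kworker = kthread_create_worker(0, "example_worker/%d", cpu);
	if (IS_ERR_OR_NULL(kworker))
		return NULL;

	/* Optionally raise it to real-time priority. */
	if (prio > 0)
		sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);

	/* Prefer (but do not force) running on the requested CPU. */
	if (zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
		cpumask_set_cpu(cpu, affinity);
		kthread_affine_preferred(kworker->task, affinity);
		free_cpumask_var(affinity);
	}

	wake_up_process(kworker->task);
	return kworker;
}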
4219 4220 static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp) 4221 { 4222 if (rcu_scheduler_fully_active) { 4223 mutex_lock(&rnp->kthread_mutex); 4224 rcu_spawn_one_boost_kthread(rnp); 4225 rcu_spawn_exp_par_gp_kworker(rnp); 4226 mutex_unlock(&rnp->kthread_mutex); 4227 } 4228 } 4229 4230 /* 4231 * Invoked early in the CPU-online process, when pretty much all services 4232 * are available. The incoming CPU is not present. 4233 * 4234 * Initializes a CPU's per-CPU RCU data. Note that only one online or 4235 * offline event can be happening at a given time. Note also that we can 4236 * accept some slop in the rsp->gp_seq access due to the fact that this 4237 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet. 4238 * And any offloaded callbacks are being numbered elsewhere. 4239 */ 4240 int rcutree_prepare_cpu(unsigned int cpu) 4241 { 4242 unsigned long flags; 4243 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); 4244 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4245 struct rcu_node *rnp = rcu_get_root(); 4246 4247 /* Set up local state, ensuring consistent view of global state. */ 4248 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4249 rdp->qlen_last_fqs_check = 0; 4250 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 4251 rdp->blimit = blimit; 4252 ct->nesting = 1; /* CPU not up, no tearing. */ 4253 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 4254 4255 /* 4256 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be 4257 * (re-)initialized. 4258 */ 4259 if (!rcu_segcblist_is_enabled(&rdp->cblist)) 4260 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ 4261 4262 /* 4263 * Add CPU to leaf rcu_node pending-online bitmask. Any needed 4264 * propagation up the rcu_node tree will happen at the beginning 4265 * of the next grace period. 4266 */ 4267 rnp = rdp->mynode; 4268 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 4269 rdp->gp_seq = READ_ONCE(rnp->gp_seq); 4270 rdp->gp_seq_needed = rdp->gp_seq; 4271 rdp->cpu_no_qs.b.norm = true; 4272 rdp->core_needs_qs = false; 4273 rdp->rcu_iw_pending = false; 4274 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); 4275 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; 4276 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 4277 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4278 4279 rcu_preempt_deferred_qs_init(rdp); 4280 rcu_spawn_rnp_kthreads(rnp); 4281 rcu_spawn_cpu_nocb_kthread(cpu); 4282 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus); 4283 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1); 4284 4285 return 0; 4286 } 4287 4288 /* 4289 * Has the specified (known valid) CPU ever been fully online? 4290 */ 4291 bool rcu_cpu_beenfullyonline(int cpu) 4292 { 4293 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4294 4295 return smp_load_acquire(&rdp->beenonline); 4296 } 4297 4298 /* 4299 * Near the end of the CPU-online process. Pretty much all services 4300 * enabled, and the CPU is now very much alive. 4301 */ 4302 int rcutree_online_cpu(unsigned int cpu) 4303 { 4304 unsigned long flags; 4305 struct rcu_data *rdp; 4306 struct rcu_node *rnp; 4307 4308 rdp = per_cpu_ptr(&rcu_data, cpu); 4309 rnp = rdp->mynode; 4310 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4311 rnp->ffmask |= rdp->grpmask; 4312 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4313 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 4314 return 0; /* Too early in boot for scheduler work. */ 4315 4316 // Stop-machine done, so allow nohz_full to disable tick. 
4317 tick_dep_clear(TICK_DEP_BIT_RCU); 4318 return 0; 4319 } 4320 4321 /* 4322 * Mark the specified CPU as being online so that subsequent grace periods 4323 * (both expedited and normal) will wait on it. Note that this means that 4324 * incoming CPUs are not allowed to use RCU read-side critical sections 4325 * until this function is called. Failing to observe this restriction 4326 * will result in lockdep splats. 4327 * 4328 * Note that this function is special in that it is invoked directly 4329 * from the incoming CPU rather than from the cpuhp_step mechanism. 4330 * This is because this function must be invoked at a precise location. 4331 * This incoming CPU must not have enabled interrupts yet. 4332 * 4333 * This mirrors the effects of rcutree_report_cpu_dead(). 4334 */ 4335 void rcutree_report_cpu_starting(unsigned int cpu) 4336 { 4337 unsigned long mask; 4338 struct rcu_data *rdp; 4339 struct rcu_node *rnp; 4340 bool newcpu; 4341 4342 lockdep_assert_irqs_disabled(); 4343 rdp = per_cpu_ptr(&rcu_data, cpu); 4344 if (rdp->cpu_started) 4345 return; 4346 rdp->cpu_started = true; 4347 4348 rnp = rdp->mynode; 4349 mask = rdp->grpmask; 4350 arch_spin_lock(&rcu_state.ofl_lock); 4351 rcu_watching_online(); 4352 raw_spin_lock(&rcu_state.barrier_lock); 4353 raw_spin_lock_rcu_node(rnp); 4354 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); 4355 raw_spin_unlock(&rcu_state.barrier_lock); 4356 newcpu = !(rnp->expmaskinitnext & mask); 4357 rnp->expmaskinitnext |= mask; 4358 /* Allow lockless access for expedited grace periods. */ 4359 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */ 4360 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus); 4361 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ 4362 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); 4363 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state); 4364 4365 /* An incoming CPU should never be blocking a grace period. */ 4366 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */ 4367 /* rcu_report_qs_rnp() *really* wants some flags to restore */ 4368 unsigned long flags; 4369 4370 local_irq_save(flags); 4371 rcu_disable_urgency_upon_qs(rdp); 4372 /* Report QS -after- changing ->qsmaskinitnext! */ 4373 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 4374 } else { 4375 raw_spin_unlock_rcu_node(rnp); 4376 } 4377 arch_spin_unlock(&rcu_state.ofl_lock); 4378 smp_store_release(&rdp->beenonline, true); 4379 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ 4380 } 4381 4382 /* 4383 * The outgoing function has no further need of RCU, so remove it from 4384 * the rcu_node tree's ->qsmaskinitnext bit masks. 4385 * 4386 * Note that this function is special in that it is invoked directly 4387 * from the outgoing CPU rather than from the cpuhp_step mechanism. 4388 * This is because this function must be invoked at a precise location. 4389 * 4390 * This mirrors the effect of rcutree_report_cpu_starting(). 4391 */ 4392 void rcutree_report_cpu_dead(void) 4393 { 4394 unsigned long flags; 4395 unsigned long mask; 4396 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 4397 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 4398 4399 /* 4400 * IRQS must be disabled from now on and until the CPU dies, or an interrupt 4401 * may introduce a new READ-side while it is actually off the QS masks. 4402 */ 4403 lockdep_assert_irqs_disabled(); 4404 /* 4405 * CPUHP_AP_SMPCFD_DYING was the last call for rcu_exp_handler() execution. 
4406 * The requested QS must have been reported on the last context switch 4407 * from stop machine to idle. 4408 */ 4409 WARN_ON_ONCE(rdp->cpu_no_qs.b.exp); 4410 // Do any dangling deferred wakeups. 4411 do_nocb_deferred_wakeup(rdp); 4412 4413 rcu_preempt_deferred_qs(current); 4414 4415 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 4416 mask = rdp->grpmask; 4417 4418 /* 4419 * Hold the ofl_lock and rnp lock to avoid races between CPU going 4420 * offline and doing a QS report (as below), versus rcu_gp_init(). 4421 * See Requirements.rst > Hotplug CPU > Concurrent QS Reporting section 4422 * for more details. 4423 */ 4424 arch_spin_lock(&rcu_state.ofl_lock); 4425 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 4426 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); 4427 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state); 4428 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ 4429 /* Report quiescent state -before- changing ->qsmaskinitnext! */ 4430 rcu_disable_urgency_upon_qs(rdp); 4431 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 4432 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4433 } 4434 /* Clear from ->qsmaskinitnext to mark offline. */ 4435 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); 4436 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4437 arch_spin_unlock(&rcu_state.ofl_lock); 4438 rdp->cpu_started = false; 4439 } 4440 4441 #ifdef CONFIG_HOTPLUG_CPU 4442 /* 4443 * The outgoing CPU has just passed through the dying-idle state, and we 4444 * are being invoked from the CPU that was IPIed to continue the offline 4445 * operation. Migrate the outgoing CPU's callbacks to the current CPU. 4446 */ 4447 void rcutree_migrate_callbacks(int cpu) 4448 { 4449 unsigned long flags; 4450 struct rcu_data *my_rdp; 4451 struct rcu_node *my_rnp; 4452 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4453 bool needwake; 4454 4455 if (rcu_rdp_is_offloaded(rdp)) 4456 return; 4457 4458 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 4459 if (rcu_segcblist_empty(&rdp->cblist)) { 4460 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4461 return; /* No callbacks to migrate. */ 4462 } 4463 4464 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp)); 4465 rcu_barrier_entrain(rdp); 4466 my_rdp = this_cpu_ptr(&rcu_data); 4467 my_rnp = my_rdp->mynode; 4468 rcu_nocb_lock(my_rdp); /* irqs already disabled. */ 4469 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false)); 4470 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */ 4471 /* Leverage recent GPs and set GP for new callbacks. */ 4472 needwake = rcu_advance_cbs(my_rnp, rdp) || 4473 rcu_advance_cbs(my_rnp, my_rdp); 4474 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 4475 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */ 4476 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp); 4477 rcu_segcblist_disable(&rdp->cblist); 4478 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist)); 4479 check_cb_ovld_locked(my_rdp, my_rnp); 4480 if (rcu_rdp_is_offloaded(my_rdp)) { 4481 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 4482 __call_rcu_nocb_wake(my_rdp, true, flags); 4483 } else { 4484 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */ 4485 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. 
*/ 4486 } 4487 local_irq_restore(flags); 4488 if (needwake) 4489 rcu_gp_kthread_wake(); 4490 lockdep_assert_irqs_enabled(); 4491 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 4492 !rcu_segcblist_empty(&rdp->cblist), 4493 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 4494 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 4495 rcu_segcblist_first_cb(&rdp->cblist)); 4496 } 4497 4498 /* 4499 * The CPU has been completely removed, and some other CPU is reporting 4500 * this fact from process context. Do the remainder of the cleanup. 4501 * There can only be one CPU hotplug operation at a time, so no need for 4502 * explicit locking. 4503 */ 4504 int rcutree_dead_cpu(unsigned int cpu) 4505 { 4506 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus); 4507 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1); 4508 // Stop-machine done, so allow nohz_full to disable tick. 4509 tick_dep_clear(TICK_DEP_BIT_RCU); 4510 return 0; 4511 } 4512 4513 /* 4514 * Near the end of the offline process. Trace the fact that this CPU 4515 * is going offline. 4516 */ 4517 int rcutree_dying_cpu(unsigned int cpu) 4518 { 4519 bool blkd; 4520 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4521 struct rcu_node *rnp = rdp->mynode; 4522 4523 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); 4524 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 4525 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl")); 4526 return 0; 4527 } 4528 4529 /* 4530 * Near the beginning of the process. The CPU is still very much alive 4531 * with pretty much all services enabled. 4532 */ 4533 int rcutree_offline_cpu(unsigned int cpu) 4534 { 4535 unsigned long flags; 4536 struct rcu_data *rdp; 4537 struct rcu_node *rnp; 4538 4539 rdp = per_cpu_ptr(&rcu_data, cpu); 4540 rnp = rdp->mynode; 4541 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4542 rnp->ffmask &= ~rdp->grpmask; 4543 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4544 4545 // nohz_full CPUs need the tick for stop-machine to work quickly 4546 tick_dep_set(TICK_DEP_BIT_RCU); 4547 return 0; 4548 } 4549 #endif /* #ifdef CONFIG_HOTPLUG_CPU */ 4550 4551 /* 4552 * On non-huge systems, use expedited RCU grace periods to make suspend 4553 * and hibernation run faster. 4554 */ 4555 static int rcu_pm_notify(struct notifier_block *self, 4556 unsigned long action, void *hcpu) 4557 { 4558 switch (action) { 4559 case PM_HIBERNATION_PREPARE: 4560 case PM_SUSPEND_PREPARE: 4561 rcu_async_hurry(); 4562 rcu_expedite_gp(); 4563 break; 4564 case PM_POST_HIBERNATION: 4565 case PM_POST_SUSPEND: 4566 rcu_unexpedite_gp(); 4567 rcu_async_relax(); 4568 break; 4569 default: 4570 break; 4571 } 4572 return NOTIFY_OK; 4573 } 4574 4575 /* 4576 * Spawn the kthreads that handle RCU's grace periods. 
4577 */ 4578 static int __init rcu_spawn_gp_kthread(void) 4579 { 4580 unsigned long flags; 4581 struct rcu_node *rnp; 4582 struct sched_param sp; 4583 struct task_struct *t; 4584 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 4585 4586 rcu_scheduler_fully_active = 1; 4587 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); 4588 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) 4589 return 0; 4590 if (kthread_prio) { 4591 sp.sched_priority = kthread_prio; 4592 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 4593 } 4594 rnp = rcu_get_root(); 4595 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4596 WRITE_ONCE(rcu_state.gp_activity, jiffies); 4597 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 4598 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread. 4599 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */ 4600 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4601 wake_up_process(t); 4602 /* This is a pre-SMP initcall, we expect a single CPU */ 4603 WARN_ON(num_online_cpus() > 1); 4604 /* 4605 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu() 4606 * due to rcu_scheduler_fully_active. 4607 */ 4608 rcu_spawn_cpu_nocb_kthread(smp_processor_id()); 4609 rcu_spawn_rnp_kthreads(rdp->mynode); 4610 rcu_spawn_core_kthreads(); 4611 /* Create kthread worker for expedited GPs */ 4612 rcu_start_exp_gp_kworker(); 4613 return 0; 4614 } 4615 early_initcall(rcu_spawn_gp_kthread); 4616 4617 /* 4618 * This function is invoked towards the end of the scheduler's 4619 * initialization process. Before this is called, the idle task might 4620 * contain synchronous grace-period primitives (during which time, this idle 4621 * task is booting the system, and such primitives are no-ops). After this 4622 * function is called, any synchronous grace-period primitives are run as 4623 * expedited, with the requesting task driving the grace period forward. 4624 * A later core_initcall() rcu_set_runtime_mode() will switch to full 4625 * runtime RCU functionality. 4626 */ 4627 void rcu_scheduler_starting(void) 4628 { 4629 unsigned long flags; 4630 struct rcu_node *rnp; 4631 4632 WARN_ON(num_online_cpus() != 1); 4633 WARN_ON(nr_context_switches() > 0); 4634 rcu_test_sync_prims(); 4635 4636 // Fix up the ->gp_seq counters. 4637 local_irq_save(flags); 4638 rcu_for_each_node_breadth_first(rnp) 4639 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; 4640 local_irq_restore(flags); 4641 4642 // Switch out of early boot mode. 4643 rcu_scheduler_active = RCU_SCHEDULER_INIT; 4644 rcu_test_sync_prims(); 4645 } 4646 4647 /* 4648 * Helper function for rcu_init() that initializes the rcu_state structure. 4649 */ 4650 static void __init rcu_init_one(void) 4651 { 4652 static const char * const buf[] = RCU_NODE_NAME_INIT; 4653 static const char * const fqs[] = RCU_FQS_NAME_INIT; 4654 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 4655 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 4656 4657 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 4658 int cpustride = 1; 4659 int i; 4660 int j; 4661 struct rcu_node *rnp; 4662 4663 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 4664 4665 /* Silence gcc 4.8 false positive about array index out of range. */ 4666 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) 4667 panic("rcu_init_one: rcu_num_lvls out of range"); 4668 4669 /* Initialize the level-tracking arrays. 

/*
 * Helper function for rcu_init() that initializes the rcu_state structure.
 */
static void __init rcu_init_one(void)
{
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

	int levelspread[RCU_NUM_LVLS];	/* kids/node in each level. */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < rcu_num_lvls; i++)
		rcu_state.level[i] =
			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		rnp = rcu_state.level[i];
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gp_seq = rcu_state.gp_seq;
			rnp->gp_seq_needed = rcu_state.gp_seq;
			rnp->completedqs = rcu_state.gp_seq;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % levelspread[i - 1];
				rnp->grpmask = BIT(rnp->grpnum);
				rnp->parent = rcu_state.level[i - 1] +
					      j / levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
			spin_lock_init(&rnp->exp_lock);
			mutex_init(&rnp->kthread_mutex);
			raw_spin_lock_init(&rnp->exp_poll_lock);
			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
		}
	}

	init_swait_queue_head(&rcu_state.gp_wq);
	init_swait_queue_head(&rcu_state.expedited_wq);
	rnp = rcu_first_leaf_node();
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
		per_cpu_ptr(&rcu_data, i)->barrier_head.next =
			&per_cpu_ptr(&rcu_data, i)->barrier_head;
		rcu_boot_init_percpu_data(i);
	}
}

/*
 * Force priority from the kernel command-line into range.
 */
static void __init sanitize_kthread_prio(void)
{
	int kthread_prio_in = kthread_prio;

	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
		kthread_prio = 2;
	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;

	if (kthread_prio != kthread_prio_in)
		pr_alert("%s: Limited prio to %d from %d\n",
			 __func__, kthread_prio, kthread_prio_in);
}
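
/*
 * For example (illustrative values, not defaults): booting an RCU_BOOST
 * kernel with rcutree.kthread_prio=150 clamps kthread_prio to 99, and
 * booting a kernel without RCU_BOOST with rcutree.kthread_prio=-1 clamps
 * it to 0; both cases emit the "Limited prio" message above.
 */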
4774 */ 4775 WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids); 4776 return; 4777 } 4778 4779 old_nr_cpu_ids = nr_cpu_ids; 4780 initialized = true; 4781 4782 /* 4783 * Initialize any unspecified boot parameters. 4784 * The default values of jiffies_till_first_fqs and 4785 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS 4786 * value, which is a function of HZ, then adding one for each 4787 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system. 4788 */ 4789 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 4790 if (jiffies_till_first_fqs == ULONG_MAX) 4791 jiffies_till_first_fqs = d; 4792 if (jiffies_till_next_fqs == ULONG_MAX) 4793 jiffies_till_next_fqs = d; 4794 adjust_jiffies_till_sched_qs(); 4795 4796 /* If the compile-time values are accurate, just leave. */ 4797 if (rcu_fanout_leaf == RCU_FANOUT_LEAF && 4798 nr_cpu_ids == NR_CPUS) 4799 return; 4800 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n", 4801 rcu_fanout_leaf, nr_cpu_ids); 4802 4803 /* 4804 * The boot-time rcu_fanout_leaf parameter must be at least two 4805 * and cannot exceed the number of bits in the rcu_node masks. 4806 * Complain and fall back to the compile-time values if this 4807 * limit is exceeded. 4808 */ 4809 if (rcu_fanout_leaf < 2 || rcu_fanout_leaf > BITS_PER_LONG) { 4810 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4811 WARN_ON(1); 4812 return; 4813 } 4814 4815 /* 4816 * Compute number of nodes that can be handled an rcu_node tree 4817 * with the given number of levels. 4818 */ 4819 rcu_capacity[0] = rcu_fanout_leaf; 4820 for (i = 1; i < RCU_NUM_LVLS; i++) 4821 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; 4822 4823 /* 4824 * The tree must be able to accommodate the configured number of CPUs. 4825 * If this limit is exceeded, fall back to the compile-time values. 4826 */ 4827 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { 4828 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4829 WARN_ON(1); 4830 return; 4831 } 4832 4833 /* Calculate the number of levels in the tree. */ 4834 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) { 4835 } 4836 rcu_num_lvls = i + 1; 4837 4838 /* Calculate the number of rcu_nodes at each level of the tree. */ 4839 for (i = 0; i < rcu_num_lvls; i++) { 4840 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; 4841 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap); 4842 } 4843 4844 /* Calculate the total number of rcu_node structures. */ 4845 rcu_num_nodes = 0; 4846 for (i = 0; i < rcu_num_lvls; i++) 4847 rcu_num_nodes += num_rcu_lvl[i]; 4848 } 4849 4850 /* 4851 * Dump out the structure of the rcu_node combining tree associated 4852 * with the rcu_state structure. 
4853 */ 4854 static void __init rcu_dump_rcu_node_tree(void) 4855 { 4856 int level = 0; 4857 struct rcu_node *rnp; 4858 4859 pr_info("rcu_node tree layout dump\n"); 4860 pr_info(" "); 4861 rcu_for_each_node_breadth_first(rnp) { 4862 if (rnp->level != level) { 4863 pr_cont("\n"); 4864 pr_info(" "); 4865 level = rnp->level; 4866 } 4867 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); 4868 } 4869 pr_cont("\n"); 4870 } 4871 4872 struct workqueue_struct *rcu_gp_wq; 4873 4874 void __init rcu_init(void) 4875 { 4876 int cpu = smp_processor_id(); 4877 4878 rcu_early_boot_tests(); 4879 4880 rcu_bootup_announce(); 4881 sanitize_kthread_prio(); 4882 rcu_init_geometry(); 4883 rcu_init_one(); 4884 if (dump_tree) 4885 rcu_dump_rcu_node_tree(); 4886 if (use_softirq) 4887 open_softirq(RCU_SOFTIRQ, rcu_core_si); 4888 4889 /* 4890 * We don't need protection against CPU-hotplug here because 4891 * this is called early in boot, before either interrupts 4892 * or the scheduler are operational. 4893 */ 4894 pm_notifier(rcu_pm_notify, 0); 4895 WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot. 4896 rcutree_prepare_cpu(cpu); 4897 rcutree_report_cpu_starting(cpu); 4898 rcutree_online_cpu(cpu); 4899 4900 /* Create workqueue for Tree SRCU and for expedited GPs. */ 4901 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM | WQ_PERCPU, 0); 4902 WARN_ON(!rcu_gp_wq); 4903 4904 sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); 4905 WARN_ON(!sync_wq); 4906 4907 /* Respect if explicitly disabled via a boot parameter. */ 4908 if (rcu_normal_wake_from_gp < 0) { 4909 if (num_possible_cpus() <= WAKE_FROM_GP_CPU_THRESHOLD) 4910 rcu_normal_wake_from_gp = 1; 4911 } 4912 4913 /* Fill in default value for rcutree.qovld boot parameter. */ 4914 /* -After- the rcu_node ->lock fields are initialized! */ 4915 if (qovld < 0) 4916 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark; 4917 else 4918 qovld_calc = qovld; 4919 4920 // Kick-start in case any polled grace periods started early. 4921 (void)start_poll_synchronize_rcu_expedited(); 4922 4923 rcu_test_sync_prims(); 4924 4925 tasks_cblist_init_generic(); 4926 } 4927 4928 #include "tree_stall.h" 4929 #include "tree_exp.h" 4930 #include "tree_nocb.h" 4931 #include "tree_plugin.h" 4932