// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/kmemleak.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/panic_notifier.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <linux/context_tracking.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */
static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.gpwrap = true,
};

int rcu_get_gpwrap_count(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	return READ_ONCE(rdp->gpwrap_count);
}
EXPORT_SYMBOL_GPL(rcu_get_gpwrap_count);

static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
	.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
		rcu_sr_normal_gp_cleanup_work),
	.srs_cleanups_pending = ATOMIC_INIT(0),
#ifdef CONFIG_RCU_NOCB_CPU
	.nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex),
#endif
};
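
/*
 * Illustration (not part of the kernel source): the odd-looking ->gp_seq
 * initializer above starts the grace-period sequence number a few hundred
 * grace periods short of unsigned-long overflow, so that counter-wrap
 * handling is exercised early and often during testing. Assuming the
 * usual RCU_SEQ_CTR_SHIFT of 2 on a 64-bit system:
 *
 *	gp_seq = (0UL - 300UL) << 2 == 0xfffffffffffffb50
 *
 * which wraps back through zero after 300 grace periods' worth of
 * increments of 1UL << RCU_SEQ_CTR_SHIFT each.
 */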

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier(). When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods. This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking. Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks. So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one. We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
static bool rcu_init_invoked(void);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);

/*
 * rcuc/rcub/rcuop kthread realtime priority. Enabling/disabling of the
 * "rcuop" real-time priority is controlled by the extra
 * CONFIG_RCU_NOCB_CPU_CB_BOOST configuration.
 */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);
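
/*
 * Usage sketch (not part of the kernel source): because of the
 * MODULE_PARAM_PREFIX defined above, all of these module parameters
 * appear under the "rcutree." prefix on the kernel command line, for
 * example:
 *
 *	rcutree.dump_tree=1 rcutree.rcu_fanout_leaf=8 rcutree.kthread_prio=2
 *
 * The 0444 permissions make them read-only at runtime via
 * /sys/module/rcutree/parameters/.
 */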

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
static int nohz_full_patience_delay;
module_param(nohz_full_patience_delay, int, 0444);
static int nohz_full_patience_delay_jiffies;

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay. The longer the delay, the more the grace periods between
 * each delay. The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay. This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */

/*
 * Return true if an RCU grace period is in progress. The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}
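
/*
 * Illustration (not part of the kernel source): ->gp_seq packs both a
 * grace-period counter and a progress state into one unsigned long,
 * using the helpers from kernel/rcu/rcu.h, where RCU_SEQ_CTR_SHIFT is 2:
 *
 *	unsigned long seq = (5UL << RCU_SEQ_CTR_SHIFT) | 1;
 *
 *	rcu_seq_ctr(seq);	// == 5: five grace periods have started.
 *	rcu_seq_state(seq);	// == 1: a grace period is in progress, so
 *				// rcu_gp_in_progress() above returns true.
 */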

/**
 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
 *
 * Mark a quiescent state for RCU, Tasks RCU, and Tasks Trace RCU.
 * This is a special-purpose function to be used in the softirq
 * infrastructure and perhaps the occasional long-running softirq
 * handler.
 *
 * Note that from RCU's viewpoint, a call to rcu_softirq_qs() is
 * equivalent to momentarily completely enabling preemption. For
 * example, given this code::
 *
 *	local_bh_disable();
 *	do_something();
 *	rcu_softirq_qs();  // A
 *	do_something_else();
 *	local_bh_enable();  // B
 *
 * A call to synchronize_rcu() that began concurrently with the
 * call to do_something() would be guaranteed to wait only until
 * execution reached statement A. Without that rcu_softirq_qs(),
 * that same synchronize_rcu() would instead be guaranteed to wait
 * until execution reached statement B.
 */
void rcu_softirq_qs(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal rcu_softirq_qs() in RCU read-side critical section");
	rcu_qs();
	rcu_preempt_deferred_qs(current);
	rcu_tasks_qs(current, false);
}

/*
 * Reset the current CPU's RCU_WATCHING counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->state variable are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_watching_online(void)
{
	if (ct_rcu_watching() & CT_RCU_WATCHING)
		return;
	ct_state_inc(CT_RCU_WATCHING);
}

/*
 * Return true if the snapshot returned from ct_rcu_watching()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_watching_snap_in_eqs(int snap)
{
	return !(snap & CT_RCU_WATCHING);
}

/**
 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
 * since the specified @snap?
 *
 * @rdp: The rcu_data corresponding to the CPU for which to check EQS.
 * @snap: rcu_watching snapshot taken when the CPU wasn't in an EQS.
 *
 * Returns true if the CPU corresponding to @rdp has spent some time in an
 * extended quiescent state since @snap. Note that this doesn't check if it
 * /still/ is in an EQS, just that it went through one since @snap.
 *
 * This is meant to be used in a loop waiting for a CPU to go through an EQS.
 */
static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap)
{
	/*
	 * The first failing snapshot is already ordered against the accesses
	 * performed by the remote CPU after it exits idle.
	 *
	 * The second snapshot therefore only needs to order against accesses
	 * performed by the remote CPU prior to entering idle and therefore can
	 * rely solely on acquire semantics.
	 */
	if (WARN_ON_ONCE(rcu_watching_snap_in_eqs(snap)))
		return true;

	return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
}

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_watching_zero_in_eqs(int cpu, int *vp)
{
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
	smp_rmb(); // Order CT state and *vp reads.
	if (READ_ONCE(*vp))
		return false; // Non-zero, so report failure;
	smp_rmb(); // Order *vp read and CT state re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == ct_rcu_watching_cpu(cpu);
}
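
/*
 * Illustration (not part of the kernel source, simplified): the
 * context-tracking counter sampled above advances on every transition
 * into or out of an extended quiescent state (EQS), with CT_RCU_WATCHING
 * clear while in an EQS. A remote CPU therefore never needs to catch an
 * EQS "in the act"; comparing two non-EQS snapshots suffices:
 *
 *	int snap = ct_rcu_watching_cpu_acquire(cpu);	// CT_RCU_WATCHING set
 *
 *	// ...time passes...
 *
 *	if (snap != ct_rcu_watching_cpu_acquire(cpu))
 *		;	// Counter moved, so the CPU passed through an EQS,
 *			// which is exactly the test performed by
 *			// rcu_watching_snap_stopped_since() above.
 */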

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state. This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_eqs(void)
{
	int seq;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	seq = ct_state_inc(2 * CT_RCU_WATCHING);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_eqs);

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly, from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nmi_nesting = ct_nmi_nesting();

	/*
	 * Usually called from the tick; but also used from smp_function_call()
	 * for expedited grace periods. This latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(ct_nesting() < 0,
			 "RCU nesting counter underflow!");

	/* Non-idle interrupt or nested idle interrupt */
	if (nmi_nesting > 1)
		return false;

	/*
	 * Non nested idle interrupt (interrupting section where RCU
	 * wasn't watching).
	 */
	if (nmi_nesting == 1)
		return true;

	/* Not in an interrupt */
	if (!nmi_nesting) {
		RCU_LOCKDEP_WARN(!in_task() || !is_idle_task(current),
				 "RCU nmi_nesting counter not in idle task!");
		return !rcu_is_watching_curr_cpu();
	}

	RCU_LOCKDEP_WARN(1, "RCU nmi_nesting counter underflow/zero!");

	return false;
}

#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
				// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000	// ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000	// If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100		// Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD;	// If this many pending, hammer QS.
static long qovld_calc = -1;		// No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
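
/*
 * Worked example (not part of the kernel source): with the default
 * !CONFIG_RCU_STRICT_GRACE_PERIOD values above, callback invocation on
 * a given CPU proceeds roughly as follows:
 *
 *	pending <  10000 (qhimark):  invoke at most blimit == 10 callbacks
 *	                 per rcu_do_batch() pass.
 *	pending >= 10000 (qhimark):  declare a flood and ignore blimit,
 *	                 invoking up to 10000 (DEFAULT_MAX_RCU_BLIMIT).
 *	pending <= 100   (qlowmark): flood over, drop back to blimit.
 *	pending >= 20000 (qovld == 2 * qhimark): in addition, start
 *	                 hammering the CPU for a quiescent state.
 */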

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats. Odd numbers mean that a batch is in progress, even
 * numbers mean idle. The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = READ_ONCE(rcu_state.gp_flags);
	*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/* Gather grace-period sequence numbers for rcutorture diagnostics. */
unsigned long long rcutorture_gather_gp_seqs(void)
{
	return ((READ_ONCE(rcu_state.gp_seq) & 0xffffULL) << 40) |
	       ((READ_ONCE(rcu_state.expedited_sequence) & 0xffffffULL) << 16) |
	       (READ_ONCE(rcu_state.gp_seq_polled) & 0xffffULL);
}
EXPORT_SYMBOL_GPL(rcutorture_gather_gp_seqs);

/* Format grace-period sequence numbers for rcutorture diagnostics. */
void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len)
{
	unsigned int egp = (seqs >> 16) & 0xffffffULL;
	unsigned int ggp = (seqs >> 40) & 0xffffULL;
	unsigned int pgp = seqs & 0xffffULL;

	snprintf(cp, len, "g%04x:e%06x:p%04x", ggp, egp, pgp);
}
EXPORT_SYMBOL_GPL(rcutorture_format_gp_seqs);
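
/*
 * Worked example (not part of the kernel source): the two functions
 * above pack three sequence counters into one 64-bit value and format
 * it back out. For instance, with gp_seq == 0x1234, an expedited
 * sequence of 0x56, and gp_seq_polled == 0x789a:
 *
 *	seqs = (0x1234ULL << 40) | (0x000056ULL << 16) | 0x789aULL;
 *
 * and rcutorture_format_gp_seqs() renders this as "g1234:e000056:p789a".
 * Truncation to 16/24/16 bits is acceptable here because rcutorture
 * needs these values only for diagnostic output.
 */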

#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK))
/*
 * An empty function that will trigger a reschedule on
 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
 */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

/*
 * If either:
 *
 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
 *
 * In these cases the late RCU wake ups aren't supported in the resched loops and our
 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 * get re-enabled again.
 */
noinstr void rcu_irq_work_resched(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
		return;

	if (IS_ENABLED(CONFIG_VIRT_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
		return;

	instrumentation_begin();
	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
	}
	instrumentation_end();
}
#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK)) */

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(ct_nesting() <= 0,
			 "RCU nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(ct_nmi_nesting() != CT_NESTING_IRQ_NONIDLE,
			 "Bad RCU nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_NO_HZ_FULL
/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution. After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short. Except of course when it isn't. And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception. In that case, the RCU grace-period kthread
 * will eventually cause one to happen. However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs). Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock. So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state. Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
#endif /* CONFIG_NO_HZ_FULL */

/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so. This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API. This is used by
 * the idle-entry code to figure out whether it is safe to disable the
 * scheduler-clock interrupt.
 *
 * Just check whether or not this CPU has non-offloaded RCU callbacks
 * queued.
 */
int rcu_needs_cpu(void)
{
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}
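
/*
 * Usage sketch (not part of the kernel source, simplified): the
 * idle-entry path consults rcu_needs_cpu() roughly like this when
 * deciding whether the scheduler-clock tick may be stopped:
 *
 *	if (rcu_needs_cpu())
 *		;	// Non-offloaded callbacks are queued here, so keep
 *			// the tick (or arrange a wakeup) to invoke them.
 *	else
 *		;	// RCU has no objection to stopping the tick.
 */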
719 */ 720 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) 721 { 722 raw_lockdep_assert_held_rcu_node(rdp->mynode); 723 WRITE_ONCE(rdp->rcu_urgent_qs, false); 724 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); 725 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { 726 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 727 WRITE_ONCE(rdp->rcu_forced_tick, false); 728 } 729 } 730 731 /** 732 * rcu_is_watching - RCU read-side critical sections permitted on current CPU? 733 * 734 * Return @true if RCU is watching the running CPU and @false otherwise. 735 * An @true return means that this CPU can safely enter RCU read-side 736 * critical sections. 737 * 738 * Although calls to rcu_is_watching() from most parts of the kernel 739 * will return @true, there are important exceptions. For example, if the 740 * current CPU is deep within its idle loop, in kernel entry/exit code, 741 * or offline, rcu_is_watching() will return @false. 742 * 743 * Make notrace because it can be called by the internal functions of 744 * ftrace, and making this notrace removes unnecessary recursion calls. 745 */ 746 notrace bool rcu_is_watching(void) 747 { 748 bool ret; 749 750 preempt_disable_notrace(); 751 ret = rcu_is_watching_curr_cpu(); 752 preempt_enable_notrace(); 753 return ret; 754 } 755 EXPORT_SYMBOL_GPL(rcu_is_watching); 756 757 /* 758 * If a holdout task is actually running, request an urgent quiescent 759 * state from its CPU. This is unsynchronized, so migrations can cause 760 * the request to go to the wrong CPU. Which is OK, all that will happen 761 * is that the CPU's next context switch will be a bit slower and next 762 * time around this task will generate another request. 763 */ 764 void rcu_request_urgent_qs_task(struct task_struct *t) 765 { 766 int cpu; 767 768 barrier(); 769 cpu = task_cpu(t); 770 if (!task_curr(t)) 771 return; /* This task is not running on that CPU. */ 772 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); 773 } 774 775 static unsigned long seq_gpwrap_lag = ULONG_MAX / 4; 776 777 /** 778 * rcu_set_gpwrap_lag - Set RCU GP sequence overflow lag value. 779 * @lag_gps: Set overflow lag to this many grace period worth of counters 780 * which is used by rcutorture to quickly force a gpwrap situation. 781 * @lag_gps = 0 means we reset it back to the boot-time value. 782 */ 783 void rcu_set_gpwrap_lag(unsigned long lag_gps) 784 { 785 unsigned long lag_seq_count; 786 787 lag_seq_count = (lag_gps == 0) 788 ? ULONG_MAX / 4 789 : lag_gps << RCU_SEQ_CTR_SHIFT; 790 WRITE_ONCE(seq_gpwrap_lag, lag_seq_count); 791 } 792 EXPORT_SYMBOL_GPL(rcu_set_gpwrap_lag); 793 794 /* 795 * When trying to report a quiescent state on behalf of some other CPU, 796 * it is our responsibility to check for and handle potential overflow 797 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters. 798 * After all, the CPU might be in deep idle state, and thus executing no 799 * code whatsoever. 

/*
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + seq_gpwrap_lag,
			 rnp->gp_seq)) {
		WRITE_ONCE(rdp->gpwrap, true);
		WRITE_ONCE(rdp->gpwrap_count, READ_ONCE(rdp->gpwrap_count) + 1);
	}
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}

/*
 * Snapshot the specified CPU's RCU_WATCHING counter so that we can later
 * credit it with an implicit quiescent state. Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int rcu_watching_snap_save(struct rcu_data *rdp)
{
	/*
	 * Full ordering between remote CPU's post idle accesses and updater's
	 * accesses prior to current GP (and also the started GP sequence number)
	 * is enforced by rcu_seq_start() implicit barrier and even further by
	 * smp_mb__after_unlock_lock() barriers chained all the way throughout the
	 * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
	 * locking.
	 *
	 * Ordering between remote CPU's pre idle accesses and post grace period
	 * updater's accesses is enforced by the below acquire semantic.
	 */
	rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
	if (rcu_watching_snap_in_eqs(rdp->watching_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
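
/*
 * Usage sketch (not part of the kernel source): the grace-period
 * kthread applies the snapshot/recheck pair across two force-quiescent-
 * state (FQS) scans, roughly as follows:
 *
 *	// First FQS scan:
 *	if (rcu_watching_snap_save(rdp))
 *		;	// CPU was idle: immediate quiescent state.
 *
 *	// A later FQS scan, for CPUs that were not idle the first time:
 *	if (rcu_watching_snap_recheck(rdp) > 0)
 *		;	// CPU passed through an EQS in the meantime.
 */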

/*
 * Returns positive if the specified CPU has passed through a quiescent state
 * by virtue of being in or having passed through a dynticks idle state since
 * the last call to rcu_watching_snap_save() for this same CPU, or by
 * virtue of having been offline.
 *
 * Returns negative if the specified CPU needs a force resched.
 *
 * Returns zero otherwise.
 */
static int rcu_watching_snap_recheck(struct rcu_data *rdp)
{
	unsigned long jtsq;
	int ret = 0;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state. Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state. After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline. This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state. This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(&rdp->rcu_urgent_qs, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		ret = -1;
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
			ret = -1;
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}

		if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
			int cpu = rdp->cpu;
			struct rcu_snap_record *rsrp;
			struct kernel_cpustat *kcsp;

			kcsp = &kcpustat_cpu(cpu);

			rsrp = &rdp->snap_record;
			rsrp->cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
			rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
			rsrp->cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
			rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu);
			rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu);
			rsrp->nr_csw = nr_context_switches_cpu(cpu);
			rsrp->jiffies = jiffies;
			rsrp->gp_seq = rdp->gp_seq;
		}
	}

	return ret;
}
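
/*
 * Worked example (not part of the kernel source): assume HZ == 1000 and
 * jiffies_to_sched_qs == 100. For a CPU that keeps running in the
 * kernel, rcu_watching_snap_recheck() escalates as the grace period
 * ages:
 *
 *	age >= 100 jiffies (jtsq):	set ->rcu_urgent_qs, enlisting
 *					help from rcu_note_context_switch().
 *	age >= 200 jiffies (2 * jtsq):	also set ->rcu_need_heavy_qs.
 *	nohz_full CPU quiet for
 *	300 jiffies (3 * jtsq):		return -1 so that the caller
 *					issues resched_cpu().
 *	past ->jiffies_resched:		apply the resched and IRQ-work
 *					measures shown above.
 */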

/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks. The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field. Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP kthread needs to be awakened, else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started. If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking. Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;	/* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true; /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}

/*
 * Clean up any old requests for the just-ended grace period. Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, and we lost), and finally don't try
 * to awaken a kthread that has not yet been created. If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context? Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition. In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}

/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned. Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure. This function is idempotent, so it does not hurt
 * to call it repeatedly. Returns a flag saying that we should awaken
 * the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	unsigned long gp_seq_req;
	bool ret = false;

	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));

	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information. Something about the fact that getting exact
	 * information requires acquiring a global lock... RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke. The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));

	return ret;
}
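
/*
 * Worked example (not part of the kernel source): rcu_seq_snap() used
 * above returns the ->gp_seq value at which a full grace period is
 * guaranteed to have elapsed for anything enqueued now. With
 * RCU_SEQ_CTR_SHIFT == 2 (state in the low two bits):
 *
 *	gp_seq == (N << 2) | 0:	idle, so snap == (N + 1) << 2,
 *				the end of the next grace period.
 *	gp_seq == (N << 2) | 1:	grace period N is already in progress and
 *				might not cover current readers, so
 *				snap == (N + 2) << 2, the end of the
 *				grace period after next.
 */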
1187 */ 1188 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, 1189 struct rcu_data *rdp) 1190 { 1191 unsigned long c; 1192 bool needwake; 1193 1194 rcu_lockdep_assert_cblist_protected(rdp); 1195 c = rcu_seq_snap(&rcu_state.gp_seq); 1196 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { 1197 /* Old request still live, so mark recent callbacks. */ 1198 (void)rcu_segcblist_accelerate(&rdp->cblist, c); 1199 return; 1200 } 1201 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1202 needwake = rcu_accelerate_cbs(rnp, rdp); 1203 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1204 if (needwake) 1205 rcu_gp_kthread_wake(); 1206 } 1207 1208 /* 1209 * Move any callbacks whose grace period has completed to the 1210 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1211 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL 1212 * sublist. This function is idempotent, so it does not hurt to 1213 * invoke it repeatedly. As long as it is not invoked -too- often... 1214 * Returns true if the RCU grace-period kthread needs to be awakened. 1215 * 1216 * The caller must hold rnp->lock with interrupts disabled. 1217 */ 1218 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1219 { 1220 rcu_lockdep_assert_cblist_protected(rdp); 1221 raw_lockdep_assert_held_rcu_node(rnp); 1222 1223 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1224 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1225 return false; 1226 1227 /* 1228 * Find all callbacks whose ->gp_seq numbers indicate that they 1229 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 1230 */ 1231 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); 1232 1233 /* Classify any remaining callbacks. */ 1234 return rcu_accelerate_cbs(rnp, rdp); 1235 } 1236 1237 /* 1238 * Move and classify callbacks, but only if doing so won't require 1239 * that the RCU grace-period kthread be awakened. 1240 */ 1241 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, 1242 struct rcu_data *rdp) 1243 { 1244 rcu_lockdep_assert_cblist_protected(rdp); 1245 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp)) 1246 return; 1247 // The grace period cannot end while we hold the rcu_node lock. 1248 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) 1249 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); 1250 raw_spin_unlock_rcu_node(rnp); 1251 } 1252 1253 /* 1254 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a 1255 * quiescent state. This is intended to be invoked when the CPU notices 1256 * a new grace period. 1257 */ 1258 static void rcu_strict_gp_check_qs(void) 1259 { 1260 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) { 1261 rcu_read_lock(); 1262 rcu_read_unlock(); 1263 } 1264 } 1265 1266 /* 1267 * Update CPU-local rcu_data state to record the beginnings and ends of 1268 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1269 * structure corresponding to the current CPU, and must have irqs disabled. 1270 * Returns true if the grace-period kthread needs to be awakened. 1271 */ 1272 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) 1273 { 1274 bool ret = false; 1275 bool need_qs; 1276 const bool offloaded = rcu_rdp_is_offloaded(rdp); 1277 1278 raw_lockdep_assert_held_rcu_node(rnp); 1279 1280 if (rdp->gp_seq == rnp->gp_seq) 1281 return false; /* Nothing to do. */ 1282 1283 /* Handle the ends of any preceding grace periods first. 

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods. The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{
	bool ret = false;
	bool need_qs;
	const bool offloaded = rcu_rdp_is_offloaded(rdp);

	raw_lockdep_assert_held_rcu_node(rnp);

	if (rdp->gp_seq == rnp->gp_seq)
		return false; /* Nothing to do. */

	/* Handle the ends of any preceding grace periods first. */
	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(rdp->gpwrap)) {
		if (!offloaded)
			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
		rdp->core_needs_qs = false;
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
	} else {
		if (!offloaded)
			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
		if (rdp->core_needs_qs)
			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
	}

	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(rdp->gpwrap)) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
		need_qs = !!(rnp->qsmask & rdp->grpmask);
		rdp->cpu_no_qs.b.norm = need_qs;
		rdp->core_needs_qs = need_qs;
		zero_cpu_stall_ticks(rdp);
	}
	rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	if (IS_ENABLED(CONFIG_PROVE_RCU) && rdp->gpwrap)
		WRITE_ONCE(rdp->last_sched_clock, jiffies);
	WRITE_ONCE(rdp->gpwrap, false);
	rcu_gpnum_ovf(rnp, rdp);
	return ret;
}

static void note_gp_changes(struct rcu_data *rdp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	needwake = __note_gp_changes(rnp, rdp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	rcu_strict_gp_check_qs();
	if (needwake)
		rcu_gp_kthread_wake();
}

static atomic_t *rcu_gp_slow_suppress;

/* Register a counter to suppress debugging grace-period delays. */
void rcu_gp_slow_register(atomic_t *rgssp)
{
	WARN_ON_ONCE(rcu_gp_slow_suppress);

	WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
}
EXPORT_SYMBOL_GPL(rcu_gp_slow_register);

/* Unregister a counter, with NULL for not caring which. */
void rcu_gp_slow_unregister(atomic_t *rgssp)
{
	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);

	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
}
EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);

static bool rcu_gp_slow_is_suppressed(void)
{
	atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);

	return rgssp && atomic_read(rgssp);
}

static void rcu_gp_slow(int delay)
{
	if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
	    !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
		schedule_timeout_idle(delay);
}
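
/*
 * Worked example (not part of the kernel source): the modulus above
 * implements the "constant overall slowdown" promised at the
 * PER_RCU_NODE_PERIOD definition. With rcu_num_nodes == 3 and a delay
 * of 5 jiffies, rcu_gp_slow() sleeps once every 3 * 3 * 5 == 45 grace
 * periods, for an average of 1/9 jiffy of delay per grace period. A
 * delay of 10 jiffies would instead sleep once every 90 grace periods,
 * which is the same 1/9 jiffy per grace period on average.
 */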

static unsigned long sleep_duration;

/* Allow rcutorture to stall the grace-period kthread. */
void rcu_gp_set_torture_wait(int duration)
{
	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
		WRITE_ONCE(sleep_duration, duration);
}
EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);

/* Actually implement the aforementioned wait. */
static void rcu_gp_torture_wait(void)
{
	unsigned long duration;

	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
		return;
	duration = xchg(&sleep_duration, 0UL);
	if (duration > 0) {
		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
		schedule_timeout_idle(duration);
		pr_alert("%s: Wait complete\n", __func__);
	}
}

/*
 * Handler for on_each_cpu() to invoke the target CPU's RCU core
 * processing.
 */
static void rcu_strict_gp_boundary(void *unused)
{
	invoke_rcu_core();
}

// Make the polled API aware of the beginning of a grace period.
static void rcu_poll_gp_seq_start(unsigned long *snap)
{
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
		raw_lockdep_assert_held_rcu_node(rnp);

	// If RCU was idle, note beginning of GP.
	if (!rcu_seq_state(rcu_state.gp_seq_polled))
		rcu_seq_start(&rcu_state.gp_seq_polled);

	// Either way, record current state.
	*snap = rcu_state.gp_seq_polled;
}

// Make the polled API aware of the end of a grace period.
static void rcu_poll_gp_seq_end(unsigned long *snap)
{
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
		raw_lockdep_assert_held_rcu_node(rnp);

	// If the previously noted GP is still in effect, record the
	// end of that GP. Either way, zero counter to avoid counter-wrap
	// problems.
	if (*snap && *snap == rcu_state.gp_seq_polled) {
		rcu_seq_end(&rcu_state.gp_seq_polled);
		rcu_state.gp_seq_polled_snap = 0;
		rcu_state.gp_seq_polled_exp_snap = 0;
	} else {
		*snap = 0;
	}
}

// Make the polled API aware of the beginning of a grace period, but
// where caller does not hold the root rcu_node structure's lock.
static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked()) {
		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
			lockdep_assert_irqs_enabled();
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	rcu_poll_gp_seq_start(snap);
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Make the polled API aware of the end of a grace period, but where
// caller does not hold the root rcu_node structure's lock.
static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked()) {
		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
			lockdep_assert_irqs_enabled();
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	rcu_poll_gp_seq_end(snap);
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
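
/*
 * Usage sketch (not part of the kernel source): rcu_state.gp_seq_polled
 * maintained above is what backs the polled grace-period API, which
 * lets callers avoid blocking in synchronize_rcu():
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	// ...do other work...
 *
 *	if (poll_state_synchronize_rcu(cookie))
 *		;	// A full grace period has elapsed since the
 *			// cookie was collected; no need to block.
 *	else
 *		cond_synchronize_rcu(cookie);	// Wait only if needed.
 */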

/*
 * There is a single llist, which is used for handling
 * synchronize_rcu() users' enqueued rcu_synchronize nodes.
 * Within this llist, there are two tail pointers:
 *
 * wait tail: Tracks the set of nodes, which need to
 *            wait for the current GP to complete.
 * done tail: Tracks the set of nodes, for which grace
 *            period has elapsed. Processing of these
 *            nodes will be done as part of the cleanup
 *            work execution by a kworker.
 *
 * At every grace period init, a new wait node is added
 * to the llist. This wait node is used as wait tail
 * for this new grace period. Given that there are a fixed
 * number of wait nodes, if all wait nodes are in use
 * (which can happen when kworker callback processing
 * is delayed) and an additional grace period is requested,
 * it means the system is slow in processing callbacks.
 *
 * TODO: If a slow processing is detected, a first node
 * in the llist should be used as a wait-tail for this
 * grace period, therefore users which should wait due
 * to a slow process are handled by _this_ grace period
 * and not next.
 *
 * Below is an illustration of how the done and wait
 * tail pointers move from one set of rcu_synchronize nodes
 * to the other, as grace periods start and finish and
 * nodes are processed by kworker.
 *
 *
 * a. Initial llist callbacks list:
 *
 * +----------+           +--------+          +-------+
 * |          |           |        |          |       |
 * |   head   |---------> |   cb2  |--------->| cb1   |
 * |          |           |        |          |       |
 * +----------+           +--------+          +-------+
 *
 *
 *
 * b. New GP1 Start:
 *
 *                    WAIT TAIL
 *                      |
 *                      |
 *                      v
 * +----------+     +--------+      +--------+        +-------+
 * |          |     |        |      |        |        |       |
 * |   head   ------> wait   |------>   cb2  |------> |  cb1  |
 * |          |     | head1  |      |        |        |       |
 * +----------+     +--------+      +--------+        +-------+
 *
 *
 *
 * c. GP completion:
 *
 * WAIT_TAIL == DONE_TAIL
 *
 *                   DONE TAIL
 *                     |
 *                     |
 *                     v
 * +----------+     +--------+      +--------+        +-------+
 * |          |     |        |      |        |        |       |
 * |   head   ------> wait   |------>   cb2  |------> |  cb1  |
 * |          |     | head1  |      |        |        |       |
 * +----------+     +--------+      +--------+        +-------+
 *
 *
 *
 * d. New callbacks and GP2 start:
 *
 *                    WAIT TAIL                          DONE TAIL
 *                      |                                 |
 *                      |                                 |
 *                      v                                 v
 * +----------+     +------+    +------+    +------+    +-----+    +-----+    +-----+
 * |          |     |      |    |      |    |      |    |     |    |     |    |     |
 * |   head   ------> wait |--->|  cb4 |--->| cb3  |--->|wait |--->| cb2 |--->| cb1 |
 * |          |     | head2|    |      |    |      |    |head1|    |     |    |     |
 * +----------+     +------+    +------+    +------+    +-----+    +-----+    +-----+
 *
 *
 *
 * e. GP2 completion:
 *
 * WAIT_TAIL == DONE_TAIL
 *                   DONE TAIL
 *                      |
 *                      |
 *                      v
 * +----------+     +------+    +------+    +------+    +-----+    +-----+    +-----+
 * |          |     |      |    |      |    |      |    |     |    |     |    |     |
 * |   head   ------> wait |--->|  cb4 |--->| cb3  |--->|wait |--->| cb2 |--->| cb1 |
 * |          |     | head2|    |      |    |      |    |head1|    |     |    |     |
 * +----------+     +------+    +------+    +------+    +-----+    +-----+    +-----+
 *
 *
 * While the llist state transitions from d to e, a kworker
 * can start executing rcu_sr_normal_gp_cleanup_work() and
 * can observe either the old done tail (@c) or the new
 * done tail (@e). So, done tail updates and reads need
 * to use the rel-acq semantics. If the concurrent kworker
 * observes the old done tail, the newly queued work
 * execution will process the updated done tail. If the
 * concurrent kworker observes the new done tail, then
 * the newly queued work will skip processing the done
 * tail, as workqueue semantics guarantees that the new
 * work is executed only after the previous one completes.
kworker callbacks processing complete:
1593  *
1594  *
1595  *             DONE TAIL
1596  *               |
1597  *               |
1598  *               v
1599  *    +----------+     +--------+
1600  *    |          |     |        |
1601  *    |   head   ------> wait   |
1602  *    |          |     | head2  |
1603  *    +----------+     +--------+
1604  *
1605  */
1606 static bool rcu_sr_is_wait_head(struct llist_node *node)
1607 {
1608 	return &(rcu_state.srs_wait_nodes)[0].node <= node &&
1609 		node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node;
1610 }
1611
1612 static struct llist_node *rcu_sr_get_wait_head(void)
1613 {
1614 	struct sr_wait_node *sr_wn;
1615 	int i;
1616
1617 	for (i = 0; i < SR_NORMAL_GP_WAIT_HEAD_MAX; i++) {
1618 		sr_wn = &(rcu_state.srs_wait_nodes)[i];
1619
1620 		if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1))
1621 			return &sr_wn->node;
1622 	}
1623
1624 	return NULL;
1625 }
1626
1627 static void rcu_sr_put_wait_head(struct llist_node *node)
1628 {
1629 	struct sr_wait_node *sr_wn = container_of(node, struct sr_wait_node, node);
1630
1631 	atomic_set_release(&sr_wn->inuse, 0);
1632 }
1633
1634 /* Enable rcu_normal_wake_from_gp automatically on small systems. */
1635 #define WAKE_FROM_GP_CPU_THRESHOLD 16
1636
1637 static int rcu_normal_wake_from_gp = -1;
1638 module_param(rcu_normal_wake_from_gp, int, 0644);
1639 static struct workqueue_struct *sync_wq;
1640
1641 static void rcu_sr_normal_complete(struct llist_node *node)
1642 {
1643 	struct rcu_synchronize *rs = container_of(
1644 		(struct rcu_head *) node, struct rcu_synchronize, head);
1645
1646 	WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
1647 		!poll_state_synchronize_rcu_full(&rs->oldstate),
1648 		"A full grace period has not passed yet!\n");
1649
1650 	/* Finally. */
1651 	complete(&rs->completion);
1652 }
1653
1654 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
1655 {
1656 	struct llist_node *done, *rcu, *next, *head;
1657
1658 	/*
1659 	 * This work item can potentially run while
1660 	 * a new done tail is being updated by the
1661 	 * grace-period kthread in rcu_sr_normal_gp_cleanup().
1662 	 * So, reads and updates of the done tail need to
1663 	 * follow acquire-release semantics.
1664 	 *
1665 	 * Given that workqueue semantics guarantee that a single work
1666 	 * item cannot be executed concurrently by multiple kworkers,
1667 	 * the done-tail list manipulations are protected here.
1668 	 */
1669 	done = smp_load_acquire(&rcu_state.srs_done_tail);
1670 	if (WARN_ON_ONCE(!done))
1671 		return;
1672
1673 	WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
1674 	head = done->next;
1675 	done->next = NULL;
1676
1677 	/*
1678 	 * The dummy node, which is pointed to by the
1679 	 * done tail acquire-read above, is not removed
1680 	 * here. This allows lockless additions of new
1681 	 * rcu_synchronize nodes in rcu_sr_normal_add_req()
1682 	 * while the cleanup work executes. The dummy
1683 	 * node is removed in the next round of
1684 	 * cleanup-work execution.
1685 	 */
1686 	llist_for_each_safe(rcu, next, head) {
1687 		if (!rcu_sr_is_wait_head(rcu)) {
1688 			rcu_sr_normal_complete(rcu);
1689 			continue;
1690 		}
1691
1692 		rcu_sr_put_wait_head(rcu);
1693 	}
1694
1695 	/* Order list manipulations with atomic access. */
1696 	atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
1697 }
1698
1699 /*
1700  * Helper function for rcu_gp_cleanup().
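 *
 * For orientation, the waiting side that this cleanup eventually
 * completes is roughly the following (a simplified sketch of
 * synchronize_rcu_normal(), defined later in this file, not the
 * exact call sequence):
 *
 *	rcu_sr_normal_add_req(&rs);		// enqueue rcu_synchronize node
 *	(void) start_poll_synchronize_rcu();	// kick a grace period
 *	wait_for_completion(&rs.completion);	// woken via rcu_sr_normal_complete()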
 */
1702 static void rcu_sr_normal_gp_cleanup(void)
1703 {
1704 	struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
1705 	int done = 0;
1706
1707 	wait_tail = rcu_state.srs_wait_tail;
1708 	if (wait_tail == NULL)
1709 		return;
1710
1711 	rcu_state.srs_wait_tail = NULL;
1712 	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1713 	WARN_ON_ONCE(!rcu_sr_is_wait_head(wait_tail));
1714
1715 	/*
1716 	 * Process (a) and (d) cases. See an illustration.
1717 	 */
1718 	llist_for_each_safe(rcu, next, wait_tail->next) {
1719 		if (rcu_sr_is_wait_head(rcu))
1720 			break;
1721
1722 		rcu_sr_normal_complete(rcu);
1723 		// This node can be the last one, so keep ->next updated at each step.
1724 		wait_tail->next = next;
1725
1726 		if (++done == SR_MAX_USERS_WAKE_FROM_GP)
1727 			break;
1728 	}
1729
1730 	/*
1731 	 * Fast path: no more users to process except putting the second-to-
1732 	 * last wait head, and only if there are no in-flight workers. If
1733 	 * there are in-flight workers, they will remove the last wait head.
1734 	 *
1735 	 * Note that the ACQUIRE orders atomic access with list manipulation.
1736 	 */
1737 	if (wait_tail->next && wait_tail->next->next == NULL &&
1738 	    rcu_sr_is_wait_head(wait_tail->next) &&
1739 	    !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
1740 		rcu_sr_put_wait_head(wait_tail->next);
1741 		wait_tail->next = NULL;
1742 	}
1743
1744 	/* Concurrent sr_normal_gp_cleanup work might observe this update. */
1745 	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
1746 	smp_store_release(&rcu_state.srs_done_tail, wait_tail);
1747
1748 	/*
1749 	 * We schedule a work item in order to perform final processing
1750 	 * of outstanding users (if any are left) and to release wait
1751 	 * heads added by the rcu_sr_normal_gp_init() call.
1752 	 */
1753 	if (wait_tail->next) {
1754 		atomic_inc(&rcu_state.srs_cleanups_pending);
1755 		if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
1756 			atomic_dec(&rcu_state.srs_cleanups_pending);
1757 	}
1758 }
1759
1760 /*
1761  * Helper function for rcu_gp_init().
1762  */
1763 static bool rcu_sr_normal_gp_init(void)
1764 {
1765 	struct llist_node *first;
1766 	struct llist_node *wait_head;
1767 	bool start_new_poll = false;
1768
1769 	first = READ_ONCE(rcu_state.srs_next.first);
1770 	if (!first || rcu_sr_is_wait_head(first))
1771 		return start_new_poll;
1772
1773 	wait_head = rcu_sr_get_wait_head();
1774 	if (!wait_head) {
1775 		// Kick another GP to retry.
1776 		start_new_poll = true;
1777 		return start_new_poll;
1778 	}
1779
1780 	/* Inject a wait-dummy-node. */
1781 	llist_add(wait_head, &rcu_state.srs_next);
1782
1783 	/*
1784 	 * The waiting list of rcu_synchronize nodes should be empty at
1785 	 * this step, since the GP kthread, rcu_gp_init() -> gp_cleanup(),
1786 	 * rolls it over. If it is not, that is a bug, so warn.
1787 	 */
1788 	WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL);
1789 	rcu_state.srs_wait_tail = wait_head;
1790 	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1791
1792 	return start_new_poll;
1793 }
1794
1795 static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
1796 {
1797 	llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
1798 }
1799
1800 /*
1801  * Initialize a new grace period. Return false if no grace period required.
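 *
 * For context, this is the first of the three phases executed by the
 * grace-period kthread; in rough outline (see rcu_gp_kthread() below):
 *
 *	rcu_gp_init();		// this function
 *	rcu_gp_fqs_loop();	// force quiescent states until the GP ends
 *	rcu_gp_cleanup();	// propagate the grace period's end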
 */
1803 static noinline_for_stack bool rcu_gp_init(void)
1804 {
1805 	unsigned long flags;
1806 	unsigned long oldmask;
1807 	unsigned long mask;
1808 	struct rcu_data *rdp;
1809 	struct rcu_node *rnp = rcu_get_root();
1810 	bool start_new_poll;
1811 	unsigned long old_gp_seq;
1812
1813 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1814 	raw_spin_lock_irq_rcu_node(rnp);
1815 	if (!rcu_state.gp_flags) {
1816 		/* Spurious wakeup, tell caller to go back to sleep. */
1817 		raw_spin_unlock_irq_rcu_node(rnp);
1818 		return false;
1819 	}
1820 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1821
1822 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1823 		/*
1824 		 * Grace period already in progress, don't start another.
1825 		 * Not supposed to be able to happen.
1826 		 */
1827 		raw_spin_unlock_irq_rcu_node(rnp);
1828 		return false;
1829 	}
1830
1831 	/* Advance to a new grace period and initialize state. */
1832 	record_gp_stall_check_time();
1833 	/*
1834 	 * A new wait segment must be started before gp_seq is advanced, so
1835 	 * that previous gp waiters won't observe the new gp_seq.
1836 	 */
1837 	start_new_poll = rcu_sr_normal_gp_init();
1838 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1839 	old_gp_seq = rcu_state.gp_seq;
1840 	/*
1841 	 * Critical ordering: rcu_seq_start() must happen BEFORE the CPU hotplug
1842 	 * scan below. Otherwise we risk a race where a newly onlining CPU could
1843 	 * be missed by the current grace period, potentially leading to
1844 	 * use-after-free errors. For a detailed explanation of this race, see
1845 	 * Documentation/RCU/Design/Requirements/Requirements.rst in the
1846 	 * "Hotplug CPU" section.
1847 	 *
1848 	 * Also note that the root rnp's gp_seq is kept separate from, and lags,
1849 	 * the rcu_state's gp_seq, for a reason. See the Quick-Quiz on
1850 	 * Single-node systems for more details (in Data-Structures.rst).
1851 	 */
1852 	rcu_seq_start(&rcu_state.gp_seq);
1853 	/* Ensure that rcu_seq_done_exact() guardband doesn't give false positives. */
1854 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
1855 		     rcu_seq_done_exact(&old_gp_seq, rcu_seq_snap(&rcu_state.gp_seq)));
1856
1857 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1858 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1859 	rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1860 	raw_spin_unlock_irq_rcu_node(rnp);
1861
1862 	/*
1863 	 * The "start_new_poll" flag is set to true only when this GP was
1864 	 * unable to handle anything and there are outstanding users. That
1865 	 * happens when rcu_sr_normal_gp_init() was unable to insert a dummy
1866 	 * separator into the llist because no dummy nodes were left.
1867 	 *
1868 	 * The number of dummy nodes is fixed, so it is possible to run out
1869 	 * of them; if so, start a new poll request to retry. This is rare,
1870 	 * and it means that the system is slow in processing callbacks.
1871 	 */
1872 	if (start_new_poll)
1873 		(void) start_poll_synchronize_rcu();
1874
1875 	/*
1876 	 * Apply per-leaf buffered online and offline operations to
1877 	 * the rcu_node tree. Note that this new grace period need not
1878 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1879 	 * offlining path, when combined with checks in this function,
1880 	 * will handle CPUs that are currently going offline or that will
1881 	 * go offline later. Please also refer to the "Hotplug CPU" section
1882 	 * of RCU's Requirements documentation.
1883 	 */
1884 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1885 	/* Exclude CPU hotplug operations.
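	 * Note the ordering in the loop below: interrupts are disabled
	 * first, then ->ofl_lock is acquired, and only then each leaf
	 * rcu_node structure's ->lock, with releases in the reverse
	 * order.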
*/ 1886 rcu_for_each_leaf_node(rnp) { 1887 local_irq_disable(); 1888 /* 1889 * Serialize with CPU offline. See Requirements.rst > Hotplug CPU > 1890 * Concurrent Quiescent State Reporting for Offline CPUs. 1891 */ 1892 arch_spin_lock(&rcu_state.ofl_lock); 1893 raw_spin_lock_rcu_node(rnp); 1894 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1895 !rnp->wait_blkd_tasks) { 1896 /* Nothing to do on this leaf rcu_node structure. */ 1897 raw_spin_unlock_rcu_node(rnp); 1898 arch_spin_unlock(&rcu_state.ofl_lock); 1899 local_irq_enable(); 1900 continue; 1901 } 1902 1903 /* Record old state, apply changes to ->qsmaskinit field. */ 1904 oldmask = rnp->qsmaskinit; 1905 rnp->qsmaskinit = rnp->qsmaskinitnext; 1906 1907 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1908 if (!oldmask != !rnp->qsmaskinit) { 1909 if (!oldmask) { /* First online CPU for rcu_node. */ 1910 if (!rnp->wait_blkd_tasks) /* Ever offline? */ 1911 rcu_init_new_rnp(rnp); 1912 } else if (rcu_preempt_has_tasks(rnp)) { 1913 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1914 } else { /* Last offline CPU and can propagate. */ 1915 rcu_cleanup_dead_rnp(rnp); 1916 } 1917 } 1918 1919 /* 1920 * If all waited-on tasks from prior grace period are 1921 * done, and if all this rcu_node structure's CPUs are 1922 * still offline, propagate up the rcu_node tree and 1923 * clear ->wait_blkd_tasks. Otherwise, if one of this 1924 * rcu_node structure's CPUs has since come back online, 1925 * simply clear ->wait_blkd_tasks. 1926 */ 1927 if (rnp->wait_blkd_tasks && 1928 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1929 rnp->wait_blkd_tasks = false; 1930 if (!rnp->qsmaskinit) 1931 rcu_cleanup_dead_rnp(rnp); 1932 } 1933 1934 raw_spin_unlock_rcu_node(rnp); 1935 arch_spin_unlock(&rcu_state.ofl_lock); 1936 local_irq_enable(); 1937 } 1938 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ 1939 1940 /* 1941 * Set the quiescent-state-needed bits in all the rcu_node 1942 * structures for all currently online CPUs in breadth-first 1943 * order, starting from the root rcu_node structure, relying on the 1944 * layout of the tree within the rcu_state.node[] array. Note that 1945 * other CPUs will access only the leaves of the hierarchy, thus 1946 * seeing that no grace period is in progress, at least until the 1947 * corresponding leaf node has been initialized. 1948 * 1949 * The grace period cannot complete until the initialization 1950 * process finishes, because this kthread handles both. 1951 */ 1952 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT); 1953 rcu_for_each_node_breadth_first(rnp) { 1954 rcu_gp_slow(gp_init_delay); 1955 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1956 rdp = this_cpu_ptr(&rcu_data); 1957 rcu_preempt_check_blocked_tasks(rnp); 1958 rnp->qsmask = rnp->qsmaskinit; 1959 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1960 if (rnp == rdp->mynode) 1961 (void)__note_gp_changes(rnp, rdp); 1962 rcu_preempt_boost_start_gp(rnp); 1963 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1964 rnp->level, rnp->grplo, 1965 rnp->grphi, rnp->qsmask); 1966 /* 1967 * Quiescent states for tasks on any now-offline CPUs. Since we 1968 * released the ofl and rnp lock before this loop, CPUs might 1969 * have gone offline and we have to report QS on their behalf. 1970 * See Requirements.rst > Hotplug CPU > Concurrent QS Reporting. 
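	 * The mask computed below is the set of bits that this grace
	 * period is still waiting on (->qsmask) but whose CPUs are no
	 * longer online (cleared in ->qsmaskinitnext).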
1971 */ 1972 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1973 rnp->rcu_gp_init_mask = mask; 1974 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1975 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1976 else 1977 raw_spin_unlock_irq_rcu_node(rnp); 1978 cond_resched_tasks_rcu_qs(); 1979 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1980 } 1981 1982 // If strict, make all CPUs aware of new grace period. 1983 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 1984 on_each_cpu(rcu_strict_gp_boundary, NULL, 0); 1985 1986 return true; 1987 } 1988 1989 /* 1990 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 1991 * time. 1992 */ 1993 static bool rcu_gp_fqs_check_wake(int *gfp) 1994 { 1995 struct rcu_node *rnp = rcu_get_root(); 1996 1997 // If under overload conditions, force an immediate FQS scan. 1998 if (*gfp & RCU_GP_FLAG_OVLD) 1999 return true; 2000 2001 // Someone like call_rcu() requested a force-quiescent-state scan. 2002 *gfp = READ_ONCE(rcu_state.gp_flags); 2003 if (*gfp & RCU_GP_FLAG_FQS) 2004 return true; 2005 2006 // The current grace period has completed. 2007 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 2008 return true; 2009 2010 return false; 2011 } 2012 2013 /* 2014 * Do one round of quiescent-state forcing. 2015 */ 2016 static void rcu_gp_fqs(bool first_time) 2017 { 2018 int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall); 2019 struct rcu_node *rnp = rcu_get_root(); 2020 2021 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2022 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1); 2023 2024 WARN_ON_ONCE(nr_fqs > 3); 2025 /* Only countdown nr_fqs for stall purposes if jiffies moves. */ 2026 if (nr_fqs) { 2027 if (nr_fqs == 1) { 2028 WRITE_ONCE(rcu_state.jiffies_stall, 2029 jiffies + rcu_jiffies_till_stall_check()); 2030 } 2031 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs); 2032 } 2033 2034 if (first_time) { 2035 /* Collect dyntick-idle snapshots. */ 2036 force_qs_rnp(rcu_watching_snap_save); 2037 } else { 2038 /* Handle dyntick-idle and offline CPUs. */ 2039 force_qs_rnp(rcu_watching_snap_recheck); 2040 } 2041 /* Clear flag to prevent immediate re-entry. */ 2042 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2043 raw_spin_lock_irq_rcu_node(rnp); 2044 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & ~RCU_GP_FLAG_FQS); 2045 raw_spin_unlock_irq_rcu_node(rnp); 2046 } 2047 } 2048 2049 /* 2050 * Loop doing repeated quiescent-state forcing until the grace period ends. 2051 */ 2052 static noinline_for_stack void rcu_gp_fqs_loop(void) 2053 { 2054 bool first_gp_fqs = true; 2055 int gf = 0; 2056 unsigned long j; 2057 int ret; 2058 struct rcu_node *rnp = rcu_get_root(); 2059 2060 j = READ_ONCE(jiffies_till_first_fqs); 2061 if (rcu_state.cbovld) 2062 gf = RCU_GP_FLAG_OVLD; 2063 ret = 0; 2064 for (;;) { 2065 if (rcu_state.cbovld) { 2066 j = (j + 2) / 3; 2067 if (j <= 0) 2068 j = 1; 2069 } 2070 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) { 2071 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j); 2072 /* 2073 * jiffies_force_qs before RCU_GP_WAIT_FQS state 2074 * update; required for stall checks. 2075 */ 2076 smp_wmb(); 2077 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 2078 jiffies + (j ? 
3 * j : 2));
2079 		}
2080 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2081 				       TPS("fqswait"));
2082 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
2083 		(void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
2084 				 rcu_gp_fqs_check_wake(&gf), j);
2085 		rcu_gp_torture_wait();
2086 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
2087 		/* Locking provides needed memory barriers. */
2088 		/*
2089 		 * Exit the loop if the root rcu_node structure indicates that
2090 		 * the grace period has ended. The rcu_preempt_blocked_readers_cgp(rnp)
2091 		 * check is required only for single-node rcu_node trees because readers
2092 		 * blocking the current grace period are queued only on leaf rcu_node
2093 		 * structures. For multi-node trees, checking the root node's ->qsmask
2094 		 * suffices, because a given root node's ->qsmask bit is cleared only
2095 		 * when all CPUs and tasks from the corresponding leaf nodes have passed
2096 		 * through their quiescent state.
		 */
2097 		if (!READ_ONCE(rnp->qsmask) &&
2098 		    !rcu_preempt_blocked_readers_cgp(rnp))
2099 			break;
2100 		/* If time for quiescent-state forcing, do it. */
2101 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
2102 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
2103 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2104 					       TPS("fqsstart"));
2105 			rcu_gp_fqs(first_gp_fqs);
2106 			gf = 0;
2107 			if (first_gp_fqs) {
2108 				first_gp_fqs = false;
2109 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
2110 			}
2111 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2112 					       TPS("fqsend"));
2113 			cond_resched_tasks_rcu_qs();
2114 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2115 			ret = 0; /* Force full wait till next FQS. */
2116 			j = READ_ONCE(jiffies_till_next_fqs);
2117 		} else {
2118 			/* Deal with stray signal. */
2119 			cond_resched_tasks_rcu_qs();
2120 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2121 			WARN_ON(signal_pending(current));
2122 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2123 					       TPS("fqswaitsig"));
2124 			ret = 1; /* Keep old FQS timing. */
2125 			j = jiffies;
2126 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
2127 				j = 1;
2128 			else
2129 				j = rcu_state.jiffies_force_qs - j;
2130 			gf = 0;
2131 		}
2132 	}
2133 }
2134
2135 /*
2136  * Clean up after the old grace period.
2137  */
2138 static noinline void rcu_gp_cleanup(void)
2139 {
2140 	int cpu;
2141 	bool needgp = false;
2142 	unsigned long gp_duration;
2143 	unsigned long new_gp_seq;
2144 	bool offloaded;
2145 	struct rcu_data *rdp;
2146 	struct rcu_node *rnp = rcu_get_root();
2147 	struct swait_queue_head *sq;
2148
2149 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
2150 	raw_spin_lock_irq_rcu_node(rnp);
2151 	rcu_state.gp_end = jiffies;
2152 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2153 	if (gp_duration > rcu_state.gp_max)
2154 		rcu_state.gp_max = gp_duration;
2155
2156 	/*
2157 	 * We know the grace period is complete, but to everyone else
2158 	 * it appears to still be ongoing. But it is also the case
2159 	 * that to everyone else it looks like there is nothing that
2160 	 * they can do to advance the grace period. It is therefore
2161 	 * safe for us to drop the lock in order to mark the grace
2162 	 * period as completed in all of the rcu_node structures.
2163 	 */
2164 	rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
2165 	raw_spin_unlock_irq_rcu_node(rnp);
2166
2167 	/*
2168 	 * Propagate new ->gp_seq value to rcu_node structures so that
2169 	 * other CPUs don't have to wait until the start of the next grace
2170 	 * period to process their callbacks.
This also avoids some nasty
2171 	 * RCU grace-period initialization races by forcing the end of
2172 	 * the current grace period to be completely recorded in all of
2173 	 * the rcu_node structures before the beginning of the next grace
2174 	 * period is recorded in any of the rcu_node structures.
2175 	 */
2176 	new_gp_seq = rcu_state.gp_seq;
2177 	rcu_seq_end(&new_gp_seq);
2178 	rcu_for_each_node_breadth_first(rnp) {
2179 		raw_spin_lock_irq_rcu_node(rnp);
2180 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2181 			dump_blkd_tasks(rnp, 10);
2182 		WARN_ON_ONCE(rnp->qsmask);
2183 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2184 		if (!rnp->parent)
2185 			smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
2186 		rdp = this_cpu_ptr(&rcu_data);
2187 		if (rnp == rdp->mynode)
2188 			needgp = __note_gp_changes(rnp, rdp) || needgp;
2189 		/* smp_mb() provided by prior unlock-lock pair. */
2190 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
2191 		// Reset overload indication for CPUs no longer overloaded.
2192 		if (rcu_is_leaf_node(rnp))
2193 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2194 				rdp = per_cpu_ptr(&rcu_data, cpu);
2195 				check_cb_ovld_locked(rdp, rnp);
2196 			}
2197 		sq = rcu_nocb_gp_get(rnp);
2198 		raw_spin_unlock_irq_rcu_node(rnp);
2199 		rcu_nocb_gp_cleanup(sq);
2200 		cond_resched_tasks_rcu_qs();
2201 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
2202 		rcu_gp_slow(gp_cleanup_delay);
2203 	}
2204 	rnp = rcu_get_root();
2205 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2206
2207 	/* Declare grace period done, trace first to use old GP number. */
2208 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2209 	rcu_seq_end(&rcu_state.gp_seq);
2210 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2211 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2212 	/* Check for GP requests since above loop. */
2213 	rdp = this_cpu_ptr(&rcu_data);
2214 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2215 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2216 				  TPS("CleanupMore"));
2217 		needgp = true;
2218 	}
2219 	/* Advance CBs to reduce false positives below. */
2220 	offloaded = rcu_rdp_is_offloaded(rdp);
2221 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2222
2223 		// We get here if a grace period was needed ("needgp")
2224 		// and the above call to rcu_accelerate_cbs() did not set
2225 		// the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
2226 		// the need for another grace period). The purpose
2227 		// of the "offloaded" check is to avoid invoking
2228 		// rcu_accelerate_cbs() on an offloaded CPU because we do not
2229 		// hold the ->nocb_lock needed to safely access an offloaded
2230 		// ->cblist. We do not want to acquire that lock because
2231 		// it can be heavily contended during callback floods.
2232
2233 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2234 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2235 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
2236 	} else {
2237
2238 		// We get here either if there is no need for an
2239 		// additional grace period or if rcu_accelerate_cbs() has
2240 		// already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
2241 		// So all we need to do is to clear all of the other
2242 		// ->gp_flags bits.
2243
2244 		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2245 	}
2246 	raw_spin_unlock_irq_rcu_node(rnp);
2247
2248 	// Make synchronize_rcu() users aware of the end of the old grace period.
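	// This completes rcu_synchronize waiters queued via
	// rcu_sr_normal_add_req(), either directly or by queueing
	// srs_cleanup_work for any nodes that remain.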
2249 rcu_sr_normal_gp_cleanup(); 2250 2251 // If strict, make all CPUs aware of the end of the old grace period. 2252 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 2253 on_each_cpu(rcu_strict_gp_boundary, NULL, 0); 2254 } 2255 2256 /* 2257 * Body of kthread that handles grace periods. 2258 */ 2259 static int __noreturn rcu_gp_kthread(void *unused) 2260 { 2261 rcu_bind_gp_kthread(); 2262 for (;;) { 2263 2264 /* Handle grace-period start. */ 2265 for (;;) { 2266 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2267 TPS("reqwait")); 2268 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS); 2269 swait_event_idle_exclusive(rcu_state.gp_wq, 2270 READ_ONCE(rcu_state.gp_flags) & 2271 RCU_GP_FLAG_INIT); 2272 rcu_gp_torture_wait(); 2273 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS); 2274 /* Locking provides needed memory barrier. */ 2275 if (rcu_gp_init()) 2276 break; 2277 cond_resched_tasks_rcu_qs(); 2278 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2279 WARN_ON(signal_pending(current)); 2280 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2281 TPS("reqwaitsig")); 2282 } 2283 2284 /* Handle quiescent-state forcing. */ 2285 rcu_gp_fqs_loop(); 2286 2287 /* Handle grace-period end. */ 2288 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP); 2289 rcu_gp_cleanup(); 2290 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED); 2291 } 2292 } 2293 2294 /* 2295 * Report a full set of quiescent states to the rcu_state data structure. 2296 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 2297 * another grace period is required. Whether we wake the grace-period 2298 * kthread or it awakens itself for the next round of quiescent-state 2299 * forcing, that kthread will clean up after the just-completed grace 2300 * period. Note that the caller must hold rnp->lock, which is released 2301 * before return. 2302 */ 2303 static void rcu_report_qs_rsp(unsigned long flags) 2304 __releases(rcu_get_root()->lock) 2305 { 2306 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 2307 WARN_ON_ONCE(!rcu_gp_in_progress()); 2308 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS); 2309 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 2310 rcu_gp_kthread_wake(); 2311 } 2312 2313 /* 2314 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 2315 * Allows quiescent states for a group of CPUs to be reported at one go 2316 * to the specified rcu_node structure, though all the CPUs in the group 2317 * must be represented by the same rcu_node structure (which need not be a 2318 * leaf rcu_node structure, though it often will be). The gps parameter 2319 * is the grace-period snapshot, which means that the quiescent states 2320 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 2321 * must be held upon entry, and it is released before return. 2322 * 2323 * As a special case, if mask is zero, the bit-already-cleared check is 2324 * disabled. This allows propagating quiescent state due to resumed tasks 2325 * during grace-period initialization. 2326 */ 2327 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 2328 unsigned long gps, unsigned long flags) 2329 __releases(rnp->lock) 2330 { 2331 unsigned long oldmask = 0; 2332 struct rcu_node *rnp_c; 2333 2334 raw_lockdep_assert_held_rcu_node(rnp); 2335 2336 /* Walk up the rcu_node hierarchy. */ 2337 for (;;) { 2338 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 2339 2340 /* 2341 * Our bit has already been cleared, or the 2342 * relevant grace period is already over, so done. 
2343 */ 2344 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2345 return; 2346 } 2347 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 2348 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 2349 rcu_preempt_blocked_readers_cgp(rnp)); 2350 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); 2351 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 2352 mask, rnp->qsmask, rnp->level, 2353 rnp->grplo, rnp->grphi, 2354 !!rnp->gp_tasks); 2355 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2356 2357 /* Other bits still set at this level, so done. */ 2358 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2359 return; 2360 } 2361 rnp->completedqs = rnp->gp_seq; 2362 mask = rnp->grpmask; 2363 if (rnp->parent == NULL) { 2364 2365 /* No more levels. Exit loop holding root lock. */ 2366 2367 break; 2368 } 2369 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2370 rnp_c = rnp; 2371 rnp = rnp->parent; 2372 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2373 oldmask = READ_ONCE(rnp_c->qsmask); 2374 } 2375 2376 /* 2377 * Get here if we are the last CPU to pass through a quiescent 2378 * state for this grace period. Invoke rcu_report_qs_rsp() 2379 * to clean up and start the next grace period if one is needed. 2380 */ 2381 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ 2382 } 2383 2384 /* 2385 * Record a quiescent state for all tasks that were previously queued 2386 * on the specified rcu_node structure and that were blocking the current 2387 * RCU grace period. The caller must hold the corresponding rnp->lock with 2388 * irqs disabled, and this lock is released upon return, but irqs remain 2389 * disabled. 2390 */ 2391 static void __maybe_unused 2392 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 2393 __releases(rnp->lock) 2394 { 2395 unsigned long gps; 2396 unsigned long mask; 2397 struct rcu_node *rnp_p; 2398 2399 raw_lockdep_assert_held_rcu_node(rnp); 2400 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) || 2401 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 2402 rnp->qsmask != 0) { 2403 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2404 return; /* Still need more quiescent states! */ 2405 } 2406 2407 rnp->completedqs = rnp->gp_seq; 2408 rnp_p = rnp->parent; 2409 if (rnp_p == NULL) { 2410 /* 2411 * Only one rcu_node structure in the tree, so don't 2412 * try to report up to its nonexistent parent! 2413 */ 2414 rcu_report_qs_rsp(flags); 2415 return; 2416 } 2417 2418 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 2419 gps = rnp->gp_seq; 2420 mask = rnp->grpmask; 2421 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2422 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2423 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 2424 } 2425 2426 /* 2427 * Record a quiescent state for the specified CPU to that CPU's rcu_data 2428 * structure. This must be called from the specified CPU. 2429 */ 2430 static void 2431 rcu_report_qs_rdp(struct rcu_data *rdp) 2432 { 2433 unsigned long flags; 2434 unsigned long mask; 2435 struct rcu_node *rnp; 2436 2437 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); 2438 rnp = rdp->mynode; 2439 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2440 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 2441 rdp->gpwrap) { 2442 2443 /* 2444 * The grace period in which this quiescent state was 2445 * recorded has ended, so don't report it upwards. 2446 * We will instead need a new quiescent state that lies 2447 * within the current grace period. 
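		 * Setting ->cpu_no_qs.b.norm below records exactly
		 * that requirement.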
2448 */ 2449 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 2450 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2451 return; 2452 } 2453 mask = rdp->grpmask; 2454 rdp->core_needs_qs = false; 2455 if ((rnp->qsmask & mask) == 0) { 2456 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2457 } else { 2458 /* 2459 * This GP can't end until cpu checks in, so all of our 2460 * callbacks can be processed during the next GP. 2461 * 2462 * NOCB kthreads have their own way to deal with that... 2463 */ 2464 if (!rcu_rdp_is_offloaded(rdp)) { 2465 /* 2466 * The current GP has not yet ended, so it 2467 * should not be possible for rcu_accelerate_cbs() 2468 * to return true. So complain, but don't awaken. 2469 */ 2470 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp)); 2471 } 2472 2473 rcu_disable_urgency_upon_qs(rdp); 2474 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2475 /* ^^^ Released rnp->lock */ 2476 } 2477 } 2478 2479 /* 2480 * Check to see if there is a new grace period of which this CPU 2481 * is not yet aware, and if so, set up local rcu_data state for it. 2482 * Otherwise, see if this CPU has just passed through its first 2483 * quiescent state for this grace period, and record that fact if so. 2484 */ 2485 static void 2486 rcu_check_quiescent_state(struct rcu_data *rdp) 2487 { 2488 /* Check for grace-period ends and beginnings. */ 2489 note_gp_changes(rdp); 2490 2491 /* 2492 * Does this CPU still need to do its part for current grace period? 2493 * If no, return and let the other CPUs do their part as well. 2494 */ 2495 if (!rdp->core_needs_qs) 2496 return; 2497 2498 /* 2499 * Was there a quiescent state since the beginning of the grace 2500 * period? If no, then exit and wait for the next call. 2501 */ 2502 if (rdp->cpu_no_qs.b.norm) 2503 return; 2504 2505 /* 2506 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2507 * judge of that). 2508 */ 2509 rcu_report_qs_rdp(rdp); 2510 } 2511 2512 /* Return true if callback-invocation time limit exceeded. */ 2513 static bool rcu_do_batch_check_time(long count, long tlimit, 2514 bool jlimit_check, unsigned long jlimit) 2515 { 2516 // Invoke local_clock() only once per 32 consecutive callbacks. 2517 return unlikely(tlimit) && 2518 (!likely(count & 31) || 2519 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) && 2520 jlimit_check && time_after(jiffies, jlimit))) && 2521 local_clock() >= tlimit; 2522 } 2523 2524 /* 2525 * Invoke any RCU callbacks that have made it to the end of their grace 2526 * period. Throttle as specified by rdp->blimit. 2527 */ 2528 static void rcu_do_batch(struct rcu_data *rdp) 2529 { 2530 long bl; 2531 long count = 0; 2532 int div; 2533 bool __maybe_unused empty; 2534 unsigned long flags; 2535 unsigned long jlimit; 2536 bool jlimit_check = false; 2537 long pending; 2538 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2539 struct rcu_head *rhp; 2540 long tlimit = 0; 2541 2542 /* If no callbacks are ready, just return. */ 2543 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2544 trace_rcu_batch_start(rcu_state.name, 2545 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2546 trace_rcu_batch_end(rcu_state.name, 0, 2547 !rcu_segcblist_empty(&rdp->cblist), 2548 need_resched(), is_idle_task(current), 2549 rcu_is_callbacks_kthread(rdp)); 2550 return; 2551 } 2552 2553 /* 2554 * Extract the list of ready callbacks, disabling IRQs to prevent 2555 * races with call_rcu() from interrupt handlers. Leave the 2556 * callback counts, as rcu_barrier() needs to be conservative. 
2557 * 2558 * Callbacks execution is fully ordered against preceding grace period 2559 * completion (materialized by rnp->gp_seq update) thanks to the 2560 * smp_mb__after_unlock_lock() upon node locking required for callbacks 2561 * advancing. In NOCB mode this ordering is then further relayed through 2562 * the nocb locking that protects both callbacks advancing and extraction. 2563 */ 2564 rcu_nocb_lock_irqsave(rdp, flags); 2565 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2566 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL); 2567 div = READ_ONCE(rcu_divisor); 2568 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; 2569 bl = max(rdp->blimit, pending >> div); 2570 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) && 2571 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) { 2572 const long npj = NSEC_PER_SEC / HZ; 2573 long rrn = READ_ONCE(rcu_resched_ns); 2574 2575 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn; 2576 tlimit = local_clock() + rrn; 2577 jlimit = jiffies + (rrn + npj + 1) / npj; 2578 jlimit_check = true; 2579 } 2580 trace_rcu_batch_start(rcu_state.name, 2581 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2582 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2583 if (rcu_rdp_is_offloaded(rdp)) 2584 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2585 2586 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); 2587 rcu_nocb_unlock_irqrestore(rdp, flags); 2588 2589 /* Invoke callbacks. */ 2590 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2591 rhp = rcu_cblist_dequeue(&rcl); 2592 2593 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2594 rcu_callback_t f; 2595 2596 count++; 2597 debug_rcu_head_unqueue(rhp); 2598 2599 rcu_lock_acquire(&rcu_callback_map); 2600 trace_rcu_invoke_callback(rcu_state.name, rhp); 2601 2602 f = rhp->func; 2603 debug_rcu_head_callback(rhp); 2604 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); 2605 f(rhp); 2606 2607 rcu_lock_release(&rcu_callback_map); 2608 2609 /* 2610 * Stop only if limit reached and CPU has something to do. 2611 */ 2612 if (in_serving_softirq()) { 2613 if (count >= bl && (need_resched() || !is_idle_task(current))) 2614 break; 2615 /* 2616 * Make sure we don't spend too much time here and deprive other 2617 * softirq vectors of CPU cycles. 2618 */ 2619 if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) 2620 break; 2621 } else { 2622 // In rcuc/rcuoc context, so no worries about 2623 // depriving other softirq vectors of CPU cycles. 2624 local_bh_enable(); 2625 lockdep_assert_irqs_enabled(); 2626 cond_resched_tasks_rcu_qs(); 2627 lockdep_assert_irqs_enabled(); 2628 local_bh_disable(); 2629 // But rcuc kthreads can delay quiescent-state 2630 // reporting, so check time limits for them. 2631 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING && 2632 rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) { 2633 rdp->rcu_cpu_has_work = 1; 2634 break; 2635 } 2636 } 2637 } 2638 2639 rcu_nocb_lock_irqsave(rdp, flags); 2640 rdp->n_cbs_invoked += count; 2641 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), 2642 is_idle_task(current), rcu_is_callbacks_kthread(rdp)); 2643 2644 /* Update counts and requeue any remaining callbacks. */ 2645 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2646 rcu_segcblist_add_len(&rdp->cblist, -count); 2647 2648 /* Reinstate batch limit if we have worked down the excess. 
*/ 2649 count = rcu_segcblist_n_cbs(&rdp->cblist); 2650 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) 2651 rdp->blimit = blimit; 2652 2653 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2654 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2655 rdp->qlen_last_fqs_check = 0; 2656 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 2657 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2658 rdp->qlen_last_fqs_check = count; 2659 2660 /* 2661 * The following usually indicates a double call_rcu(). To track 2662 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2663 */ 2664 empty = rcu_segcblist_empty(&rdp->cblist); 2665 WARN_ON_ONCE(count == 0 && !empty); 2666 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2667 count != 0 && empty); 2668 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); 2669 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); 2670 2671 rcu_nocb_unlock_irqrestore(rdp, flags); 2672 2673 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2674 } 2675 2676 /* 2677 * This function is invoked from each scheduling-clock interrupt, 2678 * and checks to see if this CPU is in a non-context-switch quiescent 2679 * state, for example, user mode or idle loop. It also schedules RCU 2680 * core processing. If the current grace period has gone on too long, 2681 * it will ask the scheduler to manufacture a context switch for the sole 2682 * purpose of providing the needed quiescent state. 2683 */ 2684 void rcu_sched_clock_irq(int user) 2685 { 2686 unsigned long j; 2687 2688 if (IS_ENABLED(CONFIG_PROVE_RCU)) { 2689 j = jiffies; 2690 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock))); 2691 __this_cpu_write(rcu_data.last_sched_clock, j); 2692 } 2693 trace_rcu_utilization(TPS("Start scheduler-tick")); 2694 lockdep_assert_irqs_disabled(); 2695 raw_cpu_inc(rcu_data.ticks_this_gp); 2696 /* The load-acquire pairs with the store-release setting to true. */ 2697 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 2698 /* Idle and userspace execution already are quiescent states. */ 2699 if (!rcu_is_cpu_rrupt_from_idle() && !user) 2700 set_need_resched_current(); 2701 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2702 } 2703 rcu_flavor_sched_clock_irq(user); 2704 if (rcu_pending(user)) 2705 invoke_rcu_core(); 2706 if (user || rcu_is_cpu_rrupt_from_idle()) 2707 rcu_note_voluntary_context_switch(current); 2708 lockdep_assert_irqs_disabled(); 2709 2710 trace_rcu_utilization(TPS("End scheduler-tick")); 2711 } 2712 2713 /* 2714 * Scan the leaf rcu_node structures. For each structure on which all 2715 * CPUs have reported a quiescent state and on which there are tasks 2716 * blocking the current grace period, initiate RCU priority boosting. 2717 * Otherwise, invoke the specified function to check dyntick state for 2718 * each CPU that has not yet reported a quiescent state. 
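 *
 * The callback's return value is interpreted by the scan below: a
 * positive value reports a quiescent state for the CPU in question,
 * while a negative value requests that the CPU be rescheduled.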
 */
2720 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2721 {
2722 	int cpu;
2723 	unsigned long flags;
2724 	struct rcu_node *rnp;
2725
2726 	rcu_state.cbovld = rcu_state.cbovldnext;
2727 	rcu_state.cbovldnext = false;
2728 	rcu_for_each_leaf_node(rnp) {
2729 		unsigned long mask = 0;
2730 		unsigned long rsmask = 0;
2731
2732 		cond_resched_tasks_rcu_qs();
2733 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2734 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2735 		if (rnp->qsmask == 0) {
2736 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2737 				/*
2738 				 * No point in scanning bits because they
2739 				 * are all zero. But we might need to
2740 				 * priority-boost blocked readers.
2741 				 */
2742 				rcu_initiate_boost(rnp, flags);
2743 				/* rcu_initiate_boost() releases rnp->lock. */
2744 				continue;
2745 			}
2746 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2747 			continue;
2748 		}
2749 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2750 			struct rcu_data *rdp;
2751 			int ret;
2752
2753 			rdp = per_cpu_ptr(&rcu_data, cpu);
2754 			ret = f(rdp);
2755 			if (ret > 0) {
2756 				mask |= rdp->grpmask;
2757 				rcu_disable_urgency_upon_qs(rdp);
2758 			}
2759 			if (ret < 0)
2760 				rsmask |= rdp->grpmask;
2761 		}
2762 		if (mask != 0) {
2763 			/* Idle/offline CPUs, report (releases rnp->lock). */
2764 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2765 		} else {
2766 			/* Nothing to do here, so just drop the lock. */
2767 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2768 		}
2769
2770 		for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
2771 			resched_cpu(cpu);
2772 	}
2773 }
2774
2775 /*
2776  * Force quiescent states on reluctant CPUs, and also detect which
2777  * CPUs are in dyntick-idle mode.
2778  */
2779 void rcu_force_quiescent_state(void)
2780 {
2781 	unsigned long flags;
2782 	bool ret;
2783 	struct rcu_node *rnp;
2784 	struct rcu_node *rnp_old = NULL;
2785
2786 	if (!rcu_gp_in_progress())
2787 		return;
2788 	/* Funnel through hierarchy to reduce memory contention. */
2789 	rnp = raw_cpu_read(rcu_data.mynode);
2790 	for (; rnp != NULL; rnp = rnp->parent) {
2791 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2792 		      !raw_spin_trylock(&rnp->fqslock);
2793 		if (rnp_old != NULL)
2794 			raw_spin_unlock(&rnp_old->fqslock);
2795 		if (ret)
2796 			return;
2797 		rnp_old = rnp;
2798 	}
2799 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2800
2801 	/* Reached the root of the rcu_node tree, acquire lock. */
2802 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2803 	raw_spin_unlock(&rnp_old->fqslock);
2804 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2805 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2806 		return; /* Someone beat us to it. */
2807 	}
2808 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2809 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2810 	rcu_gp_kthread_wake();
2811 }
2812 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2813
2814 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2815 // grace periods.
2816 static void strict_work_handler(struct work_struct *work)
2817 {
2818 	rcu_read_lock();
2819 	rcu_read_unlock();
2820 }
2821
2822 /* Perform RCU core processing work for the current CPU. */
2823 static __latent_entropy void rcu_core(void)
2824 {
2825 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2826 	struct rcu_node *rnp = rdp->mynode;
2827
2828 	if (cpu_is_offline(smp_processor_id()))
2829 		return;
2830 	trace_rcu_utilization(TPS("Start RCU core"));
2831 	WARN_ON_ONCE(!rdp->beenonline);
2832
2833 	/* Report any deferred quiescent states if preemption enabled.
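	 * Otherwise, if a deferred quiescent state is still pending,
	 * ask the scheduler for a context switch so that it can be
	 * reported soon.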
*/ 2834 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) { 2835 rcu_preempt_deferred_qs(current); 2836 } else if (rcu_preempt_need_deferred_qs(current)) { 2837 guard(irqsave)(); 2838 set_need_resched_current(); 2839 } 2840 2841 /* Update RCU state based on any recent quiescent states. */ 2842 rcu_check_quiescent_state(rdp); 2843 2844 /* No grace period and unregistered callbacks? */ 2845 if (!rcu_gp_in_progress() && 2846 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) { 2847 guard(irqsave)(); 2848 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2849 rcu_accelerate_cbs_unlocked(rnp, rdp); 2850 } 2851 2852 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2853 2854 /* If there are callbacks ready, invoke them. */ 2855 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) && 2856 likely(READ_ONCE(rcu_scheduler_fully_active))) { 2857 rcu_do_batch(rdp); 2858 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2859 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2860 invoke_rcu_core(); 2861 } 2862 2863 /* Do any needed deferred wakeups of rcuo kthreads. */ 2864 do_nocb_deferred_wakeup(rdp); 2865 trace_rcu_utilization(TPS("End RCU core")); 2866 2867 // If strict GPs, schedule an RCU reader in a clean environment. 2868 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 2869 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); 2870 } 2871 2872 static void rcu_core_si(void) 2873 { 2874 rcu_core(); 2875 } 2876 2877 static void rcu_wake_cond(struct task_struct *t, int status) 2878 { 2879 /* 2880 * If the thread is yielding, only wake it when this 2881 * is invoked from idle 2882 */ 2883 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) 2884 wake_up_process(t); 2885 } 2886 2887 static void invoke_rcu_core_kthread(void) 2888 { 2889 struct task_struct *t; 2890 unsigned long flags; 2891 2892 local_irq_save(flags); 2893 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); 2894 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); 2895 if (t != NULL && t != current) 2896 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); 2897 local_irq_restore(flags); 2898 } 2899 2900 /* 2901 * Wake up this CPU's rcuc kthread to do RCU core processing. 2902 */ 2903 static void invoke_rcu_core(void) 2904 { 2905 if (!cpu_online(smp_processor_id())) 2906 return; 2907 if (use_softirq) 2908 raise_softirq(RCU_SOFTIRQ); 2909 else 2910 invoke_rcu_core_kthread(); 2911 } 2912 2913 static void rcu_cpu_kthread_park(unsigned int cpu) 2914 { 2915 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 2916 } 2917 2918 static int rcu_cpu_kthread_should_run(unsigned int cpu) 2919 { 2920 return __this_cpu_read(rcu_data.rcu_cpu_has_work); 2921 } 2922 2923 /* 2924 * Per-CPU kernel thread that invokes RCU callbacks. This replaces 2925 * the RCU softirq used in configurations of RCU that do not support RCU 2926 * priority boosting. 
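 *
 * As coded below, each invocation makes up to ten passes of rcu_core()
 * work, returning early once no further work is pending and otherwise
 * briefly yielding via schedule_timeout_idle() before the next round.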
2927 */ 2928 static void rcu_cpu_kthread(unsigned int cpu) 2929 { 2930 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); 2931 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); 2932 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity); 2933 int spincnt; 2934 2935 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run")); 2936 for (spincnt = 0; spincnt < 10; spincnt++) { 2937 WRITE_ONCE(*j, jiffies); 2938 local_bh_disable(); 2939 *statusp = RCU_KTHREAD_RUNNING; 2940 local_irq_disable(); 2941 work = *workp; 2942 WRITE_ONCE(*workp, 0); 2943 local_irq_enable(); 2944 if (work) 2945 rcu_core(); 2946 local_bh_enable(); 2947 if (!READ_ONCE(*workp)) { 2948 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 2949 *statusp = RCU_KTHREAD_WAITING; 2950 return; 2951 } 2952 } 2953 *statusp = RCU_KTHREAD_YIELDING; 2954 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 2955 schedule_timeout_idle(2); 2956 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 2957 *statusp = RCU_KTHREAD_WAITING; 2958 WRITE_ONCE(*j, jiffies); 2959 } 2960 2961 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 2962 .store = &rcu_data.rcu_cpu_kthread_task, 2963 .thread_should_run = rcu_cpu_kthread_should_run, 2964 .thread_fn = rcu_cpu_kthread, 2965 .thread_comm = "rcuc/%u", 2966 .setup = rcu_cpu_kthread_setup, 2967 .park = rcu_cpu_kthread_park, 2968 }; 2969 2970 /* 2971 * Spawn per-CPU RCU core processing kthreads. 2972 */ 2973 static int __init rcu_spawn_core_kthreads(void) 2974 { 2975 int cpu; 2976 2977 for_each_possible_cpu(cpu) 2978 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; 2979 if (use_softirq) 2980 return 0; 2981 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), 2982 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); 2983 return 0; 2984 } 2985 2986 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func) 2987 { 2988 rcu_segcblist_enqueue(&rdp->cblist, head); 2989 trace_rcu_callback(rcu_state.name, head, 2990 rcu_segcblist_n_cbs(&rdp->cblist)); 2991 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); 2992 } 2993 2994 /* 2995 * Handle any core-RCU processing required by a call_rcu() invocation. 2996 */ 2997 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 2998 rcu_callback_t func, unsigned long flags) 2999 { 3000 rcutree_enqueue(rdp, head, func); 3001 /* 3002 * If called from an extended quiescent state, invoke the RCU 3003 * core in order to force a re-evaluation of RCU's idleness. 3004 */ 3005 if (!rcu_is_watching()) 3006 invoke_rcu_core(); 3007 3008 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 3009 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 3010 return; 3011 3012 /* 3013 * Force the grace period if too many callbacks or too long waiting. 3014 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 3015 * if some other CPU has recently done so. Also, don't bother 3016 * invoking rcu_force_quiescent_state() if the newly enqueued callback 3017 * is the only one waiting for a grace period to complete. 3018 */ 3019 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 3020 rdp->qlen_last_fqs_check + qhimark)) { 3021 3022 /* Are we ignoring a completed grace period? */ 3023 note_gp_changes(rdp); 3024 3025 /* Start a new grace period if one not already started. */ 3026 if (!rcu_gp_in_progress()) { 3027 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 3028 } else { 3029 /* Give the grace period a kick. 
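			 * Raise the batch limit and, unless some other
			 * CPU has recently forced quiescent states, do
			 * so now, then resnapshot the FQS and callback
			 * counts.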
*/ 3030 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; 3031 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && 3032 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 3033 rcu_force_quiescent_state(); 3034 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 3035 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 3036 } 3037 } 3038 } 3039 3040 /* 3041 * RCU callback function to leak a callback. 3042 */ 3043 static void rcu_leak_callback(struct rcu_head *rhp) 3044 { 3045 } 3046 3047 /* 3048 * Check and if necessary update the leaf rcu_node structure's 3049 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 3050 * number of queued RCU callbacks. The caller must hold the leaf rcu_node 3051 * structure's ->lock. 3052 */ 3053 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) 3054 { 3055 raw_lockdep_assert_held_rcu_node(rnp); 3056 if (qovld_calc <= 0) 3057 return; // Early boot and wildcard value set. 3058 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) 3059 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); 3060 else 3061 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); 3062 } 3063 3064 /* 3065 * Check and if necessary update the leaf rcu_node structure's 3066 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 3067 * number of queued RCU callbacks. No locks need be held, but the 3068 * caller must have disabled interrupts. 3069 * 3070 * Note that this function ignores the possibility that there are a lot 3071 * of callbacks all of which have already seen the end of their respective 3072 * grace periods. This omission is due to the need for no-CBs CPUs to 3073 * be holding ->nocb_lock to do this check, which is too heavy for a 3074 * common-case operation. 3075 */ 3076 static void check_cb_ovld(struct rcu_data *rdp) 3077 { 3078 struct rcu_node *const rnp = rdp->mynode; 3079 3080 if (qovld_calc <= 0 || 3081 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == 3082 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) 3083 return; // Early boot wildcard value or already set correctly. 3084 raw_spin_lock_rcu_node(rnp); 3085 check_cb_ovld_locked(rdp, rnp); 3086 raw_spin_unlock_rcu_node(rnp); 3087 } 3088 3089 static void 3090 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) 3091 { 3092 static atomic_t doublefrees; 3093 unsigned long flags; 3094 bool lazy; 3095 struct rcu_data *rdp; 3096 3097 /* Misaligned rcu_head! */ 3098 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 3099 3100 /* Avoid NULL dereference if callback is NULL. */ 3101 if (WARN_ON_ONCE(!func)) 3102 return; 3103 3104 if (debug_rcu_head_queue(head)) { 3105 /* 3106 * Probable double call_rcu(), so leak the callback. 3107 * Use rcu:rcu_callback trace event to find the previous 3108 * time callback was passed to call_rcu(). 3109 */ 3110 if (atomic_inc_return(&doublefrees) < 4) { 3111 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func); 3112 mem_dump_obj(head); 3113 } 3114 WRITE_ONCE(head->func, rcu_leak_callback); 3115 return; 3116 } 3117 head->func = func; 3118 head->next = NULL; 3119 kasan_record_aux_stack(head); 3120 3121 local_irq_save(flags); 3122 rdp = this_cpu_ptr(&rcu_data); 3123 RCU_LOCKDEP_WARN(!rcu_rdp_cpu_online(rdp), "Callback enqueued on offline CPU!"); 3124 3125 lazy = lazy_in && !rcu_async_should_hurry(); 3126 3127 /* Add the callback to our list. 
*/ 3128 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { 3129 // This can trigger due to call_rcu() from offline CPU: 3130 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE); 3131 WARN_ON_ONCE(!rcu_is_watching()); 3132 // Very early boot, before rcu_init(). Initialize if needed 3133 // and then drop through to queue the callback. 3134 if (rcu_segcblist_empty(&rdp->cblist)) 3135 rcu_segcblist_init(&rdp->cblist); 3136 } 3137 3138 check_cb_ovld(rdp); 3139 3140 if (unlikely(rcu_rdp_is_offloaded(rdp))) 3141 call_rcu_nocb(rdp, head, func, flags, lazy); 3142 else 3143 call_rcu_core(rdp, head, func, flags); 3144 local_irq_restore(flags); 3145 } 3146 3147 #ifdef CONFIG_RCU_LAZY 3148 static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF); 3149 module_param(enable_rcu_lazy, bool, 0444); 3150 3151 /** 3152 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and 3153 * flush all lazy callbacks (including the new one) to the main ->cblist while 3154 * doing so. 3155 * 3156 * @head: structure to be used for queueing the RCU updates. 3157 * @func: actual callback function to be invoked after the grace period 3158 * 3159 * The callback function will be invoked some time after a full grace 3160 * period elapses, in other words after all pre-existing RCU read-side 3161 * critical sections have completed. 3162 * 3163 * Use this API instead of call_rcu() if you don't want the callback to be 3164 * delayed for very long periods of time, which can happen on systems without 3165 * memory pressure and on systems which are lightly loaded or mostly idle. 3166 * This function will cause callbacks to be invoked sooner than later at the 3167 * expense of extra power. Other than that, this function is identical to, and 3168 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory 3169 * ordering and other functionality. 3170 */ 3171 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) 3172 { 3173 __call_rcu_common(head, func, false); 3174 } 3175 EXPORT_SYMBOL_GPL(call_rcu_hurry); 3176 #else 3177 #define enable_rcu_lazy false 3178 #endif 3179 3180 /** 3181 * call_rcu() - Queue an RCU callback for invocation after a grace period. 3182 * By default the callbacks are 'lazy' and are kept hidden from the main 3183 * ->cblist to prevent starting of grace periods too soon. 3184 * If you desire grace periods to start very soon, use call_rcu_hurry(). 3185 * 3186 * @head: structure to be used for queueing the RCU updates. 3187 * @func: actual callback function to be invoked after the grace period 3188 * 3189 * The callback function will be invoked some time after a full grace 3190 * period elapses, in other words after all pre-existing RCU read-side 3191 * critical sections have completed. However, the callback function 3192 * might well execute concurrently with RCU read-side critical sections 3193 * that started after call_rcu() was invoked. 3194 * 3195 * It is perfectly legal to repost an RCU callback, potentially with 3196 * a different callback function, from within its callback function. 3197 * The specified function will be invoked after another full grace period 3198 * has elapsed. This use case is similar in form to the common practice 3199 * of reposting a timer from within its own handler. 3200 * 3201 * RCU read-side critical sections are delimited by rcu_read_lock() 3202 * and rcu_read_unlock(), and may be nested. 
In addition, but only in 3203 * v5.0 and later, regions of code across which interrupts, preemption, 3204 * or softirqs have been disabled also serve as RCU read-side critical 3205 * sections. This includes hardware interrupt handlers, softirq handlers, 3206 * and NMI handlers. 3207 * 3208 * Note that all CPUs must agree that the grace period extended beyond 3209 * all pre-existing RCU read-side critical section. On systems with more 3210 * than one CPU, this means that when "func()" is invoked, each CPU is 3211 * guaranteed to have executed a full memory barrier since the end of its 3212 * last RCU read-side critical section whose beginning preceded the call 3213 * to call_rcu(). It also means that each CPU executing an RCU read-side 3214 * critical section that continues beyond the start of "func()" must have 3215 * executed a memory barrier after the call_rcu() but before the beginning 3216 * of that RCU read-side critical section. Note that these guarantees 3217 * include CPUs that are offline, idle, or executing in user mode, as 3218 * well as CPUs that are executing in the kernel. 3219 * 3220 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 3221 * resulting RCU callback function "func()", then both CPU A and CPU B are 3222 * guaranteed to execute a full memory barrier during the time interval 3223 * between the call to call_rcu() and the invocation of "func()" -- even 3224 * if CPU A and CPU B are the same CPU (but again only if the system has 3225 * more than one CPU). 3226 * 3227 * Implementation of these memory-ordering guarantees is described here: 3228 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. 3229 * 3230 * Specific to call_rcu() (as opposed to the other call_rcu*() functions), 3231 * in kernels built with CONFIG_RCU_LAZY=y, call_rcu() might delay for many 3232 * seconds before starting the grace period needed by the corresponding 3233 * callback. This delay can significantly improve energy-efficiency 3234 * on low-utilization battery-powered devices. To avoid this delay, 3235 * in latency-sensitive kernel code, use call_rcu_hurry(). 3236 */ 3237 void call_rcu(struct rcu_head *head, rcu_callback_t func) 3238 { 3239 __call_rcu_common(head, func, enable_rcu_lazy); 3240 } 3241 EXPORT_SYMBOL_GPL(call_rcu); 3242 3243 /* 3244 * During early boot, any blocking grace-period wait automatically 3245 * implies a grace period. 3246 * 3247 * Later on, this could in theory be the case for kernels built with 3248 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this 3249 * is not a common case. Furthermore, this optimization would cause 3250 * the rcu_gp_oldstate structure to expand by 50%, so this potential 3251 * grace-period optimization is ignored once the scheduler is running. 3252 */ 3253 static int rcu_blocking_is_gp(void) 3254 { 3255 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) { 3256 might_sleep(); 3257 return false; 3258 } 3259 return true; 3260 } 3261 3262 /* 3263 * Helper function for the synchronize_rcu() API. 3264 */ 3265 static void synchronize_rcu_normal(void) 3266 { 3267 struct rcu_synchronize rs; 3268 3269 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("request")); 3270 3271 if (READ_ONCE(rcu_normal_wake_from_gp) < 1) { 3272 wait_rcu_gp(call_rcu_hurry); 3273 goto trace_complete_out; 3274 } 3275 3276 init_rcu_head_on_stack(&rs.head); 3277 init_completion(&rs.completion); 3278 3279 /* 3280 * This code might be preempted, therefore take a GP 3281 * snapshot before adding a request. 
3282 */ 3283 if (IS_ENABLED(CONFIG_PROVE_RCU)) 3284 get_state_synchronize_rcu_full(&rs.oldstate); 3285 3286 rcu_sr_normal_add_req(&rs); 3287 3288 /* Kick a GP and start waiting. */ 3289 (void) start_poll_synchronize_rcu(); 3290 3291 /* Now we can wait. */ 3292 wait_for_completion(&rs.completion); 3293 destroy_rcu_head_on_stack(&rs.head); 3294 3295 trace_complete_out: 3296 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("complete")); 3297 } 3298 3299 /** 3300 * synchronize_rcu - wait until a grace period has elapsed. 3301 * 3302 * Control will return to the caller some time after a full grace 3303 * period has elapsed, in other words after all currently executing RCU 3304 * read-side critical sections have completed. Note, however, that 3305 * upon return from synchronize_rcu(), the caller might well be executing 3306 * concurrently with new RCU read-side critical sections that began while 3307 * synchronize_rcu() was waiting. 3308 * 3309 * RCU read-side critical sections are delimited by rcu_read_lock() 3310 * and rcu_read_unlock(), and may be nested. In addition, but only in 3311 * v5.0 and later, regions of code across which interrupts, preemption, 3312 * or softirqs have been disabled also serve as RCU read-side critical 3313 * sections. This includes hardware interrupt handlers, softirq handlers, 3314 * and NMI handlers. 3315 * 3316 * Note that this guarantee implies further memory-ordering guarantees. 3317 * On systems with more than one CPU, when synchronize_rcu() returns, 3318 * each CPU is guaranteed to have executed a full memory barrier since 3319 * the end of its last RCU read-side critical section whose beginning 3320 * preceded the call to synchronize_rcu(). In addition, each CPU having 3321 * an RCU read-side critical section that extends beyond the return from 3322 * synchronize_rcu() is guaranteed to have executed a full memory barrier 3323 * after the beginning of synchronize_rcu() and before the beginning of 3324 * that RCU read-side critical section. Note that these guarantees include 3325 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 3326 * that are executing in the kernel. 3327 * 3328 * Furthermore, if CPU A invoked synchronize_rcu(), which returned 3329 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 3330 * to have executed a full memory barrier during the execution of 3331 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but 3332 * again only if the system has more than one CPU). 3333 * 3334 * Implementation of these memory-ordering guarantees is described here: 3335 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. 3336 */ 3337 void synchronize_rcu(void) 3338 { 3339 unsigned long flags; 3340 struct rcu_node *rnp; 3341 3342 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3343 lock_is_held(&rcu_lock_map) || 3344 lock_is_held(&rcu_sched_lock_map), 3345 "Illegal synchronize_rcu() in RCU read-side critical section"); 3346 if (!rcu_blocking_is_gp()) { 3347 if (rcu_gp_is_expedited()) 3348 synchronize_rcu_expedited(); 3349 else 3350 synchronize_rcu_normal(); 3351 return; 3352 } 3353 3354 // Context allows vacuous grace periods. 3355 // Note well that this code runs with !PREEMPT && !SMP. 3356 // In addition, all code that advances grace periods runs at 3357 // process level. Therefore, this normal GP overlaps with other 3358 // normal GPs only by being fully nested within them, which allows 3359 // reuse of ->gp_seq_polled_snap. 
3360 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap); 3361 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap); 3362 3363 // Update the normal grace-period counters to record 3364 // this grace period, but only those used by the boot CPU. 3365 // The rcu_scheduler_starting() will take care of the rest of 3366 // these counters. 3367 local_irq_save(flags); 3368 WARN_ON_ONCE(num_online_cpus() > 1); 3369 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT); 3370 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent) 3371 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; 3372 local_irq_restore(flags); 3373 } 3374 EXPORT_SYMBOL_GPL(synchronize_rcu); 3375 3376 /** 3377 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie 3378 * @rgosp: Place to put state cookie 3379 * 3380 * Stores into @rgosp a value that will always be treated by functions 3381 * like poll_state_synchronize_rcu_full() as a cookie whose grace period 3382 * has already completed. 3383 */ 3384 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3385 { 3386 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED; 3387 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED; 3388 } 3389 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full); 3390 3391 /** 3392 * get_state_synchronize_rcu - Snapshot current RCU state 3393 * 3394 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3395 * or poll_state_synchronize_rcu() to determine whether or not a full 3396 * grace period has elapsed in the meantime. 3397 */ 3398 unsigned long get_state_synchronize_rcu(void) 3399 { 3400 /* 3401 * Any prior manipulation of RCU-protected data must happen 3402 * before the load from ->gp_seq. 3403 */ 3404 smp_mb(); /* ^^^ */ 3405 return rcu_seq_snap(&rcu_state.gp_seq_polled); 3406 } 3407 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 3408 3409 /** 3410 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited 3411 * @rgosp: location to place combined normal/expedited grace-period state 3412 * 3413 * Places the normal and expedited grace-period states in @rgosp. This 3414 * state value can be passed to a later call to cond_synchronize_rcu_full() 3415 * or poll_state_synchronize_rcu_full() to determine whether or not a 3416 * grace period (whether normal or expedited) has elapsed in the meantime. 3417 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned 3418 * long, but is guaranteed to see all grace periods. In contrast, the 3419 * combined state occupies less memory, but can sometimes fail to take 3420 * grace periods into account. 3421 * 3422 * This does not guarantee that the needed grace period will actually 3423 * start. 3424 */ 3425 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3426 { 3427 /* 3428 * Any prior manipulation of RCU-protected data must happen 3429 * before the loads from ->gp_seq and ->expedited_sequence. 3430 */ 3431 smp_mb(); /* ^^^ */ 3432 3433 // Yes, rcu_state.gp_seq, not rnp_root->gp_seq, the latter's use 3434 // in poll_state_synchronize_rcu_full() notwithstanding. Use of 3435 // the latter here would result in too-short grace periods due to 3436 // interactions with newly onlined CPUs. 3437 rgosp->rgos_norm = rcu_seq_snap(&rcu_state.gp_seq); 3438 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence); 3439 } 3440 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full); 3441 3442 /* 3443 * Helper function for start_poll_synchronize_rcu() and 3444 * start_poll_synchronize_rcu_full(). 
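 * It registers the need for a future grace period with this CPU's leaf
 * rcu_node structure and, if required, wakes the grace-period kthread.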
3445 */
3446 static void start_poll_synchronize_rcu_common(void)
3447 {
3448 unsigned long flags;
3449 bool needwake;
3450 struct rcu_data *rdp;
3451 struct rcu_node *rnp;
3452 
3453 local_irq_save(flags);
3454 rdp = this_cpu_ptr(&rcu_data);
3455 rnp = rdp->mynode;
3456 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3457 // Note it is possible for a grace period to have elapsed between
3458 // the above call to get_state_synchronize_rcu() and the below call
3459 // to rcu_seq_snap(). This is OK, the worst that happens is that we
3460 // get a grace period that no one needed. These accesses are ordered
3461 // by smp_mb(), and we are accessing them in the opposite order
3462 // from which they are updated at grace-period start, as required.
3463 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3464 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3465 if (needwake)
3466 rcu_gp_kthread_wake();
3467 }
3468 
3469 /**
3470 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3471 *
3472 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3473 * or poll_state_synchronize_rcu() to determine whether or not a full
3474 * grace period has elapsed in the meantime. If the needed grace period
3475 * is not already slated to start, notifies RCU core of the need for that
3476 * grace period.
3477 */
3478 unsigned long start_poll_synchronize_rcu(void)
3479 {
3480 unsigned long gp_seq = get_state_synchronize_rcu();
3481 
3482 start_poll_synchronize_rcu_common();
3483 return gp_seq;
3484 }
3485 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3486 
3487 /**
3488 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3489 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3490 *
3491 * Places the normal and expedited grace-period states in *@rgosp. This
3492 * state value can be passed to a later call to cond_synchronize_rcu_full()
3493 * or poll_state_synchronize_rcu_full() to determine whether or not a
3494 * grace period (whether normal or expedited) has elapsed in the meantime.
3495 * If the needed grace period is not already slated to start, notifies
3496 * RCU core of the need for that grace period.
3497 */
3498 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3499 {
3500 get_state_synchronize_rcu_full(rgosp);
3501 
3502 start_poll_synchronize_rcu_common();
3503 }
3504 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3505 
3506 /**
3507 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3508 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3509 *
3510 * If a full RCU grace period has elapsed since the earlier call from
3511 * which @oldstate was obtained, return @true, otherwise return @false.
3512 * If @false is returned, it is the caller's responsibility to invoke this
3513 * function later on until it does return @true. Alternatively, the caller
3514 * can explicitly wait for a grace period, for example, by passing @oldstate
3515 * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3516 * on the one hand or by directly invoking either synchronize_rcu() or
3517 * synchronize_rcu_expedited() on the other.
3518 *
3519 * Yes, this function does not take counter wrap into account.
3520 * But counter wrap is harmless. If the counter wraps, we have waited for
3521 * more than a billion grace periods (and way more on a 64-bit system!).
3522 * Those needing to keep old state values for very long time periods 3523 * (many hours even on 32-bit systems) should check them occasionally and 3524 * either refresh them or set a flag indicating that the grace period has 3525 * completed. Alternatively, they can use get_completed_synchronize_rcu() 3526 * to get a guaranteed-completed grace-period state. 3527 * 3528 * In addition, because oldstate compresses the grace-period state for 3529 * both normal and expedited grace periods into a single unsigned long, 3530 * it can miss a grace period when synchronize_rcu() runs concurrently 3531 * with synchronize_rcu_expedited(). If this is unacceptable, please 3532 * instead use the _full() variant of these polling APIs. 3533 * 3534 * This function provides the same memory-ordering guarantees that 3535 * would be provided by a synchronize_rcu() that was invoked at the call 3536 * to the function that provided @oldstate, and that returned at the end 3537 * of this function. 3538 */ 3539 bool poll_state_synchronize_rcu(unsigned long oldstate) 3540 { 3541 if (oldstate == RCU_GET_STATE_COMPLETED || 3542 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) { 3543 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 3544 return true; 3545 } 3546 return false; 3547 } 3548 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu); 3549 3550 /** 3551 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed? 3552 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full() 3553 * 3554 * If a full RCU grace period has elapsed since the earlier call from 3555 * which *rgosp was obtained, return @true, otherwise return @false. 3556 * If @false is returned, it is the caller's responsibility to invoke this 3557 * function later on until it does return @true. Alternatively, the caller 3558 * can explicitly wait for a grace period, for example, by passing @rgosp 3559 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu(). 3560 * 3561 * Yes, this function does not take counter wrap into account. 3562 * But counter wrap is harmless. If the counter wraps, we have waited 3563 * for more than a billion grace periods (and way more on a 64-bit 3564 * system!). Those needing to keep rcu_gp_oldstate values for very 3565 * long time periods (many hours even on 32-bit systems) should check 3566 * them occasionally and either refresh them or set a flag indicating 3567 * that the grace period has completed. Alternatively, they can use 3568 * get_completed_synchronize_rcu_full() to get a guaranteed-completed 3569 * grace-period state. 3570 * 3571 * This function provides the same memory-ordering guarantees that would 3572 * be provided by a synchronize_rcu() that was invoked at the call to 3573 * the function that provided @rgosp, and that returned at the end of this 3574 * function. And this guarantee requires that the root rcu_node structure's 3575 * ->gp_seq field be checked instead of that of the rcu_state structure. 3576 * The problem is that the just-ending grace-period's callbacks can be 3577 * invoked between the time that the root rcu_node structure's ->gp_seq 3578 * field is updated and the time that the rcu_state structure's ->gp_seq 3579 * field is updated. Therefore, if a single synchronize_rcu() is to 3580 * cause a subsequent poll_state_synchronize_rcu_full() to return @true, 3581 * then the root rcu_node structure is the one that needs to be polled. 
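 *
 * A minimal usage sketch (an illustrative editorial addition; struct foo,
 * unpublish_foo(), do_other_work(), and defer_free() are hypothetical):
 *
 *	struct rcu_gp_oldstate rgos;
 *	struct foo *p = unpublish_foo();	// Remove from readers' view.
 *
 *	start_poll_synchronize_rcu_full(&rgos);
 *	do_other_work();
 *	if (poll_state_synchronize_rcu_full(&rgos))
 *		kfree(p);	// A full grace period has elapsed.
 *	else
 *		defer_free(p);	// Poll &rgos again later.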
3582 */ 3583 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3584 { 3585 struct rcu_node *rnp = rcu_get_root(); 3586 3587 smp_mb(); // Order against root rcu_node structure grace-period cleanup. 3588 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED || 3589 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) || 3590 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED || 3591 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) { 3592 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 3593 return true; 3594 } 3595 return false; 3596 } 3597 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full); 3598 3599 /** 3600 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 3601 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited() 3602 * 3603 * If a full RCU grace period has elapsed since the earlier call to 3604 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return. 3605 * Otherwise, invoke synchronize_rcu() to wait for a full grace period. 3606 * 3607 * Yes, this function does not take counter wrap into account. 3608 * But counter wrap is harmless. If the counter wraps, we have waited for 3609 * more than 2 billion grace periods (and way more on a 64-bit system!), 3610 * so waiting for a couple of additional grace periods should be just fine. 3611 * 3612 * This function provides the same memory-ordering guarantees that 3613 * would be provided by a synchronize_rcu() that was invoked at the call 3614 * to the function that provided @oldstate and that returned at the end 3615 * of this function. 3616 */ 3617 void cond_synchronize_rcu(unsigned long oldstate) 3618 { 3619 if (!poll_state_synchronize_rcu(oldstate)) 3620 synchronize_rcu(); 3621 } 3622 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 3623 3624 /** 3625 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period 3626 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full() 3627 * 3628 * If a full RCU grace period has elapsed since the call to 3629 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), 3630 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was 3631 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait 3632 * for a full grace period. 3633 * 3634 * Yes, this function does not take counter wrap into account. 3635 * But counter wrap is harmless. If the counter wraps, we have waited for 3636 * more than 2 billion grace periods (and way more on a 64-bit system!), 3637 * so waiting for a couple of additional grace periods should be just fine. 3638 * 3639 * This function provides the same memory-ordering guarantees that 3640 * would be provided by a synchronize_rcu() that was invoked at the call 3641 * to the function that provided @rgosp and that returned at the end of 3642 * this function. 3643 */ 3644 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 3645 { 3646 if (!poll_state_synchronize_rcu_full(rgosp)) 3647 synchronize_rcu(); 3648 } 3649 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full); 3650 3651 /* 3652 * Check to see if there is any immediate RCU-related work to be done by 3653 * the current CPU, returning 1 if so and zero otherwise. The checks are 3654 * in order of increasing expense: checks that can be carried out against 3655 * CPU-local state are performed first. However, we must check for CPU 3656 * stalls first, else we might not get a chance. 
3657 */ 3658 static int rcu_pending(int user) 3659 { 3660 bool gp_in_progress; 3661 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 3662 struct rcu_node *rnp = rdp->mynode; 3663 3664 lockdep_assert_irqs_disabled(); 3665 3666 /* Check for CPU stalls, if enabled. */ 3667 check_cpu_stall(rdp); 3668 3669 /* Does this CPU need a deferred NOCB wakeup? */ 3670 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) 3671 return 1; 3672 3673 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ 3674 gp_in_progress = rcu_gp_in_progress(); 3675 if ((user || rcu_is_cpu_rrupt_from_idle() || 3676 (gp_in_progress && 3677 time_before(jiffies, READ_ONCE(rcu_state.gp_start) + 3678 nohz_full_patience_delay_jiffies))) && 3679 rcu_nohz_full_cpu()) 3680 return 0; 3681 3682 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3683 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) 3684 return 1; 3685 3686 /* Does this CPU have callbacks ready to invoke? */ 3687 if (!rcu_rdp_is_offloaded(rdp) && 3688 rcu_segcblist_ready_cbs(&rdp->cblist)) 3689 return 1; 3690 3691 /* Has RCU gone idle with this CPU needing another grace period? */ 3692 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && 3693 !rcu_rdp_is_offloaded(rdp) && 3694 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 3695 return 1; 3696 3697 /* Have RCU grace period completed or started? */ 3698 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 3699 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 3700 return 1; 3701 3702 /* nothing to do */ 3703 return 0; 3704 } 3705 3706 /* 3707 * Helper function for rcu_barrier() tracing. If tracing is disabled, 3708 * the compiler is expected to optimize this away. 3709 */ 3710 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 3711 { 3712 trace_rcu_barrier(rcu_state.name, s, cpu, 3713 atomic_read(&rcu_state.barrier_cpu_count), done); 3714 } 3715 3716 /* 3717 * RCU callback function for rcu_barrier(). If we are last, wake 3718 * up the task executing rcu_barrier(). 3719 * 3720 * Note that the value of rcu_state.barrier_sequence must be captured 3721 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last, 3722 * other CPUs might count the value down to zero before this CPU gets 3723 * around to invoking rcu_barrier_trace(), which might result in bogus 3724 * data from the next instance of rcu_barrier(). 3725 */ 3726 static void rcu_barrier_callback(struct rcu_head *rhp) 3727 { 3728 unsigned long __maybe_unused s = rcu_state.barrier_sequence; 3729 3730 rhp->next = rhp; // Mark the callback as having been invoked. 3731 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 3732 rcu_barrier_trace(TPS("LastCB"), -1, s); 3733 complete(&rcu_state.barrier_completion); 3734 } else { 3735 rcu_barrier_trace(TPS("CB"), -1, s); 3736 } 3737 } 3738 3739 /* 3740 * If needed, entrain an rcu_barrier() callback on rdp->cblist. 
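 * The ->barrier_seq_snap field is compared against the current value of
 * rcu_state.barrier_sequence so that at most one callback is entrained
 * per CPU for any given rcu_barrier() invocation.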
3741 */ 3742 static void rcu_barrier_entrain(struct rcu_data *rdp) 3743 { 3744 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence); 3745 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); 3746 bool wake_nocb = false; 3747 bool was_alldone = false; 3748 3749 lockdep_assert_held(&rcu_state.barrier_lock); 3750 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq)) 3751 return; 3752 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 3753 rdp->barrier_head.func = rcu_barrier_callback; 3754 debug_rcu_head_queue(&rdp->barrier_head); 3755 rcu_nocb_lock(rdp); 3756 /* 3757 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular 3758 * queue. This way we don't wait for bypass timer that can reach seconds 3759 * if it's fully lazy. 3760 */ 3761 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); 3762 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false)); 3763 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); 3764 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { 3765 atomic_inc(&rcu_state.barrier_cpu_count); 3766 } else { 3767 debug_rcu_head_unqueue(&rdp->barrier_head); 3768 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence); 3769 } 3770 rcu_nocb_unlock(rdp); 3771 if (wake_nocb) 3772 wake_nocb_gp(rdp, false); 3773 smp_store_release(&rdp->barrier_seq_snap, gseq); 3774 } 3775 3776 /* 3777 * Called with preemption disabled, and from cross-cpu IRQ context. 3778 */ 3779 static void rcu_barrier_handler(void *cpu_in) 3780 { 3781 uintptr_t cpu = (uintptr_t)cpu_in; 3782 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3783 3784 lockdep_assert_irqs_disabled(); 3785 WARN_ON_ONCE(cpu != rdp->cpu); 3786 WARN_ON_ONCE(cpu != smp_processor_id()); 3787 raw_spin_lock(&rcu_state.barrier_lock); 3788 rcu_barrier_entrain(rdp); 3789 raw_spin_unlock(&rcu_state.barrier_lock); 3790 } 3791 3792 /** 3793 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 3794 * 3795 * Note that this primitive does not necessarily wait for an RCU grace period 3796 * to complete. For example, if there are no RCU callbacks queued anywhere 3797 * in the system, then rcu_barrier() is within its rights to return 3798 * immediately, without waiting for anything, much less an RCU grace period. 3799 * In fact, rcu_barrier() will normally not result in any RCU grace periods 3800 * beyond those that were already destined to be executed. 3801 * 3802 * In kernels built with CONFIG_RCU_LAZY=y, this function also hurries all 3803 * pending lazy RCU callbacks. 3804 */ 3805 void rcu_barrier(void) 3806 { 3807 uintptr_t cpu; 3808 unsigned long flags; 3809 unsigned long gseq; 3810 struct rcu_data *rdp; 3811 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 3812 3813 rcu_barrier_trace(TPS("Begin"), -1, s); 3814 3815 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 3816 mutex_lock(&rcu_state.barrier_mutex); 3817 3818 /* Did someone else do our work for us? */ 3819 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 3820 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence); 3821 smp_mb(); /* caller's subsequent code after above check. */ 3822 mutex_unlock(&rcu_state.barrier_mutex); 3823 return; 3824 } 3825 3826 /* Mark the start of the barrier operation. 
*/ 3827 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 3828 rcu_seq_start(&rcu_state.barrier_sequence); 3829 gseq = rcu_state.barrier_sequence; 3830 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); 3831 3832 /* 3833 * Initialize the count to two rather than to zero in order 3834 * to avoid a too-soon return to zero in case of an immediate 3835 * invocation of the just-enqueued callback (or preemption of 3836 * this task). Exclude CPU-hotplug operations to ensure that no 3837 * offline non-offloaded CPU has callbacks queued. 3838 */ 3839 init_completion(&rcu_state.barrier_completion); 3840 atomic_set(&rcu_state.barrier_cpu_count, 2); 3841 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 3842 3843 /* 3844 * Force each CPU with callbacks to register a new callback. 3845 * When that callback is invoked, we will know that all of the 3846 * corresponding CPU's preceding callbacks have been invoked. 3847 */ 3848 for_each_possible_cpu(cpu) { 3849 rdp = per_cpu_ptr(&rcu_data, cpu); 3850 retry: 3851 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) 3852 continue; 3853 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 3854 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { 3855 WRITE_ONCE(rdp->barrier_seq_snap, gseq); 3856 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 3857 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence); 3858 continue; 3859 } 3860 if (!rcu_rdp_cpu_online(rdp)) { 3861 rcu_barrier_entrain(rdp); 3862 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); 3863 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 3864 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence); 3865 continue; 3866 } 3867 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 3868 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) { 3869 schedule_timeout_uninterruptible(1); 3870 goto retry; 3871 } 3872 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); 3873 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence); 3874 } 3875 3876 /* 3877 * Now that we have an rcu_barrier_callback() callback on each 3878 * CPU, and thus each counted, remove the initial count. 3879 */ 3880 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count)) 3881 complete(&rcu_state.barrier_completion); 3882 3883 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 3884 wait_for_completion(&rcu_state.barrier_completion); 3885 3886 /* Mark the end of the barrier operation. */ 3887 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); 3888 rcu_seq_end(&rcu_state.barrier_sequence); 3889 gseq = rcu_state.barrier_sequence; 3890 for_each_possible_cpu(cpu) { 3891 rdp = per_cpu_ptr(&rcu_data, cpu); 3892 3893 WRITE_ONCE(rdp->barrier_seq_snap, gseq); 3894 } 3895 3896 /* Other rcu_barrier() invocations can now safely proceed. */ 3897 mutex_unlock(&rcu_state.barrier_mutex); 3898 } 3899 EXPORT_SYMBOL_GPL(rcu_barrier); 3900 3901 static unsigned long rcu_barrier_last_throttle; 3902 3903 /** 3904 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second 3905 * 3906 * This can be thought of as guard rails around rcu_barrier() that 3907 * permits unrestricted userspace use, at least assuming the hardware's 3908 * try_cmpxchg() is robust. There will be at most one call per second to 3909 * rcu_barrier() system-wide from use of this function, which means that 3910 * callers might needlessly wait a second or three. 
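 *
 * For example, assuming the usual sysfs location for built-in rcutree
 * module parameters, the following shell command invokes this function
 * and returns only once the resulting rcu_barrier() has completed:
 *
 *	echo 1 > /sys/module/rcutree/parameters/do_rcu_barrier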
3911 * 3912 * This is intended for use by test suites to avoid OOM by flushing RCU 3913 * callbacks from the previous test before starting the next. See the 3914 * rcutree.do_rcu_barrier module parameter for more information. 3915 * 3916 * Why not simply make rcu_barrier() more scalable? That might be 3917 * the eventual endpoint, but let's keep it simple for the time being. 3918 * Note that the module parameter infrastructure serializes calls to a 3919 * given .set() function, but should concurrent .set() invocation ever be 3920 * possible, we are ready! 3921 */ 3922 static void rcu_barrier_throttled(void) 3923 { 3924 unsigned long j = jiffies; 3925 unsigned long old = READ_ONCE(rcu_barrier_last_throttle); 3926 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 3927 3928 while (time_in_range(j, old, old + HZ / 16) || 3929 !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) { 3930 schedule_timeout_idle(HZ / 16); 3931 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 3932 smp_mb(); /* caller's subsequent code after above check. */ 3933 return; 3934 } 3935 j = jiffies; 3936 old = READ_ONCE(rcu_barrier_last_throttle); 3937 } 3938 rcu_barrier(); 3939 } 3940 3941 /* 3942 * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier 3943 * request arrives. We insist on a true value to allow for possible 3944 * future expansion. 3945 */ 3946 static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp) 3947 { 3948 bool b; 3949 int ret; 3950 3951 if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) 3952 return -EAGAIN; 3953 ret = kstrtobool(val, &b); 3954 if (!ret && b) { 3955 atomic_inc((atomic_t *)kp->arg); 3956 rcu_barrier_throttled(); 3957 atomic_dec((atomic_t *)kp->arg); 3958 } 3959 return ret; 3960 } 3961 3962 /* 3963 * Output the number of outstanding rcutree.do_rcu_barrier requests. 3964 */ 3965 static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp) 3966 { 3967 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg)); 3968 } 3969 3970 static const struct kernel_param_ops do_rcu_barrier_ops = { 3971 .set = param_set_do_rcu_barrier, 3972 .get = param_get_do_rcu_barrier, 3973 }; 3974 static atomic_t do_rcu_barrier; 3975 module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644); 3976 3977 /* 3978 * Compute the mask of online CPUs for the specified rcu_node structure. 3979 * This will not be stable unless the rcu_node structure's ->lock is 3980 * held, but the bit corresponding to the current CPU will be stable 3981 * in most contexts. 3982 */ 3983 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) 3984 { 3985 return READ_ONCE(rnp->qsmaskinitnext); 3986 } 3987 3988 /* 3989 * Is the CPU corresponding to the specified rcu_data structure online 3990 * from RCU's perspective? This perspective is given by that structure's 3991 * ->qsmaskinitnext field rather than by the global cpu_online_mask. 3992 */ 3993 static bool rcu_rdp_cpu_online(struct rcu_data *rdp) 3994 { 3995 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); 3996 } 3997 3998 bool rcu_cpu_online(int cpu) 3999 { 4000 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4001 4002 return rcu_rdp_cpu_online(rdp); 4003 } 4004 4005 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 4006 4007 /* 4008 * Is the current CPU online as far as RCU is concerned? 
4009 * 4010 * Disable preemption to avoid false positives that could otherwise 4011 * happen due to the current CPU number being sampled, this task being 4012 * preempted, its old CPU being taken offline, resuming on some other CPU, 4013 * then determining that its old CPU is now offline. 4014 * 4015 * Disable checking if in an NMI handler because we cannot safely 4016 * report errors from NMI handlers anyway. In addition, it is OK to use 4017 * RCU on an offline processor during initial boot, hence the check for 4018 * rcu_scheduler_fully_active. 4019 */ 4020 bool rcu_lockdep_current_cpu_online(void) 4021 { 4022 struct rcu_data *rdp; 4023 bool ret = false; 4024 4025 if (in_nmi() || !rcu_scheduler_fully_active) 4026 return true; 4027 preempt_disable_notrace(); 4028 rdp = this_cpu_ptr(&rcu_data); 4029 /* 4030 * Strictly, we care here about the case where the current CPU is 4031 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask 4032 * not being up to date. So arch_spin_is_locked() might have a 4033 * false positive if it's held by some *other* CPU, but that's 4034 * OK because that just means a false *negative* on the warning. 4035 */ 4036 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock)) 4037 ret = true; 4038 preempt_enable_notrace(); 4039 return ret; 4040 } 4041 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); 4042 4043 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ 4044 4045 // Has rcu_init() been invoked? This is used (for example) to determine 4046 // whether spinlocks may be acquired safely. 4047 static bool rcu_init_invoked(void) 4048 { 4049 return !!READ_ONCE(rcu_state.n_online_cpus); 4050 } 4051 4052 /* 4053 * All CPUs for the specified rcu_node structure have gone offline, 4054 * and all tasks that were preempted within an RCU read-side critical 4055 * section while running on one of those CPUs have since exited their RCU 4056 * read-side critical section. Some other CPU is reporting this fact with 4057 * the specified rcu_node structure's ->lock held and interrupts disabled. 4058 * This function therefore goes up the tree of rcu_node structures, 4059 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 4060 * the leaf rcu_node structure's ->qsmaskinit field has already been 4061 * updated. 4062 * 4063 * This function does check that the specified rcu_node structure has 4064 * all CPUs offline and no blocked tasks, so it is OK to invoke it 4065 * prematurely. That said, invoking it after the fact will cost you 4066 * a needless lock acquisition. So once it has done its work, don't 4067 * invoke it again. 4068 */ 4069 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 4070 { 4071 long mask; 4072 struct rcu_node *rnp = rnp_leaf; 4073 4074 raw_lockdep_assert_held_rcu_node(rnp_leaf); 4075 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 4076 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 4077 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 4078 return; 4079 for (;;) { 4080 mask = rnp->grpmask; 4081 rnp = rnp->parent; 4082 if (!rnp) 4083 break; 4084 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 4085 rnp->qsmaskinit &= ~mask; 4086 /* Between grace periods, so better already be zero! */ 4087 WARN_ON_ONCE(rnp->qsmask); 4088 if (rnp->qsmaskinit) { 4089 raw_spin_unlock_rcu_node(rnp); 4090 /* irqs remain disabled. */ 4091 return; 4092 } 4093 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. 
*/
4094 }
4095 }
4096 
4097 /*
4098 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4099 * first CPU in a given leaf rcu_node structure coming online. The caller
4100 * must hold the corresponding leaf rcu_node ->lock with interrupts
4101 * disabled.
4102 */
4103 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4104 {
4105 long mask;
4106 long oldmask;
4107 struct rcu_node *rnp = rnp_leaf;
4108 
4109 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4110 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4111 for (;;) {
4112 mask = rnp->grpmask;
4113 rnp = rnp->parent;
4114 if (rnp == NULL)
4115 return;
4116 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4117 oldmask = rnp->qsmaskinit;
4118 rnp->qsmaskinit |= mask;
4119 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4120 if (oldmask)
4121 return;
4122 }
4123 }
4124 
4125 /*
4126 * Do boot-time initialization of a CPU's per-CPU RCU data.
4127 */
4128 static void __init
4129 rcu_boot_init_percpu_data(int cpu)
4130 {
4131 struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4132 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4133 
4134 /* Set up local state, ensuring consistent view of global state. */
4135 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4136 INIT_WORK(&rdp->strict_work, strict_work_handler);
4137 WARN_ON_ONCE(ct->nesting != 1);
4138 WARN_ON_ONCE(rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu)));
4139 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4140 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4141 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
4142 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4143 rdp->rcu_onl_gp_state = RCU_GP_CLEANED;
4144 rdp->last_sched_clock = jiffies;
4145 rdp->cpu = cpu;
4146 rcu_boot_init_nocb_percpu_data(rdp);
4147 }
4148 
4149 static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
4150 {
4151 cpumask_var_t affinity;
4152 int cpu;
4153 
4154 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
4155 return;
4156 
4157 for_each_leaf_node_possible_cpu(rnp, cpu)
4158 cpumask_set_cpu(cpu, affinity);
4159 
4160 kthread_affine_preferred(t, affinity);
4161 
4162 free_cpumask_var(affinity);
4163 }
4164 
4165 struct kthread_worker *rcu_exp_gp_kworker;
4166 
4167 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
4168 {
4169 struct kthread_worker *kworker;
4170 const char *name = "rcu_exp_par_gp_kthread_worker/%d";
4171 struct sched_param param = { .sched_priority = kthread_prio };
4172 int rnp_index = rnp - rcu_get_root();
4173 
4174 if (rnp->exp_kworker)
4175 return;
4176 
4177 kworker = kthread_create_worker(0, name, rnp_index);
4178 if (IS_ERR_OR_NULL(kworker)) {
4179 pr_err("Failed to create par gp kworker on %d/%d\n",
4180 rnp->grplo, rnp->grphi);
4181 return;
4182 }
4183 WRITE_ONCE(rnp->exp_kworker, kworker);
4184 
4185 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4186 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
4187 
4188 rcu_thread_affine_rnp(kworker->task, rnp);
4189 wake_up_process(kworker->task);
4190 }
4191 
4192 static void __init rcu_start_exp_gp_kworker(void)
4193 {
4194 const char *name = "rcu_exp_gp_kthread_worker";
4195 struct sched_param param = { .sched_priority = kthread_prio };
4196 
4197 rcu_exp_gp_kworker = kthread_run_worker(0, name);
4198 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4199 pr_err("Failed to create %s!\n", name);
4200 rcu_exp_gp_kworker = NULL;
4201 return;
4202 }
4203 
4204 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4205 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4206 }
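
/*
 * Illustrative sketch (an editorial addition, not part of RCU): how a
 * kthread_worker like the ones created above is typically used. Work
 * items queued with kthread_queue_work() run in the worker task's
 * context, inheriting whatever scheduling policy and affinity were
 * applied to it. All names prefixed "example_" are hypothetical.
 */
#ifdef RCU_WORKER_EXAMPLE_ONLY /* Not compiled; for illustration only. */
static void example_exp_work_fn(struct kthread_work *work)
{
	pr_info("Running in the kthread_worker task's context.\n");
}

static void example_queue_to_kworker(struct kthread_worker *kworker)
{
	static DEFINE_KTHREAD_WORK(example_work, example_exp_work_fn);

	kthread_queue_work(kworker, &example_work);
	kthread_flush_work(&example_work);	/* Wait for it to finish. */
}
#endif /* RCU_WORKER_EXAMPLE_ONLY */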
4207 4208 static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp) 4209 { 4210 if (rcu_scheduler_fully_active) { 4211 mutex_lock(&rnp->kthread_mutex); 4212 rcu_spawn_one_boost_kthread(rnp); 4213 rcu_spawn_exp_par_gp_kworker(rnp); 4214 mutex_unlock(&rnp->kthread_mutex); 4215 } 4216 } 4217 4218 /* 4219 * Invoked early in the CPU-online process, when pretty much all services 4220 * are available. The incoming CPU is not present. 4221 * 4222 * Initializes a CPU's per-CPU RCU data. Note that only one online or 4223 * offline event can be happening at a given time. Note also that we can 4224 * accept some slop in the rsp->gp_seq access due to the fact that this 4225 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet. 4226 * And any offloaded callbacks are being numbered elsewhere. 4227 */ 4228 int rcutree_prepare_cpu(unsigned int cpu) 4229 { 4230 unsigned long flags; 4231 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); 4232 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4233 struct rcu_node *rnp = rcu_get_root(); 4234 4235 /* Set up local state, ensuring consistent view of global state. */ 4236 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4237 rdp->qlen_last_fqs_check = 0; 4238 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 4239 rdp->blimit = blimit; 4240 ct->nesting = 1; /* CPU not up, no tearing. */ 4241 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 4242 4243 /* 4244 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be 4245 * (re-)initialized. 4246 */ 4247 if (!rcu_segcblist_is_enabled(&rdp->cblist)) 4248 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ 4249 4250 /* 4251 * Add CPU to leaf rcu_node pending-online bitmask. Any needed 4252 * propagation up the rcu_node tree will happen at the beginning 4253 * of the next grace period. 4254 */ 4255 rnp = rdp->mynode; 4256 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 4257 rdp->gp_seq = READ_ONCE(rnp->gp_seq); 4258 rdp->gp_seq_needed = rdp->gp_seq; 4259 rdp->cpu_no_qs.b.norm = true; 4260 rdp->core_needs_qs = false; 4261 rdp->rcu_iw_pending = false; 4262 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); 4263 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; 4264 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 4265 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4266 4267 rcu_preempt_deferred_qs_init(rdp); 4268 rcu_spawn_rnp_kthreads(rnp); 4269 rcu_spawn_cpu_nocb_kthread(cpu); 4270 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus); 4271 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1); 4272 4273 return 0; 4274 } 4275 4276 /* 4277 * Has the specified (known valid) CPU ever been fully online? 4278 */ 4279 bool rcu_cpu_beenfullyonline(int cpu) 4280 { 4281 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4282 4283 return smp_load_acquire(&rdp->beenonline); 4284 } 4285 4286 /* 4287 * Near the end of the CPU-online process. Pretty much all services 4288 * enabled, and the CPU is now very much alive. 4289 */ 4290 int rcutree_online_cpu(unsigned int cpu) 4291 { 4292 unsigned long flags; 4293 struct rcu_data *rdp; 4294 struct rcu_node *rnp; 4295 4296 rdp = per_cpu_ptr(&rcu_data, cpu); 4297 rnp = rdp->mynode; 4298 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4299 rnp->ffmask |= rdp->grpmask; 4300 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4301 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 4302 return 0; /* Too early in boot for scheduler work. */ 4303 4304 // Stop-machine done, so allow nohz_full to disable tick. 
4305 tick_dep_clear(TICK_DEP_BIT_RCU); 4306 return 0; 4307 } 4308 4309 /* 4310 * Mark the specified CPU as being online so that subsequent grace periods 4311 * (both expedited and normal) will wait on it. Note that this means that 4312 * incoming CPUs are not allowed to use RCU read-side critical sections 4313 * until this function is called. Failing to observe this restriction 4314 * will result in lockdep splats. 4315 * 4316 * Note that this function is special in that it is invoked directly 4317 * from the incoming CPU rather than from the cpuhp_step mechanism. 4318 * This is because this function must be invoked at a precise location. 4319 * This incoming CPU must not have enabled interrupts yet. 4320 * 4321 * This mirrors the effects of rcutree_report_cpu_dead(). 4322 */ 4323 void rcutree_report_cpu_starting(unsigned int cpu) 4324 { 4325 unsigned long mask; 4326 struct rcu_data *rdp; 4327 struct rcu_node *rnp; 4328 bool newcpu; 4329 4330 lockdep_assert_irqs_disabled(); 4331 rdp = per_cpu_ptr(&rcu_data, cpu); 4332 if (rdp->cpu_started) 4333 return; 4334 rdp->cpu_started = true; 4335 4336 rnp = rdp->mynode; 4337 mask = rdp->grpmask; 4338 arch_spin_lock(&rcu_state.ofl_lock); 4339 rcu_watching_online(); 4340 raw_spin_lock(&rcu_state.barrier_lock); 4341 raw_spin_lock_rcu_node(rnp); 4342 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); 4343 raw_spin_unlock(&rcu_state.barrier_lock); 4344 newcpu = !(rnp->expmaskinitnext & mask); 4345 rnp->expmaskinitnext |= mask; 4346 /* Allow lockless access for expedited grace periods. */ 4347 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */ 4348 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus); 4349 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ 4350 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); 4351 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state); 4352 4353 /* An incoming CPU should never be blocking a grace period. */ 4354 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */ 4355 /* rcu_report_qs_rnp() *really* wants some flags to restore */ 4356 unsigned long flags; 4357 4358 local_irq_save(flags); 4359 rcu_disable_urgency_upon_qs(rdp); 4360 /* Report QS -after- changing ->qsmaskinitnext! */ 4361 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 4362 } else { 4363 raw_spin_unlock_rcu_node(rnp); 4364 } 4365 arch_spin_unlock(&rcu_state.ofl_lock); 4366 smp_store_release(&rdp->beenonline, true); 4367 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ 4368 } 4369 4370 /* 4371 * The outgoing function has no further need of RCU, so remove it from 4372 * the rcu_node tree's ->qsmaskinitnext bit masks. 4373 * 4374 * Note that this function is special in that it is invoked directly 4375 * from the outgoing CPU rather than from the cpuhp_step mechanism. 4376 * This is because this function must be invoked at a precise location. 4377 * 4378 * This mirrors the effect of rcutree_report_cpu_starting(). 4379 */ 4380 void rcutree_report_cpu_dead(void) 4381 { 4382 unsigned long flags; 4383 unsigned long mask; 4384 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 4385 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 4386 4387 /* 4388 * IRQS must be disabled from now on and until the CPU dies, or an interrupt 4389 * may introduce a new READ-side while it is actually off the QS masks. 4390 */ 4391 lockdep_assert_irqs_disabled(); 4392 /* 4393 * CPUHP_AP_SMPCFD_DYING was the last call for rcu_exp_handler() execution. 
4394 * The requested QS must have been reported on the last context switch 4395 * from stop machine to idle. 4396 */ 4397 WARN_ON_ONCE(rdp->cpu_no_qs.b.exp); 4398 // Do any dangling deferred wakeups. 4399 do_nocb_deferred_wakeup(rdp); 4400 4401 rcu_preempt_deferred_qs(current); 4402 4403 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 4404 mask = rdp->grpmask; 4405 4406 /* 4407 * Hold the ofl_lock and rnp lock to avoid races between CPU going 4408 * offline and doing a QS report (as below), versus rcu_gp_init(). 4409 * See Requirements.rst > Hotplug CPU > Concurrent QS Reporting section 4410 * for more details. 4411 */ 4412 arch_spin_lock(&rcu_state.ofl_lock); 4413 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 4414 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); 4415 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state); 4416 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ 4417 /* Report quiescent state -before- changing ->qsmaskinitnext! */ 4418 rcu_disable_urgency_upon_qs(rdp); 4419 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 4420 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4421 } 4422 /* Clear from ->qsmaskinitnext to mark offline. */ 4423 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); 4424 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4425 arch_spin_unlock(&rcu_state.ofl_lock); 4426 rdp->cpu_started = false; 4427 } 4428 4429 #ifdef CONFIG_HOTPLUG_CPU 4430 /* 4431 * The outgoing CPU has just passed through the dying-idle state, and we 4432 * are being invoked from the CPU that was IPIed to continue the offline 4433 * operation. Migrate the outgoing CPU's callbacks to the current CPU. 4434 */ 4435 void rcutree_migrate_callbacks(int cpu) 4436 { 4437 unsigned long flags; 4438 struct rcu_data *my_rdp; 4439 struct rcu_node *my_rnp; 4440 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4441 bool needwake; 4442 4443 if (rcu_rdp_is_offloaded(rdp)) 4444 return; 4445 4446 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 4447 if (rcu_segcblist_empty(&rdp->cblist)) { 4448 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4449 return; /* No callbacks to migrate. */ 4450 } 4451 4452 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp)); 4453 rcu_barrier_entrain(rdp); 4454 my_rdp = this_cpu_ptr(&rcu_data); 4455 my_rnp = my_rdp->mynode; 4456 rcu_nocb_lock(my_rdp); /* irqs already disabled. */ 4457 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false)); 4458 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */ 4459 /* Leverage recent GPs and set GP for new callbacks. */ 4460 needwake = rcu_advance_cbs(my_rnp, rdp) || 4461 rcu_advance_cbs(my_rnp, my_rdp); 4462 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 4463 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */ 4464 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp); 4465 rcu_segcblist_disable(&rdp->cblist); 4466 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist)); 4467 check_cb_ovld_locked(my_rdp, my_rnp); 4468 if (rcu_rdp_is_offloaded(my_rdp)) { 4469 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 4470 __call_rcu_nocb_wake(my_rdp, true, flags); 4471 } else { 4472 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */ 4473 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. 
*/ 4474 } 4475 local_irq_restore(flags); 4476 if (needwake) 4477 rcu_gp_kthread_wake(); 4478 lockdep_assert_irqs_enabled(); 4479 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 4480 !rcu_segcblist_empty(&rdp->cblist), 4481 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 4482 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 4483 rcu_segcblist_first_cb(&rdp->cblist)); 4484 } 4485 4486 /* 4487 * The CPU has been completely removed, and some other CPU is reporting 4488 * this fact from process context. Do the remainder of the cleanup. 4489 * There can only be one CPU hotplug operation at a time, so no need for 4490 * explicit locking. 4491 */ 4492 int rcutree_dead_cpu(unsigned int cpu) 4493 { 4494 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus); 4495 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1); 4496 // Stop-machine done, so allow nohz_full to disable tick. 4497 tick_dep_clear(TICK_DEP_BIT_RCU); 4498 return 0; 4499 } 4500 4501 /* 4502 * Near the end of the offline process. Trace the fact that this CPU 4503 * is going offline. 4504 */ 4505 int rcutree_dying_cpu(unsigned int cpu) 4506 { 4507 bool blkd; 4508 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4509 struct rcu_node *rnp = rdp->mynode; 4510 4511 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); 4512 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 4513 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl")); 4514 return 0; 4515 } 4516 4517 /* 4518 * Near the beginning of the process. The CPU is still very much alive 4519 * with pretty much all services enabled. 4520 */ 4521 int rcutree_offline_cpu(unsigned int cpu) 4522 { 4523 unsigned long flags; 4524 struct rcu_data *rdp; 4525 struct rcu_node *rnp; 4526 4527 rdp = per_cpu_ptr(&rcu_data, cpu); 4528 rnp = rdp->mynode; 4529 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4530 rnp->ffmask &= ~rdp->grpmask; 4531 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4532 4533 // nohz_full CPUs need the tick for stop-machine to work quickly 4534 tick_dep_set(TICK_DEP_BIT_RCU); 4535 return 0; 4536 } 4537 #endif /* #ifdef CONFIG_HOTPLUG_CPU */ 4538 4539 /* 4540 * On non-huge systems, use expedited RCU grace periods to make suspend 4541 * and hibernation run faster. 4542 */ 4543 static int rcu_pm_notify(struct notifier_block *self, 4544 unsigned long action, void *hcpu) 4545 { 4546 switch (action) { 4547 case PM_HIBERNATION_PREPARE: 4548 case PM_SUSPEND_PREPARE: 4549 rcu_async_hurry(); 4550 rcu_expedite_gp(); 4551 break; 4552 case PM_POST_HIBERNATION: 4553 case PM_POST_SUSPEND: 4554 rcu_unexpedite_gp(); 4555 rcu_async_relax(); 4556 break; 4557 default: 4558 break; 4559 } 4560 return NOTIFY_OK; 4561 } 4562 4563 /* 4564 * Spawn the kthreads that handle RCU's grace periods. 
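 * Besides the main grace-period kthread, this spawns (as applicable)
 * this CPU's rcuo kthread, the rcuc kthreads, the per-rcu_node boost
 * kthreads and expedited parallel kworkers, and the kthread worker that
 * drives expedited grace periods.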
4565 */ 4566 static int __init rcu_spawn_gp_kthread(void) 4567 { 4568 unsigned long flags; 4569 struct rcu_node *rnp; 4570 struct sched_param sp; 4571 struct task_struct *t; 4572 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 4573 4574 rcu_scheduler_fully_active = 1; 4575 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); 4576 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) 4577 return 0; 4578 if (kthread_prio) { 4579 sp.sched_priority = kthread_prio; 4580 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 4581 } 4582 rnp = rcu_get_root(); 4583 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4584 WRITE_ONCE(rcu_state.gp_activity, jiffies); 4585 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 4586 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread. 4587 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */ 4588 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4589 wake_up_process(t); 4590 /* This is a pre-SMP initcall, we expect a single CPU */ 4591 WARN_ON(num_online_cpus() > 1); 4592 /* 4593 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu() 4594 * due to rcu_scheduler_fully_active. 4595 */ 4596 rcu_spawn_cpu_nocb_kthread(smp_processor_id()); 4597 rcu_spawn_rnp_kthreads(rdp->mynode); 4598 rcu_spawn_core_kthreads(); 4599 /* Create kthread worker for expedited GPs */ 4600 rcu_start_exp_gp_kworker(); 4601 return 0; 4602 } 4603 early_initcall(rcu_spawn_gp_kthread); 4604 4605 /* 4606 * This function is invoked towards the end of the scheduler's 4607 * initialization process. Before this is called, the idle task might 4608 * contain synchronous grace-period primitives (during which time, this idle 4609 * task is booting the system, and such primitives are no-ops). After this 4610 * function is called, any synchronous grace-period primitives are run as 4611 * expedited, with the requesting task driving the grace period forward. 4612 * A later core_initcall() rcu_set_runtime_mode() will switch to full 4613 * runtime RCU functionality. 4614 */ 4615 void rcu_scheduler_starting(void) 4616 { 4617 unsigned long flags; 4618 struct rcu_node *rnp; 4619 4620 WARN_ON(num_online_cpus() != 1); 4621 WARN_ON(nr_context_switches() > 0); 4622 rcu_test_sync_prims(); 4623 4624 // Fix up the ->gp_seq counters. 4625 local_irq_save(flags); 4626 rcu_for_each_node_breadth_first(rnp) 4627 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; 4628 local_irq_restore(flags); 4629 4630 // Switch out of early boot mode. 4631 rcu_scheduler_active = RCU_SCHEDULER_INIT; 4632 rcu_test_sync_prims(); 4633 } 4634 4635 /* 4636 * Helper function for rcu_init() that initializes the rcu_state structure. 4637 */ 4638 static void __init rcu_init_one(void) 4639 { 4640 static const char * const buf[] = RCU_NODE_NAME_INIT; 4641 static const char * const fqs[] = RCU_FQS_NAME_INIT; 4642 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 4643 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 4644 4645 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 4646 int cpustride = 1; 4647 int i; 4648 int j; 4649 struct rcu_node *rnp; 4650 4651 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 4652 4653 /* Silence gcc 4.8 false positive about array index out of range. */ 4654 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) 4655 panic("rcu_init_one: rcu_num_lvls out of range"); 4656 4657 /* Initialize the level-tracking arrays. 
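 * Each rcu_state.level[] entry points to the first rcu_node structure
 * at the corresponding level of the combining tree.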
/*
 * Helper function for rcu_init() that initializes the rcu_state structure.
 */
static void __init rcu_init_one(void)
{
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

	int levelspread[RCU_NUM_LVLS];	/* kids/node in each level. */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < rcu_num_lvls; i++)
		rcu_state.level[i] =
			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		rnp = rcu_state.level[i];
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gp_seq = rcu_state.gp_seq;
			rnp->gp_seq_needed = rcu_state.gp_seq;
			rnp->completedqs = rcu_state.gp_seq;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % levelspread[i - 1];
				rnp->grpmask = BIT(rnp->grpnum);
				rnp->parent = rcu_state.level[i - 1] +
					      j / levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
			spin_lock_init(&rnp->exp_lock);
			mutex_init(&rnp->kthread_mutex);
			raw_spin_lock_init(&rnp->exp_poll_lock);
			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
		}
	}

	init_swait_queue_head(&rcu_state.gp_wq);
	init_swait_queue_head(&rcu_state.expedited_wq);
	rnp = rcu_first_leaf_node();
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
		per_cpu_ptr(&rcu_data, i)->barrier_head.next =
			&per_cpu_ptr(&rcu_data, i)->barrier_head;
		rcu_boot_init_percpu_data(i);
	}
}

/*
 * Force priority from the kernel command-line into range.
 */
static void __init sanitize_kthread_prio(void)
{
	int kthread_prio_in = kthread_prio;

	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
		kthread_prio = 2;
	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;

	if (kthread_prio != kthread_prio_in)
		pr_alert("%s: Limited prio to %d from %d\n",
			 __func__, kthread_prio, kthread_prio_in);
}
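/*
 * Illustrative sketch, not part of the upstream file: how the
 * ->grplo/->grphi ranges set up in rcu_init_one() above map a CPU to its
 * leaf rcu_node, mirroring the for_each_possible_cpu() loop in that
 * function.  The helper name example_cpu_to_leaf() is made up for this
 * example.
 */
#if 0	/* Example only. */
static struct rcu_node *example_cpu_to_leaf(int cpu)
{
	struct rcu_node *rnp = rcu_first_leaf_node();

	while (cpu > rnp->grphi)	/* Leaves are ordered by CPU range. */
		rnp++;
	return rnp;	/* Leaf whose [->grplo, ->grphi] range covers cpu. */
}
#endif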
/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
void rcu_init_geometry(void)
{
	ulong d;
	int i;
	static unsigned long old_nr_cpu_ids;
	int rcu_capacity[RCU_NUM_LVLS];
	static bool initialized;

	if (initialized) {
		/*
		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
		 */
		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
		return;
	}

	old_nr_cpu_ids = nr_cpu_ids;
	initialized = true;

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value (a function of HZ) plus one jiffy for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;
	adjust_jiffies_till_sched_qs();

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 || rcu_fanout_leaf > BITS_PER_LONG) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute the number of CPUs that can be handled by an rcu_node
	 * tree with the given number of levels.
	 */
	rcu_capacity[0] = rcu_fanout_leaf;
	for (i = 1; i < RCU_NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;

	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/* Calculate the number of levels in the tree. */
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
	}
	rcu_num_lvls = i + 1;

	/* Calculate the number of rcu_nodes at each level of the tree. */
	for (i = 0; i < rcu_num_lvls; i++) {
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];

		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i < rcu_num_lvls; i++)
		rcu_num_nodes += num_rcu_lvl[i];
}
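/*
 * Worked example, not part of the upstream file: the geometry that
 * rcu_init_geometry() above would compute for a hypothetical system with
 * rcu_fanout_leaf = 16, RCU_FANOUT = 64, and nr_cpu_ids = 100.  All
 * numbers below are assumptions chosen purely for illustration.
 */
#if 0	/* Example only. */
static void example_geometry(void)
{
	/* rcu_capacity[] = { 16, 16 * 64 } = { 16, 1024 }. */
	/* 100 > 16 but 100 <= 1024, so rcu_num_lvls = 2. */
	int roots = DIV_ROUND_UP(100, 1024);	/* num_rcu_lvl[0] = 1 root. */
	int leaves = DIV_ROUND_UP(100, 16);	/* num_rcu_lvl[1] = 7 leaves. */

	WARN_ON(roots + leaves != 8);	/* rcu_num_nodes = 8 in total. */
}
#endif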
/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure.
 */
static void __init rcu_dump_rcu_node_tree(void)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}

struct workqueue_struct *rcu_gp_wq;

void __init rcu_init(void)
{
	int cpu = smp_processor_id();

	rcu_early_boot_tests();

	rcu_bootup_announce();
	sanitize_kthread_prio();
	rcu_init_geometry();
	rcu_init_one();
	if (dump_tree)
		rcu_dump_rcu_node_tree();
	if (use_softirq)
		open_softirq(RCU_SOFTIRQ, rcu_core_si);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
	rcutree_prepare_cpu(cpu);
	rcutree_report_cpu_starting(cpu);
	rcutree_online_cpu(cpu);

	/* Create workqueue for Tree SRCU and for expedited GPs. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	WARN_ON(!rcu_gp_wq);

	sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	WARN_ON(!sync_wq);

	/*
	 * Respect an explicit setting from a boot parameter; otherwise
	 * enable direct wakeups from the grace-period kthread on smaller
	 * systems.
	 */
	if (rcu_normal_wake_from_gp < 0) {
		if (num_possible_cpus() <= WAKE_FROM_GP_CPU_THRESHOLD)
			rcu_normal_wake_from_gp = 1;
	}

	/* Fill in default value for rcutree.qovld boot parameter. */
	/* -After- the rcu_node ->lock fields are initialized! */
	if (qovld < 0)
		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
	else
		qovld_calc = qovld;

	// Kick-start in case any polled grace periods started early.
	(void)start_poll_synchronize_rcu_expedited();

	rcu_test_sync_prims();

	tasks_cblist_init_generic();
}

#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_nocb.h"
#include "tree_plugin.h"
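/*
 * Illustrative sketch, not part of the upstream file: the classic
 * reader/updater pattern that the grace-period machinery initialized
 * above exists to serve.  struct example_conf, example_conf_p,
 * example_read(), and example_update() are all made-up names, and the
 * updater is assumed to be single-threaded (hence the "true" lockdep
 * condition passed to rcu_replace_pointer()).
 */
#if 0	/* Example only. */
struct example_conf {
	int threshold;
};

static struct example_conf __rcu *example_conf_p;

static int example_read(void)
{
	struct example_conf *p;
	int ret = -1;

	rcu_read_lock();			/* Begin read-side critical section. */
	p = rcu_dereference(example_conf_p);	/* Dependency-ordered load. */
	if (p)
		ret = p->threshold;
	rcu_read_unlock();			/* End read-side critical section. */
	return ret;
}

static void example_update(int new_threshold)
{
	struct example_conf *oldp;
	struct example_conf *newp = kmalloc(sizeof(*newp), GFP_KERNEL);

	if (!newp)
		return;
	newp->threshold = new_threshold;
	oldp = rcu_replace_pointer(example_conf_p, newp, true);
	synchronize_rcu();	/* Wait for pre-existing readers to finish. */
	kfree(oldp);		/* No reader can still hold a reference. */
}
#endif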