/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to be able to decipher the string
 * address to the matching string.
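 *
 * As an illustration only (not additional code), with CONFIG_TRACING=y,
 * DEFINE_RCU_TPS(rcu_sched) below expands to roughly:
 *
 *	static char rcu_sched_varname[] = "rcu_sched";
 *	static const char *tp_rcu_sched_varname __used __tracepoint_string =
 *		rcu_sched_varname;
 *
 * and RCU_STATE_NAME(rcu_sched) then evaluates to rcu_sched_varname, so
 * the tracepoints record a string address that userspace tools can resolve.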
 */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = \
	sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.rda = &sname##_data, \
	.call = cr, \
	.gp_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.name = RCU_STATE_NAME(sname), \
	.abbr = sabbr, \
	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);

static struct rcu_state *const rcu_state_p;
LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_state *rsp,
			       struct rcu_data *rdp, bool wake);
static void sync_sched_exp_online_cleanup(int cpu);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
}

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
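 *
 * As a usage sketch (illustrative only), the scheduler-facing path in
 * this file reaches this function from rcu_note_context_switch(), which
 * runs with interrupts disabled and therefore satisfies that rule:
 *
 *	rcu_note_context_switch(preempt);
 *		-> rcu_sched_qs();
 *
 * The early return below keeps the fast path cheap once a quiescent
 * state has already been recorded for the current grace period.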
 */
void rcu_sched_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_sched_data.gpnum),
			       TPS("cpuqs"));
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(&rcu_sched_data), true);
}

void rcu_bh_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
	}
}

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
#ifndef rcu_eqs_special_exit
#define rcu_eqs_special_exit() do { } while (0)
#endif

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state.
 */
static void rcu_dynticks_eqs_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
	/* Better be in an extended quiescent state! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state.
 */
static void rcu_dynticks_eqs_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
		/* Prefer duplicate flushes to losing a flush. */
		rcu_eqs_special_exit();
	}
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	int snap = atomic_add_return(0, &rdtp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Return true if the CPU corresponding to the specified rcu_dynticks
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
{
	return snap != rcu_dynticks_snap(rdtp);
}

/*
 * Do a double-increment of the ->dynticks counter to emulate a
 * momentary idle-CPU quiescent state.
 */
static void rcu_dynticks_momentary_idle(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
					&rdtp->dynticks);

	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
}

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	do {
		old = atomic_read(&rdtp->dynticks);
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
	} while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
	return true;
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts.
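 *
 * Worked example (assumed values, for illustration only): with
 * RCU_DYNTICK_CTRL_CTR == 0x2, a non-idle CPU might have ->dynticks == 0x6.
 * rcu_dynticks_momentary_idle() then adds 2 * RCU_DYNTICK_CTRL_CTR == 0x4,
 * giving 0xa.  Any later rcu_dynticks_in_eqs_since() comparison against an
 * earlier snapshot sees that the counter has changed and credits this CPU
 * with a quiescent state, just as if it had entered and exited dyntick idle.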
 */
static void rcu_momentary_dyntick_idle(void)
{
	raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
	rcu_dynticks_momentary_idle();
}

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch(preempt);
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
		rcu_momentary_dyntick_idle();
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
	if (!preempt)
		rcu_note_voluntary_context_switch_lite(current);
out:
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them).  Either way, do a lightweight quiescent state for
 * all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
		return;
	preempt_disable();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
		rcu_sched_qs();
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 10;
module_param(jiffies_till_sched_qs, ulong, 0444);

static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started(void)
{
	return rcu_state_p->gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_sched(void)
{
	return rcu_sched_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU BH batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void)
{
	return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void)
{
	return rcu_state_p->completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state_p->expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the number of RCU-sched expedited batches completed thus far
 * for debug & stats.  Similar to rcu_exp_batches_completed().
 */
unsigned long rcu_exp_batches_completed_sched(void)
{
	return rcu_sched_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);

/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		pr_info("%s: wait state: %d ->state: %#lx\n",
			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
		/* sched_show_task(rsp->gp_kthread); */
	}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed)
{
	struct rcu_state *rsp = NULL;

	switch (test_type) {
	case RCU_FLAVOR:
		rsp = rcu_state_p;
		break;
	case RCU_BH_FLAVOR:
		rsp = &rcu_bh_state;
		break;
	case RCU_SCHED_FLAVOR:
		rsp = &rcu_sched_state;
		break;
	default:
		break;
	}
	if (rsp == NULL)
		return;
	*flags = READ_ONCE(rsp->gp_flags);
	*gpnum = READ_ONCE(rsp->gpnum);
	*completed = READ_ONCE(rsp->completed);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

/*
 * Is there any need for future grace periods?
 * Interrupts must be disabled.  If the caller does not hold the root
 * rcu_node structure's ->lock, the results are advisory only.
 */
static int rcu_future_needs_gp(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);
	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
	int *fp = &rnp->need_future_gp[idx];

	lockdep_assert_irqs_disabled();
	return READ_ONCE(*fp);
}

/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (rcu_gp_in_progress(rsp))
		return false;  /* No, a grace period is already in progress. */
	if (rcu_future_needs_gp(rsp))
		return true;  /* Yes, a no-CBs CPU needs one. */
	if (!rcu_segcblist_is_enabled(&rdp->cblist))
		return false;  /* No, this is a no-CBs (or offline) CPU. */
	if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
		return true;  /* Yes, CPU has newly registered callbacks. */
	if (rcu_segcblist_future_gp_needed(&rdp->cblist,
					   READ_ONCE(rsp->completed)))
		return true;  /* Yes, CBs for future grace period. */
	return false; /* No grace period needed. */
}

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static void rcu_eqs_enter(bool user)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting == 0);
	if (rdtp->dynticks_nesting != 1) {
		rdtp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
	rcu_prepare_for_idle();
	WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	rcu_dynticks_eqs_enter();
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit().  This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdtp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
		WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
			   rdtp->dynticks_nmi_nesting - 2);
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
	rcu_dynticks_eqs_enter();
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	lockdep_assert_irqs_disabled();
	if (rdtp->dynticks_nmi_nesting == 1)
		rcu_prepare_for_idle();
	rcu_nmi_exit();
	if (rdtp->dynticks_nmi_nesting == 0)
		rcu_dynticks_task_enter();
}

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
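 *
 * As an illustration of the counters (values are examples derived from the
 * code below, not additional requirements): a CPU idle at process level has
 * ->dynticks_nesting == 0 and ->dynticks_nmi_nesting == 0.  Exiting that
 * extended quiescent state through this function restores
 * ->dynticks_nesting to 1 and crowbars ->dynticks_nmi_nesting to
 * DYNTICK_IRQ_NONIDLE; the matching rcu_eqs_enter() later reverses both.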
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_dynticks *rdtp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		rdtp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	rcu_dynticks_eqs_exit();
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdtp->dynticks_nesting, 1);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section anytime.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_user_exit(void)
{
	rcu_eqs_exit(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	long incby = 2;

	/* Complain about underflow. */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {
		rcu_dynticks_eqs_exit();
		incby = 1;
	}
	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdtp->dynticks_nmi_nesting,
			  rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdtp->dynticks_nmi_nesting + incby);
	barrier();
}

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	lockdep_assert_irqs_disabled();
	if (rdtp->dynticks_nmi_nesting == 0)
		rcu_dynticks_task_exit();
	rcu_nmi_enter();
	if (rdtp->dynticks_nmi_nesting == 1)
		rcu_cleanup_after_idle();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 */
bool notrace rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 * offline to continue to use RCU for one jiffy after marking itself
 * offline in the cpu_online_mask.  This leniency is necessary given the
 * non-atomic nature of the online and offline processing, for example,
 * the fact that a CPU enters the scheduler after completing the teardown
 * of the CPU.
 *
 * This is also why RCU internally marks CPUs online during the
 * preparation phase and offline after the CPU has been taken down.
 *
 * Disable checking if in an NMI handler because we cannot safely report
 * errors from NMI handlers anyway.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret;

	if (in_nmi())
		return true;
	preempt_disable();
	rdp = this_cpu_ptr(&rcu_sched_data);
	rnp = rdp->mynode;
	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
	      !rcu_scheduler_fully_active;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
	       __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
}

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	lockdep_assert_held(&rnp->lock);
	if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
		rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Handler for the irq_work request posted when a grace period has
 * gone on for too long, but not yet long enough for an RCU CPU
 * stall warning.  Set state appropriately, but just complain if
 * there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gpnum = rnp->gpnum;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	bool *rnhqp;
	bool *ruqp;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		rdp->dynticks_fqs++;
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Has this CPU encountered a cond_resched_rcu_qs() since the
	 * beginning of the grace period?  For this to be the case,
	 * the CPU has to have noticed the current grace period.  This
	 * might not be the case for nohz_full CPUs looping in the kernel.
	 */
	jtsq = jiffies_till_sched_qs;
	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
	    READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
	    READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	} else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
		/* Load rcu_qs_ctr before store to rcu_urgent_qs. */
		smp_store_release(ruqp, true);
	}

	/* Check for the CPU being offline. */
	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
		rdp->offline_fqs++;
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
	 * even context-switching back and forth between a pair of
	 * in-kernel CPU-bound tasks cannot advance grace periods.
	 * So if the grace period is old enough, make the CPU pay attention.
	 * Note that the unsynchronized assignments to the per-CPU
	 * rcu_need_heavy_qs variable are safe.  Yes, setting of
	 * bits can be lost, but they will be set again on the next
	 * force-quiescent-state pass.  So lost bit sets do not result
	 * in incorrect behavior, merely in a grace period lasting
	 * a few jiffies longer than it might otherwise.  Because
	 * there are at most four threads involved, and because the
	 * updates are only once every few jiffies, the probability of
	 * lossage (and thus of slight grace-period extension) is
	 * quite low.
	 */
	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
	if (!READ_ONCE(*rnhqp) &&
	    (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
	     time_after(jiffies, rdp->rsp->jiffies_resched))) {
		WRITE_ONCE(*rnhqp, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
		rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, do a
	 * resched_cpu() to try to loosen things up a bit.  Also check to
	 * see if the CPU is getting hammered with interrupts, but only
	 * once per grace period, just to keep the IPIs down to a dull roar.
	 */
	if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
		resched_cpu(rdp->cpu);
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
		    (rnp->ffmask & rdp->grpmask)) {
			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gpnum = rnp->gpnum;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rsp->gp_start = j;
	smp_wmb(); /* Record start time before stall time. */
	j1 = rcu_jiffies_till_stall_check();
	WRITE_ONCE(rsp->jiffies_stall, j + j1);
	rsp->jiffies_resched = j + j1 / 2;
	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/*
 * Complain about starvation of grace-period kthread.
 */
static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
{
	unsigned long gpa;
	unsigned long j;

	j = jiffies;
	gpa = READ_ONCE(rsp->gp_activity);
	if (j - gpa > 2 * HZ) {
		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rsp->name, j - gpa,
		       rsp->gpnum, rsp->completed,
		       rsp->gp_flags,
		       gp_state_getname(rsp->gp_state), rsp->gp_state,
		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
		       rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
		if (rsp->gp_kthread) {
			sched_show_task(rsp->gp_kthread);
			wake_up_process(rsp->gp_kthread);
		}
	}
}

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rsp->jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rsp->gp_kthread &&
	    (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rsp->gp_kthread);
		WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
	}
}

static inline void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
{
	int cpu;
	long delta;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads(rsp);
	if (rcu_cpu_stall_suppress)
		return;

	/* Only let one CPU complain about others per time interval. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rsp->jiffies_stall,
		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:",
	       rsp->name);
	print_cpu_stall_info_begin();
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(rsp, cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
							    cpu)->cblist);
	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - rsp->gp_start),
		(long)rsp->gpnum, (long)rsp->completed, totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks(rsp);

		/* Complain about tasks blocking the grace period. */
		rcu_print_detail_task_stall(rsp);
	} else {
		if (READ_ONCE(rsp->gpnum) != gpnum ||
		    READ_ONCE(rsp->completed) == gpnum) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rsp->gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rsp->name, j - gpa, j, gpa,
			       jiffies_till_next_fqs,
			       rcu_get_root(rsp)->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}

	rcu_check_gp_kthread_starvation(rsp);

	panic_on_rcu_stall();

	force_quiescent_state(rsp);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads(rsp);
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
	print_cpu_stall_info_begin();
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(rsp, smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
							    cpu)->cblist);
	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
		jiffies - rsp->gp_start,
		(long)rsp->gpnum, (long)rsp->completed, totqlen);

	rcu_check_gp_kthread_starvation(rsp);

	rcu_dump_cpu_stacks(rsp);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
		WRITE_ONCE(rsp->jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	resched_cpu(smp_processor_id());
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long completed;
	unsigned long gpnum;
	unsigned long gps;
	unsigned long j;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress(rsp))
		return;
	rcu_stall_kick_kthreads(rsp);
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
	 * then rsp->gp_start, and finally rsp->completed.  These values
	 * are updated in the opposite order with memory barriers (or
	 * equivalent) during grace-period initialization and cleanup.
	 * Now, a false positive can occur if we get a new value of
	 * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
	 * the memory barriers, the only way that this can happen is if one
	 * grace period ends and another starts between these two fetches.
	 * Detect this by comparing rsp->completed with the previous fetch
	 * from rsp->gpnum.
	 *
	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
	 * and rsp->gp_start suffice to forestall false positives.
	 */
	gpnum = READ_ONCE(rsp->gpnum);
	smp_rmb(); /* Pick up ->gpnum first... */
	js = READ_ONCE(rsp->jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rsp->gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
	completed = READ_ONCE(rsp->completed);
	if (ULONG_CMP_GE(completed, gpnum) ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	if (rcu_gp_in_progress(rsp) &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(rsp, gpnum);
	}
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
}

/*
 * Determine the value that ->completed will have at the end of the
 * next subsequent grace period.  This is used to tag callbacks so that
 * a CPU can invoke callbacks in a timely fashion even if that CPU has
 * been dyntick-idle for an extended period with callbacks under the
 * influence of RCU_FAST_NO_HZ.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	lockdep_assert_held(&rnp->lock);

	/*
	 * If RCU is idle, we just wait for the next grace period.
	 * But we can only be sure that RCU is idle if we are looking
	 * at the root rcu_node structure -- otherwise, a new grace
	 * period might have started, but just not yet gotten around
	 * to initializing the current non-root rcu_node structure.
	 */
	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
		return rnp->completed + 1;

	/*
	 * Otherwise, wait for a possible partial grace period and
	 * then the subsequent full grace period.
	 */
	return rnp->completed + 2;
}

/*
 * Trace-event helper function for rcu_start_future_gp() and
 * rcu_nocb_wait_gp().
 */
static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
				unsigned long c, const char *s)
{
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * Start some future grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
The required future grace periods are recorded in each
1665 * rcu_node structure's ->need_future_gp field. Returns true if there
1666 * is reason to awaken the grace-period kthread.
1667 *
1668 * The caller must hold the specified rcu_node structure's ->lock.
1669 */
1670 static bool __maybe_unused
1671 rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1672 unsigned long *c_out)
1673 {
1674 unsigned long c;
1675 bool ret = false;
1676 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1677
1678 lockdep_assert_held(&rnp->lock);
1679
1680 /*
1681 * Pick up grace-period number for new callbacks. If this
1682 * grace period is already marked as needed, return to the caller.
1683 */
1684 c = rcu_cbs_completed(rdp->rsp, rnp);
1685 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1686 if (rnp->need_future_gp[c & 0x1]) {
1687 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1688 goto out;
1689 }
1690
1691 /*
1692 * If either this rcu_node structure or the root rcu_node structure
1693 * believes that a grace period is in progress, then we must wait
1694 * for the one following, which is in "c". Because our request
1695 * will be noticed at the end of the current grace period, we don't
1696 * need to explicitly start one. We only do the lockless check
1697 * of rnp_root's fields if the current rcu_node structure thinks
1698 * there is no grace period in flight, and because we hold rnp->lock,
1699 * the only possible change is when rnp_root's two fields are
1700 * equal, in which case rnp_root->gpnum might be concurrently
1701 * incremented. But that is OK, as it will just result in our
1702 * doing some extra useless work.
1703 */
1704 if (rnp->gpnum != rnp->completed ||
1705 READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
1706 rnp->need_future_gp[c & 0x1]++;
1707 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1708 goto out;
1709 }
1710
1711 /*
1712 * There might be no grace period in progress. If we don't already
1713 * hold it, acquire the root rcu_node structure's lock in order to
1714 * start one (if needed).
1715 */
1716 if (rnp != rnp_root)
1717 raw_spin_lock_rcu_node(rnp_root);
1718
1719 /*
1720 * Get a new grace-period number. If there really is no grace
1721 * period in progress, it will be smaller than the one we obtained
1722 * earlier. Adjust callbacks as needed.
1723 */
1724 c = rcu_cbs_completed(rdp->rsp, rnp_root);
1725 if (!rcu_is_nocb_cpu(rdp->cpu))
1726 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1727
1728 /*
1729 * If the need for the required grace period is already
1730 * recorded, trace and leave.
1731 */
1732 if (rnp_root->need_future_gp[c & 0x1]) {
1733 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1734 goto unlock_out;
1735 }
1736
1737 /* Record the need for the future grace period. */
1738 rnp_root->need_future_gp[c & 0x1]++;
1739
1740 /* If a grace period is not already in progress, start one. */
1741 if (rnp_root->gpnum != rnp_root->completed) {
1742 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1743 } else {
1744 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1745 ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1746 }
1747 unlock_out:
1748 if (rnp != rnp_root)
1749 raw_spin_unlock_rcu_node(rnp_root);
1750 out:
1751 if (c_out != NULL)
1752 *c_out = c;
1753 return ret;
1754 }
1755
1756 /*
1757 * Clean up any old requests for the just-ended grace period. Also return
1758 * whether any additional grace periods have been requested.
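 *
 * Illustrative worked example (numbers invented): ->need_future_gp[] has
 * only two elements, indexed by the bottom bit of the grace-period number.
 * If the grace period that just ended is number 4, the code below clears
 * need_future_gp[4 & 0x1] == need_future_gp[0] and returns the value of
 * need_future_gp[(4 + 1) & 0x1] == need_future_gp[1], which is nonzero
 * only if a later grace period has been requested.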
1759 */
1760 static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1761 {
1762 int c = rnp->completed;
1763 int needmore;
1764 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1765
1766 rnp->need_future_gp[c & 0x1] = 0;
1767 needmore = rnp->need_future_gp[(c + 1) & 0x1];
1768 trace_rcu_future_gp(rnp, rdp, c,
1769 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1770 return needmore;
1771 }
1772
1773 /*
1774 * Awaken the grace-period kthread for the specified flavor of RCU.
1775 * Don't do a self-awaken, and don't bother awakening when there is
1776 * nothing for the grace-period kthread to do (as in several CPUs
1777 * raced to awaken, and we lost), and finally don't try to awaken
1778 * a kthread that has not yet been created.
1779 */
1780 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1781 {
1782 if (current == rsp->gp_kthread ||
1783 !READ_ONCE(rsp->gp_flags) ||
1784 !rsp->gp_kthread)
1785 return;
1786 swake_up(&rsp->gp_wq);
1787 }
1788
1789 /*
1790 * If there is room, assign a ->completed number to any callbacks on
1791 * this CPU that have not already been assigned. Also accelerate any
1792 * callbacks that were previously assigned a ->completed number that has
1793 * since proven to be too conservative, which can happen if callbacks get
1794 * assigned a ->completed number while RCU is idle, but with reference to
1795 * a non-root rcu_node structure. This function is idempotent, so it does
1796 * not hurt to call it repeatedly. Returns a flag saying that we should
1797 * awaken the RCU grace-period kthread.
1798 *
1799 * The caller must hold rnp->lock with interrupts disabled.
1800 */
1801 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1802 struct rcu_data *rdp)
1803 {
1804 bool ret = false;
1805
1806 lockdep_assert_held(&rnp->lock);
1807
1808 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1809 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1810 return false;
1811
1812 /*
1813 * Callbacks are often registered with incomplete grace-period
1814 * information. Something about the fact that getting exact
1815 * information requires acquiring a global lock... RCU therefore
1816 * makes a conservative estimate of the grace period number at which
1817 * a given callback will become ready to invoke. The following
1818 * code checks this estimate and improves it when possible, thus
1819 * accelerating callback invocation to an earlier grace-period
1820 * number.
1821 */
1822 if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
1823 ret = rcu_start_future_gp(rnp, rdp, NULL);
1824
1825 /* Trace depending on how much we were able to accelerate. */
1826 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1827 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1828 else
1829 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1830 return ret;
1831 }
1832
1833 /*
1834 * Move any callbacks whose grace period has completed to the
1835 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1836 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1837 * sublist. This function is idempotent, so it does not hurt to
1838 * invoke it repeatedly. As long as it is not invoked -too- often...
1839 * Returns true if the RCU grace-period kthread needs to be awakened.
1840 *
1841 * The caller must hold rnp->lock with interrupts disabled.
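 *
 * A typical caller follows roughly this sketch (illustrative only; see
 * note_gp_changes() below for an actual caller), deferring the wakeup
 * until after the rcu_node lock has been dropped:
 *
 *      raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *      needwake = rcu_advance_cbs(rsp, rnp, rdp);
 *      raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *      if (needwake)
 *              rcu_gp_kthread_wake(rsp);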
1842 */ 1843 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, 1844 struct rcu_data *rdp) 1845 { 1846 lockdep_assert_held(&rnp->lock); 1847 1848 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1849 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1850 return false; 1851 1852 /* 1853 * Find all callbacks whose ->completed numbers indicate that they 1854 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 1855 */ 1856 rcu_segcblist_advance(&rdp->cblist, rnp->completed); 1857 1858 /* Classify any remaining callbacks. */ 1859 return rcu_accelerate_cbs(rsp, rnp, rdp); 1860 } 1861 1862 /* 1863 * Update CPU-local rcu_data state to record the beginnings and ends of 1864 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1865 * structure corresponding to the current CPU, and must have irqs disabled. 1866 * Returns true if the grace-period kthread needs to be awakened. 1867 */ 1868 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, 1869 struct rcu_data *rdp) 1870 { 1871 bool ret; 1872 bool need_gp; 1873 1874 lockdep_assert_held(&rnp->lock); 1875 1876 /* Handle the ends of any preceding grace periods first. */ 1877 if (rdp->completed == rnp->completed && 1878 !unlikely(READ_ONCE(rdp->gpwrap))) { 1879 1880 /* No grace period end, so just accelerate recent callbacks. */ 1881 ret = rcu_accelerate_cbs(rsp, rnp, rdp); 1882 1883 } else { 1884 1885 /* Advance callbacks. */ 1886 ret = rcu_advance_cbs(rsp, rnp, rdp); 1887 1888 /* Remember that we saw this grace-period completion. */ 1889 rdp->completed = rnp->completed; 1890 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); 1891 } 1892 1893 if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) { 1894 /* 1895 * If the current grace period is waiting for this CPU, 1896 * set up to detect a quiescent state, otherwise don't 1897 * go looking for one. 1898 */ 1899 rdp->gpnum = rnp->gpnum; 1900 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); 1901 need_gp = !!(rnp->qsmask & rdp->grpmask); 1902 rdp->cpu_no_qs.b.norm = need_gp; 1903 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr); 1904 rdp->core_needs_qs = need_gp; 1905 zero_cpu_stall_ticks(rdp); 1906 WRITE_ONCE(rdp->gpwrap, false); 1907 rcu_gpnum_ovf(rnp, rdp); 1908 } 1909 return ret; 1910 } 1911 1912 static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) 1913 { 1914 unsigned long flags; 1915 bool needwake; 1916 struct rcu_node *rnp; 1917 1918 local_irq_save(flags); 1919 rnp = rdp->mynode; 1920 if ((rdp->gpnum == READ_ONCE(rnp->gpnum) && 1921 rdp->completed == READ_ONCE(rnp->completed) && 1922 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1923 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ 1924 local_irq_restore(flags); 1925 return; 1926 } 1927 needwake = __note_gp_changes(rsp, rnp, rdp); 1928 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1929 if (needwake) 1930 rcu_gp_kthread_wake(rsp); 1931 } 1932 1933 static void rcu_gp_slow(struct rcu_state *rsp, int delay) 1934 { 1935 if (delay > 0 && 1936 !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1937 schedule_timeout_uninterruptible(delay); 1938 } 1939 1940 /* 1941 * Initialize a new grace period. Return false if no grace period required. 
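 *
 * The grace-period kthread simply retries on a false return, along the
 * lines of this outline (see rcu_gp_kthread() below for the real loop):
 *
 *      swait_event_idle(rsp->gp_wq,
 *                       READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_INIT);
 *      if (rcu_gp_init(rsp))
 *              break;
 *
 * with a true return meaning that a new grace period has started and the
 * kthread should move on to the quiescent-state-forcing loop.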
1942 */ 1943 static bool rcu_gp_init(struct rcu_state *rsp) 1944 { 1945 unsigned long oldmask; 1946 struct rcu_data *rdp; 1947 struct rcu_node *rnp = rcu_get_root(rsp); 1948 1949 WRITE_ONCE(rsp->gp_activity, jiffies); 1950 raw_spin_lock_irq_rcu_node(rnp); 1951 if (!READ_ONCE(rsp->gp_flags)) { 1952 /* Spurious wakeup, tell caller to go back to sleep. */ 1953 raw_spin_unlock_irq_rcu_node(rnp); 1954 return false; 1955 } 1956 WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */ 1957 1958 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) { 1959 /* 1960 * Grace period already in progress, don't start another. 1961 * Not supposed to be able to happen. 1962 */ 1963 raw_spin_unlock_irq_rcu_node(rnp); 1964 return false; 1965 } 1966 1967 /* Advance to a new grace period and initialize state. */ 1968 record_gp_stall_check_time(rsp); 1969 /* Record GP times before starting GP, hence smp_store_release(). */ 1970 smp_store_release(&rsp->gpnum, rsp->gpnum + 1); 1971 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start")); 1972 raw_spin_unlock_irq_rcu_node(rnp); 1973 1974 /* 1975 * Apply per-leaf buffered online and offline operations to the 1976 * rcu_node tree. Note that this new grace period need not wait 1977 * for subsequent online CPUs, and that quiescent-state forcing 1978 * will handle subsequent offline CPUs. 1979 */ 1980 rcu_for_each_leaf_node(rsp, rnp) { 1981 rcu_gp_slow(rsp, gp_preinit_delay); 1982 raw_spin_lock_irq_rcu_node(rnp); 1983 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1984 !rnp->wait_blkd_tasks) { 1985 /* Nothing to do on this leaf rcu_node structure. */ 1986 raw_spin_unlock_irq_rcu_node(rnp); 1987 continue; 1988 } 1989 1990 /* Record old state, apply changes to ->qsmaskinit field. */ 1991 oldmask = rnp->qsmaskinit; 1992 rnp->qsmaskinit = rnp->qsmaskinitnext; 1993 1994 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1995 if (!oldmask != !rnp->qsmaskinit) { 1996 if (!oldmask) /* First online CPU for this rcu_node. */ 1997 rcu_init_new_rnp(rnp); 1998 else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */ 1999 rnp->wait_blkd_tasks = true; 2000 else /* Last offline CPU and can propagate. */ 2001 rcu_cleanup_dead_rnp(rnp); 2002 } 2003 2004 /* 2005 * If all waited-on tasks from prior grace period are 2006 * done, and if all this rcu_node structure's CPUs are 2007 * still offline, propagate up the rcu_node tree and 2008 * clear ->wait_blkd_tasks. Otherwise, if one of this 2009 * rcu_node structure's CPUs has since come back online, 2010 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp() 2011 * checks for this, so just call it unconditionally). 2012 */ 2013 if (rnp->wait_blkd_tasks && 2014 (!rcu_preempt_has_tasks(rnp) || 2015 rnp->qsmaskinit)) { 2016 rnp->wait_blkd_tasks = false; 2017 rcu_cleanup_dead_rnp(rnp); 2018 } 2019 2020 raw_spin_unlock_irq_rcu_node(rnp); 2021 } 2022 2023 /* 2024 * Set the quiescent-state-needed bits in all the rcu_node 2025 * structures for all currently online CPUs in breadth-first order, 2026 * starting from the root rcu_node structure, relying on the layout 2027 * of the tree within the rsp->node[] array. Note that other CPUs 2028 * will access only the leaves of the hierarchy, thus seeing that no 2029 * grace period is in progress, at least until the corresponding 2030 * leaf node has been initialized. 2031 * 2032 * The grace period cannot complete until the initialization 2033 * process finishes, because this kthread handles both. 
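 *
 * For example (illustrative), in a two-level tree the root rcu_node has
 * its ->gpnum updated before any leaf does, so a CPU that consults only
 * its own leaf either still sees the old grace-period number (and reports
 * nothing) or sees a leaf that has already been fully initialized for the
 * new grace period.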
2034 */ 2035 rcu_for_each_node_breadth_first(rsp, rnp) { 2036 rcu_gp_slow(rsp, gp_init_delay); 2037 raw_spin_lock_irq_rcu_node(rnp); 2038 rdp = this_cpu_ptr(rsp->rda); 2039 rcu_preempt_check_blocked_tasks(rnp); 2040 rnp->qsmask = rnp->qsmaskinit; 2041 WRITE_ONCE(rnp->gpnum, rsp->gpnum); 2042 if (WARN_ON_ONCE(rnp->completed != rsp->completed)) 2043 WRITE_ONCE(rnp->completed, rsp->completed); 2044 if (rnp == rdp->mynode) 2045 (void)__note_gp_changes(rsp, rnp, rdp); 2046 rcu_preempt_boost_start_gp(rnp); 2047 trace_rcu_grace_period_init(rsp->name, rnp->gpnum, 2048 rnp->level, rnp->grplo, 2049 rnp->grphi, rnp->qsmask); 2050 raw_spin_unlock_irq_rcu_node(rnp); 2051 cond_resched_rcu_qs(); 2052 WRITE_ONCE(rsp->gp_activity, jiffies); 2053 } 2054 2055 return true; 2056 } 2057 2058 /* 2059 * Helper function for swait_event_idle() wakeup at force-quiescent-state 2060 * time. 2061 */ 2062 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp) 2063 { 2064 struct rcu_node *rnp = rcu_get_root(rsp); 2065 2066 /* Someone like call_rcu() requested a force-quiescent-state scan. */ 2067 *gfp = READ_ONCE(rsp->gp_flags); 2068 if (*gfp & RCU_GP_FLAG_FQS) 2069 return true; 2070 2071 /* The current grace period has completed. */ 2072 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 2073 return true; 2074 2075 return false; 2076 } 2077 2078 /* 2079 * Do one round of quiescent-state forcing. 2080 */ 2081 static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time) 2082 { 2083 struct rcu_node *rnp = rcu_get_root(rsp); 2084 2085 WRITE_ONCE(rsp->gp_activity, jiffies); 2086 rsp->n_force_qs++; 2087 if (first_time) { 2088 /* Collect dyntick-idle snapshots. */ 2089 force_qs_rnp(rsp, dyntick_save_progress_counter); 2090 } else { 2091 /* Handle dyntick-idle and offline CPUs. */ 2092 force_qs_rnp(rsp, rcu_implicit_dynticks_qs); 2093 } 2094 /* Clear flag to prevent immediate re-entry. */ 2095 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { 2096 raw_spin_lock_irq_rcu_node(rnp); 2097 WRITE_ONCE(rsp->gp_flags, 2098 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS); 2099 raw_spin_unlock_irq_rcu_node(rnp); 2100 } 2101 } 2102 2103 /* 2104 * Clean up after the old grace period. 2105 */ 2106 static void rcu_gp_cleanup(struct rcu_state *rsp) 2107 { 2108 unsigned long gp_duration; 2109 bool needgp = false; 2110 int nocb = 0; 2111 struct rcu_data *rdp; 2112 struct rcu_node *rnp = rcu_get_root(rsp); 2113 struct swait_queue_head *sq; 2114 2115 WRITE_ONCE(rsp->gp_activity, jiffies); 2116 raw_spin_lock_irq_rcu_node(rnp); 2117 gp_duration = jiffies - rsp->gp_start; 2118 if (gp_duration > rsp->gp_max) 2119 rsp->gp_max = gp_duration; 2120 2121 /* 2122 * We know the grace period is complete, but to everyone else 2123 * it appears to still be ongoing. But it is also the case 2124 * that to everyone else it looks like there is nothing that 2125 * they can do to advance the grace period. It is therefore 2126 * safe for us to drop the lock in order to mark the grace 2127 * period as completed in all of the rcu_node structures. 2128 */ 2129 raw_spin_unlock_irq_rcu_node(rnp); 2130 2131 /* 2132 * Propagate new ->completed value to rcu_node structures so 2133 * that other CPUs don't have to wait until the start of the next 2134 * grace period to process their callbacks. 
This also avoids 2135 * some nasty RCU grace-period initialization races by forcing 2136 * the end of the current grace period to be completely recorded in 2137 * all of the rcu_node structures before the beginning of the next 2138 * grace period is recorded in any of the rcu_node structures. 2139 */ 2140 rcu_for_each_node_breadth_first(rsp, rnp) { 2141 raw_spin_lock_irq_rcu_node(rnp); 2142 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); 2143 WARN_ON_ONCE(rnp->qsmask); 2144 WRITE_ONCE(rnp->completed, rsp->gpnum); 2145 rdp = this_cpu_ptr(rsp->rda); 2146 if (rnp == rdp->mynode) 2147 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; 2148 /* smp_mb() provided by prior unlock-lock pair. */ 2149 nocb += rcu_future_gp_cleanup(rsp, rnp); 2150 sq = rcu_nocb_gp_get(rnp); 2151 raw_spin_unlock_irq_rcu_node(rnp); 2152 rcu_nocb_gp_cleanup(sq); 2153 cond_resched_rcu_qs(); 2154 WRITE_ONCE(rsp->gp_activity, jiffies); 2155 rcu_gp_slow(rsp, gp_cleanup_delay); 2156 } 2157 rnp = rcu_get_root(rsp); 2158 raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */ 2159 rcu_nocb_gp_set(rnp, nocb); 2160 2161 /* Declare grace period done. */ 2162 WRITE_ONCE(rsp->completed, rsp->gpnum); 2163 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end")); 2164 rsp->gp_state = RCU_GP_IDLE; 2165 rdp = this_cpu_ptr(rsp->rda); 2166 /* Advance CBs to reduce false positives below. */ 2167 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp; 2168 if (needgp || cpu_needs_another_gp(rsp, rdp)) { 2169 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); 2170 trace_rcu_grace_period(rsp->name, 2171 READ_ONCE(rsp->gpnum), 2172 TPS("newreq")); 2173 } 2174 raw_spin_unlock_irq_rcu_node(rnp); 2175 } 2176 2177 /* 2178 * Body of kthread that handles grace periods. 2179 */ 2180 static int __noreturn rcu_gp_kthread(void *arg) 2181 { 2182 bool first_gp_fqs; 2183 int gf; 2184 unsigned long j; 2185 int ret; 2186 struct rcu_state *rsp = arg; 2187 struct rcu_node *rnp = rcu_get_root(rsp); 2188 2189 rcu_bind_gp_kthread(); 2190 for (;;) { 2191 2192 /* Handle grace-period start. */ 2193 for (;;) { 2194 trace_rcu_grace_period(rsp->name, 2195 READ_ONCE(rsp->gpnum), 2196 TPS("reqwait")); 2197 rsp->gp_state = RCU_GP_WAIT_GPS; 2198 swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) & 2199 RCU_GP_FLAG_INIT); 2200 rsp->gp_state = RCU_GP_DONE_GPS; 2201 /* Locking provides needed memory barrier. */ 2202 if (rcu_gp_init(rsp)) 2203 break; 2204 cond_resched_rcu_qs(); 2205 WRITE_ONCE(rsp->gp_activity, jiffies); 2206 WARN_ON(signal_pending(current)); 2207 trace_rcu_grace_period(rsp->name, 2208 READ_ONCE(rsp->gpnum), 2209 TPS("reqwaitsig")); 2210 } 2211 2212 /* Handle quiescent-state forcing. */ 2213 first_gp_fqs = true; 2214 j = jiffies_till_first_fqs; 2215 if (j > HZ) { 2216 j = HZ; 2217 jiffies_till_first_fqs = HZ; 2218 } 2219 ret = 0; 2220 for (;;) { 2221 if (!ret) { 2222 rsp->jiffies_force_qs = jiffies + j; 2223 WRITE_ONCE(rsp->jiffies_kick_kthreads, 2224 jiffies + 3 * j); 2225 } 2226 trace_rcu_grace_period(rsp->name, 2227 READ_ONCE(rsp->gpnum), 2228 TPS("fqswait")); 2229 rsp->gp_state = RCU_GP_WAIT_FQS; 2230 ret = swait_event_idle_timeout(rsp->gp_wq, 2231 rcu_gp_fqs_check_wake(rsp, &gf), j); 2232 rsp->gp_state = RCU_GP_DOING_FQS; 2233 /* Locking provides needed memory barriers. */ 2234 /* If grace period done, leave loop. */ 2235 if (!READ_ONCE(rnp->qsmask) && 2236 !rcu_preempt_blocked_readers_cgp(rnp)) 2237 break; 2238 /* If time for quiescent-state forcing, do it. 
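 * Either the FQS timeout computed above has expired or
 * RCU_GP_FLAG_FQS has been set, for example by
 * force_quiescent_state() or rcu_report_qs_rsp().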
*/ 2239 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) || 2240 (gf & RCU_GP_FLAG_FQS)) { 2241 trace_rcu_grace_period(rsp->name, 2242 READ_ONCE(rsp->gpnum), 2243 TPS("fqsstart")); 2244 rcu_gp_fqs(rsp, first_gp_fqs); 2245 first_gp_fqs = false; 2246 trace_rcu_grace_period(rsp->name, 2247 READ_ONCE(rsp->gpnum), 2248 TPS("fqsend")); 2249 cond_resched_rcu_qs(); 2250 WRITE_ONCE(rsp->gp_activity, jiffies); 2251 ret = 0; /* Force full wait till next FQS. */ 2252 j = jiffies_till_next_fqs; 2253 if (j > HZ) { 2254 j = HZ; 2255 jiffies_till_next_fqs = HZ; 2256 } else if (j < 1) { 2257 j = 1; 2258 jiffies_till_next_fqs = 1; 2259 } 2260 } else { 2261 /* Deal with stray signal. */ 2262 cond_resched_rcu_qs(); 2263 WRITE_ONCE(rsp->gp_activity, jiffies); 2264 WARN_ON(signal_pending(current)); 2265 trace_rcu_grace_period(rsp->name, 2266 READ_ONCE(rsp->gpnum), 2267 TPS("fqswaitsig")); 2268 ret = 1; /* Keep old FQS timing. */ 2269 j = jiffies; 2270 if (time_after(jiffies, rsp->jiffies_force_qs)) 2271 j = 1; 2272 else 2273 j = rsp->jiffies_force_qs - j; 2274 } 2275 } 2276 2277 /* Handle grace-period end. */ 2278 rsp->gp_state = RCU_GP_CLEANUP; 2279 rcu_gp_cleanup(rsp); 2280 rsp->gp_state = RCU_GP_CLEANED; 2281 } 2282 } 2283 2284 /* 2285 * Start a new RCU grace period if warranted, re-initializing the hierarchy 2286 * in preparation for detecting the next grace period. The caller must hold 2287 * the root node's ->lock and hard irqs must be disabled. 2288 * 2289 * Note that it is legal for a dying CPU (which is marked as offline) to 2290 * invoke this function. This can happen when the dying CPU reports its 2291 * quiescent state. 2292 * 2293 * Returns true if the grace-period kthread must be awakened. 2294 */ 2295 static bool 2296 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, 2297 struct rcu_data *rdp) 2298 { 2299 lockdep_assert_held(&rnp->lock); 2300 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) { 2301 /* 2302 * Either we have not yet spawned the grace-period 2303 * task, this CPU does not need another grace period, 2304 * or a grace period is already in progress. 2305 * Either way, don't start a new grace period. 2306 */ 2307 return false; 2308 } 2309 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); 2310 trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), 2311 TPS("newreq")); 2312 2313 /* 2314 * We can't do wakeups while holding the rnp->lock, as that 2315 * could cause possible deadlocks with the rq->lock. Defer 2316 * the wakeup to our caller. 2317 */ 2318 return true; 2319 } 2320 2321 /* 2322 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's 2323 * callbacks. Note that rcu_start_gp_advanced() cannot do this because it 2324 * is invoked indirectly from rcu_advance_cbs(), which would result in 2325 * endless recursion -- or would do so if it wasn't for the self-deadlock 2326 * that is encountered beforehand. 2327 * 2328 * Returns true if the grace-period kthread needs to be awakened. 2329 */ 2330 static bool rcu_start_gp(struct rcu_state *rsp) 2331 { 2332 struct rcu_data *rdp = this_cpu_ptr(rsp->rda); 2333 struct rcu_node *rnp = rcu_get_root(rsp); 2334 bool ret = false; 2335 2336 /* 2337 * If there is no grace period in progress right now, any 2338 * callbacks we have up to this point will be satisfied by the 2339 * next grace period. Also, advancing the callbacks reduces the 2340 * probability of false positives from cpu_needs_another_gp() 2341 * resulting in pointless grace periods. So, advance callbacks 2342 * then start the grace period! 
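 *
 * (Guidance for callers, illustrative: the return value accumulates the
 * wakeup requests of both rcu_advance_cbs() and rcu_start_gp_advanced(),
 * and callers such as __rcu_process_callbacks() and __call_rcu_core()
 * below invoke rcu_gp_kthread_wake() only after dropping the root
 * rcu_node structure's ->lock.)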
2343 */ 2344 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret; 2345 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret; 2346 return ret; 2347 } 2348 2349 /* 2350 * Report a full set of quiescent states to the specified rcu_state data 2351 * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period 2352 * kthread if another grace period is required. Whether we wake 2353 * the grace-period kthread or it awakens itself for the next round 2354 * of quiescent-state forcing, that kthread will clean up after the 2355 * just-completed grace period. Note that the caller must hold rnp->lock, 2356 * which is released before return. 2357 */ 2358 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) 2359 __releases(rcu_get_root(rsp)->lock) 2360 { 2361 lockdep_assert_held(&rcu_get_root(rsp)->lock); 2362 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); 2363 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); 2364 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags); 2365 rcu_gp_kthread_wake(rsp); 2366 } 2367 2368 /* 2369 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 2370 * Allows quiescent states for a group of CPUs to be reported at one go 2371 * to the specified rcu_node structure, though all the CPUs in the group 2372 * must be represented by the same rcu_node structure (which need not be a 2373 * leaf rcu_node structure, though it often will be). The gps parameter 2374 * is the grace-period snapshot, which means that the quiescent states 2375 * are valid only if rnp->gpnum is equal to gps. That structure's lock 2376 * must be held upon entry, and it is released before return. 2377 */ 2378 static void 2379 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, 2380 struct rcu_node *rnp, unsigned long gps, unsigned long flags) 2381 __releases(rnp->lock) 2382 { 2383 unsigned long oldmask = 0; 2384 struct rcu_node *rnp_c; 2385 2386 lockdep_assert_held(&rnp->lock); 2387 2388 /* Walk up the rcu_node hierarchy. */ 2389 for (;;) { 2390 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) { 2391 2392 /* 2393 * Our bit has already been cleared, or the 2394 * relevant grace period is already over, so done. 2395 */ 2396 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2397 return; 2398 } 2399 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 2400 WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 && 2401 rcu_preempt_blocked_readers_cgp(rnp)); 2402 rnp->qsmask &= ~mask; 2403 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum, 2404 mask, rnp->qsmask, rnp->level, 2405 rnp->grplo, rnp->grphi, 2406 !!rnp->gp_tasks); 2407 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2408 2409 /* Other bits still set at this level, so done. */ 2410 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2411 return; 2412 } 2413 mask = rnp->grpmask; 2414 if (rnp->parent == NULL) { 2415 2416 /* No more levels. Exit loop holding root lock. */ 2417 2418 break; 2419 } 2420 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2421 rnp_c = rnp; 2422 rnp = rnp->parent; 2423 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2424 oldmask = rnp_c->qsmask; 2425 } 2426 2427 /* 2428 * Get here if we are the last CPU to pass through a quiescent 2429 * state for this grace period. Invoke rcu_report_qs_rsp() 2430 * to clean up and start the next grace period if one is needed. 2431 */ 2432 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. 
*/ 2433 } 2434 2435 /* 2436 * Record a quiescent state for all tasks that were previously queued 2437 * on the specified rcu_node structure and that were blocking the current 2438 * RCU grace period. The caller must hold the specified rnp->lock with 2439 * irqs disabled, and this lock is released upon return, but irqs remain 2440 * disabled. 2441 */ 2442 static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp, 2443 struct rcu_node *rnp, unsigned long flags) 2444 __releases(rnp->lock) 2445 { 2446 unsigned long gps; 2447 unsigned long mask; 2448 struct rcu_node *rnp_p; 2449 2450 lockdep_assert_held(&rnp->lock); 2451 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p || 2452 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2453 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2454 return; /* Still need more quiescent states! */ 2455 } 2456 2457 rnp_p = rnp->parent; 2458 if (rnp_p == NULL) { 2459 /* 2460 * Only one rcu_node structure in the tree, so don't 2461 * try to report up to its nonexistent parent! 2462 */ 2463 rcu_report_qs_rsp(rsp, flags); 2464 return; 2465 } 2466 2467 /* Report up the rest of the hierarchy, tracking current ->gpnum. */ 2468 gps = rnp->gpnum; 2469 mask = rnp->grpmask; 2470 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2471 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2472 rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags); 2473 } 2474 2475 /* 2476 * Record a quiescent state for the specified CPU to that CPU's rcu_data 2477 * structure. This must be called from the specified CPU. 2478 */ 2479 static void 2480 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) 2481 { 2482 unsigned long flags; 2483 unsigned long mask; 2484 bool needwake; 2485 struct rcu_node *rnp; 2486 2487 rnp = rdp->mynode; 2488 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2489 if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum || 2490 rnp->completed == rnp->gpnum || rdp->gpwrap) { 2491 2492 /* 2493 * The grace period in which this quiescent state was 2494 * recorded has ended, so don't report it upwards. 2495 * We will instead need a new quiescent state that lies 2496 * within the current grace period. 2497 */ 2498 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 2499 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr); 2500 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2501 return; 2502 } 2503 mask = rdp->grpmask; 2504 if ((rnp->qsmask & mask) == 0) { 2505 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2506 } else { 2507 rdp->core_needs_qs = false; 2508 2509 /* 2510 * This GP can't end until cpu checks in, so all of our 2511 * callbacks can be processed during the next GP. 2512 */ 2513 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); 2514 2515 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); 2516 /* ^^^ Released rnp->lock */ 2517 if (needwake) 2518 rcu_gp_kthread_wake(rsp); 2519 } 2520 } 2521 2522 /* 2523 * Check to see if there is a new grace period of which this CPU 2524 * is not yet aware, and if so, set up local rcu_data state for it. 2525 * Otherwise, see if this CPU has just passed through its first 2526 * quiescent state for this grace period, and record that fact if so. 2527 */ 2528 static void 2529 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) 2530 { 2531 /* Check for grace-period ends and beginnings. */ 2532 note_gp_changes(rsp, rdp); 2533 2534 /* 2535 * Does this CPU still need to do its part for current grace period? 
2536 * If no, return and let the other CPUs do their part as well.
2537 */
2538 if (!rdp->core_needs_qs)
2539 return;
2540
2541 /*
2542 * Was there a quiescent state since the beginning of the grace
2543 * period? If no, then exit and wait for the next call.
2544 */
2545 if (rdp->cpu_no_qs.b.norm)
2546 return;
2547
2548 /*
2549 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2550 * judge of that).
2551 */
2552 rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
2553 }
2554
2555 /*
2556 * Trace the fact that this CPU is going offline.
2557 */
2558 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2559 {
2560 RCU_TRACE(unsigned long mask;)
2561 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
2562 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
2563
2564 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2565 return;
2566
2567 RCU_TRACE(mask = rdp->grpmask;)
2568 trace_rcu_grace_period(rsp->name,
2569 rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2570 TPS("cpuofl"));
2571 }
2572
2573 /*
2574 * All CPUs for the specified rcu_node structure have gone offline,
2575 * and all tasks that were preempted within an RCU read-side critical
2576 * section while running on one of those CPUs have since exited their RCU
2577 * read-side critical section. Some other CPU is reporting this fact with
2578 * the specified rcu_node structure's ->lock held and interrupts disabled.
2579 * This function therefore goes up the tree of rcu_node structures,
2580 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2581 * the leaf rcu_node structure's ->qsmaskinit field has already been
2582 * updated.
2583 *
2584 * This function does check that the specified rcu_node structure has
2585 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2586 * prematurely. That said, invoking it after the fact will cost you
2587 * a needless lock acquisition. So once it has done its work, don't
2588 * invoke it again.
2589 */
2590 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2591 {
2592 long mask;
2593 struct rcu_node *rnp = rnp_leaf;
2594
2595 lockdep_assert_held(&rnp->lock);
2596 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2597 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2598 return;
2599 for (;;) {
2600 mask = rnp->grpmask;
2601 rnp = rnp->parent;
2602 if (!rnp)
2603 break;
2604 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2605 rnp->qsmaskinit &= ~mask;
2606 rnp->qsmask &= ~mask;
2607 if (rnp->qsmaskinit) {
2608 raw_spin_unlock_rcu_node(rnp);
2609 /* irqs remain disabled. */
2610 return;
2611 }
2612 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2613 }
2614 }
2615
2616 /*
2617 * The CPU has been completely removed, and some other CPU is reporting
2618 * this fact from process context. Do the remainder of the cleanup.
2619 * There can only be one CPU hotplug operation at a time, so no need for
2620 * explicit locking.
2621 */
2622 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2623 {
2624 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2625 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2626
2627 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2628 return;
2629
2630 /* Adjust any no-longer-needed kthreads. */
2631 rcu_boost_kthread_setaffinity(rnp, -1);
2632 }
2633
2634 /*
2635 * Invoke any RCU callbacks that have made it to the end of their grace
2636 * period. Throttle as specified by rdp->blimit.
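 *
 * Worked example (illustrative numbers only): with rdp->blimit of 10 and
 * a few hundred callbacks ready, the loop below invokes ten callbacks and
 * then stops if need_resched() is set or if it is running in neither the
 * idle task nor the RCU callbacks kthread; otherwise it keeps draining
 * the ready list.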
2637 */ 2638 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) 2639 { 2640 unsigned long flags; 2641 struct rcu_head *rhp; 2642 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2643 long bl, count; 2644 2645 /* If no callbacks are ready, just return. */ 2646 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2647 trace_rcu_batch_start(rsp->name, 2648 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2649 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2650 trace_rcu_batch_end(rsp->name, 0, 2651 !rcu_segcblist_empty(&rdp->cblist), 2652 need_resched(), is_idle_task(current), 2653 rcu_is_callbacks_kthread()); 2654 return; 2655 } 2656 2657 /* 2658 * Extract the list of ready callbacks, disabling to prevent 2659 * races with call_rcu() from interrupt handlers. Leave the 2660 * callback counts, as rcu_barrier() needs to be conservative. 2661 */ 2662 local_irq_save(flags); 2663 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2664 bl = rdp->blimit; 2665 trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2666 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2667 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2668 local_irq_restore(flags); 2669 2670 /* Invoke callbacks. */ 2671 rhp = rcu_cblist_dequeue(&rcl); 2672 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2673 debug_rcu_head_unqueue(rhp); 2674 if (__rcu_reclaim(rsp->name, rhp)) 2675 rcu_cblist_dequeued_lazy(&rcl); 2676 /* 2677 * Stop only if limit reached and CPU has something to do. 2678 * Note: The rcl structure counts down from zero. 2679 */ 2680 if (-rcl.len >= bl && 2681 (need_resched() || 2682 (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) 2683 break; 2684 } 2685 2686 local_irq_save(flags); 2687 count = -rcl.len; 2688 trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(), 2689 is_idle_task(current), rcu_is_callbacks_kthread()); 2690 2691 /* Update counts and requeue any remaining callbacks. */ 2692 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2693 smp_mb(); /* List handling before counting for rcu_barrier(). */ 2694 rdp->n_cbs_invoked += count; 2695 rcu_segcblist_insert_count(&rdp->cblist, &rcl); 2696 2697 /* Reinstate batch limit if we have worked down the excess. */ 2698 count = rcu_segcblist_n_cbs(&rdp->cblist); 2699 if (rdp->blimit == LONG_MAX && count <= qlowmark) 2700 rdp->blimit = blimit; 2701 2702 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2703 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2704 rdp->qlen_last_fqs_check = 0; 2705 rdp->n_force_qs_snap = rsp->n_force_qs; 2706 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2707 rdp->qlen_last_fqs_check = count; 2708 2709 /* 2710 * The following usually indicates a double call_rcu(). To track 2711 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2712 */ 2713 WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0)); 2714 2715 local_irq_restore(flags); 2716 2717 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2718 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2719 invoke_rcu_core(); 2720 } 2721 2722 /* 2723 * Check to see if this CPU is in a non-context-switch quiescent state 2724 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). 2725 * Also schedule RCU core processing. 2726 * 2727 * This function must be called from hardirq context. It is normally 2728 * invoked from the scheduling-clock interrupt. 
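 *
 * For example, the scheduling-clock path typically reaches this function
 * along the following lines (illustrative call chain; the exact path
 * depends on architecture and configuration):
 *
 *      tick interrupt -> update_process_times(user_tick)
 *                         -> rcu_check_callbacks(user_tick)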
2729 */ 2730 void rcu_check_callbacks(int user) 2731 { 2732 trace_rcu_utilization(TPS("Start scheduler-tick")); 2733 increment_cpu_stall_ticks(); 2734 if (user || rcu_is_cpu_rrupt_from_idle()) { 2735 2736 /* 2737 * Get here if this CPU took its interrupt from user 2738 * mode or from the idle loop, and if this is not a 2739 * nested interrupt. In this case, the CPU is in 2740 * a quiescent state, so note it. 2741 * 2742 * No memory barrier is required here because both 2743 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local 2744 * variables that other CPUs neither access nor modify, 2745 * at least not while the corresponding CPU is online. 2746 */ 2747 2748 rcu_sched_qs(); 2749 rcu_bh_qs(); 2750 2751 } else if (!in_softirq()) { 2752 2753 /* 2754 * Get here if this CPU did not take its interrupt from 2755 * softirq, in other words, if it is not interrupting 2756 * a rcu_bh read-side critical section. This is an _bh 2757 * critical section, so note it. 2758 */ 2759 2760 rcu_bh_qs(); 2761 } 2762 rcu_preempt_check_callbacks(); 2763 if (rcu_pending()) 2764 invoke_rcu_core(); 2765 if (user) 2766 rcu_note_voluntary_context_switch(current); 2767 trace_rcu_utilization(TPS("End scheduler-tick")); 2768 } 2769 2770 /* 2771 * Scan the leaf rcu_node structures, processing dyntick state for any that 2772 * have not yet encountered a quiescent state, using the function specified. 2773 * Also initiate boosting for any threads blocked on the root rcu_node. 2774 * 2775 * The caller must have suppressed start of new grace periods. 2776 */ 2777 static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp)) 2778 { 2779 int cpu; 2780 unsigned long flags; 2781 unsigned long mask; 2782 struct rcu_node *rnp; 2783 2784 rcu_for_each_leaf_node(rsp, rnp) { 2785 cond_resched_rcu_qs(); 2786 mask = 0; 2787 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2788 if (rnp->qsmask == 0) { 2789 if (rcu_state_p == &rcu_sched_state || 2790 rsp != rcu_state_p || 2791 rcu_preempt_blocked_readers_cgp(rnp)) { 2792 /* 2793 * No point in scanning bits because they 2794 * are all zero. But we might need to 2795 * priority-boost blocked readers. 2796 */ 2797 rcu_initiate_boost(rnp, flags); 2798 /* rcu_initiate_boost() releases rnp->lock */ 2799 continue; 2800 } 2801 if (rnp->parent && 2802 (rnp->parent->qsmask & rnp->grpmask)) { 2803 /* 2804 * Race between grace-period 2805 * initialization and task exiting RCU 2806 * read-side critical section: Report. 2807 */ 2808 rcu_report_unblock_qs_rnp(rsp, rnp, flags); 2809 /* rcu_report_unblock_qs_rnp() rlses ->lock */ 2810 continue; 2811 } 2812 } 2813 for_each_leaf_node_possible_cpu(rnp, cpu) { 2814 unsigned long bit = leaf_node_cpu_bit(rnp, cpu); 2815 if ((rnp->qsmask & bit) != 0) { 2816 if (f(per_cpu_ptr(rsp->rda, cpu))) 2817 mask |= bit; 2818 } 2819 } 2820 if (mask != 0) { 2821 /* Idle/offline CPUs, report (releases rnp->lock. */ 2822 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); 2823 } else { 2824 /* Nothing to do here, so just drop the lock. */ 2825 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2826 } 2827 } 2828 } 2829 2830 /* 2831 * Force quiescent states on reluctant CPUs, and also detect which 2832 * CPUs are in dyntick-idle mode. 2833 */ 2834 static void force_quiescent_state(struct rcu_state *rsp) 2835 { 2836 unsigned long flags; 2837 bool ret; 2838 struct rcu_node *rnp; 2839 struct rcu_node *rnp_old = NULL; 2840 2841 /* Funnel through hierarchy to reduce memory contention. 
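 * Each CPU starts at its own leaf rcu_node structure and trylocks
 * ->fqslock on the way up, releasing the lock below it. A CPU that
 * fails a trylock, or that sees RCU_GP_FLAG_FQS already set, drops
 * out, so at most one CPU per subtree keeps climbing toward the root.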
*/ 2842 rnp = __this_cpu_read(rsp->rda->mynode); 2843 for (; rnp != NULL; rnp = rnp->parent) { 2844 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || 2845 !raw_spin_trylock(&rnp->fqslock); 2846 if (rnp_old != NULL) 2847 raw_spin_unlock(&rnp_old->fqslock); 2848 if (ret) { 2849 rsp->n_force_qs_lh++; 2850 return; 2851 } 2852 rnp_old = rnp; 2853 } 2854 /* rnp_old == rcu_get_root(rsp), rnp == NULL. */ 2855 2856 /* Reached the root of the rcu_node tree, acquire lock. */ 2857 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2858 raw_spin_unlock(&rnp_old->fqslock); 2859 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { 2860 rsp->n_force_qs_lh++; 2861 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2862 return; /* Someone beat us to it. */ 2863 } 2864 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); 2865 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2866 rcu_gp_kthread_wake(rsp); 2867 } 2868 2869 /* 2870 * This does the RCU core processing work for the specified rcu_state 2871 * and rcu_data structures. This may be called only from the CPU to 2872 * whom the rdp belongs. 2873 */ 2874 static void 2875 __rcu_process_callbacks(struct rcu_state *rsp) 2876 { 2877 unsigned long flags; 2878 bool needwake; 2879 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 2880 2881 WARN_ON_ONCE(!rdp->beenonline); 2882 2883 /* Update RCU state based on any recent quiescent states. */ 2884 rcu_check_quiescent_state(rsp, rdp); 2885 2886 /* Does this CPU require a not-yet-started grace period? */ 2887 local_irq_save(flags); 2888 if (cpu_needs_another_gp(rsp, rdp)) { 2889 raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */ 2890 needwake = rcu_start_gp(rsp); 2891 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags); 2892 if (needwake) 2893 rcu_gp_kthread_wake(rsp); 2894 } else { 2895 local_irq_restore(flags); 2896 } 2897 2898 /* If there are callbacks ready, invoke them. */ 2899 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2900 invoke_rcu_callbacks(rsp, rdp); 2901 2902 /* Do any needed deferred wakeups of rcuo kthreads. */ 2903 do_nocb_deferred_wakeup(rdp); 2904 } 2905 2906 /* 2907 * Do RCU core processing for the current CPU. 2908 */ 2909 static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) 2910 { 2911 struct rcu_state *rsp; 2912 2913 if (cpu_is_offline(smp_processor_id())) 2914 return; 2915 trace_rcu_utilization(TPS("Start RCU core")); 2916 for_each_rcu_flavor(rsp) 2917 __rcu_process_callbacks(rsp); 2918 trace_rcu_utilization(TPS("End RCU core")); 2919 } 2920 2921 /* 2922 * Schedule RCU callback invocation. If the specified type of RCU 2923 * does not support RCU priority boosting, just do a direct call, 2924 * otherwise wake up the per-CPU kernel kthread. Note that because we 2925 * are running on the current CPU with softirqs disabled, the 2926 * rcu_cpu_kthread_task cannot disappear out from under us. 2927 */ 2928 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) 2929 { 2930 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) 2931 return; 2932 if (likely(!rsp->boost)) { 2933 rcu_do_batch(rsp, rdp); 2934 return; 2935 } 2936 invoke_rcu_callbacks_kthread(); 2937 } 2938 2939 static void invoke_rcu_core(void) 2940 { 2941 if (cpu_online(smp_processor_id())) 2942 raise_softirq(RCU_SOFTIRQ); 2943 } 2944 2945 /* 2946 * Handle any core-RCU processing required by a call_rcu() invocation. 
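 *
 * Worked example (illustrative, assuming the default qhimark of 10000):
 * once this CPU's callback list grows past rdp->qlen_last_fqs_check plus
 * 10000 entries, the code below first checks for a missed grace-period
 * end, then either starts a new grace period or, if one is already in
 * progress, raises rdp->blimit and may kick the grace period along with
 * force_quiescent_state().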
2947 */ 2948 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, 2949 struct rcu_head *head, unsigned long flags) 2950 { 2951 bool needwake; 2952 2953 /* 2954 * If called from an extended quiescent state, invoke the RCU 2955 * core in order to force a re-evaluation of RCU's idleness. 2956 */ 2957 if (!rcu_is_watching()) 2958 invoke_rcu_core(); 2959 2960 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2961 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2962 return; 2963 2964 /* 2965 * Force the grace period if too many callbacks or too long waiting. 2966 * Enforce hysteresis, and don't invoke force_quiescent_state() 2967 * if some other CPU has recently done so. Also, don't bother 2968 * invoking force_quiescent_state() if the newly enqueued callback 2969 * is the only one waiting for a grace period to complete. 2970 */ 2971 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 2972 rdp->qlen_last_fqs_check + qhimark)) { 2973 2974 /* Are we ignoring a completed grace period? */ 2975 note_gp_changes(rsp, rdp); 2976 2977 /* Start a new grace period if one not already started. */ 2978 if (!rcu_gp_in_progress(rsp)) { 2979 struct rcu_node *rnp_root = rcu_get_root(rsp); 2980 2981 raw_spin_lock_rcu_node(rnp_root); 2982 needwake = rcu_start_gp(rsp); 2983 raw_spin_unlock_rcu_node(rnp_root); 2984 if (needwake) 2985 rcu_gp_kthread_wake(rsp); 2986 } else { 2987 /* Give the grace period a kick. */ 2988 rdp->blimit = LONG_MAX; 2989 if (rsp->n_force_qs == rdp->n_force_qs_snap && 2990 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 2991 force_quiescent_state(rsp); 2992 rdp->n_force_qs_snap = rsp->n_force_qs; 2993 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2994 } 2995 } 2996 } 2997 2998 /* 2999 * RCU callback function to leak a callback. 3000 */ 3001 static void rcu_leak_callback(struct rcu_head *rhp) 3002 { 3003 } 3004 3005 /* 3006 * Helper function for call_rcu() and friends. The cpu argument will 3007 * normally be -1, indicating "currently running CPU". It may specify 3008 * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier() 3009 * is expected to specify a CPU. 3010 */ 3011 static void 3012 __call_rcu(struct rcu_head *head, rcu_callback_t func, 3013 struct rcu_state *rsp, int cpu, bool lazy) 3014 { 3015 unsigned long flags; 3016 struct rcu_data *rdp; 3017 3018 /* Misaligned rcu_head! */ 3019 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 3020 3021 if (debug_rcu_head_queue(head)) { 3022 /* 3023 * Probable double call_rcu(), so leak the callback. 3024 * Use rcu:rcu_callback trace event to find the previous 3025 * time callback was passed to __call_rcu(). 3026 */ 3027 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n", 3028 head, head->func); 3029 WRITE_ONCE(head->func, rcu_leak_callback); 3030 return; 3031 } 3032 head->func = func; 3033 head->next = NULL; 3034 local_irq_save(flags); 3035 rdp = this_cpu_ptr(rsp->rda); 3036 3037 /* Add the callback to our list. */ 3038 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) { 3039 int offline; 3040 3041 if (cpu != -1) 3042 rdp = per_cpu_ptr(rsp->rda, cpu); 3043 if (likely(rdp->mynode)) { 3044 /* Post-boot, so this should be for a no-CBs CPU. */ 3045 offline = !__call_rcu_nocb(rdp, head, lazy, flags); 3046 WARN_ON_ONCE(offline); 3047 /* Offline CPU, _call_rcu() illegal, leak callback. */ 3048 local_irq_restore(flags); 3049 return; 3050 } 3051 /* 3052 * Very early boot, before rcu_init(). 
Initialize if needed 3053 * and then drop through to queue the callback. 3054 */ 3055 BUG_ON(cpu != -1); 3056 WARN_ON_ONCE(!rcu_is_watching()); 3057 if (rcu_segcblist_empty(&rdp->cblist)) 3058 rcu_segcblist_init(&rdp->cblist); 3059 } 3060 rcu_segcblist_enqueue(&rdp->cblist, head, lazy); 3061 if (!lazy) 3062 rcu_idle_count_callbacks_posted(); 3063 3064 if (__is_kfree_rcu_offset((unsigned long)func)) 3065 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func, 3066 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 3067 rcu_segcblist_n_cbs(&rdp->cblist)); 3068 else 3069 trace_rcu_callback(rsp->name, head, 3070 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 3071 rcu_segcblist_n_cbs(&rdp->cblist)); 3072 3073 /* Go handle any RCU core processing required. */ 3074 __call_rcu_core(rsp, rdp, head, flags); 3075 local_irq_restore(flags); 3076 } 3077 3078 /** 3079 * call_rcu_sched() - Queue an RCU for invocation after sched grace period. 3080 * @head: structure to be used for queueing the RCU updates. 3081 * @func: actual callback function to be invoked after the grace period 3082 * 3083 * The callback function will be invoked some time after a full grace 3084 * period elapses, in other words after all currently executing RCU 3085 * read-side critical sections have completed. call_rcu_sched() assumes 3086 * that the read-side critical sections end on enabling of preemption 3087 * or on voluntary preemption. 3088 * RCU read-side critical sections are delimited by: 3089 * 3090 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR 3091 * - anything that disables preemption. 3092 * 3093 * These may be nested. 3094 * 3095 * See the description of call_rcu() for more detailed information on 3096 * memory ordering guarantees. 3097 */ 3098 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) 3099 { 3100 __call_rcu(head, func, &rcu_sched_state, -1, 0); 3101 } 3102 EXPORT_SYMBOL_GPL(call_rcu_sched); 3103 3104 /** 3105 * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. 3106 * @head: structure to be used for queueing the RCU updates. 3107 * @func: actual callback function to be invoked after the grace period 3108 * 3109 * The callback function will be invoked some time after a full grace 3110 * period elapses, in other words after all currently executing RCU 3111 * read-side critical sections have completed. call_rcu_bh() assumes 3112 * that the read-side critical sections end on completion of a softirq 3113 * handler. This means that read-side critical sections in process 3114 * context must not be interrupted by softirqs. This interface is to be 3115 * used when most of the read-side critical sections are in softirq context. 3116 * RCU read-side critical sections are delimited by: 3117 * 3118 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR 3119 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. 3120 * 3121 * These may be nested. 3122 * 3123 * See the description of call_rcu() for more detailed information on 3124 * memory ordering guarantees. 3125 */ 3126 void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) 3127 { 3128 __call_rcu(head, func, &rcu_bh_state, -1, 0); 3129 } 3130 EXPORT_SYMBOL_GPL(call_rcu_bh); 3131 3132 /* 3133 * Queue an RCU callback for lazy invocation after a grace period. 3134 * This will likely be later named something like "call_rcu_lazy()", 3135 * but this change will require some way of tagging the lazy RCU 3136 * callbacks in the list of pending callbacks. 
Until then, this 3137 * function may only be called from __kfree_rcu(). 3138 */ 3139 void kfree_call_rcu(struct rcu_head *head, 3140 rcu_callback_t func) 3141 { 3142 __call_rcu(head, func, rcu_state_p, -1, 1); 3143 } 3144 EXPORT_SYMBOL_GPL(kfree_call_rcu); 3145 3146 /* 3147 * Because a context switch is a grace period for RCU-sched and RCU-bh, 3148 * any blocking grace-period wait automatically implies a grace period 3149 * if there is only one CPU online at any point time during execution 3150 * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to 3151 * occasionally incorrectly indicate that there are multiple CPUs online 3152 * when there was in fact only one the whole time, as this just adds 3153 * some overhead: RCU still operates correctly. 3154 */ 3155 static inline int rcu_blocking_is_gp(void) 3156 { 3157 int ret; 3158 3159 might_sleep(); /* Check for RCU read-side critical section. */ 3160 preempt_disable(); 3161 ret = num_online_cpus() <= 1; 3162 preempt_enable(); 3163 return ret; 3164 } 3165 3166 /** 3167 * synchronize_sched - wait until an rcu-sched grace period has elapsed. 3168 * 3169 * Control will return to the caller some time after a full rcu-sched 3170 * grace period has elapsed, in other words after all currently executing 3171 * rcu-sched read-side critical sections have completed. These read-side 3172 * critical sections are delimited by rcu_read_lock_sched() and 3173 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), 3174 * local_irq_disable(), and so on may be used in place of 3175 * rcu_read_lock_sched(). 3176 * 3177 * This means that all preempt_disable code sequences, including NMI and 3178 * non-threaded hardware-interrupt handlers, in progress on entry will 3179 * have completed before this primitive returns. However, this does not 3180 * guarantee that softirq handlers will have completed, since in some 3181 * kernels, these handlers can run in process context, and can block. 3182 * 3183 * Note that this guarantee implies further memory-ordering guarantees. 3184 * On systems with more than one CPU, when synchronize_sched() returns, 3185 * each CPU is guaranteed to have executed a full memory barrier since the 3186 * end of its last RCU-sched read-side critical section whose beginning 3187 * preceded the call to synchronize_sched(). In addition, each CPU having 3188 * an RCU read-side critical section that extends beyond the return from 3189 * synchronize_sched() is guaranteed to have executed a full memory barrier 3190 * after the beginning of synchronize_sched() and before the beginning of 3191 * that RCU read-side critical section. Note that these guarantees include 3192 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 3193 * that are executing in the kernel. 3194 * 3195 * Furthermore, if CPU A invoked synchronize_sched(), which returned 3196 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 3197 * to have executed a full memory barrier during the execution of 3198 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but 3199 * again only if the system has more than one CPU). 
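 *
 * A minimal update-side usage sketch (hypothetical names, not from this
 * file), with readers that run entirely with preemption disabled:
 *
 *      old = rcu_dereference_protected(gptr, lockdep_is_held(&gp_lock));
 *      rcu_assign_pointer(gptr, newp);
 *      synchronize_sched();
 *      kfree(old);
 *
 * where the kfree() is safe because no preempt-disabled reader can still
 * hold a reference to the old structure once synchronize_sched() returns.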
3200 */ 3201 void synchronize_sched(void) 3202 { 3203 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3204 lock_is_held(&rcu_lock_map) || 3205 lock_is_held(&rcu_sched_lock_map), 3206 "Illegal synchronize_sched() in RCU-sched read-side critical section"); 3207 if (rcu_blocking_is_gp()) 3208 return; 3209 if (rcu_gp_is_expedited()) 3210 synchronize_sched_expedited(); 3211 else 3212 wait_rcu_gp(call_rcu_sched); 3213 } 3214 EXPORT_SYMBOL_GPL(synchronize_sched); 3215 3216 /** 3217 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. 3218 * 3219 * Control will return to the caller some time after a full rcu_bh grace 3220 * period has elapsed, in other words after all currently executing rcu_bh 3221 * read-side critical sections have completed. RCU read-side critical 3222 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), 3223 * and may be nested. 3224 * 3225 * See the description of synchronize_sched() for more detailed information 3226 * on memory ordering guarantees. 3227 */ 3228 void synchronize_rcu_bh(void) 3229 { 3230 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3231 lock_is_held(&rcu_lock_map) || 3232 lock_is_held(&rcu_sched_lock_map), 3233 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section"); 3234 if (rcu_blocking_is_gp()) 3235 return; 3236 if (rcu_gp_is_expedited()) 3237 synchronize_rcu_bh_expedited(); 3238 else 3239 wait_rcu_gp(call_rcu_bh); 3240 } 3241 EXPORT_SYMBOL_GPL(synchronize_rcu_bh); 3242 3243 /** 3244 * get_state_synchronize_rcu - Snapshot current RCU state 3245 * 3246 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3247 * to determine whether or not a full grace period has elapsed in the 3248 * meantime. 3249 */ 3250 unsigned long get_state_synchronize_rcu(void) 3251 { 3252 /* 3253 * Any prior manipulation of RCU-protected data must happen 3254 * before the load from ->gpnum. 3255 */ 3256 smp_mb(); /* ^^^ */ 3257 3258 /* 3259 * Make sure this load happens before the purportedly 3260 * time-consuming work between get_state_synchronize_rcu() 3261 * and cond_synchronize_rcu(). 3262 */ 3263 return smp_load_acquire(&rcu_state_p->gpnum); 3264 } 3265 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 3266 3267 /** 3268 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 3269 * 3270 * @oldstate: return value from earlier call to get_state_synchronize_rcu() 3271 * 3272 * If a full RCU grace period has elapsed since the earlier call to 3273 * get_state_synchronize_rcu(), just return. Otherwise, invoke 3274 * synchronize_rcu() to wait for a full grace period. 3275 * 3276 * Yes, this function does not take counter wrap into account. But 3277 * counter wrap is harmless. If the counter wraps, we have waited for 3278 * more than 2 billion grace periods (and way more on a 64-bit system!), 3279 * so waiting for one additional grace period should be just fine. 3280 */ 3281 void cond_synchronize_rcu(unsigned long oldstate) 3282 { 3283 unsigned long newstate; 3284 3285 /* 3286 * Ensure that this load happens before any RCU-destructive 3287 * actions the caller might carry out after we return. 
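 *
 * A typical caller pairs this function with get_state_synchronize_rcu(),
 * along these lines (hypothetical usage sketch, names invented):
 *
 *      cookie = get_state_synchronize_rcu();
 *      do_some_lengthy_work();
 *      cond_synchronize_rcu(cookie);
 *      kfree(old_p);
 *
 * so that the grace-period wait is skipped whenever one has already
 * elapsed during the lengthy work.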
3288 */ 3289 newstate = smp_load_acquire(&rcu_state_p->completed); 3290 if (ULONG_CMP_GE(oldstate, newstate)) 3291 synchronize_rcu(); 3292 } 3293 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 3294 3295 /** 3296 * get_state_synchronize_sched - Snapshot current RCU-sched state 3297 * 3298 * Returns a cookie that is used by a later call to cond_synchronize_sched() 3299 * to determine whether or not a full grace period has elapsed in the 3300 * meantime. 3301 */ 3302 unsigned long get_state_synchronize_sched(void) 3303 { 3304 /* 3305 * Any prior manipulation of RCU-protected data must happen 3306 * before the load from ->gpnum. 3307 */ 3308 smp_mb(); /* ^^^ */ 3309 3310 /* 3311 * Make sure this load happens before the purportedly 3312 * time-consuming work between get_state_synchronize_sched() 3313 * and cond_synchronize_sched(). 3314 */ 3315 return smp_load_acquire(&rcu_sched_state.gpnum); 3316 } 3317 EXPORT_SYMBOL_GPL(get_state_synchronize_sched); 3318 3319 /** 3320 * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period 3321 * 3322 * @oldstate: return value from earlier call to get_state_synchronize_sched() 3323 * 3324 * If a full RCU-sched grace period has elapsed since the earlier call to 3325 * get_state_synchronize_sched(), just return. Otherwise, invoke 3326 * synchronize_sched() to wait for a full grace period. 3327 * 3328 * Yes, this function does not take counter wrap into account. But 3329 * counter wrap is harmless. If the counter wraps, we have waited for 3330 * more than 2 billion grace periods (and way more on a 64-bit system!), 3331 * so waiting for one additional grace period should be just fine. 3332 */ 3333 void cond_synchronize_sched(unsigned long oldstate) 3334 { 3335 unsigned long newstate; 3336 3337 /* 3338 * Ensure that this load happens before any RCU-destructive 3339 * actions the caller might carry out after we return. 3340 */ 3341 newstate = smp_load_acquire(&rcu_sched_state.completed); 3342 if (ULONG_CMP_GE(oldstate, newstate)) 3343 synchronize_sched(); 3344 } 3345 EXPORT_SYMBOL_GPL(cond_synchronize_sched); 3346 3347 /* 3348 * Check to see if there is any immediate RCU-related work to be done 3349 * by the current CPU, for the specified type of RCU, returning 1 if so. 3350 * The checks are in order of increasing expense: checks that can be 3351 * carried out against CPU-local state are performed first. However, 3352 * we must check for CPU stalls first, else we might not get a chance. 3353 */ 3354 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) 3355 { 3356 struct rcu_node *rnp = rdp->mynode; 3357 3358 rdp->n_rcu_pending++; 3359 3360 /* Check for CPU stalls, if enabled. */ 3361 check_cpu_stall(rsp, rdp); 3362 3363 /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */ 3364 if (rcu_nohz_full_cpu(rsp)) 3365 return 0; 3366 3367 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3368 if (rcu_scheduler_fully_active && 3369 rdp->core_needs_qs && rdp->cpu_no_qs.b.norm && 3370 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) { 3371 rdp->n_rp_core_needs_qs++; 3372 } else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) { 3373 rdp->n_rp_report_qs++; 3374 return 1; 3375 } 3376 3377 /* Does this CPU have callbacks ready to invoke? */ 3378 if (rcu_segcblist_ready_cbs(&rdp->cblist)) { 3379 rdp->n_rp_cb_ready++; 3380 return 1; 3381 } 3382 3383 /* Has RCU gone idle with this CPU needing another grace period? 

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	rdp->n_rcu_pending++;

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rsp, rdp);

	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
	if (rcu_nohz_full_cpu(rsp))
		return 0;

	/* Is the RCU core waiting for a quiescent state from this CPU? */
	if (rcu_scheduler_fully_active &&
	    rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
		rdp->n_rp_core_needs_qs++;
	} else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
		rdp->n_rp_report_qs++;
		return 1;
	}

	/* Does this CPU have callbacks ready to invoke? */
	if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
		rdp->n_rp_cb_ready++;
		return 1;
	}

	/* Has RCU gone idle with this CPU needing another grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
		rdp->n_rp_cpu_needs_gp++;
		return 1;
	}

	/* Has another RCU grace period completed? */
	if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
		rdp->n_rp_gp_completed++;
		return 1;
	}

	/* Has a new RCU grace period started? */
	if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
	    unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
		rdp->n_rp_gp_started++;
		return 1;
	}

	/* Does this CPU need a deferred NOCB wakeup? */
	if (rcu_nocb_need_deferred_wakeup(rdp)) {
		rdp->n_rp_nocb_defer_wakeup++;
		return 1;
	}

	/* nothing to do */
	rdp->n_rp_need_nothing++;
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
static int rcu_pending(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
			return 1;
	return 0;
}

/*
 * Return true if the specified CPU has any callback.  If all_lazy is
 * non-NULL, store an indication of whether all callbacks are lazy.
 * (If there are no callbacks, all of them are deemed to be lazy.)
 */
static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
{
	bool al = true;
	bool hc = false;
	struct rcu_data *rdp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		if (rcu_segcblist_empty(&rdp->cblist))
			continue;
		hc = true;
		if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
			al = false;
			break;
		}
	}
	if (all_lazy)
		*all_lazy = al;
	return hc;
}

/*
 * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
 * the compiler is expected to optimize this away.
 */
static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
			       int cpu, unsigned long done)
{
	trace_rcu_barrier(rsp->name, s, cpu,
			  atomic_read(&rsp->barrier_cpu_count), done);
}

/*
 * RCU callback function for _rcu_barrier().  If we are last, wake
 * up the task executing _rcu_barrier().
 */
static void rcu_barrier_callback(struct rcu_head *rhp)
{
	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
	struct rcu_state *rsp = rdp->rsp;

	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
		_rcu_barrier_trace(rsp, TPS("LastCB"), -1,
				   rsp->barrier_sequence);
		complete(&rsp->barrier_completion);
	} else {
		_rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
	}
}
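
/*
 * Illustrative sketch, not part of the RCU implementation: the main
 * external users of the barrier machinery below are module-unload paths.
 * A module that queued callbacks with call_rcu_sched() must wait for all
 * of them to be invoked before its code and data go away, which is what
 * rcu_barrier_sched() (defined below) provides.  The function
 * example_module_exit() is hypothetical.
 */
static void __maybe_unused example_module_exit(void)
{
	/*
	 * No new callbacks can be queued by this module at this point.
	 * Wait for all previously queued call_rcu_sched() callbacks to
	 * be invoked before the module text containing them is freed.
	 */
	rcu_barrier_sched();
}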
3486 */ 3487 static void rcu_barrier_func(void *type) 3488 { 3489 struct rcu_state *rsp = type; 3490 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 3491 3492 _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence); 3493 rdp->barrier_head.func = rcu_barrier_callback; 3494 debug_rcu_head_queue(&rdp->barrier_head); 3495 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) { 3496 atomic_inc(&rsp->barrier_cpu_count); 3497 } else { 3498 debug_rcu_head_unqueue(&rdp->barrier_head); 3499 _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1, 3500 rsp->barrier_sequence); 3501 } 3502 } 3503 3504 /* 3505 * Orchestrate the specified type of RCU barrier, waiting for all 3506 * RCU callbacks of the specified type to complete. 3507 */ 3508 static void _rcu_barrier(struct rcu_state *rsp) 3509 { 3510 int cpu; 3511 struct rcu_data *rdp; 3512 unsigned long s = rcu_seq_snap(&rsp->barrier_sequence); 3513 3514 _rcu_barrier_trace(rsp, TPS("Begin"), -1, s); 3515 3516 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 3517 mutex_lock(&rsp->barrier_mutex); 3518 3519 /* Did someone else do our work for us? */ 3520 if (rcu_seq_done(&rsp->barrier_sequence, s)) { 3521 _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1, 3522 rsp->barrier_sequence); 3523 smp_mb(); /* caller's subsequent code after above check. */ 3524 mutex_unlock(&rsp->barrier_mutex); 3525 return; 3526 } 3527 3528 /* Mark the start of the barrier operation. */ 3529 rcu_seq_start(&rsp->barrier_sequence); 3530 _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence); 3531 3532 /* 3533 * Initialize the count to one rather than to zero in order to 3534 * avoid a too-soon return to zero in case of a short grace period 3535 * (or preemption of this task). Exclude CPU-hotplug operations 3536 * to ensure that no offline CPU has callbacks queued. 3537 */ 3538 init_completion(&rsp->barrier_completion); 3539 atomic_set(&rsp->barrier_cpu_count, 1); 3540 get_online_cpus(); 3541 3542 /* 3543 * Force each CPU with callbacks to register a new callback. 3544 * When that callback is invoked, we will know that all of the 3545 * corresponding CPU's preceding callbacks have been invoked. 3546 */ 3547 for_each_possible_cpu(cpu) { 3548 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu)) 3549 continue; 3550 rdp = per_cpu_ptr(rsp->rda, cpu); 3551 if (rcu_is_nocb_cpu(cpu)) { 3552 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) { 3553 _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu, 3554 rsp->barrier_sequence); 3555 } else { 3556 _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu, 3557 rsp->barrier_sequence); 3558 smp_mb__before_atomic(); 3559 atomic_inc(&rsp->barrier_cpu_count); 3560 __call_rcu(&rdp->barrier_head, 3561 rcu_barrier_callback, rsp, cpu, 0); 3562 } 3563 } else if (rcu_segcblist_n_cbs(&rdp->cblist)) { 3564 _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu, 3565 rsp->barrier_sequence); 3566 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); 3567 } else { 3568 _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu, 3569 rsp->barrier_sequence); 3570 } 3571 } 3572 put_online_cpus(); 3573 3574 /* 3575 * Now that we have an rcu_barrier_callback() callback on each 3576 * CPU, and thus each counted, remove the initial count. 3577 */ 3578 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) 3579 complete(&rsp->barrier_completion); 3580 3581 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 3582 wait_for_completion(&rsp->barrier_completion); 3583 3584 /* Mark the end of the barrier operation. 

	/* Mark the end of the barrier operation. */
	_rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
	rcu_seq_end(&rsp->barrier_sequence);

	/* Other rcu_barrier() invocations can now safely proceed. */
	mutex_unlock(&rsp->barrier_mutex);
}

/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
	_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
	_rcu_barrier(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);

/*
 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online.  The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * disabled.
 */
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	struct rcu_node *rnp = rnp_leaf;

	lockdep_assert_held(&rnp->lock);
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (rnp == NULL)
			return;
		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
		rnp->qsmaskinit |= mask;
		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
	}
}

/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
	rdp->cpu = cpu;
	rdp->rsp = rsp;
	rcu_boot_init_nocb_percpu_data(rdp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
static void
rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rsp->n_force_qs;
	rdp->blimit = blimit;
	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
	    !init_nocb_callback_list(rdp))
		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
	rdp->dynticks->dynticks_nesting = 1;	/* CPU not up, no tearing. */
	rcu_dynticks_eqs_online();
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */

	/*
	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
	 * propagation up the rcu_node tree will happen at the beginning
	 * of the next grace period.
	 */
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
	rdp->beenonline = true;	 /* We have now been online. */
	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
	rdp->completed = rnp->completed;
	rdp->cpu_no_qs.b.norm = true;
	rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
	rdp->core_needs_qs = false;
	rdp->rcu_iw_pending = false;
	rdp->rcu_iw_gpnum = rnp->gpnum - 1;
	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Invoked early in the CPU-online process, when pretty much all
 * services are available.  The incoming CPU is not present.
 */
int rcutree_prepare_cpu(unsigned int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		rcu_init_percpu_data(cpu, rsp);

	rcu_prepare_kthreads(cpu);
	rcu_spawn_all_nocb_kthreads(cpu);

	return 0;
}

/*
 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
 */
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);

	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
}

/*
 * Near the end of the CPU-online process.  Pretty much all services
 * enabled, and the CPU is now very much alive.
 */
int rcutree_online_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rnp->ffmask |= rdp->grpmask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_online_cpu(cpu);
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return 0; /* Too early in boot for scheduler work. */
	sync_sched_exp_online_cleanup(cpu);
	rcutree_affinity_setting(cpu, -1);
	return 0;
}

/*
 * Near the beginning of the CPU-offline process.  The CPU is still very
 * much alive with pretty much all services enabled.
 */
int rcutree_offline_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rnp->ffmask &= ~rdp->grpmask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	rcutree_affinity_setting(cpu, cpu);
	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_offline_cpu(cpu);
	return 0;
}

/*
 * Near the end of the CPU-offline process.  We do only tracing here.
 */
int rcutree_dying_cpu(unsigned int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		rcu_cleanup_dying_cpu(rsp);
	return 0;
}

/*
 * The outgoing CPU is gone and we are running elsewhere.
 */
int rcutree_dead_cpu(unsigned int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rcu_cleanup_dead_cpu(cpu, rsp);
		do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
	}
	return 0;
}

/*
 * Mark the specified CPU as being online so that subsequent grace periods
 * (both expedited and normal) will wait on it.  Note that this means that
 * incoming CPUs are not allowed to use RCU read-side critical sections
 * until this function is called.  Failing to observe this restriction
 * will result in lockdep splats.
 *
 * Note that this function is special in that it is invoked directly
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_cpu_starting(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	int nbits;
	unsigned long oldmask;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		rnp = rdp->mynode;
		mask = rdp->grpmask;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rnp->qsmaskinitnext |= mask;
		oldmask = rnp->expmaskinitnext;
		rnp->expmaskinitnext |= mask;
		oldmask ^= rnp->expmaskinitnext;
		nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
		/* Allow lockless access for expedited grace periods. */
		smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
 * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
 * bit masks.
 */
static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
	mask = rdp->grpmask;
	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
	rnp->qsmaskinitnext &= ~mask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * The outgoing CPU has no further need of RCU, so remove it from
 * the list of CPUs that RCU must track.
 *
 * Note that this function is special in that it is invoked directly
 * from the outgoing CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_report_dead(unsigned int cpu)
{
	struct rcu_state *rsp;

	/* QS for any half-done expedited RCU-sched GP. */
	preempt_disable();
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(rcu_sched_state.rda), true);
	preempt_enable();
	for_each_rcu_flavor(rsp)
		rcu_cleanup_dying_idle_cpu(cpu, rsp);
}

/* Migrate the dead CPU's callbacks to the current CPU. */
static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *my_rdp;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);

	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
		return;  /* No callbacks to migrate. */

	local_irq_save(flags);
	my_rdp = this_cpu_ptr(rsp->rda);
	if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
		local_irq_restore(flags);
		return;
	}
	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
	rcu_advance_cbs(rsp, rnp_root, my_rdp); /* Assign GP to pending CBs. */
	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
		  !rcu_segcblist_empty(&rdp->cblist),
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
		  rcu_segcblist_first_cb(&rdp->cblist));
}

/*
 * The outgoing CPU has just passed through the dying-idle state,
 * and we are being invoked from the CPU that was IPIed to continue the
 * offline operation.  We need to migrate the outgoing CPU's callbacks.
 */
void rcutree_migrate_callbacks(int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		rcu_migrate_callbacks(cpu, rsp);
}
#endif

/*
 * On non-huge systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */
static int rcu_pm_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
			rcu_expedite_gp();
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
			rcu_unexpedite_gp();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/*
 * Spawn the kthreads that handle each RCU flavor's grace periods.
 */
static int __init rcu_spawn_gp_kthread(void)
{
	unsigned long flags;
	int kthread_prio_in = kthread_prio;
	struct rcu_node *rnp;
	struct rcu_state *rsp;
	struct sched_param sp;
	struct task_struct *t;

	/* Force priority into range. */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;
	if (kthread_prio != kthread_prio_in)
		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
			 kthread_prio, kthread_prio_in);

	rcu_scheduler_fully_active = 1;
	for_each_rcu_flavor(rsp) {
		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
		BUG_ON(IS_ERR(t));
		rnp = rcu_get_root(rsp);
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rsp->gp_kthread = t;
		if (kthread_prio) {
			sp.sched_priority = kthread_prio;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		wake_up_process(t);
	}
	rcu_spawn_nocb_kthreads();
	rcu_spawn_boost_kthreads();
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);

/*
 * This function is invoked towards the end of the scheduler's
 * initialization process.  Before this is called, the idle task might
 * contain synchronous grace-period primitives (during which time, this
 * idle task is booting the system, and such primitives are no-ops).
 * After this function is called, any synchronous grace-period primitives
 * are run as expedited, with the requesting task driving the grace period
 * forward.  A later core_initcall() rcu_set_runtime_mode() will switch
 * to full runtime RCU functionality.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_INIT;
	rcu_test_sync_prims();
}

/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
static void __init rcu_init_one(struct rcu_state *rsp)
{
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < rcu_num_lvls; i++)
		rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		rnp = rsp->level[i];
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gpnum = rsp->gpnum;
			rnp->completed = rsp->completed;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % levelspread[i - 1];
				rnp->grpmask = 1UL << rnp->grpnum;
				rnp->parent = rsp->level[i - 1] +
					      j / levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
			spin_lock_init(&rnp->exp_lock);
		}
	}

	init_swait_queue_head(&rsp->gp_wq);
	init_swait_queue_head(&rsp->expedited_wq);
	rnp = rsp->level[rcu_num_lvls - 1];
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
		rcu_boot_init_percpu_data(i, rsp);
	}
	list_add(&rsp->flavors, &rcu_struct_flavors);
}

/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
static void __init rcu_init_geometry(void)
{
	ulong d;
	int i;
	int rcu_capacity[RCU_NUM_LVLS];

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, plus one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute number of nodes that can be handled by an rcu_node tree
	 * with the given number of levels.
	 */
	rcu_capacity[0] = rcu_fanout_leaf;
	for (i = 1; i < RCU_NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;

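	/*
	 * Worked example (illustrative, assuming rcu_fanout_leaf=16,
	 * RCU_FANOUT=64 and RCU_NUM_LVLS=3): rcu_capacity[] is then
	 * { 16, 1024, 65536 }, so a system with nr_cpu_ids=6000 ends up
	 * below with rcu_num_lvls = 3 and num_rcu_lvl[] = { 1, 6, 375 },
	 * for a total of 382 rcu_node structures.
	 */
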
	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/* Calculate the number of levels in the tree. */
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
	}
	rcu_num_lvls = i + 1;

	/* Calculate the number of rcu_nodes at each level of the tree. */
	for (i = 0; i < rcu_num_lvls; i++) {
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i < rcu_num_lvls; i++)
		rcu_num_nodes += num_rcu_lvl[i];
}

/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure referenced by rsp.
 */
static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}

void __init rcu_init(void)
{
	int cpu;

	rcu_early_boot_tests();

	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one(&rcu_bh_state);
	rcu_init_one(&rcu_sched_state);
	if (dump_tree)
		rcu_dump_rcu_node_tree(&rcu_sched_state);
	__rcu_init_preempt();
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	for_each_online_cpu(cpu) {
		rcutree_prepare_cpu(cpu);
		rcu_cpu_starting(cpu);
		rcutree_online_cpu(cpu);
	}
}

#include "tree_exp.h"
#include "tree_plugin.h"