/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>

#include "tree.h"
#include "rcu.h"

MODULE_ALIAS("rcutree");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."
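
/*
 * Illustrative note: because of the MODULE_PARAM_PREFIX just above, the
 * module parameters declared later in this file are set from the kernel
 * command line with an "rcutree." prefix, for example:
 *
 *	rcutree.dump_tree=1 rcutree.rcu_fanout_leaf=16
 *
 * (Example values only; see Documentation/kernel-parameters.txt for the
 * parameters actually supported and their ranges.)
 */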

/* Data structures. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to be able to decipher the string
 * address to the matching string.
 */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.rda = &sname##_data, \
	.call = cr, \
	.gp_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
	.orphan_donetail = &sname##_state.orphan_donelist, \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.name = RCU_STATE_NAME(sname), \
	.abbr = sabbr, \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);

static struct rcu_state *const rcu_state_p;
LIST_HEAD(rcu_struct_flavors);
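
/*
 * Rough sketch (illustrative only, the macro above is authoritative) of
 * what the second initializer above expands to in the CONFIG_TRACING=n
 * case, with the backslash continuations resolved:
 *
 *	static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_bh_data);
 *	struct rcu_state rcu_bh_state = {
 *		.level = { &rcu_bh_state.node[0] },
 *		.rda = &rcu_bh_data,
 *		.call = call_rcu_bh,
 *		.gp_state = RCU_GP_IDLE,
 *		.gpnum = 0UL - 300UL,
 *		.completed = 0UL - 300UL,
 *		.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&rcu_bh_state.orphan_lock),
 *		.orphan_nxttail = &rcu_bh_state.orphan_nxtlist,
 *		.orphan_donetail = &rcu_bh_state.orphan_donelist,
 *		.barrier_mutex = __MUTEX_INITIALIZER(rcu_bh_state.barrier_mutex),
 *		.name = "rcu_bh",
 *		.abbr = 'b',
 *	};
 *
 * With CONFIG_TRACING=y, DEFINE_RCU_TPS() additionally emits the
 * __tracepoint_string variables described in the comment above.
 */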

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
static int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable transitions from zero to one just
 * before the first task is spawned.  So when this variable is zero, RCU
 * can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_sched() to a simple barrier().  When this variable
 * is one, RCU must actually do all the hard work required to detect real
 * grace periods.  This variable is also used to suppress boot-time false
 * positives from lockdep-RCU error checking.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_state *rsp,
			       struct rcu_data *rdp, bool wake);

/* rcuc/rcub kthread realtime priority */
#ifdef CONFIG_RCU_KTHREAD_PRIO
static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
#else /* #ifdef CONFIG_RCU_KTHREAD_PRIO */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
#endif /* #else #ifdef CONFIG_RCU_KTHREAD_PRIO */
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT
static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY;
module_param(gp_preinit_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
static const int gp_preinit_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
module_param(gp_init_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
static const int gp_init_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP
static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY;
module_param(gp_cleanup_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
static const int gp_cleanup_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more grace periods between each
 * delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
}

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(void)
{
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_sched_data.gpnum),
			       TPS("cpuqs"));
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(&rcu_sched_data), true);
}

void rcu_bh_qs(void)
{
	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
	}
}

static DEFINE_PER_CPU(int, rcu_sched_qs_mask);

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
	.dynticks_idle = ATOMIC_INIT(1),
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
};

DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle
 * period, which we in turn do by incrementing the ->dynticks counter
 * by two.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_momentary_dyntick_idle(void)
{
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp;
	int resched_mask;
	struct rcu_state *rsp;

	/*
	 * Yes, we can lose flag-setting operations.  This is OK, because
	 * the flag will be set again after some delay.
	 */
	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
	raw_cpu_write(rcu_sched_qs_mask, 0);

	/* Find the flavor that needs a quiescent state. */
	for_each_rcu_flavor(rsp) {
		rdp = raw_cpu_ptr(rsp->rda);
		if (!(resched_mask & rsp->flavor_mask))
			continue;
		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
		if (READ_ONCE(rdp->mynode->completed) !=
		    READ_ONCE(rdp->cond_resched_completed))
			continue;

		/*
		 * Pretend to be momentarily idle for the quiescent state.
		 * This allows the grace-period kthread to record the
		 * quiescent state, with no need for this CPU to do anything
		 * further.
		 */
		rdtp = this_cpu_ptr(&rcu_dynticks);
		smp_mb__before_atomic(); /* Earlier stuff before QS. */
		atomic_add(2, &rdtp->dynticks);  /* QS. */
		smp_mb__after_atomic(); /* Later stuff after QS. */
		break;
	}
}
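
/*
 * Illustrative sketch (not used anywhere): how a remote observer such as
 * the grace-period kthread can use the ->dynticks counter manipulated
 * above.  The counter is even while the CPU is idle and odd otherwise,
 * so either an even sample or an advance of at least two since the last
 * snapshot shows that the CPU passed through a quiescent state:
 *
 *	snap = atomic_add_return(0, &rdtp->dynticks);	// full barrier
 *	...
 *	curr = atomic_add_return(0, &rdtp->dynticks);
 *	if (!(curr & 0x1) || curr - snap >= 2)
 *		// a quiescent state may be reported for this CPU
 *
 * The real versions of this logic are dyntick_save_progress_counter()
 * and rcu_implicit_dynticks_qs() later in this file, which also handle
 * counter wraparound.
 */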

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */
void rcu_note_context_switch(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch();
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
		rcu_momentary_dyntick_idle();
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them).  Either way, do a lightweight quiescent state for
 * all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	this_cpu_inc(rcu_qs_ctr);
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
static long qhimark = 10000;	/* If this many pending, ignore blimit. */
static long qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 20;
module_param(jiffies_till_sched_qs, ulong, 0644);

static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp,
			 int (*f)(struct rcu_data *rsp, bool *isidle,
				  unsigned long *maxj),
			 bool *isidle, unsigned long *maxj);
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started(void)
{
	return rcu_state_p->gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_sched(void)
{
	return rcu_sched_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU BH batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void)
{
	return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void)
{
	return rcu_state_p->completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		pr_info("%s: wait state: %d ->state: %#lx\n",
			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
		/* sched_show_task(rsp->gp_kthread); */
	}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed)
{
	struct rcu_state *rsp = NULL;

	switch (test_type) {
	case RCU_FLAVOR:
		rsp = rcu_state_p;
		break;
	case RCU_BH_FLAVOR:
		rsp = &rcu_bh_state;
		break;
	case RCU_SCHED_FLAVOR:
		rsp = &rcu_sched_state;
		break;
	default:
		break;
	}
	if (rsp != NULL) {
		*flags = READ_ONCE(rsp->gp_flags);
		*gpnum = READ_ONCE(rsp->gpnum);
		*completed = READ_ONCE(rsp->completed);
		return;
	}
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
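
/*
 * Illustrative sketch (under "#if 0", never built): one way the
 * grace-period counters exported above can be used.  Because
 * synchronize_sched() does not return until a full grace period has
 * elapsed (on multi-CPU systems), the ->completed counter sampled
 * afterwards will normally have advanced past the earlier sample.
 * This is roughly what rcutorture does with these counters; it is
 * shown here only as an example of the intended usage.
 */
#if 0
static void example_gp_counter_check(void)
{
	unsigned long before = rcu_batches_completed_sched();

	synchronize_sched();	/* Wait for a full grace period to elapse. */
	pr_info("RCU-sched grace periods completed since snapshot: %lu\n",
		rcu_batches_completed_sched() - before);
}
#endif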

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
}

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

/*
 * Is there any need for future grace periods?
 * Interrupts must be disabled.  If the caller does not hold the root
 * rcu_node structure's ->lock, the results are advisory only.
 */
static int rcu_future_needs_gp(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);
	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
	int *fp = &rnp->need_future_gp[idx];

	return READ_ONCE(*fp);
}

/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	int i;

	if (rcu_gp_in_progress(rsp))
		return false;  /* No, a grace period is already in progress. */
	if (rcu_future_needs_gp(rsp))
		return true;  /* Yes, a no-CBs CPU needs one. */
	if (!rdp->nxttail[RCU_NEXT_TAIL])
		return false;  /* No, this is a no-CBs (or offline) CPU. */
	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
		return true;  /* Yes, CPU has newly registered callbacks. */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
		    ULONG_CMP_LT(READ_ONCE(rsp->completed),
				 rdp->nxtcompleted[i]))
			return true;  /* Yes, CBs for future grace period. */
	return false; /* No grace period needed. */
}

/*
 * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter now is zero,
 * we really have entered idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_enter_common(long long oldval, bool user)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
	rcu_prepare_for_idle();
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     atomic_read(&rdtp->dynticks) & 0x1);
	rcu_dynticks_task_enter();

	/*
	 * It is illegal to enter an extended quiescent state while
	 * in an RCU read-side critical section.
	 */
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal idle entry in RCU read-side critical section.");
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
			 "Illegal idle entry in RCU-bh read-side critical section.");
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
			 "Illegal idle entry in RCU-sched read-side critical section.");
}
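
/*
 * Illustrative only: the RCU_LOCKDEP_WARN() calls above catch (buggy)
 * code that enters the extended quiescent state while still inside an
 * RCU read-side critical section, for example:
 *
 *	rcu_read_lock();
 *	...
 *	rcu_idle_enter();	// splat: grace periods may now end while
 *	...			// this CPU still thinks it is a reader
 *	rcu_read_unlock();
 *
 * Readers must end before the CPU tells RCU to stop watching it.
 */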

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_enter(bool user)
{
	long long oldval;
	struct rcu_dynticks *rdtp;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (oldval & DYNTICK_TASK_NEST_MASK) == 0);
	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
		rdtp->dynticks_nesting = 0;
		rcu_eqs_enter_common(oldval, user);
	} else {
		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
	}
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * We crowbar the ->dynticks_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
void rcu_idle_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_enter(false);
	rcu_sysidle_enter(0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit().  This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 */
void rcu_user_enter(void)
{
	rcu_eqs_enter(1);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture violates this assumption, RCU will give you what you
 * deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_exit(void)
{
	long long oldval;
	struct rcu_dynticks *rdtp;

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting < 0);
	if (rdtp->dynticks_nesting)
		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_enter_common(oldval, true);
	rcu_sysidle_enter(1);
}

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter was previously zero,
 * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_exit_common(long long oldval, int user)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	rcu_dynticks_task_exit();
	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic();  /* See above. */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(atomic_read(&rdtp->dynticks) & 0x1));
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
				  oldval, rdtp->dynticks_nesting);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_dynticks *rdtp;
	long long oldval;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval & DYNTICK_TASK_NEST_MASK) {
		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	} else {
		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
		rcu_eqs_exit_common(oldval, user);
	}
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
 * allow for the possibility of usermode upcalls messing up our count
 * of interrupt nesting level during the busy period that is just
 * now starting.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	rcu_sysidle_exit(0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
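
/*
 * Illustrative sketch (under "#if 0", never built): the intended pairing
 * of rcu_idle_enter() and rcu_idle_exit() around the low-power wait in an
 * idle loop.  The loop below is a stand-in, not the kernel's actual idle
 * loop, and ignores details such as interrupt state.
 */
#if 0
static void example_idle_loop(void)
{
	while (!need_resched()) {
		rcu_idle_enter();	/* RCU stops watching this CPU. */
		arch_cpu_idle();	/* Low-power wait, wakes on interrupt. */
		rcu_idle_exit();	/* RCU resumes watching this CPU. */
	}
}
#endif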

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section at any time.
 */
void rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to
 * user mode!  This code assumes that the idle loop never does upcalls to
 * user mode.  If your architecture does do upcalls from the idle loop (or
 * does anything else that results in unbalanced calls to the irq_enter()
 * and irq_exit() functions), RCU will give you what you deserve, good
 * and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_enter(void)
{
	struct rcu_dynticks *rdtp;
	long long oldval;

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting == 0);
	if (oldval)
		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_exit_common(oldval, true);
	rcu_sysidle_exit(1);
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int incby = 2;

	/* Complain about underflow. */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
		smp_mb__before_atomic();  /* Force delay from prior write. */
		atomic_inc(&rdtp->dynticks);
		/* atomic_inc() before later RCU read-side crit sects */
		smp_mb__after_atomic();  /* See above. */
		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
		incby = 1;
	}
	rdtp->dynticks_nmi_nesting += incby;
	barrier();
}
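
/*
 * Worked example of the bookkeeping above (illustrative only).  Starting
 * from an RCU-idle CPU (->dynticks even, ->dynticks_nmi_nesting == 0):
 *
 *	first NMI arrives:	->dynticks becomes odd, nesting 0 -> 1
 *	nested NMI arrives:	nesting 1 -> 3
 *	nested NMI returns:	rcu_nmi_exit() below: nesting 3 -> 1
 *	first NMI returns:	nesting 1 -> 0, ->dynticks becomes even again
 *
 * Hence nesting == 1 identifies the outermost NMI that interrupted an
 * RCU-idle period, as noted in the comment above.
 */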

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdtp->dynticks_nmi_nesting != 1) {
		rdtp->dynticks_nmi_nesting -= 2;
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	rdtp->dynticks_nmi_nesting = 0;
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic();  /* Force delay to next write. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}

/**
 * __rcu_is_watching - are RCU read-side critical sections safe?
 *
 * Return true if RCU is watching the running CPU, which means that
 * this CPU can safely enter RCU read-side critical sections.  Unlike
 * rcu_is_watching(), the caller of __rcu_is_watching() must have at
 * least disabled preemption.
 */
bool notrace __rcu_is_watching(void)
{
	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * If the current CPU is running in its idle loop and is neither in an
 * interrupt handler nor an NMI handler, return false; otherwise return
 * true.
 */
bool notrace rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = __rcu_is_watching();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 * offline to continue to use RCU for one jiffy after marking itself
 * offline in the cpu_online_mask.  This leniency is necessary given the
 * non-atomic nature of the online and offline processing, for example,
 * the fact that a CPU enters the scheduler after completing the CPU_DYING
 * notifiers.
 *
 * This is also why RCU internally marks CPUs online during the
 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
 *
 * Disable checking if in an NMI handler because we cannot safely report
 * errors from NMI handlers anyway.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret;

	if (in_nmi())
		return true;
	preempt_disable();
	rdp = this_cpu_ptr(&rcu_sched_data);
	rnp = rdp->mynode;
	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
	      !rcu_scheduler_fully_active;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp,
					 bool *isidle, unsigned long *maxj)
{
	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
	rcu_sysidle_check_cpu(rdp, isidle, maxj);
	if ((rdp->dynticks_snap & 0x1) == 0) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
				 rdp->mynode->gpnum))
			WRITE_ONCE(rdp->gpwrap, true);
		return 1;
	}
	return 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks-idle
 * state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
				    bool *isidle, unsigned long *maxj)
{
	unsigned int curr;
	int *rcrmp;
	unsigned int snap;

	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
	snap = (unsigned int)rdp->dynticks_snap;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		rdp->dynticks_fqs++;
		return 1;
	}

	/*
	 * Check for the CPU being offline, but only if the grace period
	 * is old enough.  We don't need to worry about the CPU changing
	 * state: If we see it offline even once, it has been through a
	 * quiescent state.
	 *
	 * The reason for insisting that the grace period be at least
	 * one jiffy old is that CPUs that are not quite online and that
	 * have just gone offline can still execute RCU read-side critical
	 * sections.
	 */
	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
		return 0;  /* Grace period is not old enough. */
	barrier();
	if (cpu_is_offline(rdp->cpu)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
		rdp->offline_fqs++;
		return 1;
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
	 * even context-switching back and forth between a pair of
	 * in-kernel CPU-bound tasks cannot advance grace periods.
	 * So if the grace period is old enough, make the CPU pay attention.
	 * Note that the unsynchronized assignments to the per-CPU
	 * rcu_sched_qs_mask variable are safe.  Yes, setting of
	 * bits can be lost, but they will be set again on the next
	 * force-quiescent-state pass.  So lost bit sets do not result
	 * in incorrect behavior, merely in a grace period lasting
	 * a few jiffies longer than it might otherwise.  Because
	 * there are at most four threads involved, and because the
	 * updates are only once every few jiffies, the probability of
	 * lossage (and thus of slight grace-period extension) is
	 * quite low.
	 *
	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
	 * is set too high, we override with half of the RCU CPU stall
	 * warning delay.
	 */
	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
	if (ULONG_CMP_GE(jiffies,
			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
			WRITE_ONCE(rdp->cond_resched_completed,
				   READ_ONCE(rdp->mynode->completed));
			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
			WRITE_ONCE(*rcrmp,
				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
		}
		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
	}

	/* And if it has been a really long time, kick the CPU as well. */
	if (ULONG_CMP_GE(jiffies,
			 rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
	    ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
		resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */

	return 0;
}

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rsp->gp_start = j;
	smp_wmb(); /* Record start time before stall time. */
	j1 = rcu_jiffies_till_stall_check();
	WRITE_ONCE(rsp->jiffies_stall, j + j1);
	rsp->jiffies_resched = j + j1 / 2;
	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/*
 * Complain about starvation of grace-period kthread.
 */
static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
{
	unsigned long gpa;
	unsigned long j;

	j = jiffies;
	gpa = READ_ONCE(rsp->gp_activity);
	if (j - gpa > 2 * HZ) {
		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx\n",
		       rsp->name, j - gpa,
		       rsp->gpnum, rsp->completed,
		       rsp->gp_flags,
		       gp_state_getname(rsp->gp_state), rsp->gp_state,
		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
		if (rsp->gp_kthread)
			sched_show_task(rsp->gp_kthread);
	}
}

/*
 * Dump stacks of all tasks running on stalled CPUs.
 */
static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
				if (rnp->qsmask & (1UL << cpu))
					dump_cpu_task(rnp->grplo + cpu);
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
{
	int cpu;
	long delta;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/* Only let one CPU complain about others per time interval. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rsp->jiffies_stall,
		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:",
	       rsp->name);
	print_cpu_stall_info_begin();
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
				if (rnp->qsmask & (1UL << cpu)) {
					print_cpu_stall_info(rsp,
							     rnp->grplo + cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - rsp->gp_start),
		(long)rsp->gpnum, (long)rsp->completed, totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks(rsp);
	} else {
		if (READ_ONCE(rsp->gpnum) != gpnum ||
		    READ_ONCE(rsp->completed) == gpnum) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rsp->gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rsp->name, j - gpa, j, gpa,
			       jiffies_till_next_fqs,
			       rcu_get_root(rsp)->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}

	/* Complain about tasks blocking the grace period. */
	rcu_print_detail_task_stall(rsp);

	rcu_check_gp_kthread_starvation(rsp);

	force_quiescent_state(rsp);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
	print_cpu_stall_info_begin();
	print_cpu_stall_info(rsp, smp_processor_id());
	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
		jiffies - rsp->gp_start,
		(long)rsp->gpnum, (long)rsp->completed, totqlen);

	rcu_check_gp_kthread_starvation(rsp);

	rcu_dump_cpu_stacks(rsp);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
		WRITE_ONCE(rsp->jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	resched_cpu(smp_processor_id());
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long completed;
	unsigned long gpnum;
	unsigned long gps;
	unsigned long j;
	unsigned long js;
	struct rcu_node *rnp;

	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
		return;
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
	 * then rsp->gp_start, and finally rsp->completed.  These values
	 * are updated in the opposite order with memory barriers (or
	 * equivalent) during grace-period initialization and cleanup.
	 * Now, a false positive can occur if we get a new value of
	 * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
	 * the memory barriers, the only way that this can happen is if one
	 * grace period ends and another starts between these two fetches.
	 * Detect this by comparing rsp->completed with the previous fetch
	 * from rsp->gpnum.
	 *
	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
	 * and rsp->gp_start suffice to forestall false positives.
	 */
	gpnum = READ_ONCE(rsp->gpnum);
	smp_rmb(); /* Pick up ->gpnum first... */
	js = READ_ONCE(rsp->jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rsp->gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
	completed = READ_ONCE(rsp->completed);
	if (ULONG_CMP_GE(completed, gpnum) ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	if (rcu_gp_in_progress(rsp) &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(rsp, gpnum);
	}
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
}
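
/*
 * Illustrative note on the "jiffies + ULONG_MAX / 2" above: the stall
 * checks compare times with the wraparound-safe ULONG_CMP_LT() and
 * ULONG_CMP_GE() macros, which treat a value up to half the counter
 * space ahead of the current time as "in the future".  Setting
 * ->jiffies_stall half the counter space into the future therefore makes
 * the ULONG_CMP_LT(j, js) test in check_cpu_stall() above take its
 * early-return path for any value of jiffies the current grace period
 * can plausibly reach, which is what suppresses the warnings.
 */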

/*
 * Initialize the specified rcu_data structure's default callback list
 * to empty.  The default callback list is the one that is not used by
 * no-callbacks CPUs.
 */
static void init_default_callback_list(struct rcu_data *rdp)
{
	int i;

	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
}

/*
 * Initialize the specified rcu_data structure's callback list to empty.
 */
static void init_callback_list(struct rcu_data *rdp)
{
	if (init_nocb_callback_list(rdp))
		return;
	init_default_callback_list(rdp);
}

/*
 * Determine the value that ->completed will have at the end of the
 * next subsequent grace period.  This is used to tag callbacks so that
 * a CPU can invoke callbacks in a timely fashion even if that CPU has
 * been dyntick-idle for an extended period with callbacks under the
 * influence of RCU_FAST_NO_HZ.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	/*
	 * If RCU is idle, we just wait for the next grace period.
	 * But we can only be sure that RCU is idle if we are looking
	 * at the root rcu_node structure -- otherwise, a new grace
	 * period might have started, but just not yet gotten around
	 * to initializing the current non-root rcu_node structure.
	 */
	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
		return rnp->completed + 1;

	/*
	 * Otherwise, wait for a possible partial grace period and
	 * then the subsequent full grace period.
	 */
	return rnp->completed + 2;
}

/*
 * Trace-event helper function for rcu_start_future_gp() and
 * rcu_nocb_wait_gp().
 */
static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
				unsigned long c, const char *s)
{
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * Start some future grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->need_future_gp field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock.
 */
static bool __maybe_unused
rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
		    unsigned long *c_out)
{
	unsigned long c;
	int i;
	bool ret = false;
	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);

	/*
	 * Pick up grace-period number for new callbacks.  If this
	 * grace period is already marked as needed, return to the caller.
	 */
	c = rcu_cbs_completed(rdp->rsp, rnp);
	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
	if (rnp->need_future_gp[c & 0x1]) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
		goto out;
	}

	/*
	 * If either this rcu_node structure or the root rcu_node structure
	 * believe that a grace period is in progress, then we must wait
	 * for the one following, which is in "c".  Because our request
	 * will be noticed at the end of the current grace period, we don't
	 * need to explicitly start one.  We only do the lockless check
	 * of rnp_root's fields if the current rcu_node structure thinks
	 * there is no grace period in flight, and because we hold rnp->lock,
	 * the only possible change is when rnp_root's two fields are
	 * equal, in which case rnp_root->gpnum might be concurrently
	 * incremented.  But that is OK, as it will just result in our
	 * doing some extra useless work.
	 */
	if (rnp->gpnum != rnp->completed ||
	    READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
		rnp->need_future_gp[c & 0x1]++;
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
		goto out;
	}

	/*
	 * There might be no grace period in progress.  If we don't already
	 * hold it, acquire the root rcu_node structure's lock in order to
	 * start one (if needed).
	 */
	if (rnp != rnp_root)
		raw_spin_lock_rcu_node(rnp_root);

	/*
	 * Get a new grace-period number.  If there really is no grace
	 * period in progress, it will be smaller than the one we obtained
	 * earlier.  Adjust callbacks as needed.  Note that even no-CBs
	 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
	 */
	c = rcu_cbs_completed(rdp->rsp, rnp_root);
	for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
		if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
			rdp->nxtcompleted[i] = c;

	/*
	 * If the need for the required grace period is already
	 * recorded, trace and leave.
	 */
	if (rnp_root->need_future_gp[c & 0x1]) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
		goto unlock_out;
	}

	/* Record the need for the future grace period. */
	rnp_root->need_future_gp[c & 0x1]++;

	/* If a grace period is not already in progress, start one. */
	if (rnp_root->gpnum != rnp_root->completed) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
	} else {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
		ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
	}
unlock_out:
	if (rnp != rnp_root)
		raw_spin_unlock_rcu_node(rnp_root);
out:
	if (c_out != NULL)
		*c_out = c;
	return ret;
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.  Also invoke
 * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
 * waiting for this grace period to complete.
 */
static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int c = rnp->completed;
	int needmore;
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

	rnp->need_future_gp[c & 0x1] = 0;
	needmore = rnp->need_future_gp[(c + 1) & 0x1];
	trace_rcu_future_gp(rnp, rdp, c,
			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}
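
/*
 * Illustrative note on the two-element ->need_future_gp[] array handled
 * above: with c == rnp->completed == 4, the slot just retired is
 * ->need_future_gp[4 & 0x1] == ->need_future_gp[0], while requests that
 * still need a grace period live in ->need_future_gp[(4 + 1) & 0x1] ==
 * ->need_future_gp[1].  A nonzero count there is what makes
 * rcu_future_gp_cleanup() report that additional grace periods are
 * needed.  (Example values only.)
 */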
TPS("CleanupMore") : TPS("Cleanup")); 1620 return needmore; 1621 } 1622 1623 /* 1624 * Awaken the grace-period kthread for the specified flavor of RCU. 1625 * Don't do a self-awaken, and don't bother awakening when there is 1626 * nothing for the grace-period kthread to do (as in several CPUs 1627 * raced to awaken, and we lost), and finally don't try to awaken 1628 * a kthread that has not yet been created. 1629 */ 1630 static void rcu_gp_kthread_wake(struct rcu_state *rsp) 1631 { 1632 if (current == rsp->gp_kthread || 1633 !READ_ONCE(rsp->gp_flags) || 1634 !rsp->gp_kthread) 1635 return; 1636 swake_up(&rsp->gp_wq); 1637 } 1638 1639 /* 1640 * If there is room, assign a ->completed number to any callbacks on 1641 * this CPU that have not already been assigned. Also accelerate any 1642 * callbacks that were previously assigned a ->completed number that has 1643 * since proven to be too conservative, which can happen if callbacks get 1644 * assigned a ->completed number while RCU is idle, but with reference to 1645 * a non-root rcu_node structure. This function is idempotent, so it does 1646 * not hurt to call it repeatedly. Returns an flag saying that we should 1647 * awaken the RCU grace-period kthread. 1648 * 1649 * The caller must hold rnp->lock with interrupts disabled. 1650 */ 1651 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, 1652 struct rcu_data *rdp) 1653 { 1654 unsigned long c; 1655 int i; 1656 bool ret; 1657 1658 /* If the CPU has no callbacks, nothing to do. */ 1659 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL]) 1660 return false; 1661 1662 /* 1663 * Starting from the sublist containing the callbacks most 1664 * recently assigned a ->completed number and working down, find the 1665 * first sublist that is not assignable to an upcoming grace period. 1666 * Such a sublist has something in it (first two tests) and has 1667 * a ->completed number assigned that will complete sooner than 1668 * the ->completed number for newly arrived callbacks (last test). 1669 * 1670 * The key point is that any later sublist can be assigned the 1671 * same ->completed number as the newly arrived callbacks, which 1672 * means that the callbacks in any of these later sublist can be 1673 * grouped into a single sublist, whether or not they have already 1674 * been assigned a ->completed number. 1675 */ 1676 c = rcu_cbs_completed(rsp, rnp); 1677 for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--) 1678 if (rdp->nxttail[i] != rdp->nxttail[i - 1] && 1679 !ULONG_CMP_GE(rdp->nxtcompleted[i], c)) 1680 break; 1681 1682 /* 1683 * If there are no sublist for unassigned callbacks, leave. 1684 * At the same time, advance "i" one sublist, so that "i" will 1685 * index into the sublist where all the remaining callbacks should 1686 * be grouped into. 1687 */ 1688 if (++i >= RCU_NEXT_TAIL) 1689 return false; 1690 1691 /* 1692 * Assign all subsequent callbacks' ->completed number to the next 1693 * full grace period and group them all in the sublist initially 1694 * indexed by "i". 1695 */ 1696 for (; i <= RCU_NEXT_TAIL; i++) { 1697 rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL]; 1698 rdp->nxtcompleted[i] = c; 1699 } 1700 /* Record any needed additional grace periods. */ 1701 ret = rcu_start_future_gp(rnp, rdp, NULL); 1702 1703 /* Trace depending on how much we were able to accelerate. 
*/ 1704 if (!*rdp->nxttail[RCU_WAIT_TAIL]) 1705 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB")); 1706 else 1707 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB")); 1708 return ret; 1709 } 1710 1711 /* 1712 * Move any callbacks whose grace period has completed to the 1713 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1714 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL 1715 * sublist. This function is idempotent, so it does not hurt to 1716 * invoke it repeatedly. As long as it is not invoked -too- often... 1717 * Returns true if the RCU grace-period kthread needs to be awakened. 1718 * 1719 * The caller must hold rnp->lock with interrupts disabled. 1720 */ 1721 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, 1722 struct rcu_data *rdp) 1723 { 1724 int i, j; 1725 1726 /* If the CPU has no callbacks, nothing to do. */ 1727 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL]) 1728 return false; 1729 1730 /* 1731 * Find all callbacks whose ->completed numbers indicate that they 1732 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 1733 */ 1734 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { 1735 if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i])) 1736 break; 1737 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i]; 1738 } 1739 /* Clean up any sublist tail pointers that were misordered above. */ 1740 for (j = RCU_WAIT_TAIL; j < i; j++) 1741 rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL]; 1742 1743 /* Copy down callbacks to fill in empty sublists. */ 1744 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { 1745 if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL]) 1746 break; 1747 rdp->nxttail[j] = rdp->nxttail[i]; 1748 rdp->nxtcompleted[j] = rdp->nxtcompleted[i]; 1749 } 1750 1751 /* Classify any remaining callbacks. */ 1752 return rcu_accelerate_cbs(rsp, rnp, rdp); 1753 } 1754 1755 /* 1756 * Update CPU-local rcu_data state to record the beginnings and ends of 1757 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1758 * structure corresponding to the current CPU, and must have irqs disabled. 1759 * Returns true if the grace-period kthread needs to be awakened. 1760 */ 1761 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, 1762 struct rcu_data *rdp) 1763 { 1764 bool ret; 1765 1766 /* Handle the ends of any preceding grace periods first. */ 1767 if (rdp->completed == rnp->completed && 1768 !unlikely(READ_ONCE(rdp->gpwrap))) { 1769 1770 /* No grace period end, so just accelerate recent callbacks. */ 1771 ret = rcu_accelerate_cbs(rsp, rnp, rdp); 1772 1773 } else { 1774 1775 /* Advance callbacks. */ 1776 ret = rcu_advance_cbs(rsp, rnp, rdp); 1777 1778 /* Remember that we saw this grace-period completion. */ 1779 rdp->completed = rnp->completed; 1780 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); 1781 } 1782 1783 if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) { 1784 /* 1785 * If the current grace period is waiting for this CPU, 1786 * set up to detect a quiescent state, otherwise don't 1787 * go looking for one. 
1788 */ 1789 rdp->gpnum = rnp->gpnum; 1790 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); 1791 rdp->cpu_no_qs.b.norm = true; 1792 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); 1793 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); 1794 zero_cpu_stall_ticks(rdp); 1795 WRITE_ONCE(rdp->gpwrap, false); 1796 } 1797 return ret; 1798 } 1799 1800 static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) 1801 { 1802 unsigned long flags; 1803 bool needwake; 1804 struct rcu_node *rnp; 1805 1806 local_irq_save(flags); 1807 rnp = rdp->mynode; 1808 if ((rdp->gpnum == READ_ONCE(rnp->gpnum) && 1809 rdp->completed == READ_ONCE(rnp->completed) && 1810 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1811 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ 1812 local_irq_restore(flags); 1813 return; 1814 } 1815 needwake = __note_gp_changes(rsp, rnp, rdp); 1816 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1817 if (needwake) 1818 rcu_gp_kthread_wake(rsp); 1819 } 1820 1821 static void rcu_gp_slow(struct rcu_state *rsp, int delay) 1822 { 1823 if (delay > 0 && 1824 !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1825 schedule_timeout_uninterruptible(delay); 1826 } 1827 1828 /* 1829 * Initialize a new grace period. Return false if no grace period required. 1830 */ 1831 static bool rcu_gp_init(struct rcu_state *rsp) 1832 { 1833 unsigned long oldmask; 1834 struct rcu_data *rdp; 1835 struct rcu_node *rnp = rcu_get_root(rsp); 1836 1837 WRITE_ONCE(rsp->gp_activity, jiffies); 1838 raw_spin_lock_irq_rcu_node(rnp); 1839 if (!READ_ONCE(rsp->gp_flags)) { 1840 /* Spurious wakeup, tell caller to go back to sleep. */ 1841 raw_spin_unlock_irq_rcu_node(rnp); 1842 return false; 1843 } 1844 WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */ 1845 1846 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) { 1847 /* 1848 * Grace period already in progress, don't start another. 1849 * Not supposed to be able to happen. 1850 */ 1851 raw_spin_unlock_irq_rcu_node(rnp); 1852 return false; 1853 } 1854 1855 /* Advance to a new grace period and initialize state. */ 1856 record_gp_stall_check_time(rsp); 1857 /* Record GP times before starting GP, hence smp_store_release(). */ 1858 smp_store_release(&rsp->gpnum, rsp->gpnum + 1); 1859 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start")); 1860 raw_spin_unlock_irq_rcu_node(rnp); 1861 1862 /* 1863 * Apply per-leaf buffered online and offline operations to the 1864 * rcu_node tree. Note that this new grace period need not wait 1865 * for subsequent online CPUs, and that quiescent-state forcing 1866 * will handle subsequent offline CPUs. 1867 */ 1868 rcu_for_each_leaf_node(rsp, rnp) { 1869 rcu_gp_slow(rsp, gp_preinit_delay); 1870 raw_spin_lock_irq_rcu_node(rnp); 1871 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1872 !rnp->wait_blkd_tasks) { 1873 /* Nothing to do on this leaf rcu_node structure. */ 1874 raw_spin_unlock_irq_rcu_node(rnp); 1875 continue; 1876 } 1877 1878 /* Record old state, apply changes to ->qsmaskinit field. */ 1879 oldmask = rnp->qsmaskinit; 1880 rnp->qsmaskinit = rnp->qsmaskinitnext; 1881 1882 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1883 if (!oldmask != !rnp->qsmaskinit) { 1884 if (!oldmask) /* First online CPU for this rcu_node. */ 1885 rcu_init_new_rnp(rnp); 1886 else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */ 1887 rnp->wait_blkd_tasks = true; 1888 else /* Last offline CPU and can propagate. 
*/ 1889 rcu_cleanup_dead_rnp(rnp); 1890 } 1891 1892 /* 1893 * If all waited-on tasks from prior grace period are 1894 * done, and if all this rcu_node structure's CPUs are 1895 * still offline, propagate up the rcu_node tree and 1896 * clear ->wait_blkd_tasks. Otherwise, if one of this 1897 * rcu_node structure's CPUs has since come back online, 1898 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp() 1899 * checks for this, so just call it unconditionally). 1900 */ 1901 if (rnp->wait_blkd_tasks && 1902 (!rcu_preempt_has_tasks(rnp) || 1903 rnp->qsmaskinit)) { 1904 rnp->wait_blkd_tasks = false; 1905 rcu_cleanup_dead_rnp(rnp); 1906 } 1907 1908 raw_spin_unlock_irq_rcu_node(rnp); 1909 } 1910 1911 /* 1912 * Set the quiescent-state-needed bits in all the rcu_node 1913 * structures for all currently online CPUs in breadth-first order, 1914 * starting from the root rcu_node structure, relying on the layout 1915 * of the tree within the rsp->node[] array. Note that other CPUs 1916 * will access only the leaves of the hierarchy, thus seeing that no 1917 * grace period is in progress, at least until the corresponding 1918 * leaf node has been initialized. In addition, we have excluded 1919 * CPU-hotplug operations. 1920 * 1921 * The grace period cannot complete until the initialization 1922 * process finishes, because this kthread handles both. 1923 */ 1924 rcu_for_each_node_breadth_first(rsp, rnp) { 1925 rcu_gp_slow(rsp, gp_init_delay); 1926 raw_spin_lock_irq_rcu_node(rnp); 1927 rdp = this_cpu_ptr(rsp->rda); 1928 rcu_preempt_check_blocked_tasks(rnp); 1929 rnp->qsmask = rnp->qsmaskinit; 1930 WRITE_ONCE(rnp->gpnum, rsp->gpnum); 1931 if (WARN_ON_ONCE(rnp->completed != rsp->completed)) 1932 WRITE_ONCE(rnp->completed, rsp->completed); 1933 if (rnp == rdp->mynode) 1934 (void)__note_gp_changes(rsp, rnp, rdp); 1935 rcu_preempt_boost_start_gp(rnp); 1936 trace_rcu_grace_period_init(rsp->name, rnp->gpnum, 1937 rnp->level, rnp->grplo, 1938 rnp->grphi, rnp->qsmask); 1939 raw_spin_unlock_irq_rcu_node(rnp); 1940 cond_resched_rcu_qs(); 1941 WRITE_ONCE(rsp->gp_activity, jiffies); 1942 } 1943 1944 return true; 1945 } 1946 1947 /* 1948 * Helper function for wait_event_interruptible_timeout() wakeup 1949 * at force-quiescent-state time. 1950 */ 1951 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp) 1952 { 1953 struct rcu_node *rnp = rcu_get_root(rsp); 1954 1955 /* Someone like call_rcu() requested a force-quiescent-state scan. */ 1956 *gfp = READ_ONCE(rsp->gp_flags); 1957 if (*gfp & RCU_GP_FLAG_FQS) 1958 return true; 1959 1960 /* The current grace period has completed. */ 1961 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 1962 return true; 1963 1964 return false; 1965 } 1966 1967 /* 1968 * Do one round of quiescent-state forcing. 1969 */ 1970 static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time) 1971 { 1972 bool isidle = false; 1973 unsigned long maxj; 1974 struct rcu_node *rnp = rcu_get_root(rsp); 1975 1976 WRITE_ONCE(rsp->gp_activity, jiffies); 1977 rsp->n_force_qs++; 1978 if (first_time) { 1979 /* Collect dyntick-idle snapshots. */ 1980 if (is_sysidle_rcu_state(rsp)) { 1981 isidle = true; 1982 maxj = jiffies - ULONG_MAX / 4; 1983 } 1984 force_qs_rnp(rsp, dyntick_save_progress_counter, 1985 &isidle, &maxj); 1986 rcu_sysidle_report_gp(rsp, isidle, maxj); 1987 } else { 1988 /* Handle dyntick-idle and offline CPUs. */ 1989 isidle = true; 1990 force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj); 1991 } 1992 /* Clear flag to prevent immediate re-entry. 
*/ 1993 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { 1994 raw_spin_lock_irq_rcu_node(rnp); 1995 WRITE_ONCE(rsp->gp_flags, 1996 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS); 1997 raw_spin_unlock_irq_rcu_node(rnp); 1998 } 1999 } 2000 2001 /* 2002 * Clean up after the old grace period. 2003 */ 2004 static void rcu_gp_cleanup(struct rcu_state *rsp) 2005 { 2006 unsigned long gp_duration; 2007 bool needgp = false; 2008 int nocb = 0; 2009 struct rcu_data *rdp; 2010 struct rcu_node *rnp = rcu_get_root(rsp); 2011 struct swait_queue_head *sq; 2012 2013 WRITE_ONCE(rsp->gp_activity, jiffies); 2014 raw_spin_lock_irq_rcu_node(rnp); 2015 gp_duration = jiffies - rsp->gp_start; 2016 if (gp_duration > rsp->gp_max) 2017 rsp->gp_max = gp_duration; 2018 2019 /* 2020 * We know the grace period is complete, but to everyone else 2021 * it appears to still be ongoing. But it is also the case 2022 * that to everyone else it looks like there is nothing that 2023 * they can do to advance the grace period. It is therefore 2024 * safe for us to drop the lock in order to mark the grace 2025 * period as completed in all of the rcu_node structures. 2026 */ 2027 raw_spin_unlock_irq_rcu_node(rnp); 2028 2029 /* 2030 * Propagate new ->completed value to rcu_node structures so 2031 * that other CPUs don't have to wait until the start of the next 2032 * grace period to process their callbacks. This also avoids 2033 * some nasty RCU grace-period initialization races by forcing 2034 * the end of the current grace period to be completely recorded in 2035 * all of the rcu_node structures before the beginning of the next 2036 * grace period is recorded in any of the rcu_node structures. 2037 */ 2038 rcu_for_each_node_breadth_first(rsp, rnp) { 2039 raw_spin_lock_irq_rcu_node(rnp); 2040 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); 2041 WARN_ON_ONCE(rnp->qsmask); 2042 WRITE_ONCE(rnp->completed, rsp->gpnum); 2043 rdp = this_cpu_ptr(rsp->rda); 2044 if (rnp == rdp->mynode) 2045 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; 2046 /* smp_mb() provided by prior unlock-lock pair. */ 2047 nocb += rcu_future_gp_cleanup(rsp, rnp); 2048 sq = rcu_nocb_gp_get(rnp); 2049 raw_spin_unlock_irq_rcu_node(rnp); 2050 rcu_nocb_gp_cleanup(sq); 2051 cond_resched_rcu_qs(); 2052 WRITE_ONCE(rsp->gp_activity, jiffies); 2053 rcu_gp_slow(rsp, gp_cleanup_delay); 2054 } 2055 rnp = rcu_get_root(rsp); 2056 raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */ 2057 rcu_nocb_gp_set(rnp, nocb); 2058 2059 /* Declare grace period done. */ 2060 WRITE_ONCE(rsp->completed, rsp->gpnum); 2061 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end")); 2062 rsp->gp_state = RCU_GP_IDLE; 2063 rdp = this_cpu_ptr(rsp->rda); 2064 /* Advance CBs to reduce false positives below. */ 2065 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp; 2066 if (needgp || cpu_needs_another_gp(rsp, rdp)) { 2067 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); 2068 trace_rcu_grace_period(rsp->name, 2069 READ_ONCE(rsp->gpnum), 2070 TPS("newreq")); 2071 } 2072 raw_spin_unlock_irq_rcu_node(rnp); 2073 } 2074 2075 /* 2076 * Body of kthread that handles grace periods. 2077 */ 2078 static int __noreturn rcu_gp_kthread(void *arg) 2079 { 2080 bool first_gp_fqs; 2081 int gf; 2082 unsigned long j; 2083 int ret; 2084 struct rcu_state *rsp = arg; 2085 struct rcu_node *rnp = rcu_get_root(rsp); 2086 2087 rcu_bind_gp_kthread(); 2088 for (;;) { 2089 2090 /* Handle grace-period start. 
*/ 2091 for (;;) { 2092 trace_rcu_grace_period(rsp->name, 2093 READ_ONCE(rsp->gpnum), 2094 TPS("reqwait")); 2095 rsp->gp_state = RCU_GP_WAIT_GPS; 2096 swait_event_interruptible(rsp->gp_wq, 2097 READ_ONCE(rsp->gp_flags) & 2098 RCU_GP_FLAG_INIT); 2099 rsp->gp_state = RCU_GP_DONE_GPS; 2100 /* Locking provides needed memory barrier. */ 2101 if (rcu_gp_init(rsp)) 2102 break; 2103 cond_resched_rcu_qs(); 2104 WRITE_ONCE(rsp->gp_activity, jiffies); 2105 WARN_ON(signal_pending(current)); 2106 trace_rcu_grace_period(rsp->name, 2107 READ_ONCE(rsp->gpnum), 2108 TPS("reqwaitsig")); 2109 } 2110 2111 /* Handle quiescent-state forcing. */ 2112 first_gp_fqs = true; 2113 j = jiffies_till_first_fqs; 2114 if (j > HZ) { 2115 j = HZ; 2116 jiffies_till_first_fqs = HZ; 2117 } 2118 ret = 0; 2119 for (;;) { 2120 if (!ret) 2121 rsp->jiffies_force_qs = jiffies + j; 2122 trace_rcu_grace_period(rsp->name, 2123 READ_ONCE(rsp->gpnum), 2124 TPS("fqswait")); 2125 rsp->gp_state = RCU_GP_WAIT_FQS; 2126 ret = swait_event_interruptible_timeout(rsp->gp_wq, 2127 rcu_gp_fqs_check_wake(rsp, &gf), j); 2128 rsp->gp_state = RCU_GP_DOING_FQS; 2129 /* Locking provides needed memory barriers. */ 2130 /* If grace period done, leave loop. */ 2131 if (!READ_ONCE(rnp->qsmask) && 2132 !rcu_preempt_blocked_readers_cgp(rnp)) 2133 break; 2134 /* If time for quiescent-state forcing, do it. */ 2135 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) || 2136 (gf & RCU_GP_FLAG_FQS)) { 2137 trace_rcu_grace_period(rsp->name, 2138 READ_ONCE(rsp->gpnum), 2139 TPS("fqsstart")); 2140 rcu_gp_fqs(rsp, first_gp_fqs); 2141 first_gp_fqs = false; 2142 trace_rcu_grace_period(rsp->name, 2143 READ_ONCE(rsp->gpnum), 2144 TPS("fqsend")); 2145 cond_resched_rcu_qs(); 2146 WRITE_ONCE(rsp->gp_activity, jiffies); 2147 } else { 2148 /* Deal with stray signal. */ 2149 cond_resched_rcu_qs(); 2150 WRITE_ONCE(rsp->gp_activity, jiffies); 2151 WARN_ON(signal_pending(current)); 2152 trace_rcu_grace_period(rsp->name, 2153 READ_ONCE(rsp->gpnum), 2154 TPS("fqswaitsig")); 2155 } 2156 j = jiffies_till_next_fqs; 2157 if (j > HZ) { 2158 j = HZ; 2159 jiffies_till_next_fqs = HZ; 2160 } else if (j < 1) { 2161 j = 1; 2162 jiffies_till_next_fqs = 1; 2163 } 2164 } 2165 2166 /* Handle grace-period end. */ 2167 rsp->gp_state = RCU_GP_CLEANUP; 2168 rcu_gp_cleanup(rsp); 2169 rsp->gp_state = RCU_GP_CLEANED; 2170 } 2171 } 2172 2173 /* 2174 * Start a new RCU grace period if warranted, re-initializing the hierarchy 2175 * in preparation for detecting the next grace period. The caller must hold 2176 * the root node's ->lock and hard irqs must be disabled. 2177 * 2178 * Note that it is legal for a dying CPU (which is marked as offline) to 2179 * invoke this function. This can happen when the dying CPU reports its 2180 * quiescent state. 2181 * 2182 * Returns true if the grace-period kthread must be awakened. 2183 */ 2184 static bool 2185 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, 2186 struct rcu_data *rdp) 2187 { 2188 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) { 2189 /* 2190 * Either we have not yet spawned the grace-period 2191 * task, this CPU does not need another grace period, 2192 * or a grace period is already in progress. 2193 * Either way, don't start a new grace period. 
2194 */ 2195 return false; 2196 } 2197 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); 2198 trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), 2199 TPS("newreq")); 2200 2201 /* 2202 * We can't do wakeups while holding the rnp->lock, as that 2203 * could cause possible deadlocks with the rq->lock. Defer 2204 * the wakeup to our caller. 2205 */ 2206 return true; 2207 } 2208 2209 /* 2210 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's 2211 * callbacks. Note that rcu_start_gp_advanced() cannot do this because it 2212 * is invoked indirectly from rcu_advance_cbs(), which would result in 2213 * endless recursion -- or would do so if it wasn't for the self-deadlock 2214 * that is encountered beforehand. 2215 * 2216 * Returns true if the grace-period kthread needs to be awakened. 2217 */ 2218 static bool rcu_start_gp(struct rcu_state *rsp) 2219 { 2220 struct rcu_data *rdp = this_cpu_ptr(rsp->rda); 2221 struct rcu_node *rnp = rcu_get_root(rsp); 2222 bool ret = false; 2223 2224 /* 2225 * If there is no grace period in progress right now, any 2226 * callbacks we have up to this point will be satisfied by the 2227 * next grace period. Also, advancing the callbacks reduces the 2228 * probability of false positives from cpu_needs_another_gp() 2229 * resulting in pointless grace periods. So, advance callbacks 2230 * then start the grace period! 2231 */ 2232 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret; 2233 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret; 2234 return ret; 2235 } 2236 2237 /* 2238 * Report a full set of quiescent states to the specified rcu_state data 2239 * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period 2240 * kthread if another grace period is required. Whether we wake 2241 * the grace-period kthread or it awakens itself for the next round 2242 * of quiescent-state forcing, that kthread will clean up after the 2243 * just-completed grace period. Note that the caller must hold rnp->lock, 2244 * which is released before return. 2245 */ 2246 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) 2247 __releases(rcu_get_root(rsp)->lock) 2248 { 2249 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); 2250 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); 2251 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags); 2252 swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */ 2253 } 2254 2255 /* 2256 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 2257 * Allows quiescent states for a group of CPUs to be reported at one go 2258 * to the specified rcu_node structure, though all the CPUs in the group 2259 * must be represented by the same rcu_node structure (which need not be a 2260 * leaf rcu_node structure, though it often will be). The gps parameter 2261 * is the grace-period snapshot, which means that the quiescent states 2262 * are valid only if rnp->gpnum is equal to gps. That structure's lock 2263 * must be held upon entry, and it is released before return. 2264 */ 2265 static void 2266 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, 2267 struct rcu_node *rnp, unsigned long gps, unsigned long flags) 2268 __releases(rnp->lock) 2269 { 2270 unsigned long oldmask = 0; 2271 struct rcu_node *rnp_c; 2272 2273 /* Walk up the rcu_node hierarchy. */ 2274 for (;;) { 2275 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) { 2276 2277 /* 2278 * Our bit has already been cleared, or the 2279 * relevant grace period is already over, so done. 
2280 */ 2281 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2282 return; 2283 } 2284 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 2285 rnp->qsmask &= ~mask; 2286 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum, 2287 mask, rnp->qsmask, rnp->level, 2288 rnp->grplo, rnp->grphi, 2289 !!rnp->gp_tasks); 2290 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2291 2292 /* Other bits still set at this level, so done. */ 2293 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2294 return; 2295 } 2296 mask = rnp->grpmask; 2297 if (rnp->parent == NULL) { 2298 2299 /* No more levels. Exit loop holding root lock. */ 2300 2301 break; 2302 } 2303 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2304 rnp_c = rnp; 2305 rnp = rnp->parent; 2306 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2307 oldmask = rnp_c->qsmask; 2308 } 2309 2310 /* 2311 * Get here if we are the last CPU to pass through a quiescent 2312 * state for this grace period. Invoke rcu_report_qs_rsp() 2313 * to clean up and start the next grace period if one is needed. 2314 */ 2315 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */ 2316 } 2317 2318 /* 2319 * Record a quiescent state for all tasks that were previously queued 2320 * on the specified rcu_node structure and that were blocking the current 2321 * RCU grace period. The caller must hold the specified rnp->lock with 2322 * irqs disabled, and this lock is released upon return, but irqs remain 2323 * disabled. 2324 */ 2325 static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp, 2326 struct rcu_node *rnp, unsigned long flags) 2327 __releases(rnp->lock) 2328 { 2329 unsigned long gps; 2330 unsigned long mask; 2331 struct rcu_node *rnp_p; 2332 2333 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p || 2334 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2335 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2336 return; /* Still need more quiescent states! */ 2337 } 2338 2339 rnp_p = rnp->parent; 2340 if (rnp_p == NULL) { 2341 /* 2342 * Only one rcu_node structure in the tree, so don't 2343 * try to report up to its nonexistent parent! 2344 */ 2345 rcu_report_qs_rsp(rsp, flags); 2346 return; 2347 } 2348 2349 /* Report up the rest of the hierarchy, tracking current ->gpnum. */ 2350 gps = rnp->gpnum; 2351 mask = rnp->grpmask; 2352 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2353 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2354 rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags); 2355 } 2356 2357 /* 2358 * Record a quiescent state for the specified CPU to that CPU's rcu_data 2359 * structure. This must be called from the specified CPU. 2360 */ 2361 static void 2362 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) 2363 { 2364 unsigned long flags; 2365 unsigned long mask; 2366 bool needwake; 2367 struct rcu_node *rnp; 2368 2369 rnp = rdp->mynode; 2370 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2371 if ((rdp->cpu_no_qs.b.norm && 2372 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) || 2373 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum || 2374 rdp->gpwrap) { 2375 2376 /* 2377 * The grace period in which this quiescent state was 2378 * recorded has ended, so don't report it upwards. 2379 * We will instead need a new quiescent state that lies 2380 * within the current grace period. 2381 */ 2382 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. 
*/ 2383 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); 2384 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2385 return; 2386 } 2387 mask = rdp->grpmask; 2388 if ((rnp->qsmask & mask) == 0) { 2389 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2390 } else { 2391 rdp->core_needs_qs = false; 2392 2393 /* 2394 * This GP can't end until this CPU checks in, so all of our 2395 * callbacks can be processed during the next GP. 2396 */ 2397 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); 2398 2399 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); 2400 /* ^^^ Released rnp->lock */ 2401 if (needwake) 2402 rcu_gp_kthread_wake(rsp); 2403 } 2404 } 2405 2406 /* 2407 * Check to see if there is a new grace period of which this CPU 2408 * is not yet aware, and if so, set up local rcu_data state for it. 2409 * Otherwise, see if this CPU has just passed through its first 2410 * quiescent state for this grace period, and record that fact if so. 2411 */ 2412 static void 2413 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) 2414 { 2415 /* Check for grace-period ends and beginnings. */ 2416 note_gp_changes(rsp, rdp); 2417 2418 /* 2419 * Does this CPU still need to do its part for current grace period? 2420 * If no, return and let the other CPUs do their part as well. 2421 */ 2422 if (!rdp->core_needs_qs) 2423 return; 2424 2425 /* 2426 * Was there a quiescent state since the beginning of the grace 2427 * period? If no, then exit and wait for the next call. 2428 */ 2429 if (rdp->cpu_no_qs.b.norm && 2430 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) 2431 return; 2432 2433 /* 2434 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2435 * judge of that). 2436 */ 2437 rcu_report_qs_rdp(rdp->cpu, rsp, rdp); 2438 } 2439 2440 /* 2441 * Send the specified CPU's RCU callbacks to the orphanage. The 2442 * specified CPU must be offline, and the caller must hold the 2443 * ->orphan_lock. 2444 */ 2445 static void 2446 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp, 2447 struct rcu_node *rnp, struct rcu_data *rdp) 2448 { 2449 /* No-CBs CPUs do not have orphanable callbacks. */ 2450 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu)) 2451 return; 2452 2453 /* 2454 * Orphan the callbacks. First adjust the counts. This is safe 2455 * because _rcu_barrier() excludes CPU-hotplug operations, so it 2456 * cannot be running now. Thus no memory barrier is required. 2457 */ 2458 if (rdp->nxtlist != NULL) { 2459 rsp->qlen_lazy += rdp->qlen_lazy; 2460 rsp->qlen += rdp->qlen; 2461 rdp->n_cbs_orphaned += rdp->qlen; 2462 rdp->qlen_lazy = 0; 2463 WRITE_ONCE(rdp->qlen, 0); 2464 } 2465 2466 /* 2467 * Next, move those callbacks still needing a grace period to 2468 * the orphanage, where some other CPU will pick them up. 2469 * Some of the callbacks might have gone partway through a grace 2470 * period, but that is too bad. They get to start over because we 2471 * cannot assume that grace periods are synchronized across CPUs. 2472 * We don't bother updating the ->nxttail[] array yet; instead 2473 * we just reset the whole thing later on. 2474 */ 2475 if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) { 2476 *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL]; 2477 rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL]; 2478 *rdp->nxttail[RCU_DONE_TAIL] = NULL; 2479 } 2480 2481 /* 2482 * Then move the ready-to-invoke callbacks to the orphanage, 2483 * where some other CPU will pick them up. These will not be 2484 * required to pass through another grace period: They are done.
2485 */ 2486 if (rdp->nxtlist != NULL) { 2487 *rsp->orphan_donetail = rdp->nxtlist; 2488 rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL]; 2489 } 2490 2491 /* 2492 * Finally, initialize the rcu_data structure's list to empty and 2493 * disallow further callbacks on this CPU. 2494 */ 2495 init_callback_list(rdp); 2496 rdp->nxttail[RCU_NEXT_TAIL] = NULL; 2497 } 2498 2499 /* 2500 * Adopt the RCU callbacks from the specified rcu_state structure's 2501 * orphanage. The caller must hold the ->orphan_lock. 2502 */ 2503 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags) 2504 { 2505 int i; 2506 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 2507 2508 /* No-CBs CPUs are handled specially. */ 2509 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2510 rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags)) 2511 return; 2512 2513 /* Do the accounting first. */ 2514 rdp->qlen_lazy += rsp->qlen_lazy; 2515 rdp->qlen += rsp->qlen; 2516 rdp->n_cbs_adopted += rsp->qlen; 2517 if (rsp->qlen_lazy != rsp->qlen) 2518 rcu_idle_count_callbacks_posted(); 2519 rsp->qlen_lazy = 0; 2520 rsp->qlen = 0; 2521 2522 /* 2523 * We do not need a memory barrier here because the only way we 2524 * can get here if there is an rcu_barrier() in flight is if 2525 * we are the task doing the rcu_barrier(). 2526 */ 2527 2528 /* First adopt the ready-to-invoke callbacks. */ 2529 if (rsp->orphan_donelist != NULL) { 2530 *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL]; 2531 *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist; 2532 for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--) 2533 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL]) 2534 rdp->nxttail[i] = rsp->orphan_donetail; 2535 rsp->orphan_donelist = NULL; 2536 rsp->orphan_donetail = &rsp->orphan_donelist; 2537 } 2538 2539 /* And then adopt the callbacks that still need a grace period. */ 2540 if (rsp->orphan_nxtlist != NULL) { 2541 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist; 2542 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail; 2543 rsp->orphan_nxtlist = NULL; 2544 rsp->orphan_nxttail = &rsp->orphan_nxtlist; 2545 } 2546 } 2547 2548 /* 2549 * Trace the fact that this CPU is going offline. 2550 */ 2551 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) 2552 { 2553 RCU_TRACE(unsigned long mask); 2554 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda)); 2555 RCU_TRACE(struct rcu_node *rnp = rdp->mynode); 2556 2557 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2558 return; 2559 2560 RCU_TRACE(mask = rdp->grpmask); 2561 trace_rcu_grace_period(rsp->name, 2562 rnp->gpnum + 1 - !!(rnp->qsmask & mask), 2563 TPS("cpuofl")); 2564 } 2565 2566 /* 2567 * All CPUs for the specified rcu_node structure have gone offline, 2568 * and all tasks that were preempted within an RCU read-side critical 2569 * section while running on one of those CPUs have since exited their RCU 2570 * read-side critical section. Some other CPU is reporting this fact with 2571 * the specified rcu_node structure's ->lock held and interrupts disabled. 2572 * This function therefore goes up the tree of rcu_node structures, 2573 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 2574 * the leaf rcu_node structure's ->qsmaskinit field has already been 2575 * updated 2576 * 2577 * This function does check that the specified rcu_node structure has 2578 * all CPUs offline and no blocked tasks, so it is OK to invoke it 2579 * prematurely. That said, invoking it after the fact will cost you 2580 * a needless lock acquisition. So once it has done its work, don't 2581 * invoke it again. 
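 *
 * As an illustrative sketch (a hypothetical two-level tree, not a
 * requirement of this code): clearing the last CPU bit in a leaf
 * rcu_node structure's ->qsmaskinit causes this function to clear that
 * leaf's ->grpmask bit in its parent's ->qsmaskinit, and the walk
 * continues upward only while each level's ->qsmaskinit in turn
 * becomes zero.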
2582 */ 2583 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 2584 { 2585 long mask; 2586 struct rcu_node *rnp = rnp_leaf; 2587 2588 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2589 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp)) 2590 return; 2591 for (;;) { 2592 mask = rnp->grpmask; 2593 rnp = rnp->parent; 2594 if (!rnp) 2595 break; 2596 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 2597 rnp->qsmaskinit &= ~mask; 2598 rnp->qsmask &= ~mask; 2599 if (rnp->qsmaskinit) { 2600 raw_spin_unlock_rcu_node(rnp); 2601 /* irqs remain disabled. */ 2602 return; 2603 } 2604 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2605 } 2606 } 2607 2608 /* 2609 * The CPU has been completely removed, and some other CPU is reporting 2610 * this fact from process context. Do the remainder of the cleanup, 2611 * including orphaning the outgoing CPU's RCU callbacks, and also 2612 * adopting them. There can only be one CPU hotplug operation at a time, 2613 * so no other CPU can be attempting to update rcu_cpu_kthread_task. 2614 */ 2615 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) 2616 { 2617 unsigned long flags; 2618 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 2619 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2620 2621 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2622 return; 2623 2624 /* Adjust any no-longer-needed kthreads. */ 2625 rcu_boost_kthread_setaffinity(rnp, -1); 2626 2627 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */ 2628 raw_spin_lock_irqsave(&rsp->orphan_lock, flags); 2629 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); 2630 rcu_adopt_orphan_cbs(rsp, flags); 2631 raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags); 2632 2633 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, 2634 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", 2635 cpu, rdp->qlen, rdp->nxtlist); 2636 } 2637 2638 /* 2639 * Invoke any RCU callbacks that have made it to the end of their grace 2640 * period. Throttle as specified by rdp->blimit. 2641 */ 2642 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) 2643 { 2644 unsigned long flags; 2645 struct rcu_head *next, *list, **tail; 2646 long bl, count, count_lazy; 2647 int i; 2648 2649 /* If no callbacks are ready, just return. */ 2650 if (!cpu_has_callbacks_ready_to_invoke(rdp)) { 2651 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0); 2652 trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist), 2653 need_resched(), is_idle_task(current), 2654 rcu_is_callbacks_kthread()); 2655 return; 2656 } 2657 2658 /* 2659 * Extract the list of ready callbacks, disabling interrupts to prevent 2660 * races with call_rcu() from interrupt handlers. 2661 */ 2662 local_irq_save(flags); 2663 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2664 bl = rdp->blimit; 2665 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl); 2666 list = rdp->nxtlist; 2667 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL]; 2668 *rdp->nxttail[RCU_DONE_TAIL] = NULL; 2669 tail = rdp->nxttail[RCU_DONE_TAIL]; 2670 for (i = RCU_NEXT_SIZE - 1; i >= 0; i--) 2671 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL]) 2672 rdp->nxttail[i] = &rdp->nxtlist; 2673 local_irq_restore(flags); 2674 2675 /* Invoke callbacks. */ 2676 count = count_lazy = 0; 2677 while (list) { 2678 next = list->next; 2679 prefetch(next); 2680 debug_rcu_head_unqueue(list); 2681 if (__rcu_reclaim(rsp->name, list)) 2682 count_lazy++; 2683 list = next; 2684 /* Stop only if limit reached and CPU has something to do.
*/ 2685 if (++count >= bl && 2686 (need_resched() || 2687 (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) 2688 break; 2689 } 2690 2691 local_irq_save(flags); 2692 trace_rcu_batch_end(rsp->name, count, !!list, need_resched(), 2693 is_idle_task(current), 2694 rcu_is_callbacks_kthread()); 2695 2696 /* Update count, and requeue any remaining callbacks. */ 2697 if (list != NULL) { 2698 *tail = rdp->nxtlist; 2699 rdp->nxtlist = list; 2700 for (i = 0; i < RCU_NEXT_SIZE; i++) 2701 if (&rdp->nxtlist == rdp->nxttail[i]) 2702 rdp->nxttail[i] = tail; 2703 else 2704 break; 2705 } 2706 smp_mb(); /* List handling before counting for rcu_barrier(). */ 2707 rdp->qlen_lazy -= count_lazy; 2708 WRITE_ONCE(rdp->qlen, rdp->qlen - count); 2709 rdp->n_cbs_invoked += count; 2710 2711 /* Reinstate batch limit if we have worked down the excess. */ 2712 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) 2713 rdp->blimit = blimit; 2714 2715 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2716 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { 2717 rdp->qlen_last_fqs_check = 0; 2718 rdp->n_force_qs_snap = rsp->n_force_qs; 2719 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) 2720 rdp->qlen_last_fqs_check = rdp->qlen; 2721 WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0)); 2722 2723 local_irq_restore(flags); 2724 2725 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2726 if (cpu_has_callbacks_ready_to_invoke(rdp)) 2727 invoke_rcu_core(); 2728 } 2729 2730 /* 2731 * Check to see if this CPU is in a non-context-switch quiescent state 2732 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). 2733 * Also schedule RCU core processing. 2734 * 2735 * This function must be called from hardirq context. It is normally 2736 * invoked from the scheduling-clock interrupt. If rcu_pending returns 2737 * false, there is no point in invoking rcu_check_callbacks(). 2738 */ 2739 void rcu_check_callbacks(int user) 2740 { 2741 trace_rcu_utilization(TPS("Start scheduler-tick")); 2742 increment_cpu_stall_ticks(); 2743 if (user || rcu_is_cpu_rrupt_from_idle()) { 2744 2745 /* 2746 * Get here if this CPU took its interrupt from user 2747 * mode or from the idle loop, and if this is not a 2748 * nested interrupt. In this case, the CPU is in 2749 * a quiescent state, so note it. 2750 * 2751 * No memory barrier is required here because both 2752 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local 2753 * variables that other CPUs neither access nor modify, 2754 * at least not while the corresponding CPU is online. 2755 */ 2756 2757 rcu_sched_qs(); 2758 rcu_bh_qs(); 2759 2760 } else if (!in_softirq()) { 2761 2762 /* 2763 * Get here if this CPU did not take its interrupt from 2764 * softirq, in other words, if it is not interrupting 2765 * a rcu_bh read-side critical section. This is an _bh 2766 * critical section, so note it. 2767 */ 2768 2769 rcu_bh_qs(); 2770 } 2771 rcu_preempt_check_callbacks(); 2772 if (rcu_pending()) 2773 invoke_rcu_core(); 2774 if (user) 2775 rcu_note_voluntary_context_switch(current); 2776 trace_rcu_utilization(TPS("End scheduler-tick")); 2777 } 2778 2779 /* 2780 * Scan the leaf rcu_node structures, processing dyntick state for any that 2781 * have not yet encountered a quiescent state, using the function specified. 2782 * Also initiate boosting for any threads blocked on the root rcu_node. 2783 * 2784 * The caller must have suppressed start of new grace periods. 
2785 */ 2786 static void force_qs_rnp(struct rcu_state *rsp, 2787 int (*f)(struct rcu_data *rdp, bool *isidle, 2788 unsigned long *maxj), 2789 bool *isidle, unsigned long *maxj) 2790 { 2791 unsigned long bit; 2792 int cpu; 2793 unsigned long flags; 2794 unsigned long mask; 2795 struct rcu_node *rnp; 2796 2797 rcu_for_each_leaf_node(rsp, rnp) { 2798 cond_resched_rcu_qs(); 2799 mask = 0; 2800 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2801 if (rnp->qsmask == 0) { 2802 if (rcu_state_p == &rcu_sched_state || 2803 rsp != rcu_state_p || 2804 rcu_preempt_blocked_readers_cgp(rnp)) { 2805 /* 2806 * No point in scanning bits because they 2807 * are all zero. But we might need to 2808 * priority-boost blocked readers. 2809 */ 2810 rcu_initiate_boost(rnp, flags); 2811 /* rcu_initiate_boost() releases rnp->lock */ 2812 continue; 2813 } 2814 if (rnp->parent && 2815 (rnp->parent->qsmask & rnp->grpmask)) { 2816 /* 2817 * Race between grace-period 2818 * initialization and task exiting RCU 2819 * read-side critical section: Report. 2820 */ 2821 rcu_report_unblock_qs_rnp(rsp, rnp, flags); 2822 /* rcu_report_unblock_qs_rnp() releases ->lock */ 2823 continue; 2824 } 2825 } 2826 cpu = rnp->grplo; 2827 bit = 1; 2828 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { 2829 if ((rnp->qsmask & bit) != 0) { 2830 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj)) 2831 mask |= bit; 2832 } 2833 } 2834 if (mask != 0) { 2835 /* Idle/offline CPUs, report (releases rnp->lock). */ 2836 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); 2837 } else { 2838 /* Nothing to do here, so just drop the lock. */ 2839 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2840 } 2841 } 2842 } 2843 2844 /* 2845 * Force quiescent states on reluctant CPUs, and also detect which 2846 * CPUs are in dyntick-idle mode. 2847 */ 2848 static void force_quiescent_state(struct rcu_state *rsp) 2849 { 2850 unsigned long flags; 2851 bool ret; 2852 struct rcu_node *rnp; 2853 struct rcu_node *rnp_old = NULL; 2854 2855 /* Funnel through hierarchy to reduce memory contention. */ 2856 rnp = __this_cpu_read(rsp->rda->mynode); 2857 for (; rnp != NULL; rnp = rnp->parent) { 2858 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || 2859 !raw_spin_trylock(&rnp->fqslock); 2860 if (rnp_old != NULL) 2861 raw_spin_unlock(&rnp_old->fqslock); 2862 if (ret) { 2863 rsp->n_force_qs_lh++; 2864 return; 2865 } 2866 rnp_old = rnp; 2867 } 2868 /* rnp_old == rcu_get_root(rsp), rnp == NULL. */ 2869 2870 /* Reached the root of the rcu_node tree, acquire lock. */ 2871 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2872 raw_spin_unlock(&rnp_old->fqslock); 2873 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { 2874 rsp->n_force_qs_lh++; 2875 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2876 return; /* Someone beat us to it. */ 2877 } 2878 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); 2879 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2880 swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */ 2881 } 2882 2883 /* 2884 * This does the RCU core processing work for the specified rcu_state 2885 * and rcu_data structures. This may be called only from the CPU to 2886 * which the rdp belongs. 2887 */ 2888 static void 2889 __rcu_process_callbacks(struct rcu_state *rsp) 2890 { 2891 unsigned long flags; 2892 bool needwake; 2893 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 2894 2895 WARN_ON_ONCE(rdp->beenonline == 0); 2896 2897 /* Update RCU state based on any recent quiescent states.
*/ 2898 rcu_check_quiescent_state(rsp, rdp); 2899 2900 /* Does this CPU require a not-yet-started grace period? */ 2901 local_irq_save(flags); 2902 if (cpu_needs_another_gp(rsp, rdp)) { 2903 raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */ 2904 needwake = rcu_start_gp(rsp); 2905 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags); 2906 if (needwake) 2907 rcu_gp_kthread_wake(rsp); 2908 } else { 2909 local_irq_restore(flags); 2910 } 2911 2912 /* If there are callbacks ready, invoke them. */ 2913 if (cpu_has_callbacks_ready_to_invoke(rdp)) 2914 invoke_rcu_callbacks(rsp, rdp); 2915 2916 /* Do any needed deferred wakeups of rcuo kthreads. */ 2917 do_nocb_deferred_wakeup(rdp); 2918 } 2919 2920 /* 2921 * Do RCU core processing for the current CPU. 2922 */ 2923 static void rcu_process_callbacks(struct softirq_action *unused) 2924 { 2925 struct rcu_state *rsp; 2926 2927 if (cpu_is_offline(smp_processor_id())) 2928 return; 2929 trace_rcu_utilization(TPS("Start RCU core")); 2930 for_each_rcu_flavor(rsp) 2931 __rcu_process_callbacks(rsp); 2932 trace_rcu_utilization(TPS("End RCU core")); 2933 } 2934 2935 /* 2936 * Schedule RCU callback invocation. If the specified type of RCU 2937 * does not support RCU priority boosting, just do a direct call, 2938 * otherwise wake up the per-CPU kernel kthread. Note that because we 2939 * are running on the current CPU with softirqs disabled, the 2940 * rcu_cpu_kthread_task cannot disappear out from under us. 2941 */ 2942 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) 2943 { 2944 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) 2945 return; 2946 if (likely(!rsp->boost)) { 2947 rcu_do_batch(rsp, rdp); 2948 return; 2949 } 2950 invoke_rcu_callbacks_kthread(); 2951 } 2952 2953 static void invoke_rcu_core(void) 2954 { 2955 if (cpu_online(smp_processor_id())) 2956 raise_softirq(RCU_SOFTIRQ); 2957 } 2958 2959 /* 2960 * Handle any core-RCU processing required by a call_rcu() invocation. 2961 */ 2962 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, 2963 struct rcu_head *head, unsigned long flags) 2964 { 2965 bool needwake; 2966 2967 /* 2968 * If called from an extended quiescent state, invoke the RCU 2969 * core in order to force a re-evaluation of RCU's idleness. 2970 */ 2971 if (!rcu_is_watching()) 2972 invoke_rcu_core(); 2973 2974 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2975 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2976 return; 2977 2978 /* 2979 * Force the grace period if too many callbacks or too long waiting. 2980 * Enforce hysteresis, and don't invoke force_quiescent_state() 2981 * if some other CPU has recently done so. Also, don't bother 2982 * invoking force_quiescent_state() if the newly enqueued callback 2983 * is the only one waiting for a grace period to complete. 2984 */ 2985 if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { 2986 2987 /* Are we ignoring a completed grace period? */ 2988 note_gp_changes(rsp, rdp); 2989 2990 /* Start a new grace period if one not already started. */ 2991 if (!rcu_gp_in_progress(rsp)) { 2992 struct rcu_node *rnp_root = rcu_get_root(rsp); 2993 2994 raw_spin_lock_rcu_node(rnp_root); 2995 needwake = rcu_start_gp(rsp); 2996 raw_spin_unlock_rcu_node(rnp_root); 2997 if (needwake) 2998 rcu_gp_kthread_wake(rsp); 2999 } else { 3000 /* Give the grace period a kick. 
*/ 3001 rdp->blimit = LONG_MAX; 3002 if (rsp->n_force_qs == rdp->n_force_qs_snap && 3003 *rdp->nxttail[RCU_DONE_TAIL] != head) 3004 force_quiescent_state(rsp); 3005 rdp->n_force_qs_snap = rsp->n_force_qs; 3006 rdp->qlen_last_fqs_check = rdp->qlen; 3007 } 3008 } 3009 } 3010 3011 /* 3012 * RCU callback function to leak a callback. 3013 */ 3014 static void rcu_leak_callback(struct rcu_head *rhp) 3015 { 3016 } 3017 3018 /* 3019 * Helper function for call_rcu() and friends. The cpu argument will 3020 * normally be -1, indicating "currently running CPU". It may specify 3021 * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier() 3022 * is expected to specify a CPU. 3023 */ 3024 static void 3025 __call_rcu(struct rcu_head *head, rcu_callback_t func, 3026 struct rcu_state *rsp, int cpu, bool lazy) 3027 { 3028 unsigned long flags; 3029 struct rcu_data *rdp; 3030 3031 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */ 3032 if (debug_rcu_head_queue(head)) { 3033 /* Probable double call_rcu(), so leak the callback. */ 3034 WRITE_ONCE(head->func, rcu_leak_callback); 3035 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n"); 3036 return; 3037 } 3038 head->func = func; 3039 head->next = NULL; 3040 3041 /* 3042 * Opportunistically note grace-period endings and beginnings. 3043 * Note that we might see a beginning right after we see an 3044 * end, but never vice versa, since this CPU has to pass through 3045 * a quiescent state betweentimes. 3046 */ 3047 local_irq_save(flags); 3048 rdp = this_cpu_ptr(rsp->rda); 3049 3050 /* Add the callback to our list. */ 3051 if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) { 3052 int offline; 3053 3054 if (cpu != -1) 3055 rdp = per_cpu_ptr(rsp->rda, cpu); 3056 if (likely(rdp->mynode)) { 3057 /* Post-boot, so this should be for a no-CBs CPU. */ 3058 offline = !__call_rcu_nocb(rdp, head, lazy, flags); 3059 WARN_ON_ONCE(offline); 3060 /* Offline CPU, _call_rcu() illegal, leak callback. */ 3061 local_irq_restore(flags); 3062 return; 3063 } 3064 /* 3065 * Very early boot, before rcu_init(). Initialize if needed 3066 * and then drop through to queue the callback. 3067 */ 3068 BUG_ON(cpu != -1); 3069 WARN_ON_ONCE(!rcu_is_watching()); 3070 if (!likely(rdp->nxtlist)) 3071 init_default_callback_list(rdp); 3072 } 3073 WRITE_ONCE(rdp->qlen, rdp->qlen + 1); 3074 if (lazy) 3075 rdp->qlen_lazy++; 3076 else 3077 rcu_idle_count_callbacks_posted(); 3078 smp_mb(); /* Count before adding callback for rcu_barrier(). */ 3079 *rdp->nxttail[RCU_NEXT_TAIL] = head; 3080 rdp->nxttail[RCU_NEXT_TAIL] = &head->next; 3081 3082 if (__is_kfree_rcu_offset((unsigned long)func)) 3083 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func, 3084 rdp->qlen_lazy, rdp->qlen); 3085 else 3086 trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen); 3087 3088 /* Go handle any RCU core processing required. */ 3089 __call_rcu_core(rsp, rdp, head, flags); 3090 local_irq_restore(flags); 3091 } 3092 3093 /* 3094 * Queue an RCU-sched callback for invocation after a grace period. 3095 */ 3096 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) 3097 { 3098 __call_rcu(head, func, &rcu_sched_state, -1, 0); 3099 } 3100 EXPORT_SYMBOL_GPL(call_rcu_sched); 3101 3102 /* 3103 * Queue an RCU callback for invocation after a quicker grace period. 
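 *
 * A minimal usage sketch follows; the "struct foo" type, the foo_reclaim()
 * function, and the "fp" pointer are hypothetical names used only for
 * illustration:
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	... after fp has been unlinked from all reader-visible lists ...
 *	call_rcu_bh(&fp->rcu, foo_reclaim);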
3104 */ 3105 void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) 3106 { 3107 __call_rcu(head, func, &rcu_bh_state, -1, 0); 3108 } 3109 EXPORT_SYMBOL_GPL(call_rcu_bh); 3110 3111 /* 3112 * Queue an RCU callback for lazy invocation after a grace period. 3113 * This will likely be later named something like "call_rcu_lazy()", 3114 * but this change will require some way of tagging the lazy RCU 3115 * callbacks in the list of pending callbacks. Until then, this 3116 * function may only be called from __kfree_rcu(). 3117 */ 3118 void kfree_call_rcu(struct rcu_head *head, 3119 rcu_callback_t func) 3120 { 3121 __call_rcu(head, func, rcu_state_p, -1, 1); 3122 } 3123 EXPORT_SYMBOL_GPL(kfree_call_rcu); 3124 3125 /* 3126 * Because a context switch is a grace period for RCU-sched and RCU-bh, 3127 * any blocking grace-period wait automatically implies a grace period 3128 * if there is only one CPU online at any point in time during execution 3129 * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to 3130 * occasionally incorrectly indicate that there are multiple CPUs online 3131 * when there was in fact only one the whole time, as this just adds 3132 * some overhead: RCU still operates correctly. 3133 */ 3134 static inline int rcu_blocking_is_gp(void) 3135 { 3136 int ret; 3137 3138 might_sleep(); /* Check for RCU read-side critical section. */ 3139 preempt_disable(); 3140 ret = num_online_cpus() <= 1; 3141 preempt_enable(); 3142 return ret; 3143 } 3144 3145 /** 3146 * synchronize_sched - wait until an rcu-sched grace period has elapsed. 3147 * 3148 * Control will return to the caller some time after a full rcu-sched 3149 * grace period has elapsed, in other words after all currently executing 3150 * rcu-sched read-side critical sections have completed. These read-side 3151 * critical sections are delimited by rcu_read_lock_sched() and 3152 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), 3153 * local_irq_disable(), and so on may be used in place of 3154 * rcu_read_lock_sched(). 3155 * 3156 * This means that all preempt_disable code sequences, including NMI and 3157 * non-threaded hardware-interrupt handlers, in progress on entry will 3158 * have completed before this primitive returns. However, this does not 3159 * guarantee that softirq handlers will have completed, since in some 3160 * kernels, these handlers can run in process context, and can block. 3161 * 3162 * Note that this guarantee implies further memory-ordering guarantees. 3163 * On systems with more than one CPU, when synchronize_sched() returns, 3164 * each CPU is guaranteed to have executed a full memory barrier since the 3165 * end of its last RCU-sched read-side critical section whose beginning 3166 * preceded the call to synchronize_sched(). In addition, each CPU having 3167 * an RCU read-side critical section that extends beyond the return from 3168 * synchronize_sched() is guaranteed to have executed a full memory barrier 3169 * after the beginning of synchronize_sched() and before the beginning of 3170 * that RCU read-side critical section. Note that these guarantees include 3171 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 3172 * that are executing in the kernel.
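 *
 * As an illustrative (and deliberately simplified) example of these
 * ordering guarantees, suppose that x and y are both initially zero and
 * that the "updater" and "reader" labels below are hypothetical:
 *
 *	updater:			reader:
 *	WRITE_ONCE(x, 1);		rcu_read_lock_sched();
 *	synchronize_sched();		r1 = READ_ONCE(y);
 *	WRITE_ONCE(y, 1);		r2 = READ_ONCE(x);
 *					rcu_read_unlock_sched();
 *
 * The outcome r1 == 1 && r2 == 0 cannot happen: if the reader sees the
 * write to y, which follows the grace period, then its read-side critical
 * section must in its entirety follow the beginning of that grace period,
 * and so must also observe the earlier write to x.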
3173 * 3174 * Furthermore, if CPU A invoked synchronize_sched(), which returned 3175 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 3176 * to have executed a full memory barrier during the execution of 3177 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but 3178 * again only if the system has more than one CPU). 3179 * 3180 * This primitive provides the guarantees made by the (now removed) 3181 * synchronize_kernel() API. In contrast, synchronize_rcu() only 3182 * guarantees that rcu_read_lock() sections will have completed. 3183 * In "classic RCU", these two guarantees happen to be one and 3184 * the same, but can differ in realtime RCU implementations. 3185 */ 3186 void synchronize_sched(void) 3187 { 3188 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3189 lock_is_held(&rcu_lock_map) || 3190 lock_is_held(&rcu_sched_lock_map), 3191 "Illegal synchronize_sched() in RCU-sched read-side critical section"); 3192 if (rcu_blocking_is_gp()) 3193 return; 3194 if (rcu_gp_is_expedited()) 3195 synchronize_sched_expedited(); 3196 else 3197 wait_rcu_gp(call_rcu_sched); 3198 } 3199 EXPORT_SYMBOL_GPL(synchronize_sched); 3200 3201 /** 3202 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. 3203 * 3204 * Control will return to the caller some time after a full rcu_bh grace 3205 * period has elapsed, in other words after all currently executing rcu_bh 3206 * read-side critical sections have completed. RCU read-side critical 3207 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), 3208 * and may be nested. 3209 * 3210 * See the description of synchronize_sched() for more detailed information 3211 * on memory ordering guarantees. 3212 */ 3213 void synchronize_rcu_bh(void) 3214 { 3215 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3216 lock_is_held(&rcu_lock_map) || 3217 lock_is_held(&rcu_sched_lock_map), 3218 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section"); 3219 if (rcu_blocking_is_gp()) 3220 return; 3221 if (rcu_gp_is_expedited()) 3222 synchronize_rcu_bh_expedited(); 3223 else 3224 wait_rcu_gp(call_rcu_bh); 3225 } 3226 EXPORT_SYMBOL_GPL(synchronize_rcu_bh); 3227 3228 /** 3229 * get_state_synchronize_rcu - Snapshot current RCU state 3230 * 3231 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3232 * to determine whether or not a full grace period has elapsed in the 3233 * meantime. 3234 */ 3235 unsigned long get_state_synchronize_rcu(void) 3236 { 3237 /* 3238 * Any prior manipulation of RCU-protected data must happen 3239 * before the load from ->gpnum. 3240 */ 3241 smp_mb(); /* ^^^ */ 3242 3243 /* 3244 * Make sure this load happens before the purportedly 3245 * time-consuming work between get_state_synchronize_rcu() 3246 * and cond_synchronize_rcu(). 3247 */ 3248 return smp_load_acquire(&rcu_state_p->gpnum); 3249 } 3250 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 3251 3252 /** 3253 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 3254 * 3255 * @oldstate: return value from earlier call to get_state_synchronize_rcu() 3256 * 3257 * If a full RCU grace period has elapsed since the earlier call to 3258 * get_state_synchronize_rcu(), just return. Otherwise, invoke 3259 * synchronize_rcu() to wait for a full grace period. 3260 * 3261 * Yes, this function does not take counter wrap into account. But 3262 * counter wrap is harmless. 
If the counter wraps, we have waited for 3263 * more than 2 billion grace periods (and way more on a 64-bit system!), 3264 * so waiting for one additional grace period should be just fine. 3265 */ 3266 void cond_synchronize_rcu(unsigned long oldstate) 3267 { 3268 unsigned long newstate; 3269 3270 /* 3271 * Ensure that this load happens before any RCU-destructive 3272 * actions the caller might carry out after we return. 3273 */ 3274 newstate = smp_load_acquire(&rcu_state_p->completed); 3275 if (ULONG_CMP_GE(oldstate, newstate)) 3276 synchronize_rcu(); 3277 } 3278 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 3279 3280 /** 3281 * get_state_synchronize_sched - Snapshot current RCU-sched state 3282 * 3283 * Returns a cookie that is used by a later call to cond_synchronize_sched() 3284 * to determine whether or not a full grace period has elapsed in the 3285 * meantime. 3286 */ 3287 unsigned long get_state_synchronize_sched(void) 3288 { 3289 /* 3290 * Any prior manipulation of RCU-protected data must happen 3291 * before the load from ->gpnum. 3292 */ 3293 smp_mb(); /* ^^^ */ 3294 3295 /* 3296 * Make sure this load happens before the purportedly 3297 * time-consuming work between get_state_synchronize_sched() 3298 * and cond_synchronize_sched(). 3299 */ 3300 return smp_load_acquire(&rcu_sched_state.gpnum); 3301 } 3302 EXPORT_SYMBOL_GPL(get_state_synchronize_sched); 3303 3304 /** 3305 * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period 3306 * 3307 * @oldstate: return value from earlier call to get_state_synchronize_sched() 3308 * 3309 * If a full RCU-sched grace period has elapsed since the earlier call to 3310 * get_state_synchronize_sched(), just return. Otherwise, invoke 3311 * synchronize_sched() to wait for a full grace period. 3312 * 3313 * Yes, this function does not take counter wrap into account. But 3314 * counter wrap is harmless. If the counter wraps, we have waited for 3315 * more than 2 billion grace periods (and way more on a 64-bit system!), 3316 * so waiting for one additional grace period should be just fine. 3317 */ 3318 void cond_synchronize_sched(unsigned long oldstate) 3319 { 3320 unsigned long newstate; 3321 3322 /* 3323 * Ensure that this load happens before any RCU-destructive 3324 * actions the caller might carry out after we return. 3325 */ 3326 newstate = smp_load_acquire(&rcu_sched_state.completed); 3327 if (ULONG_CMP_GE(oldstate, newstate)) 3328 synchronize_sched(); 3329 } 3330 EXPORT_SYMBOL_GPL(cond_synchronize_sched); 3331 3332 /* Adjust sequence number for start of update-side operation. */ 3333 static void rcu_seq_start(unsigned long *sp) 3334 { 3335 WRITE_ONCE(*sp, *sp + 1); 3336 smp_mb(); /* Ensure update-side operation after counter increment. */ 3337 WARN_ON_ONCE(!(*sp & 0x1)); 3338 } 3339 3340 /* Adjust sequence number for end of update-side operation. */ 3341 static void rcu_seq_end(unsigned long *sp) 3342 { 3343 smp_mb(); /* Ensure update-side operation before counter increment. */ 3344 WRITE_ONCE(*sp, *sp + 1); 3345 WARN_ON_ONCE(*sp & 0x1); 3346 } 3347 3348 /* Take a snapshot of the update side's sequence number. */ 3349 static unsigned long rcu_seq_snap(unsigned long *sp) 3350 { 3351 unsigned long s; 3352 3353 s = (READ_ONCE(*sp) + 3) & ~0x1; 3354 smp_mb(); /* Above access must not bleed into critical section. */ 3355 return s; 3356 } 3357 3358 /* 3359 * Given a snapshot from rcu_seq_snap(), determine whether or not a 3360 * full update-side operation has occurred. 
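 *
 * (The sequence counter is even when no update-side operation is in
 * progress and odd while one is running, so rcu_seq_snap() rounds up to
 * the next even value that cannot be reached without a full operation
 * completing after the snapshot.  For example, if *sp is 4 (idle), the
 * snapshot is 6, and rcu_seq_done() does not report completion until a
 * start/end pair has advanced *sp to 6; if *sp is 5 (in progress), the
 * snapshot is 8, requiring the current operation to end and a fresh one
 * to start and end.)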
3361 */ 3362 static bool rcu_seq_done(unsigned long *sp, unsigned long s) 3363 { 3364 return ULONG_CMP_GE(READ_ONCE(*sp), s); 3365 } 3366 3367 /* Wrapper functions for expedited grace periods. */ 3368 static void rcu_exp_gp_seq_start(struct rcu_state *rsp) 3369 { 3370 rcu_seq_start(&rsp->expedited_sequence); 3371 } 3372 static void rcu_exp_gp_seq_end(struct rcu_state *rsp) 3373 { 3374 rcu_seq_end(&rsp->expedited_sequence); 3375 smp_mb(); /* Ensure that consecutive grace periods serialize. */ 3376 } 3377 static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp) 3378 { 3379 smp_mb(); /* Caller's modifications seen first by other CPUs. */ 3380 return rcu_seq_snap(&rsp->expedited_sequence); 3381 } 3382 static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s) 3383 { 3384 return rcu_seq_done(&rsp->expedited_sequence, s); 3385 } 3386 3387 /* 3388 * Reset the ->expmaskinit values in the rcu_node tree to reflect any 3389 * recent CPU-online activity. Note that these masks are not cleared 3390 * when CPUs go offline, so they reflect the union of all CPUs that have 3391 * ever been online. This means that this function normally takes its 3392 * no-work-to-do fastpath. 3393 */ 3394 static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp) 3395 { 3396 bool done; 3397 unsigned long flags; 3398 unsigned long mask; 3399 unsigned long oldmask; 3400 int ncpus = READ_ONCE(rsp->ncpus); 3401 struct rcu_node *rnp; 3402 struct rcu_node *rnp_up; 3403 3404 /* If no new CPUs onlined since last time, nothing to do. */ 3405 if (likely(ncpus == rsp->ncpus_snap)) 3406 return; 3407 rsp->ncpus_snap = ncpus; 3408 3409 /* 3410 * Each pass through the following loop propagates newly onlined 3411 * CPUs for the current rcu_node structure up the rcu_node tree. 3412 */ 3413 rcu_for_each_leaf_node(rsp, rnp) { 3414 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3415 if (rnp->expmaskinit == rnp->expmaskinitnext) { 3416 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3417 continue; /* No new CPUs, nothing to do. */ 3418 } 3419 3420 /* Update this node's mask, track old value for propagation. */ 3421 oldmask = rnp->expmaskinit; 3422 rnp->expmaskinit = rnp->expmaskinitnext; 3423 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3424 3425 /* If was already nonzero, nothing to propagate. */ 3426 if (oldmask) 3427 continue; 3428 3429 /* Propagate the new CPU up the tree. */ 3430 mask = rnp->grpmask; 3431 rnp_up = rnp->parent; 3432 done = false; 3433 while (rnp_up) { 3434 raw_spin_lock_irqsave_rcu_node(rnp_up, flags); 3435 if (rnp_up->expmaskinit) 3436 done = true; 3437 rnp_up->expmaskinit |= mask; 3438 raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags); 3439 if (done) 3440 break; 3441 mask = rnp_up->grpmask; 3442 rnp_up = rnp_up->parent; 3443 } 3444 } 3445 } 3446 3447 /* 3448 * Reset the ->expmask values in the rcu_node tree in preparation for 3449 * a new expedited grace period. 
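 * (A newly onlined CPU is first recorded in its leaf rcu_node structure's
 * ->expmaskinitnext field; sync_exp_reset_tree_hotplug() above then folds
 * that bit into ->expmaskinit and propagates the leaf's ->grpmask up
 * toward the root, so each new expedited grace period starts from masks
 * covering every CPU that has ever been online.)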
3450 */ 3451 static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp) 3452 { 3453 unsigned long flags; 3454 struct rcu_node *rnp; 3455 3456 sync_exp_reset_tree_hotplug(rsp); 3457 rcu_for_each_node_breadth_first(rsp, rnp) { 3458 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3459 WARN_ON_ONCE(rnp->expmask); 3460 rnp->expmask = rnp->expmaskinit; 3461 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3462 } 3463 } 3464 3465 /* 3466 * Return non-zero if there is no RCU expedited grace period in progress 3467 * for the specified rcu_node structure, in other words, if all CPUs and 3468 * tasks covered by the specified rcu_node structure have done their bit 3469 * for the current expedited grace period. Works only for preemptible 3470 * RCU -- other RCU implementation use other means. 3471 * 3472 * Caller must hold the root rcu_node's exp_funnel_mutex. 3473 */ 3474 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) 3475 { 3476 return rnp->exp_tasks == NULL && 3477 READ_ONCE(rnp->expmask) == 0; 3478 } 3479 3480 /* 3481 * Report the exit from RCU read-side critical section for the last task 3482 * that queued itself during or before the current expedited preemptible-RCU 3483 * grace period. This event is reported either to the rcu_node structure on 3484 * which the task was queued or to one of that rcu_node structure's ancestors, 3485 * recursively up the tree. (Calm down, calm down, we do the recursion 3486 * iteratively!) 3487 * 3488 * Caller must hold the root rcu_node's exp_funnel_mutex and the 3489 * specified rcu_node structure's ->lock. 3490 */ 3491 static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, 3492 bool wake, unsigned long flags) 3493 __releases(rnp->lock) 3494 { 3495 unsigned long mask; 3496 3497 for (;;) { 3498 if (!sync_rcu_preempt_exp_done(rnp)) { 3499 if (!rnp->expmask) 3500 rcu_initiate_boost(rnp, flags); 3501 else 3502 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3503 break; 3504 } 3505 if (rnp->parent == NULL) { 3506 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3507 if (wake) { 3508 smp_mb(); /* EGP done before wake_up(). */ 3509 swake_up(&rsp->expedited_wq); 3510 } 3511 break; 3512 } 3513 mask = rnp->grpmask; 3514 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */ 3515 rnp = rnp->parent; 3516 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */ 3517 WARN_ON_ONCE(!(rnp->expmask & mask)); 3518 rnp->expmask &= ~mask; 3519 } 3520 } 3521 3522 /* 3523 * Report expedited quiescent state for specified node. This is a 3524 * lock-acquisition wrapper function for __rcu_report_exp_rnp(). 3525 * 3526 * Caller must hold the root rcu_node's exp_funnel_mutex. 3527 */ 3528 static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp, 3529 struct rcu_node *rnp, bool wake) 3530 { 3531 unsigned long flags; 3532 3533 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3534 __rcu_report_exp_rnp(rsp, rnp, wake, flags); 3535 } 3536 3537 /* 3538 * Report expedited quiescent state for multiple CPUs, all covered by the 3539 * specified leaf rcu_node structure. Caller must hold the root 3540 * rcu_node's exp_funnel_mutex. 3541 */ 3542 static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp, 3543 unsigned long mask, bool wake) 3544 { 3545 unsigned long flags; 3546 3547 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3548 if (!(rnp->expmask & mask)) { 3549 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3550 return; 3551 } 3552 rnp->expmask &= ~mask; 3553 __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. 
*/ 3554 } 3555 3556 /* 3557 * Report expedited quiescent state for specified rcu_data (CPU). 3558 * Caller must hold the root rcu_node's exp_funnel_mutex. 3559 */ 3560 static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp, 3561 bool wake) 3562 { 3563 rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake); 3564 } 3565 3566 /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */ 3567 static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp, 3568 struct rcu_data *rdp, 3569 atomic_long_t *stat, unsigned long s) 3570 { 3571 if (rcu_exp_gp_seq_done(rsp, s)) { 3572 if (rnp) 3573 mutex_unlock(&rnp->exp_funnel_mutex); 3574 else if (rdp) 3575 mutex_unlock(&rdp->exp_funnel_mutex); 3576 /* Ensure test happens before caller kfree(). */ 3577 smp_mb__before_atomic(); /* ^^^ */ 3578 atomic_long_inc(stat); 3579 return true; 3580 } 3581 return false; 3582 } 3583 3584 /* 3585 * Funnel-lock acquisition for expedited grace periods. Returns a 3586 * pointer to the root rcu_node structure, or NULL if some other 3587 * task did the expedited grace period for us. 3588 */ 3589 static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s) 3590 { 3591 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); 3592 struct rcu_node *rnp0; 3593 struct rcu_node *rnp1 = NULL; 3594 3595 /* 3596 * First try directly acquiring the root lock in order to reduce 3597 * latency in the common case where expedited grace periods are 3598 * rare. We check mutex_is_locked() to avoid pathological levels of 3599 * memory contention on ->exp_funnel_mutex in the heavy-load case. 3600 */ 3601 rnp0 = rcu_get_root(rsp); 3602 if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) { 3603 if (mutex_trylock(&rnp0->exp_funnel_mutex)) { 3604 if (sync_exp_work_done(rsp, rnp0, NULL, 3605 &rdp->expedited_workdone0, s)) 3606 return NULL; 3607 return rnp0; 3608 } 3609 } 3610 3611 /* 3612 * Each pass through the following loop works its way 3613 * up the rcu_node tree, returning if others have done the 3614 * work or otherwise falls through holding the root rnp's 3615 * ->exp_funnel_mutex. The mapping from CPU to rcu_node structure 3616 * can be inexact, as it is just promoting locality and is not 3617 * strictly needed for correctness. 3618 */ 3619 if (sync_exp_work_done(rsp, NULL, NULL, &rdp->expedited_workdone1, s)) 3620 return NULL; 3621 mutex_lock(&rdp->exp_funnel_mutex); 3622 rnp0 = rdp->mynode; 3623 for (; rnp0 != NULL; rnp0 = rnp0->parent) { 3624 if (sync_exp_work_done(rsp, rnp1, rdp, 3625 &rdp->expedited_workdone2, s)) 3626 return NULL; 3627 mutex_lock(&rnp0->exp_funnel_mutex); 3628 if (rnp1) 3629 mutex_unlock(&rnp1->exp_funnel_mutex); 3630 else 3631 mutex_unlock(&rdp->exp_funnel_mutex); 3632 rnp1 = rnp0; 3633 } 3634 if (sync_exp_work_done(rsp, rnp1, rdp, 3635 &rdp->expedited_workdone3, s)) 3636 return NULL; 3637 return rnp1; 3638 } 3639 3640 /* Invoked on each online non-idle CPU for expedited quiescent state. */ 3641 static void sync_sched_exp_handler(void *data) 3642 { 3643 struct rcu_data *rdp; 3644 struct rcu_node *rnp; 3645 struct rcu_state *rsp = data; 3646 3647 rdp = this_cpu_ptr(rsp->rda); 3648 rnp = rdp->mynode; 3649 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || 3650 __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) 3651 return; 3652 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true); 3653 resched_cpu(smp_processor_id()); 3654 } 3655 3656 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. 
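 * (If an expedited grace period began while this CPU was coming online,
 * the CPU's bit may still be set in its leaf's ->expmask, in which case
 * the IPI is re-sent so that the newly onlined CPU reports its quiescent
 * state instead of stalling the expedited grace period.)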
*/ 3657 static void sync_sched_exp_online_cleanup(int cpu) 3658 { 3659 struct rcu_data *rdp; 3660 int ret; 3661 struct rcu_node *rnp; 3662 struct rcu_state *rsp = &rcu_sched_state; 3663 3664 rdp = per_cpu_ptr(rsp->rda, cpu); 3665 rnp = rdp->mynode; 3666 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask)) 3667 return; 3668 ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0); 3669 WARN_ON_ONCE(ret); 3670 } 3671 3672 /* 3673 * Select the nodes that the upcoming expedited grace period needs 3674 * to wait for. 3675 */ 3676 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, 3677 smp_call_func_t func) 3678 { 3679 int cpu; 3680 unsigned long flags; 3681 unsigned long mask; 3682 unsigned long mask_ofl_test; 3683 unsigned long mask_ofl_ipi; 3684 int ret; 3685 struct rcu_node *rnp; 3686 3687 sync_exp_reset_tree(rsp); 3688 rcu_for_each_leaf_node(rsp, rnp) { 3689 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3690 3691 /* Each pass checks a CPU for identity, offline, and idle. */ 3692 mask_ofl_test = 0; 3693 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { 3694 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 3695 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 3696 3697 if (raw_smp_processor_id() == cpu || 3698 !(atomic_add_return(0, &rdtp->dynticks) & 0x1)) 3699 mask_ofl_test |= rdp->grpmask; 3700 } 3701 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test; 3702 3703 /* 3704 * Need to wait for any blocked tasks as well. Note that 3705 * additional blocking tasks will also block the expedited 3706 * GP until such time as the ->expmask bits are cleared. 3707 */ 3708 if (rcu_preempt_has_tasks(rnp)) 3709 rnp->exp_tasks = rnp->blkd_tasks.next; 3710 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3711 3712 /* IPI the remaining CPUs for expedited quiescent state. */ 3713 mask = 1; 3714 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) { 3715 if (!(mask_ofl_ipi & mask)) 3716 continue; 3717 retry_ipi: 3718 ret = smp_call_function_single(cpu, func, rsp, 0); 3719 if (!ret) { 3720 mask_ofl_ipi &= ~mask; 3721 continue; 3722 } 3723 /* Failed, raced with offline. */ 3724 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3725 if (cpu_online(cpu) && 3726 (rnp->expmask & mask)) { 3727 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3728 schedule_timeout_uninterruptible(1); 3729 if (cpu_online(cpu) && 3730 (rnp->expmask & mask)) 3731 goto retry_ipi; 3732 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3733 } 3734 if (!(rnp->expmask & mask)) 3735 mask_ofl_ipi &= ~mask; 3736 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3737 } 3738 /* Report quiescent states for those that went offline. */ 3739 mask_ofl_test |= mask_ofl_ipi; 3740 if (mask_ofl_test) 3741 rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false); 3742 } 3743 } 3744 3745 static void synchronize_sched_expedited_wait(struct rcu_state *rsp) 3746 { 3747 int cpu; 3748 unsigned long jiffies_stall; 3749 unsigned long jiffies_start; 3750 unsigned long mask; 3751 int ndetected; 3752 struct rcu_node *rnp; 3753 struct rcu_node *rnp_root = rcu_get_root(rsp); 3754 int ret; 3755 3756 jiffies_stall = rcu_jiffies_till_stall_check(); 3757 jiffies_start = jiffies; 3758 3759 for (;;) { 3760 ret = swait_event_timeout( 3761 rsp->expedited_wq, 3762 sync_rcu_preempt_exp_done(rnp_root), 3763 jiffies_stall); 3764 if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root)) 3765 return; 3766 if (ret < 0) { 3767 /* Hit a signal, disable CPU stall warnings. 
*/ 3768 swait_event(rsp->expedited_wq, 3769 sync_rcu_preempt_exp_done(rnp_root)); 3770 return; 3771 } 3772 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {", 3773 rsp->name); 3774 ndetected = 0; 3775 rcu_for_each_leaf_node(rsp, rnp) { 3776 ndetected = rcu_print_task_exp_stall(rnp); 3777 mask = 1; 3778 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) { 3779 struct rcu_data *rdp; 3780 3781 if (!(rnp->expmask & mask)) 3782 continue; 3783 ndetected++; 3784 rdp = per_cpu_ptr(rsp->rda, cpu); 3785 pr_cont(" %d-%c%c%c", cpu, 3786 "O."[cpu_online(cpu)], 3787 "o."[!!(rdp->grpmask & rnp->expmaskinit)], 3788 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]); 3789 } 3790 mask <<= 1; 3791 } 3792 pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n", 3793 jiffies - jiffies_start, rsp->expedited_sequence, 3794 rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]); 3795 if (!ndetected) { 3796 pr_err("blocking rcu_node structures:"); 3797 rcu_for_each_node_breadth_first(rsp, rnp) { 3798 if (rnp == rnp_root) 3799 continue; /* printed unconditionally */ 3800 if (sync_rcu_preempt_exp_done(rnp)) 3801 continue; 3802 pr_cont(" l=%u:%d-%d:%#lx/%c", 3803 rnp->level, rnp->grplo, rnp->grphi, 3804 rnp->expmask, 3805 ".T"[!!rnp->exp_tasks]); 3806 } 3807 pr_cont("\n"); 3808 } 3809 rcu_for_each_leaf_node(rsp, rnp) { 3810 mask = 1; 3811 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) { 3812 if (!(rnp->expmask & mask)) 3813 continue; 3814 dump_cpu_task(cpu); 3815 } 3816 } 3817 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3; 3818 } 3819 } 3820 3821 /** 3822 * synchronize_sched_expedited - Brute-force RCU-sched grace period 3823 * 3824 * Wait for an RCU-sched grace period to elapse, but use a "big hammer" 3825 * approach to force the grace period to end quickly. This consumes 3826 * significant time on all CPUs and is unfriendly to real-time workloads, 3827 * so is thus not recommended for any sort of common-case code. In fact, 3828 * if you are using synchronize_sched_expedited() in a loop, please 3829 * restructure your code to batch your updates, and then use a single 3830 * synchronize_sched() instead. 3831 * 3832 * This implementation can be thought of as an application of sequence 3833 * locking to expedited grace periods, but using the sequence counter to 3834 * determine when someone else has already done the work instead of for 3835 * retrying readers. 3836 */ 3837 void synchronize_sched_expedited(void) 3838 { 3839 unsigned long s; 3840 struct rcu_node *rnp; 3841 struct rcu_state *rsp = &rcu_sched_state; 3842 3843 /* If only one CPU, this is automatically a grace period. */ 3844 if (rcu_blocking_is_gp()) 3845 return; 3846 3847 /* If expedited grace periods are prohibited, fall back to normal. */ 3848 if (rcu_gp_is_normal()) { 3849 wait_rcu_gp(call_rcu_sched); 3850 return; 3851 } 3852 3853 /* Take a snapshot of the sequence number. */ 3854 s = rcu_exp_gp_seq_snap(rsp); 3855 3856 rnp = exp_funnel_lock(rsp, s); 3857 if (rnp == NULL) 3858 return; /* Someone else did our work for us. */ 3859 3860 rcu_exp_gp_seq_start(rsp); 3861 sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler); 3862 synchronize_sched_expedited_wait(rsp); 3863 3864 rcu_exp_gp_seq_end(rsp); 3865 mutex_unlock(&rnp->exp_funnel_mutex); 3866 } 3867 EXPORT_SYMBOL_GPL(synchronize_sched_expedited); 3868 3869 /* 3870 * Check to see if there is any immediate RCU-related work to be done 3871 * by the current CPU, for the specified type of RCU, returning 1 if so. 
3872 * The checks are in order of increasing expense: checks that can be 3873 * carried out against CPU-local state are performed first. However, 3874 * we must check for CPU stalls first, else we might not get a chance. 3875 */ 3876 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) 3877 { 3878 struct rcu_node *rnp = rdp->mynode; 3879 3880 rdp->n_rcu_pending++; 3881 3882 /* Check for CPU stalls, if enabled. */ 3883 check_cpu_stall(rsp, rdp); 3884 3885 /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */ 3886 if (rcu_nohz_full_cpu(rsp)) 3887 return 0; 3888 3889 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3890 if (rcu_scheduler_fully_active && 3891 rdp->core_needs_qs && rdp->cpu_no_qs.b.norm && 3892 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) { 3893 rdp->n_rp_core_needs_qs++; 3894 } else if (rdp->core_needs_qs && 3895 (!rdp->cpu_no_qs.b.norm || 3896 rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) { 3897 rdp->n_rp_report_qs++; 3898 return 1; 3899 } 3900 3901 /* Does this CPU have callbacks ready to invoke? */ 3902 if (cpu_has_callbacks_ready_to_invoke(rdp)) { 3903 rdp->n_rp_cb_ready++; 3904 return 1; 3905 } 3906 3907 /* Has RCU gone idle with this CPU needing another grace period? */ 3908 if (cpu_needs_another_gp(rsp, rdp)) { 3909 rdp->n_rp_cpu_needs_gp++; 3910 return 1; 3911 } 3912 3913 /* Has another RCU grace period completed? */ 3914 if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ 3915 rdp->n_rp_gp_completed++; 3916 return 1; 3917 } 3918 3919 /* Has a new RCU grace period started? */ 3920 if (READ_ONCE(rnp->gpnum) != rdp->gpnum || 3921 unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */ 3922 rdp->n_rp_gp_started++; 3923 return 1; 3924 } 3925 3926 /* Does this CPU need a deferred NOCB wakeup? */ 3927 if (rcu_nocb_need_deferred_wakeup(rdp)) { 3928 rdp->n_rp_nocb_defer_wakeup++; 3929 return 1; 3930 } 3931 3932 /* nothing to do */ 3933 rdp->n_rp_need_nothing++; 3934 return 0; 3935 } 3936 3937 /* 3938 * Check to see if there is any immediate RCU-related work to be done 3939 * by the current CPU, returning 1 if so. This function is part of the 3940 * RCU implementation; it is -not- an exported member of the RCU API. 3941 */ 3942 static int rcu_pending(void) 3943 { 3944 struct rcu_state *rsp; 3945 3946 for_each_rcu_flavor(rsp) 3947 if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda))) 3948 return 1; 3949 return 0; 3950 } 3951 3952 /* 3953 * Return true if the specified CPU has any callback. If all_lazy is 3954 * non-NULL, store an indication of whether all callbacks are lazy. 3955 * (If there are no callbacks, all of them are deemed to be lazy.) 3956 */ 3957 static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy) 3958 { 3959 bool al = true; 3960 bool hc = false; 3961 struct rcu_data *rdp; 3962 struct rcu_state *rsp; 3963 3964 for_each_rcu_flavor(rsp) { 3965 rdp = this_cpu_ptr(rsp->rda); 3966 if (!rdp->nxtlist) 3967 continue; 3968 hc = true; 3969 if (rdp->qlen != rdp->qlen_lazy || !all_lazy) { 3970 al = false; 3971 break; 3972 } 3973 } 3974 if (all_lazy) 3975 *all_lazy = al; 3976 return hc; 3977 } 3978 3979 /* 3980 * Helper function for _rcu_barrier() tracing. If tracing is disabled, 3981 * the compiler is expected to optimize this away. 
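 * (Each event records the flavor name, a stage string such as "Begin",
 * "IRQ", "CB", or "LastCB", the CPU involved (or -1), the current value
 * of ->barrier_cpu_count, and the barrier sequence number, which together
 * allow the phases of a given _rcu_barrier() invocation to be
 * reconstructed from a trace.)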
3982 */ 3983 static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s, 3984 int cpu, unsigned long done) 3985 { 3986 trace_rcu_barrier(rsp->name, s, cpu, 3987 atomic_read(&rsp->barrier_cpu_count), done); 3988 } 3989 3990 /* 3991 * RCU callback function for _rcu_barrier(). If we are last, wake 3992 * up the task executing _rcu_barrier(). 3993 */ 3994 static void rcu_barrier_callback(struct rcu_head *rhp) 3995 { 3996 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head); 3997 struct rcu_state *rsp = rdp->rsp; 3998 3999 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) { 4000 _rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence); 4001 complete(&rsp->barrier_completion); 4002 } else { 4003 _rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence); 4004 } 4005 } 4006 4007 /* 4008 * Called with preemption disabled, and from cross-cpu IRQ context. 4009 */ 4010 static void rcu_barrier_func(void *type) 4011 { 4012 struct rcu_state *rsp = type; 4013 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 4014 4015 _rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence); 4016 atomic_inc(&rsp->barrier_cpu_count); 4017 rsp->call(&rdp->barrier_head, rcu_barrier_callback); 4018 } 4019 4020 /* 4021 * Orchestrate the specified type of RCU barrier, waiting for all 4022 * RCU callbacks of the specified type to complete. 4023 */ 4024 static void _rcu_barrier(struct rcu_state *rsp) 4025 { 4026 int cpu; 4027 struct rcu_data *rdp; 4028 unsigned long s = rcu_seq_snap(&rsp->barrier_sequence); 4029 4030 _rcu_barrier_trace(rsp, "Begin", -1, s); 4031 4032 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 4033 mutex_lock(&rsp->barrier_mutex); 4034 4035 /* Did someone else do our work for us? */ 4036 if (rcu_seq_done(&rsp->barrier_sequence, s)) { 4037 _rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence); 4038 smp_mb(); /* caller's subsequent code after above check. */ 4039 mutex_unlock(&rsp->barrier_mutex); 4040 return; 4041 } 4042 4043 /* Mark the start of the barrier operation. */ 4044 rcu_seq_start(&rsp->barrier_sequence); 4045 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence); 4046 4047 /* 4048 * Initialize the count to one rather than to zero in order to 4049 * avoid a too-soon return to zero in case of a short grace period 4050 * (or preemption of this task). Exclude CPU-hotplug operations 4051 * to ensure that no offline CPU has callbacks queued. 4052 */ 4053 init_completion(&rsp->barrier_completion); 4054 atomic_set(&rsp->barrier_cpu_count, 1); 4055 get_online_cpus(); 4056 4057 /* 4058 * Force each CPU with callbacks to register a new callback. 4059 * When that callback is invoked, we will know that all of the 4060 * corresponding CPU's preceding callbacks have been invoked. 
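 * Three cases are handled below: no-CBs CPUs have the barrier callback
 * queued for them directly via __call_rcu(), online CPUs with callbacks
 * pending are sent an IPI (rcu_barrier_func) that queues the callback
 * locally, and CPUs with no callbacks queued are simply skipped.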
4061 */ 4062 for_each_possible_cpu(cpu) { 4063 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu)) 4064 continue; 4065 rdp = per_cpu_ptr(rsp->rda, cpu); 4066 if (rcu_is_nocb_cpu(cpu)) { 4067 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) { 4068 _rcu_barrier_trace(rsp, "OfflineNoCB", cpu, 4069 rsp->barrier_sequence); 4070 } else { 4071 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, 4072 rsp->barrier_sequence); 4073 smp_mb__before_atomic(); 4074 atomic_inc(&rsp->barrier_cpu_count); 4075 __call_rcu(&rdp->barrier_head, 4076 rcu_barrier_callback, rsp, cpu, 0); 4077 } 4078 } else if (READ_ONCE(rdp->qlen)) { 4079 _rcu_barrier_trace(rsp, "OnlineQ", cpu, 4080 rsp->barrier_sequence); 4081 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); 4082 } else { 4083 _rcu_barrier_trace(rsp, "OnlineNQ", cpu, 4084 rsp->barrier_sequence); 4085 } 4086 } 4087 put_online_cpus(); 4088 4089 /* 4090 * Now that we have an rcu_barrier_callback() callback on each 4091 * CPU, and thus each counted, remove the initial count. 4092 */ 4093 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) 4094 complete(&rsp->barrier_completion); 4095 4096 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 4097 wait_for_completion(&rsp->barrier_completion); 4098 4099 /* Mark the end of the barrier operation. */ 4100 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence); 4101 rcu_seq_end(&rsp->barrier_sequence); 4102 4103 /* Other rcu_barrier() invocations can now safely proceed. */ 4104 mutex_unlock(&rsp->barrier_mutex); 4105 } 4106 4107 /** 4108 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. 4109 */ 4110 void rcu_barrier_bh(void) 4111 { 4112 _rcu_barrier(&rcu_bh_state); 4113 } 4114 EXPORT_SYMBOL_GPL(rcu_barrier_bh); 4115 4116 /** 4117 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. 4118 */ 4119 void rcu_barrier_sched(void) 4120 { 4121 _rcu_barrier(&rcu_sched_state); 4122 } 4123 EXPORT_SYMBOL_GPL(rcu_barrier_sched); 4124 4125 /* 4126 * Propagate ->qsinitmask bits up the rcu_node tree to account for the 4127 * first CPU in a given leaf rcu_node structure coming online. The caller 4128 * must hold the corresponding leaf rcu_node ->lock with interrrupts 4129 * disabled. 4130 */ 4131 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) 4132 { 4133 long mask; 4134 struct rcu_node *rnp = rnp_leaf; 4135 4136 for (;;) { 4137 mask = rnp->grpmask; 4138 rnp = rnp->parent; 4139 if (rnp == NULL) 4140 return; 4141 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ 4142 rnp->qsmaskinit |= mask; 4143 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ 4144 } 4145 } 4146 4147 /* 4148 * Do boot-time initialization of a CPU's per-CPU RCU data. 4149 */ 4150 static void __init 4151 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) 4152 { 4153 unsigned long flags; 4154 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 4155 struct rcu_node *rnp = rcu_get_root(rsp); 4156 4157 /* Set up local state, ensuring consistent view of global state. 
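 * The fields initialized below are this CPU's bit within its leaf
 * rcu_node structure (->grpmask), the pointer to its rcu_dynticks
 * structure, ->cpu, ->rsp, the per-CPU expedited funnel mutex, and the
 * no-CBs state, all under the root rcu_node structure's ->lock.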
 */
4158 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4159 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
4160 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
4161 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
4162 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
4163 	rdp->cpu = cpu;
4164 	rdp->rsp = rsp;
4165 	mutex_init(&rdp->exp_funnel_mutex);
4166 	rcu_boot_init_nocb_percpu_data(rdp);
4167 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4168 }
4169 
4170 /*
4171  * Initialize a CPU's per-CPU RCU data.  Note that only one online or
4172  * offline event can be happening at a given time.  Note also that we
4173  * can accept some slop in the rsp->completed access due to the fact
4174  * that this CPU cannot possibly have any RCU callbacks in flight yet.
4175  */
4176 static void
4177 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
4178 {
4179 	unsigned long flags;
4180 	unsigned long mask;
4181 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
4182 	struct rcu_node *rnp = rcu_get_root(rsp);
4183 
4184 	/* Set up local state, ensuring consistent view of global state. */
4185 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4186 	rdp->qlen_last_fqs_check = 0;
4187 	rdp->n_force_qs_snap = rsp->n_force_qs;
4188 	rdp->blimit = blimit;
4189 	if (!rdp->nxtlist)
4190 		init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
4191 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
4192 	rcu_sysidle_init_percpu_data(rdp->dynticks);
4193 	atomic_set(&rdp->dynticks->dynticks,
4194 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
4195 	raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
4196 
4197 	/*
4198 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4199 	 * propagation up the rcu_node tree will happen at the beginning
4200 	 * of the next grace period.
4201 	 */
4202 	rnp = rdp->mynode;
4203 	mask = rdp->grpmask;
4204 	raw_spin_lock_rcu_node(rnp);  /* irqs already disabled. */
4205 	rnp->qsmaskinitnext |= mask;
4206 	rnp->expmaskinitnext |= mask;
4207 	if (!rdp->beenonline)
4208 		WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
4209 	rdp->beenonline = true;  /* We have now been online. */
4210 	rdp->gpnum = rnp->completed;  /* Make CPU later note any new GP. */
4211 	rdp->completed = rnp->completed;
4212 	rdp->cpu_no_qs.b.norm = true;
4213 	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
4214 	rdp->core_needs_qs = false;
4215 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
4216 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4217 }
4218 
4219 static void rcu_prepare_cpu(int cpu)
4220 {
4221 	struct rcu_state *rsp;
4222 
4223 	for_each_rcu_flavor(rsp)
4224 		rcu_init_percpu_data(cpu, rsp);
4225 }
4226 
4227 #ifdef CONFIG_HOTPLUG_CPU
4228 /*
4229  * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
4230  * function.  We now remove it from the rcu_node tree's ->qsmaskinit
4231  * bit masks.
4235  */
4236 static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
4237 {
4238 	unsigned long flags;
4239 	unsigned long mask;
4240 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
4241 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4242 
4243 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
4244 		return;
4245 
4246 	/* Remove outgoing CPU from mask in the leaf rcu_node structure.
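	 * Note that only ->qsmaskinitnext is cleared here; the ->qsmaskinit
	 * bits consulted by an already-started grace period are updated by
	 * the grace-period machinery itself rather than by this function.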
*/ 4247 mask = rdp->grpmask; 4248 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 4249 rnp->qsmaskinitnext &= ~mask; 4250 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4251 } 4252 4253 void rcu_report_dead(unsigned int cpu) 4254 { 4255 struct rcu_state *rsp; 4256 4257 /* QS for any half-done expedited RCU-sched GP. */ 4258 preempt_disable(); 4259 rcu_report_exp_rdp(&rcu_sched_state, 4260 this_cpu_ptr(rcu_sched_state.rda), true); 4261 preempt_enable(); 4262 for_each_rcu_flavor(rsp) 4263 rcu_cleanup_dying_idle_cpu(cpu, rsp); 4264 } 4265 #endif 4266 4267 /* 4268 * Handle CPU online/offline notification events. 4269 */ 4270 int rcu_cpu_notify(struct notifier_block *self, 4271 unsigned long action, void *hcpu) 4272 { 4273 long cpu = (long)hcpu; 4274 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); 4275 struct rcu_node *rnp = rdp->mynode; 4276 struct rcu_state *rsp; 4277 4278 switch (action) { 4279 case CPU_UP_PREPARE: 4280 case CPU_UP_PREPARE_FROZEN: 4281 rcu_prepare_cpu(cpu); 4282 rcu_prepare_kthreads(cpu); 4283 rcu_spawn_all_nocb_kthreads(cpu); 4284 break; 4285 case CPU_ONLINE: 4286 case CPU_DOWN_FAILED: 4287 sync_sched_exp_online_cleanup(cpu); 4288 rcu_boost_kthread_setaffinity(rnp, -1); 4289 break; 4290 case CPU_DOWN_PREPARE: 4291 rcu_boost_kthread_setaffinity(rnp, cpu); 4292 break; 4293 case CPU_DYING: 4294 case CPU_DYING_FROZEN: 4295 for_each_rcu_flavor(rsp) 4296 rcu_cleanup_dying_cpu(rsp); 4297 break; 4298 case CPU_DEAD: 4299 case CPU_DEAD_FROZEN: 4300 case CPU_UP_CANCELED: 4301 case CPU_UP_CANCELED_FROZEN: 4302 for_each_rcu_flavor(rsp) { 4303 rcu_cleanup_dead_cpu(cpu, rsp); 4304 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu)); 4305 } 4306 break; 4307 default: 4308 break; 4309 } 4310 return NOTIFY_OK; 4311 } 4312 4313 static int rcu_pm_notify(struct notifier_block *self, 4314 unsigned long action, void *hcpu) 4315 { 4316 switch (action) { 4317 case PM_HIBERNATION_PREPARE: 4318 case PM_SUSPEND_PREPARE: 4319 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ 4320 rcu_expedite_gp(); 4321 break; 4322 case PM_POST_HIBERNATION: 4323 case PM_POST_SUSPEND: 4324 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ 4325 rcu_unexpedite_gp(); 4326 break; 4327 default: 4328 break; 4329 } 4330 return NOTIFY_OK; 4331 } 4332 4333 /* 4334 * Spawn the kthreads that handle each RCU flavor's grace periods. 4335 */ 4336 static int __init rcu_spawn_gp_kthread(void) 4337 { 4338 unsigned long flags; 4339 int kthread_prio_in = kthread_prio; 4340 struct rcu_node *rnp; 4341 struct rcu_state *rsp; 4342 struct sched_param sp; 4343 struct task_struct *t; 4344 4345 /* Force priority into range. 
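 * With RCU priority boosting enabled, the grace-period kthreads must run
 * at a SCHED_FIFO priority of at least 1, while a value of zero leaves
 * them at the default scheduling policy; values are clamped to the 0..99
 * range and any adjustment is reported via pr_alert().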
*/ 4346 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) 4347 kthread_prio = 1; 4348 else if (kthread_prio < 0) 4349 kthread_prio = 0; 4350 else if (kthread_prio > 99) 4351 kthread_prio = 99; 4352 if (kthread_prio != kthread_prio_in) 4353 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n", 4354 kthread_prio, kthread_prio_in); 4355 4356 rcu_scheduler_fully_active = 1; 4357 for_each_rcu_flavor(rsp) { 4358 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name); 4359 BUG_ON(IS_ERR(t)); 4360 rnp = rcu_get_root(rsp); 4361 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4362 rsp->gp_kthread = t; 4363 if (kthread_prio) { 4364 sp.sched_priority = kthread_prio; 4365 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 4366 } 4367 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4368 wake_up_process(t); 4369 } 4370 rcu_spawn_nocb_kthreads(); 4371 rcu_spawn_boost_kthreads(); 4372 return 0; 4373 } 4374 early_initcall(rcu_spawn_gp_kthread); 4375 4376 /* 4377 * This function is invoked towards the end of the scheduler's initialization 4378 * process. Before this is called, the idle task might contain 4379 * RCU read-side critical sections (during which time, this idle 4380 * task is booting the system). After this function is called, the 4381 * idle tasks are prohibited from containing RCU read-side critical 4382 * sections. This function also enables RCU lockdep checking. 4383 */ 4384 void rcu_scheduler_starting(void) 4385 { 4386 WARN_ON(num_online_cpus() != 1); 4387 WARN_ON(nr_context_switches() > 0); 4388 rcu_scheduler_active = 1; 4389 } 4390 4391 /* 4392 * Compute the per-level fanout, either using the exact fanout specified 4393 * or balancing the tree, depending on the rcu_fanout_exact boot parameter. 4394 */ 4395 static void __init rcu_init_levelspread(int *levelspread, const int *levelcnt) 4396 { 4397 int i; 4398 4399 if (rcu_fanout_exact) { 4400 levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf; 4401 for (i = rcu_num_lvls - 2; i >= 0; i--) 4402 levelspread[i] = RCU_FANOUT; 4403 } else { 4404 int ccur; 4405 int cprv; 4406 4407 cprv = nr_cpu_ids; 4408 for (i = rcu_num_lvls - 1; i >= 0; i--) { 4409 ccur = levelcnt[i]; 4410 levelspread[i] = (cprv + ccur - 1) / ccur; 4411 cprv = ccur; 4412 } 4413 } 4414 } 4415 4416 /* 4417 * Helper function for rcu_init() that initializes one rcu_state structure. 4418 */ 4419 static void __init rcu_init_one(struct rcu_state *rsp) 4420 { 4421 static const char * const buf[] = RCU_NODE_NAME_INIT; 4422 static const char * const fqs[] = RCU_FQS_NAME_INIT; 4423 static const char * const exp[] = RCU_EXP_NAME_INIT; 4424 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 4425 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 4426 static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS]; 4427 static u8 fl_mask = 0x1; 4428 4429 int levelcnt[RCU_NUM_LVLS]; /* # nodes in each level. */ 4430 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 4431 int cpustride = 1; 4432 int i; 4433 int j; 4434 struct rcu_node *rnp; 4435 4436 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 4437 4438 /* Silence gcc 4.8 false positive about array index out of range. */ 4439 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) 4440 panic("rcu_init_one: rcu_num_lvls out of range"); 4441 4442 /* Initialize the level-tracking arrays. 
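 * Here levelcnt[] records how many rcu_node structures populate each
 * level of the combining tree, and each ->level[] entry is pointed at the
 * first rcu_node structure of the corresponding level within the flat
 * ->node[] array.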
*/ 4443 4444 for (i = 0; i < rcu_num_lvls; i++) 4445 levelcnt[i] = num_rcu_lvl[i]; 4446 for (i = 1; i < rcu_num_lvls; i++) 4447 rsp->level[i] = rsp->level[i - 1] + levelcnt[i - 1]; 4448 rcu_init_levelspread(levelspread, levelcnt); 4449 rsp->flavor_mask = fl_mask; 4450 fl_mask <<= 1; 4451 4452 /* Initialize the elements themselves, starting from the leaves. */ 4453 4454 for (i = rcu_num_lvls - 1; i >= 0; i--) { 4455 cpustride *= levelspread[i]; 4456 rnp = rsp->level[i]; 4457 for (j = 0; j < levelcnt[i]; j++, rnp++) { 4458 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); 4459 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), 4460 &rcu_node_class[i], buf[i]); 4461 raw_spin_lock_init(&rnp->fqslock); 4462 lockdep_set_class_and_name(&rnp->fqslock, 4463 &rcu_fqs_class[i], fqs[i]); 4464 rnp->gpnum = rsp->gpnum; 4465 rnp->completed = rsp->completed; 4466 rnp->qsmask = 0; 4467 rnp->qsmaskinit = 0; 4468 rnp->grplo = j * cpustride; 4469 rnp->grphi = (j + 1) * cpustride - 1; 4470 if (rnp->grphi >= nr_cpu_ids) 4471 rnp->grphi = nr_cpu_ids - 1; 4472 if (i == 0) { 4473 rnp->grpnum = 0; 4474 rnp->grpmask = 0; 4475 rnp->parent = NULL; 4476 } else { 4477 rnp->grpnum = j % levelspread[i - 1]; 4478 rnp->grpmask = 1UL << rnp->grpnum; 4479 rnp->parent = rsp->level[i - 1] + 4480 j / levelspread[i - 1]; 4481 } 4482 rnp->level = i; 4483 INIT_LIST_HEAD(&rnp->blkd_tasks); 4484 rcu_init_one_nocb(rnp); 4485 mutex_init(&rnp->exp_funnel_mutex); 4486 lockdep_set_class_and_name(&rnp->exp_funnel_mutex, 4487 &rcu_exp_class[i], exp[i]); 4488 } 4489 } 4490 4491 init_swait_queue_head(&rsp->gp_wq); 4492 init_swait_queue_head(&rsp->expedited_wq); 4493 rnp = rsp->level[rcu_num_lvls - 1]; 4494 for_each_possible_cpu(i) { 4495 while (i > rnp->grphi) 4496 rnp++; 4497 per_cpu_ptr(rsp->rda, i)->mynode = rnp; 4498 rcu_boot_init_percpu_data(i, rsp); 4499 } 4500 list_add(&rsp->flavors, &rcu_struct_flavors); 4501 } 4502 4503 /* 4504 * Compute the rcu_node tree geometry from kernel parameters. This cannot 4505 * replace the definitions in tree.h because those are needed to size 4506 * the ->node array in the rcu_state structure. 4507 */ 4508 static void __init rcu_init_geometry(void) 4509 { 4510 ulong d; 4511 int i; 4512 int rcu_capacity[RCU_NUM_LVLS]; 4513 4514 /* 4515 * Initialize any unspecified boot parameters. 4516 * The default values of jiffies_till_first_fqs and 4517 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS 4518 * value, which is a function of HZ, then adding one for each 4519 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system. 4520 */ 4521 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 4522 if (jiffies_till_first_fqs == ULONG_MAX) 4523 jiffies_till_first_fqs = d; 4524 if (jiffies_till_next_fqs == ULONG_MAX) 4525 jiffies_till_next_fqs = d; 4526 4527 /* If the compile-time values are accurate, just leave. */ 4528 if (rcu_fanout_leaf == RCU_FANOUT_LEAF && 4529 nr_cpu_ids == NR_CPUS) 4530 return; 4531 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n", 4532 rcu_fanout_leaf, nr_cpu_ids); 4533 4534 /* 4535 * The boot-time rcu_fanout_leaf parameter must be at least two 4536 * and cannot exceed the number of bits in the rcu_node masks. 4537 * Complain and fall back to the compile-time values if this 4538 * limit is exceeded. 
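 * As a concrete illustration, with rcu_fanout_leaf = 16 and RCU_FANOUT = 64
 * (typical 64-bit defaults), a two-level tree covers 16 * 64 = 1024 CPUs,
 * and each additional level multiplies that capacity by another factor of
 * RCU_FANOUT.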
4539 */ 4540 if (rcu_fanout_leaf < 2 || 4541 rcu_fanout_leaf > sizeof(unsigned long) * 8) { 4542 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4543 WARN_ON(1); 4544 return; 4545 } 4546 4547 /* 4548 * Compute number of nodes that can be handled an rcu_node tree 4549 * with the given number of levels. 4550 */ 4551 rcu_capacity[0] = rcu_fanout_leaf; 4552 for (i = 1; i < RCU_NUM_LVLS; i++) 4553 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; 4554 4555 /* 4556 * The tree must be able to accommodate the configured number of CPUs. 4557 * If this limit is exceeded, fall back to the compile-time values. 4558 */ 4559 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { 4560 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4561 WARN_ON(1); 4562 return; 4563 } 4564 4565 /* Calculate the number of levels in the tree. */ 4566 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) { 4567 } 4568 rcu_num_lvls = i + 1; 4569 4570 /* Calculate the number of rcu_nodes at each level of the tree. */ 4571 for (i = 0; i < rcu_num_lvls; i++) { 4572 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; 4573 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap); 4574 } 4575 4576 /* Calculate the total number of rcu_node structures. */ 4577 rcu_num_nodes = 0; 4578 for (i = 0; i < rcu_num_lvls; i++) 4579 rcu_num_nodes += num_rcu_lvl[i]; 4580 } 4581 4582 /* 4583 * Dump out the structure of the rcu_node combining tree associated 4584 * with the rcu_state structure referenced by rsp. 4585 */ 4586 static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp) 4587 { 4588 int level = 0; 4589 struct rcu_node *rnp; 4590 4591 pr_info("rcu_node tree layout dump\n"); 4592 pr_info(" "); 4593 rcu_for_each_node_breadth_first(rsp, rnp) { 4594 if (rnp->level != level) { 4595 pr_cont("\n"); 4596 pr_info(" "); 4597 level = rnp->level; 4598 } 4599 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); 4600 } 4601 pr_cont("\n"); 4602 } 4603 4604 void __init rcu_init(void) 4605 { 4606 int cpu; 4607 4608 rcu_early_boot_tests(); 4609 4610 rcu_bootup_announce(); 4611 rcu_init_geometry(); 4612 rcu_init_one(&rcu_bh_state); 4613 rcu_init_one(&rcu_sched_state); 4614 if (dump_tree) 4615 rcu_dump_rcu_node_tree(&rcu_sched_state); 4616 __rcu_init_preempt(); 4617 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 4618 4619 /* 4620 * We don't need protection against CPU-hotplug here because 4621 * this is called early in boot, before either interrupts 4622 * or the scheduler are operational. 4623 */ 4624 cpu_notifier(rcu_cpu_notify, 0); 4625 pm_notifier(rcu_pm_notify, 0); 4626 for_each_online_cpu(cpu) 4627 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); 4628 } 4629 4630 #include "tree_plugin.h" 4631
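
/*
 * Illustrative usage sketches, not part of the original file.  The names
 * my_head, my_cb, and my_cookie below are hypothetical.
 *
 * A module that posts callbacks with call_rcu_sched() must drain them on
 * its unload path so that no callback can run after the module's code is
 * gone:
 *
 *	call_rcu_sched(&my_head, my_cb);
 *	...
 *	rcu_barrier_sched();
 *
 * A caller that might not need to block at all can snapshot the
 * grace-period state and wait only if no full grace period has elapsed in
 * the meantime:
 *
 *	my_cookie = get_state_synchronize_rcu();
 *	...
 *	cond_synchronize_rcu(my_cookie);
 */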