/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/sched/debug.h>
#include <linux/smpboot.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include "../time/tick-internal.h"

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

/*
 * Control variables for per-CPU and per-rcu_node kthreads.  These
 * handle all flavors of RCU.
 */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
 * all uses are in dead code.  Provide a definition to keep the compiler
 * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
 * This probably needs to be excluded from -rt builds.
 */
#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

#ifdef CONFIG_PREEMPT_RCU

RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;

static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1

/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods.
 * The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none).  If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements.  In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period.  Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections.  At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves rrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	lockdep_assert_held(&rnp->lock);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);

	/*
	 * Decide where to queue the newly blocked task.  In theory,
	 * this could be an if-statement.  In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case RCU_EXP_TASKS:
	case RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_BLKD:
	case RCU_GP_BLKD:
	case RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting.  Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_TASKS + RCU_EXP_BLKD:
	case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP.  Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP.  Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task.  If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
		rnp->gp_tasks = &t->rcu_node_entry;
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		rnp->exp_tasks = &t->rcu_node_entry;
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP.  This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP.  (Though we are
	 * still in a quiescent state in any case.)
	 */
	if (blkd_state & RCU_EXP_BLKD &&
	    t->rcu_read_unlock_special.b.exp_need_qs) {
		t->rcu_read_unlock_special.b.exp_need_qs = false;
		rcu_report_exp_rdp(rdp->rsp, rdp, true);
	} else {
		WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
	}
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * As with the other rcu_*_qs() functions, callers to this function
 * must disable preemption.
 */
static void rcu_preempt_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data_p->gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
		current->rcu_read_unlock_special.b.need_qs = false;
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
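 *
 * Purely for illustration (a sketch, not an interface defined in this
 * file), the kind of reader that can end up queued here is the usual
 * one, where "gp" and do_something_with() are hypothetical names:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 * If the task is preempted between rcu_read_lock() and
 * rcu_read_unlock(), this function queues it on its leaf rcu_node
 * structure's ->blkd_tasks list via rcu_preempt_ctxt_queue().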
 */
static void rcu_preempt_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = this_cpu_ptr(rcu_state_p->rda);
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special.s) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs();
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	unsigned long flags;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited.
	 * Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	if (special.b.need_qs) {
		rcu_preempt_qs();
		t->rcu_read_unlock_special.b.need_qs = false;
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
		}
	}

	/*
	 * Respond to a request for an expedited grace period, but only if
	 * we were not preempted, meaning that we were running on the same
	 * CPU throughout.  If we were preempted, the exp_need_qs flag
	 * would have been cleared at the time of the first preemption,
	 * and the quiescent state would be reported when we were dequeued.
	 */
	if (special.b.exp_need_qs) {
		WARN_ON_ONCE(special.b.blocked);
		t->rcu_read_unlock_special.b.exp_need_qs = false;
		rdp = this_cpu_ptr(rcu_state_p->rda);
		rcu_report_exp_rdp(rcu_state_p, rdp, true);
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
		}
	}

	/* Hardware IRQ handlers cannot block, complain if they get here. */
	if (in_irq() || in_serving_softirq()) {
		lockdep_rcu_suspicious(__FILE__, __LINE__,
				       "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
		pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
			 t->rcu_read_unlock_special.s,
			 t->rcu_read_unlock_special.b.blocked,
			 t->rcu_read_unlock_special.b.exp_need_qs,
			 t->rcu_read_unlock_special.b.need_qs);
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {
		t->rcu_read_unlock_special.b.blocked = false;

		/*
		 * Remove this task from the list it blocked on.  The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = sync_rcu_preempt_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				rnp->boost_tasks = np;
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rcu_state_p, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	pr_cont("\n");
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rnp->exp_tasks)
		return 0;
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!
 * This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (rcu_preempt_has_tasks(rnp)) {
		rnp->gp_tasks = rnp->blkd_tasks.next;
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gpnum, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs();
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    __this_cpu_read(rcu_data_p->core_needs_qs) &&
	    __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
		t->rcu_read_unlock_special.b.need_qs = true;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
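 *
 * For illustration only, a typical caller embeds an rcu_head in its own
 * structure and passes a callback that frees that structure once the
 * grace period has elapsed.  This is a sketch (struct foo, foo_reclaim(),
 * and fp are hypothetical names, not anything defined in this file):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu(&fp->rh, foo_reclaim);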
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, rcu_state_p, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed
 * information on memory-ordering guarantees.  However, please note
 * that -only- the memory-ordering guarantees apply.  For example,
 * synchronize_rcu() is -not- guaranteed to wait on things like code
 * protected by preempt_disable(), instead, synchronize_rcu() is -only-
 * guaranteed to wait on RCU read-side critical sections, that is, sections
 * of code protected by rcu_read_lock().
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(rcu_state_p);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special.b.blocked = true;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

static struct rcu_state *const rcu_state_p = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(bool preempt)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_interruptible(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
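 *
 * As a worked example (assuming HZ=1000 and the default
 * CONFIG_RCU_BOOST_DELAY=500), RCU_BOOST_DELAY_JIFFIES below works out
 * to DIV_ROUND_UP(500 * 1000, 1000) = 500, so the ULONG_CMP_GE() test
 * in this function starts boosting tasks that block only the normal
 * grace period once that grace period is roughly 500 jiffies old,
 * whereas tasks blocking an expedited grace period are boosted
 * immediately.
 *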
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct task_struct *t;

	lockdep_assert_held(&rnp->lock);
	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __this_cpu_read(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (rcu_state_p != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __this_cpu_read(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
	schedule_timeout_interruptible(2);
	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rcu_rnp_online_cpus(rnp);
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for_each_leaf_node_possible_cpu(rnp, cpu)
		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
		    cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0)
		cpumask_setall(cm);
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn boost kthreads -- called as soon as the scheduler is running.
 */
static void __init rcu_spawn_boost_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rcu_for_each_leaf_node(rcu_state_p, rnp)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void __init rcu_spawn_boost_kthreads(void)
{
}

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return rcu_cpu_has_callbacks(NULL);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(void)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(void)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following two preprocessor symbols control this state machine:
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.
 *	And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
module_param(rcu_idle_gp_delay, int, 0644);
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);

/*
 * Try to advance callbacks for all flavors of RCU on the current CPU, but
 * only if it has been a while since the last time we did so.  Afterwards,
 * if there are any callbacks ready for immediate invocation, return true.
 */
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
	bool cbs_ready = false;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	/* Exit early if we advanced recently. */
	if (jiffies == rdtp->last_advance_all)
		return false;
	rdtp->last_advance_all = jiffies;

	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		rnp = rdp->mynode;

		/*
		 * Don't bother checking unless a grace period has
		 * completed since we last checked and there are
		 * callbacks not yet ready to invoke.
		 */
		if ((rdp->completed != rnp->completed ||
		     unlikely(READ_ONCE(rdp->gpwrap))) &&
		    rcu_segcblist_pend_cbs(&rdp->cblist))
			note_gp_changes(rsp, rdp);

		if (rcu_segcblist_ready_cbs(&rdp->cblist))
			cbs_ready = true;
	}
	return cbs_ready;
}

/*
 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
 * caller to set the timeout based on whether or not there are non-lazy
 * callbacks.
 *
 * The caller must have disabled interrupts.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	unsigned long dj;

	lockdep_assert_irqs_disabled();

	/* Snapshot to detect later posting of non-lazy callback. */
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;

	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
		*nextevt = KTIME_MAX;
		return 0;
	}

	/* Attempt to advance callbacks. */
	if (rcu_try_advance_all_cbs()) {
		/* Some ready to invoke, so initiate later invocation. */
		invoke_rcu_core();
		return 1;
	}
	rdtp->last_accelerate = jiffies;

	/* Request timer delay depending on laziness, and round. */
	if (!rdtp->all_lazy) {
		dj = round_up(rcu_idle_gp_delay + jiffies,
			      rcu_idle_gp_delay) - jiffies;
	} else {
		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
	}
	*nextevt = basemono + dj * TICK_NSEC;
	return 0;
}

/*
 * Prepare a CPU for idle from an RCU perspective.
 * The first major task
 * is to sense whether nohz mode has been enabled or disabled via sysfs.
 * The second major task is to check to see if a non-lazy callback has
 * arrived at a CPU that previously had only lazy callbacks.  The third
 * major task is to accelerate (that is, assign grace-period numbers to)
 * any recently arrived callbacks.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(void)
{
	bool needwake;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	struct rcu_node *rnp;
	struct rcu_state *rsp;
	int tne;

	lockdep_assert_irqs_disabled();
	if (rcu_is_nocb_cpu(smp_processor_id()))
		return;

	/* Handle nohz enablement switches conservatively. */
	tne = READ_ONCE(tick_nohz_active);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(NULL))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/*
	 * If a non-lazy callback arrived at a CPU having only lazy
	 * callbacks, invoke RCU core for the side-effect of recalculating
	 * idle duration on re-entry to idle.
	 */
	if (rdtp->all_lazy &&
	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
		rdtp->all_lazy = false;
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
		invoke_rcu_core();
		return;
	}

	/*
	 * If we have not yet accelerated this jiffy, accelerate all
	 * callbacks on this CPU.
	 */
	if (rdtp->last_accelerate == jiffies)
		return;
	rdtp->last_accelerate = jiffies;
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		if (!rcu_segcblist_pend_cbs(&rdp->cblist))
			continue;
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
		if (needwake)
			rcu_gp_kthread_wake(rsp);
	}
}

/*
 * Clean up for exit from idle.  Attempt to advance callbacks based on
 * any grace periods that elapsed while the CPU was idle, and if any
 * callbacks are now ready to invoke, initiate invocation.
 */
static void rcu_cleanup_after_idle(void)
{
	lockdep_assert_irqs_disabled();
	if (rcu_is_nocb_cpu(smp_processor_id()))
		return;
	if (rcu_try_advance_all_cbs())
		invoke_rcu_core();
}

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}

/*
 * Data for flushing lazy RCU callbacks at OOM time.
 */
static atomic_t oom_callback_count;
static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);

/*
 * RCU OOM callback -- decrement the outstanding count and deliver the
 * wake-up if we are the last one.
 */
static void rcu_oom_callback(struct rcu_head *rhp)
{
	if (atomic_dec_and_test(&oom_callback_count))
		wake_up(&oom_callback_wq);
}

/*
 * Post an rcu_oom_notify callback on the current CPU if it has at
 * least one lazy callback.  This will unnecessarily post callbacks
 * to CPUs that already have a non-lazy callback at the end of their
 * callback list, but this is an infrequent operation, so accept some
 * extra overhead to keep things simple.
 */
static void rcu_oom_notify_cpu(void *unused)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;

	for_each_rcu_flavor(rsp) {
		rdp = raw_cpu_ptr(rsp->rda);
		if (rcu_segcblist_n_lazy_cbs(&rdp->cblist)) {
			atomic_inc(&oom_callback_count);
			rsp->call(&rdp->oom_head, rcu_oom_callback);
		}
	}
}

/*
 * If low on memory, ensure that each CPU has a non-lazy callback.
 * This will wake up CPUs that have only lazy callbacks, in turn
 * ensuring that they free up the corresponding memory in a timely manner.
 * Because an uncertain amount of memory will be freed in some uncertain
 * timeframe, we do not claim to have freed anything.
 */
static int rcu_oom_notify(struct notifier_block *self,
			  unsigned long notused, void *nfreed)
{
	int cpu;

	/* Wait for callbacks from earlier instance to complete. */
	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
	smp_mb(); /* Ensure callback reuse happens after callback invocation. */

	/*
	 * Prevent premature wakeup: ensure that all increments happen
	 * before there is a chance of the counter reaching zero.
	 */
	atomic_set(&oom_callback_count, 1);

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
		cond_resched_rcu_qs();
	}

	/* Unconditionally decrement: no need to wake ourselves up. */
	atomic_dec(&oom_callback_count);

	return NOTIFY_OK;
}

static struct notifier_block rcu_oom_nb = {
	.notifier_call = rcu_oom_notify
};

static int __init rcu_register_oom_notifier(void)
{
	register_oom_notifier(&rcu_oom_nb);
	return 0;
}
early_initcall(rcu_register_oom_notifier);

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;

	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
		rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
		ulong2long(nlpd),
		rdtp->all_lazy ? 'L' : '.',
		rdtp->tick_nohz_enabled_snap ? '.' : 'D');
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	pr_cont("\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.
Otherwise, print the number of RCU grace periods 1667 * that this CPU is ignorant of, for example, "1" if the CPU was 1668 * aware of the previous grace period. 1669 * 1670 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. 1671 */ 1672 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) 1673 { 1674 unsigned long delta; 1675 char fast_no_hz[72]; 1676 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1677 struct rcu_dynticks *rdtp = rdp->dynticks; 1678 char *ticks_title; 1679 unsigned long ticks_value; 1680 1681 if (rsp->gpnum == rdp->gpnum) { 1682 ticks_title = "ticks this GP"; 1683 ticks_value = rdp->ticks_this_gp; 1684 } else { 1685 ticks_title = "GPs behind"; 1686 ticks_value = rsp->gpnum - rdp->gpnum; 1687 } 1688 print_cpu_stall_fast_no_hz(fast_no_hz, cpu); 1689 delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum; 1690 pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n", 1691 cpu, 1692 "O."[!!cpu_online(cpu)], 1693 "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)], 1694 "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)], 1695 !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' : 1696 rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' : 1697 "!."[!delta], 1698 ticks_value, ticks_title, 1699 rcu_dynticks_snap(rdtp) & 0xfff, 1700 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, 1701 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), 1702 READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart, 1703 fast_no_hz); 1704 } 1705 1706 /* Terminate the stall-info list. */ 1707 static void print_cpu_stall_info_end(void) 1708 { 1709 pr_err("\t"); 1710 } 1711 1712 /* Zero ->ticks_this_gp for all flavors of RCU. */ 1713 static void zero_cpu_stall_ticks(struct rcu_data *rdp) 1714 { 1715 rdp->ticks_this_gp = 0; 1716 rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id()); 1717 } 1718 1719 /* Increment ->ticks_this_gp for all flavors of RCU. */ 1720 static void increment_cpu_stall_ticks(void) 1721 { 1722 struct rcu_state *rsp; 1723 1724 for_each_rcu_flavor(rsp) 1725 raw_cpu_inc(rsp->rda->ticks_this_gp); 1726 } 1727 1728 #ifdef CONFIG_RCU_NOCB_CPU 1729 1730 /* 1731 * Offload callback processing from the boot-time-specified set of CPUs 1732 * specified by rcu_nocb_mask. For each CPU in the set, there is a 1733 * kthread created that pulls the callbacks from the corresponding CPU, 1734 * waits for a grace period to elapse, and invokes the callbacks. 1735 * The no-CBs CPUs do a wake_up() on their kthread when they insert 1736 * a callback into any empty list, unless the rcu_nocb_poll boot parameter 1737 * has been specified, in which case each kthread actively polls its 1738 * CPU. (Which isn't so great for energy efficiency, but which does 1739 * reduce RCU's overhead on that CPU.) 1740 * 1741 * This is intended to be used in conjunction with Frederic Weisbecker's 1742 * adaptive-idle work, which would seriously reduce OS jitter on CPUs 1743 * running CPU-bound user-mode computations. 1744 * 1745 * Offloading of callback processing could also in theory be used as 1746 * an energy-efficiency measure because CPUs with no RCU callbacks 1747 * queued are more aggressive about entering dyntick-idle mode. 1748 */ 1749 1750 1751 /* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. 
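 */

/*
 * rcu_nocb_setup() below hands the "rcu_nocbs=" boot argument to
 * cpulist_parse().  As a rough, stand-alone illustration of the list
 * syntax involved, here is a minimal user-space parser for the simple
 * "a-b,c" form, writing into a 64-bit mask.  It is a sketch only: it
 * ignores the stride extensions and the error handling that the real
 * kernel helper provides.
 */
#if 0	/* Illustrative sketch only; not built as part of this file. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a list such as "1-3,8" into a bitmask of CPUs 0..63. */
static uint64_t cpulist_to_mask(const char *s)
{
	uint64_t mask = 0;
	char *end;

	while (*s) {
		long lo = strtol(s, &end, 10);
		long hi = lo;

		if (*end == '-')
			hi = strtol(end + 1, &end, 10);
		for (long cpu = lo; cpu <= hi && cpu < 64; cpu++)
			mask |= 1ULL << cpu;
		if (*end != ',')
			break;
		s = end + 1;
	}
	return mask;
}

int main(void)
{
	/* Booting with "rcu_nocbs=1-3,8" offloads CPUs 1, 2, 3, and 8. */
	printf("%#llx\n", (unsigned long long)cpulist_to_mask("1-3,8"));
	return 0;
}
#endif

/*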
*/ 1752 static int __init rcu_nocb_setup(char *str) 1753 { 1754 alloc_bootmem_cpumask_var(&rcu_nocb_mask); 1755 have_rcu_nocb_mask = true; 1756 cpulist_parse(str, rcu_nocb_mask); 1757 return 1; 1758 } 1759 __setup("rcu_nocbs=", rcu_nocb_setup); 1760 1761 static int __init parse_rcu_nocb_poll(char *arg) 1762 { 1763 rcu_nocb_poll = true; 1764 return 0; 1765 } 1766 early_param("rcu_nocb_poll", parse_rcu_nocb_poll); 1767 1768 /* 1769 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended 1770 * grace period. 1771 */ 1772 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) 1773 { 1774 swake_up_all(sq); 1775 } 1776 1777 /* 1778 * Set the root rcu_node structure's ->need_future_gp field 1779 * based on the sum of those of all rcu_node structures. This does 1780 * double-count the root rcu_node structure's requests, but this 1781 * is necessary to handle the possibility of a rcu_nocb_kthread() 1782 * having awakened during the time that the rcu_node structures 1783 * were being updated for the end of the previous grace period. 1784 */ 1785 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) 1786 { 1787 rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq; 1788 } 1789 1790 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) 1791 { 1792 return &rnp->nocb_gp_wq[rnp->completed & 0x1]; 1793 } 1794 1795 static void rcu_init_one_nocb(struct rcu_node *rnp) 1796 { 1797 init_swait_queue_head(&rnp->nocb_gp_wq[0]); 1798 init_swait_queue_head(&rnp->nocb_gp_wq[1]); 1799 } 1800 1801 /* Is the specified CPU a no-CBs CPU? */ 1802 bool rcu_is_nocb_cpu(int cpu) 1803 { 1804 if (have_rcu_nocb_mask) 1805 return cpumask_test_cpu(cpu, rcu_nocb_mask); 1806 return false; 1807 } 1808 1809 /* 1810 * Kick the leader kthread for this NOCB group. Caller holds ->nocb_lock 1811 * and this function releases it. 1812 */ 1813 static void __wake_nocb_leader(struct rcu_data *rdp, bool force, 1814 unsigned long flags) 1815 __releases(rdp->nocb_lock) 1816 { 1817 struct rcu_data *rdp_leader = rdp->nocb_leader; 1818 1819 lockdep_assert_held(&rdp->nocb_lock); 1820 if (!READ_ONCE(rdp_leader->nocb_kthread)) { 1821 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); 1822 return; 1823 } 1824 if (rdp_leader->nocb_leader_sleep || force) { 1825 /* Prior smp_mb__after_atomic() orders against prior enqueue. */ 1826 WRITE_ONCE(rdp_leader->nocb_leader_sleep, false); 1827 del_timer(&rdp->nocb_timer); 1828 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); 1829 smp_mb(); /* ->nocb_leader_sleep before swake_up(). */ 1830 swake_up(&rdp_leader->nocb_wq); 1831 } else { 1832 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); 1833 } 1834 } 1835 1836 /* 1837 * Kick the leader kthread for this NOCB group, but caller has not 1838 * acquired locks. 1839 */ 1840 static void wake_nocb_leader(struct rcu_data *rdp, bool force) 1841 { 1842 unsigned long flags; 1843 1844 raw_spin_lock_irqsave(&rdp->nocb_lock, flags); 1845 __wake_nocb_leader(rdp, force, flags); 1846 } 1847 1848 /* 1849 * Arrange to wake the leader kthread for this NOCB group at some 1850 * future time when it is safe to do so. 
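 */

/*
 * The rcu_nocb_gp_set() and rcu_nocb_gp_get() helpers above index a
 * two-entry array by the low-order bit of the grace-period number:
 * because a no-CBs kthread only ever waits for the current or the next
 * grace period, two slots suffice, and slot (gp & 0x1) can be reused as
 * soon as grace period gp has completed.  A small stand-alone model of
 * that idea follows; the names are illustrative only.
 */
#if 0	/* Illustrative sketch only; not built as part of this file. */
#include <stdio.h>

static int waiters[2];	/* Two wait "slots", reused alternately. */

/* A kthread asks to be woken once grace period gp has completed. */
static void model_wait_for_gp(unsigned long gp)
{
	waiters[gp & 0x1]++;
}

/* The grace-period machinery completes grace period gp. */
static int model_complete_gp(unsigned long gp)
{
	int woken = waiters[gp & 0x1];

	waiters[gp & 0x1] = 0;	/* The slot is now free for gp + 2. */
	return woken;
}

int main(void)
{
	model_wait_for_gp(41);
	model_wait_for_gp(42);
	printf("GP 41 done: wake %d\n", model_complete_gp(41));
	printf("GP 42 done: wake %d\n", model_complete_gp(42));
	return 0;
}
#endif

/*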
 */
static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
				   const char *reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
		mod_timer(&rdp->nocb_timer, jiffies + 1);
	WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, reason);
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}

/*
 * Does the specified CPU need an RCU callback for the specified flavor
 * of rcu_barrier()?
 */
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	unsigned long ret;
#ifdef CONFIG_PROVE_RCU
	struct rcu_head *rhp;
#endif /* #ifdef CONFIG_PROVE_RCU */

	/*
	 * Check count of all no-CBs callbacks awaiting invocation.
	 * There needs to be a barrier before this function is called,
	 * but associated with a prior determination that no more
	 * callbacks would be posted.  In the worst case, the first
	 * barrier in _rcu_barrier() suffices (but the caller cannot
	 * necessarily rely on this, not a substitute for the caller
	 * getting the concurrency design right!).  There must also be
	 * a barrier between the following load and posting of a callback
	 * (if a callback is in fact needed).  This is associated with an
	 * atomic_inc() in the caller.
	 */
	ret = atomic_long_read(&rdp->nocb_q_count);

#ifdef CONFIG_PROVE_RCU
	rhp = READ_ONCE(rdp->nocb_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_gp_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_follower_head);

	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
	    rcu_scheduler_fully_active) {
		/* RCU callback enqueued before CPU first came online??? */
		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
		       cpu, rhp->func);
		WARN_ON_ONCE(1);
	}
#endif /* #ifdef CONFIG_PROVE_RCU */

	return !!ret;
}

/*
 * Enqueue the specified string of rcu_head structures onto the specified
 * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
 * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
 * counts are supplied by rhcount and rhcount_lazy.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
				    struct rcu_head *rhp,
				    struct rcu_head **rhtp,
				    int rhcount, int rhcount_lazy,
				    unsigned long flags)
{
	int len;
	struct rcu_head **old_rhpp;
	struct task_struct *t;

	/* Enqueue the callback on the nocb list and update counts. */
	atomic_long_add(rhcount, &rdp->nocb_q_count);
	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
	WRITE_ONCE(*old_rhpp, rhp);
	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */

	/* If we are not being polled and there is a kthread, awaken it ...
*/ 1938 t = READ_ONCE(rdp->nocb_kthread); 1939 if (rcu_nocb_poll || !t) { 1940 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 1941 TPS("WakeNotPoll")); 1942 return; 1943 } 1944 len = atomic_long_read(&rdp->nocb_q_count); 1945 if (old_rhpp == &rdp->nocb_head) { 1946 if (!irqs_disabled_flags(flags)) { 1947 /* ... if queue was empty ... */ 1948 wake_nocb_leader(rdp, false); 1949 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 1950 TPS("WakeEmpty")); 1951 } else { 1952 wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE, 1953 TPS("WakeEmptyIsDeferred")); 1954 } 1955 rdp->qlen_last_fqs_check = 0; 1956 } else if (len > rdp->qlen_last_fqs_check + qhimark) { 1957 /* ... or if many callbacks queued. */ 1958 if (!irqs_disabled_flags(flags)) { 1959 wake_nocb_leader(rdp, true); 1960 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 1961 TPS("WakeOvf")); 1962 } else { 1963 wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE, 1964 TPS("WakeOvfIsDeferred")); 1965 } 1966 rdp->qlen_last_fqs_check = LONG_MAX / 2; 1967 } else { 1968 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot")); 1969 } 1970 return; 1971 } 1972 1973 /* 1974 * This is a helper for __call_rcu(), which invokes this when the normal 1975 * callback queue is inoperable. If this is not a no-CBs CPU, this 1976 * function returns failure back to __call_rcu(), which can complain 1977 * appropriately. 1978 * 1979 * Otherwise, this function queues the callback where the corresponding 1980 * "rcuo" kthread can find it. 1981 */ 1982 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, 1983 bool lazy, unsigned long flags) 1984 { 1985 1986 if (!rcu_is_nocb_cpu(rdp->cpu)) 1987 return false; 1988 __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags); 1989 if (__is_kfree_rcu_offset((unsigned long)rhp->func)) 1990 trace_rcu_kfree_callback(rdp->rsp->name, rhp, 1991 (unsigned long)rhp->func, 1992 -atomic_long_read(&rdp->nocb_q_count_lazy), 1993 -atomic_long_read(&rdp->nocb_q_count)); 1994 else 1995 trace_rcu_callback(rdp->rsp->name, rhp, 1996 -atomic_long_read(&rdp->nocb_q_count_lazy), 1997 -atomic_long_read(&rdp->nocb_q_count)); 1998 1999 /* 2000 * If called from an extended quiescent state with interrupts 2001 * disabled, invoke the RCU core in order to allow the idle-entry 2002 * deferred-wakeup check to function. 2003 */ 2004 if (irqs_disabled_flags(flags) && 2005 !rcu_is_watching() && 2006 cpu_online(smp_processor_id())) 2007 invoke_rcu_core(); 2008 2009 return true; 2010 } 2011 2012 /* 2013 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is 2014 * not a no-CBs CPU. 2015 */ 2016 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp, 2017 struct rcu_data *rdp, 2018 unsigned long flags) 2019 { 2020 lockdep_assert_irqs_disabled(); 2021 if (!rcu_is_nocb_cpu(smp_processor_id())) 2022 return false; /* Not NOCBs CPU, caller must migrate CBs. */ 2023 __call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist), 2024 rcu_segcblist_tail(&rdp->cblist), 2025 rcu_segcblist_n_cbs(&rdp->cblist), 2026 rcu_segcblist_n_lazy_cbs(&rdp->cblist), flags); 2027 rcu_segcblist_init(&rdp->cblist); 2028 rcu_segcblist_disable(&rdp->cblist); 2029 return true; 2030 } 2031 2032 /* 2033 * If necessary, kick off a new grace period, and either way wait 2034 * for a subsequent grace period to complete. 
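 */

/*
 * rcu_nocb_wait_gp() below waits until ->completed has advanced at least
 * to the requested grace-period number c.  It uses ULONG_CMP_GE() rather
 * than a plain ">=" so that the test keeps working once the counters
 * wrap around.  The reimplementation below is for illustration only; the
 * real macro is defined elsewhere in the RCU headers.
 */
#if 0	/* Illustrative sketch only; not built as part of this file. */
#include <limits.h>
#include <stdio.h>

/* Same idea as ULONG_CMP_GE(): compare via (wrapping) unsigned distance. */
#define MODEL_ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long c = ULONG_MAX - 1;	/* Requested grace period. */

	/* Not reached yet ... */
	printf("%d\n", (int)MODEL_ULONG_CMP_GE(ULONG_MAX - 2, c));	/* 0 */
	/* ... reached just before the counter wraps ... */
	printf("%d\n", (int)MODEL_ULONG_CMP_GE(ULONG_MAX - 1, c));	/* 1 */
	/* ... and still seen as reached just after it wraps. */
	printf("%d\n", (int)MODEL_ULONG_CMP_GE(1UL, c));		/* 1 */
	return 0;
}
#endif

/*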
2035 */ 2036 static void rcu_nocb_wait_gp(struct rcu_data *rdp) 2037 { 2038 unsigned long c; 2039 bool d; 2040 unsigned long flags; 2041 bool needwake; 2042 struct rcu_node *rnp = rdp->mynode; 2043 2044 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2045 needwake = rcu_start_future_gp(rnp, rdp, &c); 2046 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2047 if (needwake) 2048 rcu_gp_kthread_wake(rdp->rsp); 2049 2050 /* 2051 * Wait for the grace period. Do so interruptibly to avoid messing 2052 * up the load average. 2053 */ 2054 trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); 2055 for (;;) { 2056 swait_event_interruptible( 2057 rnp->nocb_gp_wq[c & 0x1], 2058 (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c))); 2059 if (likely(d)) 2060 break; 2061 WARN_ON(signal_pending(current)); 2062 trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait")); 2063 } 2064 trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait")); 2065 smp_mb(); /* Ensure that CB invocation happens after GP end. */ 2066 } 2067 2068 /* 2069 * Leaders come here to wait for additional callbacks to show up. 2070 * This function does not return until callbacks appear. 2071 */ 2072 static void nocb_leader_wait(struct rcu_data *my_rdp) 2073 { 2074 bool firsttime = true; 2075 unsigned long flags; 2076 bool gotcbs; 2077 struct rcu_data *rdp; 2078 struct rcu_head **tail; 2079 2080 wait_again: 2081 2082 /* Wait for callbacks to appear. */ 2083 if (!rcu_nocb_poll) { 2084 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep")); 2085 swait_event_interruptible(my_rdp->nocb_wq, 2086 !READ_ONCE(my_rdp->nocb_leader_sleep)); 2087 raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags); 2088 my_rdp->nocb_leader_sleep = true; 2089 WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); 2090 del_timer(&my_rdp->nocb_timer); 2091 raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags); 2092 } else if (firsttime) { 2093 firsttime = false; /* Don't drown trace log with "Poll"! */ 2094 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll")); 2095 } 2096 2097 /* 2098 * Each pass through the following loop checks a follower for CBs. 2099 * We are our own first follower. Any CBs found are moved to 2100 * nocb_gp_head, where they await a grace period. 2101 */ 2102 gotcbs = false; 2103 smp_mb(); /* wakeup and _sleep before ->nocb_head reads. */ 2104 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { 2105 rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head); 2106 if (!rdp->nocb_gp_head) 2107 continue; /* No CBs here, try next follower. */ 2108 2109 /* Move callbacks to wait-for-GP list, which is empty. */ 2110 WRITE_ONCE(rdp->nocb_head, NULL); 2111 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); 2112 gotcbs = true; 2113 } 2114 2115 /* No callbacks? Sleep a bit if polling, and go retry. */ 2116 if (unlikely(!gotcbs)) { 2117 WARN_ON(signal_pending(current)); 2118 if (rcu_nocb_poll) { 2119 schedule_timeout_interruptible(1); 2120 } else { 2121 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, 2122 TPS("WokeEmpty")); 2123 } 2124 goto wait_again; 2125 } 2126 2127 /* Wait for one grace period. */ 2128 rcu_nocb_wait_gp(my_rdp); 2129 2130 /* Each pass through the following loop wakes a follower, if needed. 
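 */

/*
 * The loop that follows appends each follower's newly arrived callbacks
 * to that follower's "done" list and wakes the follower only when that
 * list was empty beforehand: a follower that already has work queued has
 * already been woken and has not yet caught up.  A simplified
 * single-producer model of the append-and-conditionally-wake step, with
 * illustrative names:
 */
#if 0	/* Illustrative sketch only; not built as part of this file. */
#include <stdbool.h>
#include <stddef.h>

struct cb {			/* Stand-in for struct rcu_head. */
	struct cb *next;
};

static struct cb *done_head;	/* Follower's ready-to-invoke list. */
static struct cb **done_tail = &done_head;

/* Append the batch [head, *tailp]; return true if a wakeup is needed. */
static bool model_append_batch(struct cb *head, struct cb **tailp)
{
	struct cb **old_tail = done_tail;

	done_tail = tailp;
	*old_tail = head;
	/* Only an empty list means the follower might be asleep. */
	return old_tail == &done_head;
}
#endif

/*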
*/ 2131 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { 2132 if (!rcu_nocb_poll && 2133 READ_ONCE(rdp->nocb_head) && 2134 READ_ONCE(my_rdp->nocb_leader_sleep)) { 2135 raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags); 2136 my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/ 2137 raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags); 2138 } 2139 if (!rdp->nocb_gp_head) 2140 continue; /* No CBs, so no need to wake follower. */ 2141 2142 /* Append callbacks to follower's "done" list. */ 2143 raw_spin_lock_irqsave(&rdp->nocb_lock, flags); 2144 tail = rdp->nocb_follower_tail; 2145 rdp->nocb_follower_tail = rdp->nocb_gp_tail; 2146 *tail = rdp->nocb_gp_head; 2147 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); 2148 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) { 2149 /* List was empty, so wake up the follower. */ 2150 swake_up(&rdp->nocb_wq); 2151 } 2152 } 2153 2154 /* If we (the leader) don't have CBs, go wait some more. */ 2155 if (!my_rdp->nocb_follower_head) 2156 goto wait_again; 2157 } 2158 2159 /* 2160 * Followers come here to wait for additional callbacks to show up. 2161 * This function does not return until callbacks appear. 2162 */ 2163 static void nocb_follower_wait(struct rcu_data *rdp) 2164 { 2165 for (;;) { 2166 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep")); 2167 swait_event_interruptible(rdp->nocb_wq, 2168 READ_ONCE(rdp->nocb_follower_head)); 2169 if (smp_load_acquire(&rdp->nocb_follower_head)) { 2170 /* ^^^ Ensure CB invocation follows _head test. */ 2171 return; 2172 } 2173 WARN_ON(signal_pending(current)); 2174 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeEmpty")); 2175 } 2176 } 2177 2178 /* 2179 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes 2180 * callbacks queued by the corresponding no-CBs CPU, however, there is 2181 * an optional leader-follower relationship so that the grace-period 2182 * kthreads don't have to do quite so many wakeups. 2183 */ 2184 static int rcu_nocb_kthread(void *arg) 2185 { 2186 int c, cl; 2187 unsigned long flags; 2188 struct rcu_head *list; 2189 struct rcu_head *next; 2190 struct rcu_head **tail; 2191 struct rcu_data *rdp = arg; 2192 2193 /* Each pass through this loop invokes one batch of callbacks */ 2194 for (;;) { 2195 /* Wait for callbacks. */ 2196 if (rdp->nocb_leader == rdp) 2197 nocb_leader_wait(rdp); 2198 else 2199 nocb_follower_wait(rdp); 2200 2201 /* Pull the ready-to-invoke callbacks onto local list. */ 2202 raw_spin_lock_irqsave(&rdp->nocb_lock, flags); 2203 list = rdp->nocb_follower_head; 2204 rdp->nocb_follower_head = NULL; 2205 tail = rdp->nocb_follower_tail; 2206 rdp->nocb_follower_tail = &rdp->nocb_follower_head; 2207 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); 2208 BUG_ON(!list); 2209 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty")); 2210 2211 /* Each pass through the following loop invokes a callback. */ 2212 trace_rcu_batch_start(rdp->rsp->name, 2213 atomic_long_read(&rdp->nocb_q_count_lazy), 2214 atomic_long_read(&rdp->nocb_q_count), -1); 2215 c = cl = 0; 2216 while (list) { 2217 next = list->next; 2218 /* Wait for enqueuing to complete, if needed. 
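 * The producer side in __call_rcu_nocb_enqueue() publishes a callback in
 * two steps: it first swings ->nocb_tail to the new element with xchg()
 * and only then stores the link from the previous element.  A kthread
 * running here can therefore see the tail already advanced while the
 * ->next pointer it needs is still NULL, so the loop below simply waits
 * for the producer's second store to become visible.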
*/ 2219 while (next == NULL && &list->next != tail) { 2220 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2221 TPS("WaitQueue")); 2222 schedule_timeout_interruptible(1); 2223 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2224 TPS("WokeQueue")); 2225 next = list->next; 2226 } 2227 debug_rcu_head_unqueue(list); 2228 local_bh_disable(); 2229 if (__rcu_reclaim(rdp->rsp->name, list)) 2230 cl++; 2231 c++; 2232 local_bh_enable(); 2233 cond_resched_rcu_qs(); 2234 list = next; 2235 } 2236 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); 2237 smp_mb__before_atomic(); /* _add after CB invocation. */ 2238 atomic_long_add(-c, &rdp->nocb_q_count); 2239 atomic_long_add(-cl, &rdp->nocb_q_count_lazy); 2240 rdp->n_nocbs_invoked += c; 2241 } 2242 return 0; 2243 } 2244 2245 /* Is a deferred wakeup of rcu_nocb_kthread() required? */ 2246 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) 2247 { 2248 return READ_ONCE(rdp->nocb_defer_wakeup); 2249 } 2250 2251 /* Do a deferred wakeup of rcu_nocb_kthread(). */ 2252 static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp) 2253 { 2254 unsigned long flags; 2255 int ndw; 2256 2257 raw_spin_lock_irqsave(&rdp->nocb_lock, flags); 2258 if (!rcu_nocb_need_deferred_wakeup(rdp)) { 2259 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); 2260 return; 2261 } 2262 ndw = READ_ONCE(rdp->nocb_defer_wakeup); 2263 WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); 2264 __wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags); 2265 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake")); 2266 } 2267 2268 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */ 2269 static void do_nocb_deferred_wakeup_timer(struct timer_list *t) 2270 { 2271 struct rcu_data *rdp = from_timer(rdp, t, nocb_timer); 2272 2273 do_nocb_deferred_wakeup_common(rdp); 2274 } 2275 2276 /* 2277 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath. 2278 * This means we do an inexact common-case check. Note that if 2279 * we miss, ->nocb_timer will eventually clean things up. 
2280 */ 2281 static void do_nocb_deferred_wakeup(struct rcu_data *rdp) 2282 { 2283 if (rcu_nocb_need_deferred_wakeup(rdp)) 2284 do_nocb_deferred_wakeup_common(rdp); 2285 } 2286 2287 void __init rcu_init_nohz(void) 2288 { 2289 int cpu; 2290 bool need_rcu_nocb_mask = true; 2291 struct rcu_state *rsp; 2292 2293 #if defined(CONFIG_NO_HZ_FULL) 2294 if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask)) 2295 need_rcu_nocb_mask = true; 2296 #endif /* #if defined(CONFIG_NO_HZ_FULL) */ 2297 2298 if (!have_rcu_nocb_mask && need_rcu_nocb_mask) { 2299 if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) { 2300 pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n"); 2301 return; 2302 } 2303 have_rcu_nocb_mask = true; 2304 } 2305 if (!have_rcu_nocb_mask) 2306 return; 2307 2308 #if defined(CONFIG_NO_HZ_FULL) 2309 if (tick_nohz_full_running) 2310 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask); 2311 #endif /* #if defined(CONFIG_NO_HZ_FULL) */ 2312 2313 if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) { 2314 pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n"); 2315 cpumask_and(rcu_nocb_mask, cpu_possible_mask, 2316 rcu_nocb_mask); 2317 } 2318 pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n", 2319 cpumask_pr_args(rcu_nocb_mask)); 2320 if (rcu_nocb_poll) 2321 pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); 2322 2323 for_each_rcu_flavor(rsp) { 2324 for_each_cpu(cpu, rcu_nocb_mask) 2325 init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu)); 2326 rcu_organize_nocb_kthreads(rsp); 2327 } 2328 } 2329 2330 /* Initialize per-rcu_data variables for no-CBs CPUs. */ 2331 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2332 { 2333 rdp->nocb_tail = &rdp->nocb_head; 2334 init_swait_queue_head(&rdp->nocb_wq); 2335 rdp->nocb_follower_tail = &rdp->nocb_follower_head; 2336 raw_spin_lock_init(&rdp->nocb_lock); 2337 timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); 2338 } 2339 2340 /* 2341 * If the specified CPU is a no-CBs CPU that does not already have its 2342 * rcuo kthread for the specified RCU flavor, spawn it. If the CPUs are 2343 * brought online out of order, this can require re-organizing the 2344 * leader-follower relationships. 2345 */ 2346 static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu) 2347 { 2348 struct rcu_data *rdp; 2349 struct rcu_data *rdp_last; 2350 struct rcu_data *rdp_old_leader; 2351 struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu); 2352 struct task_struct *t; 2353 2354 /* 2355 * If this isn't a no-CBs CPU or if it already has an rcuo kthread, 2356 * then nothing to do. 2357 */ 2358 if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread) 2359 return; 2360 2361 /* If we didn't spawn the leader first, reorganize! */ 2362 rdp_old_leader = rdp_spawn->nocb_leader; 2363 if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) { 2364 rdp_last = NULL; 2365 rdp = rdp_old_leader; 2366 do { 2367 rdp->nocb_leader = rdp_spawn; 2368 if (rdp_last && rdp != rdp_spawn) 2369 rdp_last->nocb_next_follower = rdp; 2370 if (rdp == rdp_spawn) { 2371 rdp = rdp->nocb_next_follower; 2372 } else { 2373 rdp_last = rdp; 2374 rdp = rdp->nocb_next_follower; 2375 rdp_last->nocb_next_follower = NULL; 2376 } 2377 } while (rdp); 2378 rdp_spawn->nocb_next_follower = rdp_old_leader; 2379 } 2380 2381 /* Spawn the kthread for this CPU and RCU flavor. 
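 */

/*
 * rcu_organize_nocb_kthreads() further below groups the no-CBs CPUs into
 * leader/follower sets whose size defaults to the square root of
 * nr_cpu_ids, computing the first CPU of the next group as
 * DIV_ROUND_UP(cpu + 1, ls) * ls.  A small stand-alone model of that
 * grouping arithmetic follows, assuming for simplicity that every CPU is
 * a no-CBs CPU; the names are illustrative only.
 */
#if 0	/* Illustrative sketch only; not built as part of this file. */
#include <stdio.h>

#define MODEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int nr_cpu_ids = 16;
	int ls = 1;		/* Leader stride; int_sqrt(16) == 4. */
	int nl = 0;		/* First CPU of the next group. */

	while ((ls + 1) * (ls + 1) <= nr_cpu_ids)
		ls++;

	for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (cpu >= nl) {
			nl = MODEL_DIV_ROUND_UP(cpu + 1, ls) * ls;
			printf("CPU %2d: leader of CPUs %d-%d\n",
			       cpu, cpu, nl - 1);
		} else {
			printf("CPU %2d: follower\n", cpu);
		}
	}
	return 0;
}
#endif

/*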
*/ 2382 t = kthread_run(rcu_nocb_kthread, rdp_spawn, 2383 "rcuo%c/%d", rsp->abbr, cpu); 2384 BUG_ON(IS_ERR(t)); 2385 WRITE_ONCE(rdp_spawn->nocb_kthread, t); 2386 } 2387 2388 /* 2389 * If the specified CPU is a no-CBs CPU that does not already have its 2390 * rcuo kthreads, spawn them. 2391 */ 2392 static void rcu_spawn_all_nocb_kthreads(int cpu) 2393 { 2394 struct rcu_state *rsp; 2395 2396 if (rcu_scheduler_fully_active) 2397 for_each_rcu_flavor(rsp) 2398 rcu_spawn_one_nocb_kthread(rsp, cpu); 2399 } 2400 2401 /* 2402 * Once the scheduler is running, spawn rcuo kthreads for all online 2403 * no-CBs CPUs. This assumes that the early_initcall()s happen before 2404 * non-boot CPUs come online -- if this changes, we will need to add 2405 * some mutual exclusion. 2406 */ 2407 static void __init rcu_spawn_nocb_kthreads(void) 2408 { 2409 int cpu; 2410 2411 for_each_online_cpu(cpu) 2412 rcu_spawn_all_nocb_kthreads(cpu); 2413 } 2414 2415 /* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */ 2416 static int rcu_nocb_leader_stride = -1; 2417 module_param(rcu_nocb_leader_stride, int, 0444); 2418 2419 /* 2420 * Initialize leader-follower relationships for all no-CBs CPU. 2421 */ 2422 static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp) 2423 { 2424 int cpu; 2425 int ls = rcu_nocb_leader_stride; 2426 int nl = 0; /* Next leader. */ 2427 struct rcu_data *rdp; 2428 struct rcu_data *rdp_leader = NULL; /* Suppress misguided gcc warn. */ 2429 struct rcu_data *rdp_prev = NULL; 2430 2431 if (!have_rcu_nocb_mask) 2432 return; 2433 if (ls == -1) { 2434 ls = int_sqrt(nr_cpu_ids); 2435 rcu_nocb_leader_stride = ls; 2436 } 2437 2438 /* 2439 * Each pass through this loop sets up one rcu_data structure. 2440 * Should the corresponding CPU come online in the future, then 2441 * we will spawn the needed set of rcu_nocb_kthread() kthreads. 2442 */ 2443 for_each_cpu(cpu, rcu_nocb_mask) { 2444 rdp = per_cpu_ptr(rsp->rda, cpu); 2445 if (rdp->cpu >= nl) { 2446 /* New leader, set up for followers & next leader. */ 2447 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; 2448 rdp->nocb_leader = rdp; 2449 rdp_leader = rdp; 2450 } else { 2451 /* Another follower, link to previous leader. */ 2452 rdp->nocb_leader = rdp_leader; 2453 rdp_prev->nocb_next_follower = rdp; 2454 } 2455 rdp_prev = rdp; 2456 } 2457 } 2458 2459 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */ 2460 static bool init_nocb_callback_list(struct rcu_data *rdp) 2461 { 2462 if (!rcu_is_nocb_cpu(rdp->cpu)) 2463 return false; 2464 2465 /* If there are early-boot callbacks, move them to nocb lists. */ 2466 if (!rcu_segcblist_empty(&rdp->cblist)) { 2467 rdp->nocb_head = rcu_segcblist_head(&rdp->cblist); 2468 rdp->nocb_tail = rcu_segcblist_tail(&rdp->cblist); 2469 atomic_long_set(&rdp->nocb_q_count, 2470 rcu_segcblist_n_cbs(&rdp->cblist)); 2471 atomic_long_set(&rdp->nocb_q_count_lazy, 2472 rcu_segcblist_n_lazy_cbs(&rdp->cblist)); 2473 rcu_segcblist_init(&rdp->cblist); 2474 } 2475 rcu_segcblist_disable(&rdp->cblist); 2476 return true; 2477 } 2478 2479 #else /* #ifdef CONFIG_RCU_NOCB_CPU */ 2480 2481 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) 2482 { 2483 WARN_ON_ONCE(1); /* Should be dead code. 
 */
	return false;
}

static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
}

static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return NULL;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{
	return false;
}

static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
						     struct rcu_data *rdp,
						     unsigned long flags)
{
	return false;
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
}

static void rcu_spawn_all_nocb_kthreads(int cpu)
{
}

static void __init rcu_spawn_nocb_kthreads(void)
{
}

static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	return false;
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * An adaptive-ticks CPU can potentially execute in kernel mode for an
 * arbitrarily long period of time with the scheduling-clock tick turned
 * off.  RCU will be paying attention to this CPU because it is in the
 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
 * machine because the scheduling-clock tick has been disabled.  Therefore,
 * if an adaptive-ticks CPU is failing to respond to the current grace
 * period and has not been idle from an RCU perspective, kick it.
 */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(cpu))
		smp_send_reschedule(cpu);
#endif /* #ifdef CONFIG_NO_HZ_FULL */
}

/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
 * CONFIG_RCU_NOCB_CPU CPUs.
 */
static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress(rsp) ||
	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the RCU grace-period kthread to the housekeeping CPUs on
 * NO_HZ_FULL systems.
 */
static void rcu_bind_gp_kthread(void)
{
	int __maybe_unused cpu;

	if (!tick_nohz_full_enabled())
		return;
	housekeeping_affine(current, HK_FLAG_RCU);
}

/* Record the current task on dyntick-idle entry. */
static void rcu_dynticks_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Record no current task on dyntick-idle exit.
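 */

/*
 * A worked example for rcu_nohz_full_cpu() above: with HZ=1000 and a
 * grace period that began at gp_start=5000, ULONG_CMP_LT(jiffies,
 * gp_start + HZ) holds for jiffies 5000 through 5999, so the NO_HZ_FULL
 * CPU is left alone for roughly the first second of the grace period and
 * only sees RCU-core processing once the grace period has dragged on
 * longer than that.  (The numbers are illustrative only.)
 */

/*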
*/ 2604 static void rcu_dynticks_task_exit(void) 2605 { 2606 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) 2607 WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); 2608 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ 2609 } 2610