/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include "../locking/rtmutex_common.h"

static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
{
	/*
	 * In order to read the offloaded state of an rdp in a safe
	 * and stable way and to prevent its value from changing under
	 * us, we must either hold the barrier mutex, the cpu hotplug
	 * lock (read or write) or the nocb lock. Local non-preemptible
	 * reads are also safe. NOCB kthreads and timers have their own
	 * means of synchronization against the offloaded state updaters.
	 */
	RCU_NOCB_LOCKDEP_WARN(
		!(lockdep_is_held(&rcu_state.barrier_mutex) ||
		  (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
		  lockdep_is_held(&rdp->nocb_lock) ||
		  lockdep_is_held(&rcu_state.nocb_mutex) ||
		  ((!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) || softirq_count()) &&
		   rdp == this_cpu_ptr(&rcu_data)) ||
		  rcu_current_is_nocb_kthread(rdp)),
		"Unsafe read of RCU_NOCB offloaded state"
	);

	return rcu_segcblist_is_offloaded(&rdp->cblist);
}

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (qovld != DEFAULT_RCU_QOVLD)
		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n",
			jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug is enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (nohz_full_patience_delay < 0) {
		pr_info("\tRCU NOCB CPU patience negative (%d), resetting to zero.\n", nohz_full_patience_delay);
		nohz_full_patience_delay = 0;
	} else if (nohz_full_patience_delay > 5 * MSEC_PER_SEC) {
		pr_info("\tRCU NOCB CPU patience too large (%d), resetting to %ld.\n", nohz_full_patience_delay, 5 * MSEC_PER_SEC);
		nohz_full_patience_delay = 5 * MSEC_PER_SEC;
	} else if (nohz_full_patience_delay) {
		pr_info("\tRCU NOCB CPU patience set to %d milliseconds.\n", nohz_full_patience_delay);
	}
	nohz_full_patience_delay_jiffies = msecs_to_jiffies(nohz_full_patience_delay);
	if (!use_softirq)
		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

#ifdef CONFIG_PREEMPT_RCU

static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
static void rcu_read_unlock_special(struct task_struct *t);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1
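
/*
 * For illustration only (a worked reading of the flags above, not an
 * addition to the decision table): a task preempted on a CPU whose bit
 * is still set in ->qsmask while a normal grace period is already
 * waiting on ->gp_tasks sees blkd_state == (RCU_GP_TASKS | RCU_GP_BLKD),
 * so rcu_preempt_ctxt_queue() below queues it just after the first task
 * blocking the normal grace period, leaving ->gp_tasks unchanged.
 */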

/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods. The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none). If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements. In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period. Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections. At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves rrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
	/* RCU better not be waiting on newly onlined CPUs! */
	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
		     rdp->grpmask);

	/*
	 * Decide where to queue the newly blocked task. In theory,
	 * this could be an if-statement. In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case RCU_EXP_TASKS:
	case RCU_EXP_TASKS | RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS | RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_BLKD:
	case RCU_GP_BLKD:
	case RCU_GP_BLKD | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting. Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_TASKS | RCU_EXP_BLKD:
	case RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP. Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS | RCU_GP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP. Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task. If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
	}
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP. This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP. (Though we are
	 * still in a quiescent state in any case.)
	 *
	 * Interrupts are disabled, so ->cpu_no_qs.b.exp cannot change.
	 */
	if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	else
		WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state: Instead, it means that the current
 * grace period need not wait on any RCU read-side critical section that
 * starts later on this CPU. It also means that if the current task is
 * in an RCU read-side critical section, it has already added itself to
 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
 * current task, there might be any number of other tasks blocked while
 * in an RCU read-side critical section.
 *
 * Unlike non-preemptible-RCU, quiescent state reports for expedited
 * grace periods are handled separately via deferred quiescent states
 * and context switch events.
 *
 * Callers to this function must disable preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data.gp_seq),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from. If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section. Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
	WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
	if (rcu_preempt_depth() > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rcu_state.name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gp_seq
				       : rcu_seq_snap(&rnp->gp_seq));
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else {
		rcu_preempt_deferred_qs(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally. Either way, we can now note a quiescent state
	 * for this CPU. Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure. If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->gp_tasks) != NULL;
}

/* limit value for ->rcu_read_lock_nesting. */
#define RCU_NEST_PMAX (INT_MAX / 2)

static void rcu_preempt_read_enter(void)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
}

static int rcu_preempt_read_exit(void)
{
	int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;

	WRITE_ONCE(current->rcu_read_lock_nesting, ret);
	return ret;
}

static void rcu_preempt_depth_set(int val)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, val);
}

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	rcu_preempt_read_enter();
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  // critical section before exit code.
	if (rcu_preempt_read_exit() == 0) {
		barrier();  // critical-section exit before .s check.
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
	}
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		int rrln = rcu_preempt_depth();

		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
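
/*
 * For illustration only (hypothetical caller, not part of this file):
 * a preemptible-RCU reader reaches the two functions above through the
 * usual rcu_read_lock()/rcu_read_unlock() wrappers, and preemption
 * between them is what drives the ->blkd_tasks queuing in
 * rcu_note_context_switch():
 *
 *	rcu_read_lock();		// __rcu_read_lock(): nesting 0 -> 1
 *	p = rcu_dereference(gp);	// may be preempted here
 *	do_something_with(p);		// "gp" and "do_something_with()"
 *					// are placeholders
 *	rcu_read_unlock();		// __rcu_read_unlock(): nesting 1 -> 0,
 *					// slow path if ->rcu_read_unlock_special
 *					// is set
 */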

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Report deferred quiescent states. The deferral time can
 * be quite short, for example, in the case of the call from
 * rcu_read_unlock_special().
 */
static notrace void
rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	rdp = this_cpu_ptr(&rcu_data);
	if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING)
		rdp->defer_qs_iw_pending = DEFER_QS_IDLE;

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited. Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	if (!special.s && !rdp->cpu_no_qs.b.exp) {
		local_irq_restore(flags);
		return;
	}
	t->rcu_read_unlock_special.s = 0;
	if (special.b.need_qs) {
		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
			rdp->cpu_no_qs.b.norm = false;
			rcu_report_qs_rdp(rdp);
			udelay(rcu_unlock_delay);
		} else {
			rcu_qs();
		}
	}

	/*
	 * Respond to a request by an expedited grace period for a
	 * quiescent state from this CPU. Note that requests from
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {

		/*
		 * Remove this task from the list it blocked on. The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
			     (!empty_norm || rnp->qsmask));
		empty_exp = sync_rcu_exp_done(rnp);
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gp_seq, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			WRITE_ONCE(rnp->gp_tasks, np);
		if (&t->rcu_node_entry == rnp->exp_tasks)
			WRITE_ONCE(rnp->exp_tasks, np);
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				WRITE_ONCE(rnp->boost_tasks, np);
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gp_seq,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rnp, true);

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Is a deferred quiescent-state pending, and are we also not in
 * an RCU read-side critical section? It is the caller's responsibility
 * to ensure it is otherwise safe to report any deferred quiescent
 * states. The reason for this is that it is safe to report a
 * quiescent state during context switch even though preemption
 * is disabled. This function cannot be expected to understand these
 * nuances, so the caller must handle them.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       rcu_preempt_depth() == 0;
}

/*
 * Report a deferred quiescent state if needed and safe to do so.
 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
 * not being in an RCU read-side critical section. The caller must
 * evaluate safety in terms of interrupt, softirq, and preemption
 * disabling.
 */
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Minimal handler to give the scheduler a chance to re-evaluate.
 */
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;

	lockdep_assert_irqs_disabled();
	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);

	/*
	 * If the IRQ work handler happens to run in the middle of an RCU
	 * read-side critical section, it could be ineffective in getting the
	 * scheduler's attention to report a deferred quiescent state (the
	 * whole point of the IRQ work). For this reason, requeue the IRQ work.
	 *
	 * Basically, we want to avoid the following situation:
	 * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING)
	 * 2. CPU enters new rcu_read_lock()
	 * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0
	 * 4. rcu_read_unlock() does not re-queue work (state still PENDING)
	 * 5. Deferred QS reporting does not happen.
	 */
	if (rcu_preempt_depth() > 0)
		WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);
}

/*
 * Check if expedited grace period processing during unlock is needed.
 *
 * This function determines whether expedited handling is required based on:
 * 1. Task blocking an expedited grace period (based on a heuristic, could be
 *    a false positive, see below.)
 * 2. CPU participating in an expedited grace period
 * 3. Strict grace period mode requiring expedited handling
 * 4. RCU priority deboosting needs when interrupts were disabled
 *
 * @t: The task being checked
 * @rdp: The per-CPU RCU data
 * @rnp: The RCU node for this CPU
 * @irqs_were_disabled: Whether interrupts were disabled before rcu_read_unlock()
 *
 * Returns true if expedited processing of the rcu_read_unlock() is needed.
 */
static bool rcu_unlock_needs_exp_handling(struct task_struct *t,
					  struct rcu_data *rdp,
					  struct rcu_node *rnp,
					  bool irqs_were_disabled)
{
	/*
	 * Check if this task is blocking an expedited grace period. If the
	 * task was preempted within an RCU read-side critical section and is
	 * on the expedited grace period blockers list (exp_tasks), we need
	 * expedited handling to unblock the expedited GP. This is not an exact
	 * check because 't' might not be on the exp_tasks list at all - it's
	 * just a fast heuristic that can sometimes yield a false positive.
	 */
	if (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks))
		return true;

	/*
	 * Check if this CPU is participating in an expedited grace period.
	 * The expmask bitmap tracks which CPUs need to check in for the
	 * current expedited GP. If our CPU's bit is set, we need expedited
	 * handling to help complete the expedited GP.
	 */
	if (rdp->grpmask & READ_ONCE(rnp->expmask))
		return true;

	/*
	 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, all grace periods
	 * are treated as short for testing purposes even if that means
	 * disturbing the system more. Check if either:
	 * - This CPU has not yet reported a quiescent state, or
	 * - This task was preempted within an RCU critical section
	 * In either case, require expedited handling for strict GP mode.
	 */
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
	    ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node))
		return true;

	/*
	 * RCU priority boosting case: If a task is subject to RCU priority
	 * boosting and exits an RCU read-side critical section with interrupts
	 * disabled, we need expedited handling to ensure timely deboosting.
	 * Without this, a low-priority task could incorrectly run at high
	 * real-time priority for an extended period, degrading real-time
	 * responsiveness. This applies to all CONFIG_RCU_BOOST=y kernels,
	 * not just to PREEMPT_RT.
	 */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled && t->rcu_blocked_node)
		return true;

	return false;
}
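
/*
 * One illustrative scenario (not exhaustive): a task preempted within a
 * read-side critical section has a non-NULL ->rcu_blocked_node. If an
 * expedited grace period is waiting on that rcu_node's ->exp_tasks list
 * when the task finally reaches rcu_read_unlock() with preemption still
 * disabled, the first check above returns true, and
 * rcu_read_unlock_special() below expedites matters (RCU_SOFTIRQ or IRQ
 * work) instead of leaving the deferred quiescent state to a later
 * scheduling-clock interrupt.
 */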

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	unsigned long flags;
	bool irqs_were_disabled;
	bool preempt_bh_were_disabled =
			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);
	irqs_were_disabled = irqs_disabled_flags(flags);
	if (preempt_bh_were_disabled || irqs_were_disabled) {
		bool needs_exp;	// Expedited handling needed.
		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
		struct rcu_node *rnp = rdp->mynode;

		needs_exp = rcu_unlock_needs_exp_handling(t, rdp, rnp, irqs_were_disabled);

		// Need to defer quiescent state until everything is enabled.
		if (use_softirq && (in_hardirq() || (needs_exp && !irqs_were_disabled))) {
			// Using softirq, safe to awaken, and either the
			// wakeup is free or there is either an expedited
			// GP in flight or a potential need to deboost.
			raise_softirq_irqoff(RCU_SOFTIRQ);
		} else {
			// Enabling BH or preempt does reschedule, so...
			// Also if no expediting and no possible deboosting,
			// slow is OK. Plus nohz_full CPUs eventually get
			// tick enabled.
			set_tsk_need_resched(current);
			set_preempt_need_resched();
			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
			    needs_exp && rdp->defer_qs_iw_pending != DEFER_QS_PENDING &&
			    cpu_online(rdp->cpu)) {
				// Get scheduler to re-evaluate and call hooks.
				// If !IRQ_WORK, FQS scan will eventually IPI.
				rdp->defer_qs_iw_pending = DEFER_QS_PENDING;
				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
			}
		}
		local_irq_restore(flags);
		return;
	}
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty. It is a serious bug to complete a grace
 * period that still has RCU readers blocked! This function must be
 * invoked -before- updating this rnp's ->gp_seq.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	raw_lockdep_assert_held_rcu_node(rnp);
	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
		dump_blkd_tasks(rnp, 10);
	if (rcu_preempt_has_tasks(rnp) &&
	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gp_seq, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU, including voluntary
 * context switches for Tasks RCU. When a task blocks, the task is
 * recorded in the corresponding CPU's rcu_node structure, which is checked
 * elsewhere, hence this function need only check for quiescent states
 * related to the current CPU, not to those related to tasks.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	struct task_struct *t = current;

	lockdep_assert_irqs_disabled();
	if (rcu_preempt_depth() > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
	if (rcu_preempt_depth() > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
	    time_after(jiffies, rcu_state.gp_start + HZ))
		t->rcu_read_unlock_special.b.need_qs = true;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so. No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 * Besides, if this function does anything other than just immediately
 * return, there was a bug of some sort. Spewing warnings from this
 * function is like as not to simply obscure important prior warnings.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (unlikely(!list_empty(&current->rcu_node_entry))) {
		rcu_preempt_depth_set(1);
		barrier();
		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
	} else if (unlikely(rcu_preempt_depth())) {
		rcu_preempt_depth_set(1);
	} else {
		return;
	}
	__rcu_read_unlock();
	rcu_preempt_deferred_qs(current);
}

/*
 * Dump the blocked-tasks state, but limit the list dump to the
 * specified number of elements.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	int cpu;
	int i;
	struct list_head *lhp;
	struct rcu_data *rdp;
	struct rcu_node *rnp1;

	raw_lockdep_assert_held_rcu_node(rnp);
	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
		__func__, rnp->grplo, rnp->grphi, rnp->level,
		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
		__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
		READ_ONCE(rnp->exp_tasks));
	pr_info("%s: ->blkd_tasks", __func__);
	i = 0;
	list_for_each(lhp, &rnp->blkd_tasks) {
		pr_cont(" %p", lhp);
		if (++i >= ncheck)
			break;
	}
	pr_cont("\n");
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
			cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
	}
}

static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp)
{
	rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler);
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * If strict grace periods are enabled, and if the calling
 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
 * report that quiescent state and, if requested, spin for a bit.
 */
void rcu_read_unlock_strict(void)
{
	struct rcu_data *rdp;

	if (irqs_disabled() || in_atomic_preempt_off() || !rcu_state.gp_kthread)
		return;

	/*
	 * rcu_report_qs_rdp() can only be invoked with a stable rdp and
	 * from the local CPU.
	 *
	 * The in_atomic_preempt_off() check ensures that we come here holding
	 * the last preempt_count (which will get dropped once we return to
	 * __rcu_read_unlock()).
	 */
	rdp = this_cpu_ptr(&rcu_data);
	rdp->cpu_no_qs.b.norm = false;
	rcu_report_qs_rdp(rdp);
	udelay(rcu_unlock_delay);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Note a quiescent state for PREEMPTION=n. Because we do not need to know
 * how many quiescent states passed, just if there was at least one since
 * the start of the grace period, this just sets a flag. The caller must
 * have disabled preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}

/*
 * Register an urgently needed quiescent state.
 * If there is an emergency, invoke rcu_momentary_eqs() to do a
 * heavy-weight dyntick-idle quiescent state visible to other CPUs,
 * which will in some cases serve for expedited as well as normal
 * grace periods. Either way, register a lightweight quiescent state.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
		return;
	preempt_disable();  // For CONFIG_PREEMPT_COUNT=y kernels
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_eqs();
		local_irq_restore(flags);
	}
	rcu_qs();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

/*
 * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_qs();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
		rcu_momentary_eqs();
out:
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because there is no preemptible RCU, there can be no deferred quiescent
 * states.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}

// Except that we do need to respond to a request by an expedited
// grace period for a quiescent state from this CPU. Note that in
// non-preemptible kernels, there can be no context switches within RCU
// read-side critical sections, which in turn means that the leaf rcu_node
// structure's blocked-tasks list is always empty. There is therefore no
// need to actually check it. Instead, a quiescent state from this CPU
// suffices, and this function is only called from such a quiescent state.
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (READ_ONCE(rdp->cpu_no_qs.b.exp))
		rcu_report_exp_rdp(rdp);
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks. So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state,
 * namely user mode and idle loop.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) &&
	     (preempt_count() == HARDIRQ_OFFSET))) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode, from the idle loop without this being a nested
		 * interrupt, or while not holding the task preempt count
		 * (with PREEMPT_COUNT=y). In this case, the CPU is in a
		 * quiescent state, so note it.
		 *
		 * No memory barrier is required here because rcu_qs()
		 * references only CPU-local variables that other CPUs
		 * neither access nor modify, at least not while the
		 * corresponding CPU is online.
		 */
		rcu_qs();
	}
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

/*
 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
}

static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) { }

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * If boosting, set rcuc kthreads to realtime priority.
 */
static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
#ifdef CONFIG_RCU_BOOST
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
#endif /* #ifdef CONFIG_RCU_BOOST */

	WRITE_ONCE(rdp->rcuc_activity, jiffies);
}

static bool rcu_is_callbacks_nocb_kthread(struct rcu_data *rdp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return rdp->nocb_cb_kthread == current;
#else
	return false;
#endif
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp)
{
	return rdp->rcu_cpu_kthread_task == current ||
	       rcu_is_callbacks_nocb_kthread(rdp);
}

#ifdef CONFIG_RCU_BOOST

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL)
		tb = rnp->exp_tasks;
	else
		tb = rnp->boost_tasks;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t. We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section. Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority. (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else. We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock. Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
	rnp->n_boosts++;

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(READ_ONCE(rnp->boost_tasks) ||
			 READ_ONCE(rnp->exp_tasks));
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_idle(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them. If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (!rnp->boost_kthread_task ||
	    (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld ||
	      IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)))) {
		if (rnp->exp_tasks == NULL)
			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rcu_wake_cond(rnp->boost_kthread_task,
			      READ_ONCE(rnp->boost_kthread_status));
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist. We only create this kthread for preemptible RCU.
 */
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - rcu_get_root();
	struct sched_param sp;
	struct task_struct *t;

	if (rnp->boost_kthread_task)
		return;

	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (WARN_ON_ONCE(IS_ERR(t)))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	rcu_thread_affine_rnp(t, rnp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
 * RCU_NOCB_CPU CPUs.
 */
static bool rcu_nohz_full_cpu(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress() ||
	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the RCU grace-period kthreads to the housekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	if (!tick_nohz_full_enabled())
		return;
	housekeeping_affine(current, HK_TYPE_RCU);
}