/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include "../locking/rtmutex_common.h"

static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
{
	/*
	 * In order to read the offloaded state of an rdp in a safe
	 * and stable way and prevent its value from being changed
	 * under us, we must either hold the barrier mutex, the cpu
	 * hotplug lock (read or write) or the nocb lock. Local
	 * non-preemptible reads are also safe. NOCB kthreads and
	 * timers have their own means of synchronization against the
	 * offloaded state updaters.
	 */
	RCU_LOCKDEP_WARN(
		!(lockdep_is_held(&rcu_state.barrier_mutex) ||
		  (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
		  rcu_lockdep_is_held_nocb(rdp) ||
		  (!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) &&
		   rdp == this_cpu_ptr(&rcu_data)) ||
		  rcu_current_is_nocb_kthread(rdp)),
		"Unsafe read of RCU_NOCB offloaded state"
	);

	return rcu_segcblist_is_offloaded(&rdp->cblist);
}
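
/*
 * Illustrative sketch (not part of the original file): one way a caller can
 * satisfy the lockdep assertion above is to read the offloaded state while
 * holding rcu_state.barrier_mutex. The "offloaded" local below is purely
 * hypothetical.
 *
 *	bool offloaded;
 *
 *	mutex_lock(&rcu_state.barrier_mutex);
 *	offloaded = rcu_rdp_is_offloaded(rdp);
 *	mutex_unlock(&rcu_state.barrier_mutex);
 */
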
/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (qovld != DEFAULT_RCU_QOVLD)
		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug is enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (nohz_full_patience_delay < 0) {
		pr_info("\tRCU NOCB CPU patience negative (%d), resetting to zero.\n", nohz_full_patience_delay);
		nohz_full_patience_delay = 0;
	} else if (nohz_full_patience_delay > 5 * MSEC_PER_SEC) {
		pr_info("\tRCU NOCB CPU patience too large (%d), resetting to %ld.\n", nohz_full_patience_delay, 5 * MSEC_PER_SEC);
		nohz_full_patience_delay = 5 * MSEC_PER_SEC;
	} else if (nohz_full_patience_delay) {
		pr_info("\tRCU NOCB CPU patience set to %d milliseconds.\n", nohz_full_patience_delay);
	}
	nohz_full_patience_delay_jiffies = msecs_to_jiffies(nohz_full_patience_delay);
	if (!use_softirq)
		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

#ifdef CONFIG_PREEMPT_RCU

static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
static void rcu_read_unlock_special(struct task_struct *t);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1
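
/*
 * Purely illustrative example (an assumption for exposition, not part of the
 * decision table below): suppose a task is preempted while this CPU still
 * owes quiescent states to both an ongoing normal GP and an ongoing expedited
 * GP, and no earlier task is queued on this rcu_node:
 *
 *	blkd_state = RCU_GP_BLKD + RCU_EXP_BLKD;	// 0x2 + 0x1 = 0x3
 *
 * This matches the "first task arriving that blocks either GP" group in
 * rcu_preempt_ctxt_queue(), so the task is queued at the tail of
 * ->blkd_tasks, where both already-waiting grace periods must wait on it.
 */
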
/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods. The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none). If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements. In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period. Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections. At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves rrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
	/* RCU better not be waiting on newly onlined CPUs! */
	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
		     rdp->grpmask);

	/*
	 * Decide where to queue the newly blocked task. In theory,
	 * this could be an if-statement. In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case RCU_EXP_TASKS:
	case RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_BLKD:
	case RCU_GP_BLKD:
	case RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting. Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_TASKS + RCU_EXP_BLKD:
	case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP. Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP. Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task. If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
	}
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP. This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP. (Though we are
	 * still in a quiescent state in any case.)
	 *
	 * Interrupts are disabled, so ->cpu_no_qs.b.exp cannot change.
	 */
	if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	else
		WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state: Instead, it means that the current
 * grace period need not wait on any RCU read-side critical section that
 * starts later on this CPU. It also means that if the current task is
 * in an RCU read-side critical section, it has already added itself to
 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
 * current task, there might be any number of other tasks blocked while
 * in an RCU read-side critical section.
 *
 * Unlike non-preemptible-RCU, quiescent state reports for expedited
 * grace periods are handled separately via deferred quiescent states
 * and context switch events.
 *
 * Callers to this function must disable preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data.gp_seq),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from. If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section. Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
	WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
	if (rcu_preempt_depth() > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rcu_state.name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gp_seq
				       : rcu_seq_snap(&rnp->gp_seq));
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else {
		rcu_preempt_deferred_qs(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally. Either way, we can now note a quiescent state
	 * for this CPU. Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure. If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->gp_tasks) != NULL;
}

/* limit value for ->rcu_read_lock_nesting. */
#define RCU_NEST_PMAX (INT_MAX / 2)

static void rcu_preempt_read_enter(void)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
}

static int rcu_preempt_read_exit(void)
{
	int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;

	WRITE_ONCE(current->rcu_read_lock_nesting, ret);
	return ret;
}

static void rcu_preempt_depth_set(int val)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, val);
}

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	rcu_preempt_read_enter();
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
	barrier(); /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
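
/*
 * Illustrative sketch (exposition only, not kernel code): nested readers
 * simply bump ->rcu_read_lock_nesting, and only the outermost
 * rcu_read_unlock() can end up calling rcu_read_unlock_special():
 *
 *	rcu_read_lock();	// nesting 0 -> 1
 *	rcu_read_lock();	// nesting 1 -> 2
 *	rcu_read_unlock();	// nesting 2 -> 1, no special handling
 *	rcu_read_unlock();	// nesting 1 -> 0, may invoke
 *				// rcu_read_unlock_special()
 */
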
/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  // critical section before exit code.
	if (rcu_preempt_read_exit() == 0) {
		barrier();  // critical-section exit before .s check.
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
	}
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		int rrln = rcu_preempt_depth();

		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Report deferred quiescent states. The deferral time can
 * be quite short, for example, in the case of the call from
 * rcu_read_unlock_special().
 */
static notrace void
rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited. Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	rdp = this_cpu_ptr(&rcu_data);
	if (!special.s && !rdp->cpu_no_qs.b.exp) {
		local_irq_restore(flags);
		return;
	}
	t->rcu_read_unlock_special.s = 0;
	if (special.b.need_qs) {
		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
			rdp->cpu_no_qs.b.norm = false;
			rcu_report_qs_rdp(rdp);
			udelay(rcu_unlock_delay);
		} else {
			rcu_qs();
		}
	}

	/*
	 * Respond to a request by an expedited grace period for a
	 * quiescent state from this CPU. Note that requests from
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {

		/*
		 * Remove this task from the list it blocked on. The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
			     (!empty_norm || rnp->qsmask));
		empty_exp = sync_rcu_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gp_seq, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			WRITE_ONCE(rnp->gp_tasks, np);
		if (&t->rcu_node_entry == rnp->exp_tasks)
			WRITE_ONCE(rnp->exp_tasks, np);
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				WRITE_ONCE(rnp->boost_tasks, np);
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gp_seq,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rnp, true);

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Is a deferred quiescent-state pending, and are we also not in
 * an RCU read-side critical section? It is the caller's responsibility
 * to ensure it is otherwise safe to report any deferred quiescent
 * states. The reason for this is that it is safe to report a
 * quiescent state during context switch even though preemption
 * is disabled. This function cannot be expected to understand these
 * nuances, so the caller must handle them.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       rcu_preempt_depth() == 0;
}

/*
 * Report a deferred quiescent state if needed and safe to do so.
 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
 * not being in an RCU read-side critical section. The caller must
 * evaluate safety in terms of interrupt, softirq, and preemption
 * disabling.
 */
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}
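
/*
 * Illustrative sketch (exposition only, not kernel code) of how a deferred
 * quiescent state eventually gets reported. Suppose the outermost
 * rcu_read_unlock() runs with interrupts disabled, so reporting is deferred:
 *
 *	local_irq_disable();
 *	rcu_read_unlock();			// QS report is deferred
 *	local_irq_enable();
 *	...
 *	rcu_preempt_deferred_qs(current);	// reported once it is safe
 *
 * In the kernel itself the later report typically comes from paths such as
 * rcu_note_context_switch() or rcu_flavor_sched_clock_irq() rather than
 * from an explicit call like the one sketched here.
 */
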
/*
 * Minimal handler to give the scheduler a chance to re-evaluate.
 */
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;

	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
	rdp->defer_qs_iw_pending = false;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	unsigned long flags;
	bool irqs_were_disabled;
	bool preempt_bh_were_disabled =
		!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);
	irqs_were_disabled = irqs_disabled_flags(flags);
	if (preempt_bh_were_disabled || irqs_were_disabled) {
		bool expboost;	// Expedited GP in flight or possible boosting.
		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
		struct rcu_node *rnp = rdp->mynode;

		expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
			   (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
			   (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
			    ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) ||
			   (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
			    t->rcu_blocked_node);
		// Need to defer quiescent state until everything is enabled.
		if (use_softirq && (in_hardirq() || (expboost && !irqs_were_disabled))) {
			// Using softirq, safe to awaken, and either the
			// wakeup is free or there is either an expedited
			// GP in flight or a potential need to deboost.
			raise_softirq_irqoff(RCU_SOFTIRQ);
		} else {
			// Enabling BH or preempt does reschedule, so...
			// Also if no expediting and no possible deboosting,
			// slow is OK. Plus nohz_full CPUs eventually get
			// tick enabled.
			set_tsk_need_resched(current);
			set_preempt_need_resched();
			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
			    expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
				// Get scheduler to re-evaluate and call hooks.
				// If !IRQ_WORK, FQS scan will eventually IPI.
				if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
				    IS_ENABLED(CONFIG_PREEMPT_RT))
					rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(
						rcu_preempt_deferred_qs_handler);
				else
					init_irq_work(&rdp->defer_qs_iw,
						      rcu_preempt_deferred_qs_handler);
				rdp->defer_qs_iw_pending = true;
				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
			}
		}
		local_irq_restore(flags);
		return;
	}
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty. It is a serious bug to complete a grace
 * period that still has RCU readers blocked! This function must be
 * invoked -before- updating this rnp's ->gp_seq.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	raw_lockdep_assert_held_rcu_node(rnp);
	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
		dump_blkd_tasks(rnp, 10);
	if (rcu_preempt_has_tasks(rnp) &&
	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gp_seq, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU, including voluntary
 * context switches for Tasks RCU. When a task blocks, the task is
 * recorded in the corresponding CPU's rcu_node structure, which is checked
 * elsewhere, hence this function need only check for quiescent states
 * related to the current CPU, not to those related to tasks.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	struct task_struct *t = current;

	lockdep_assert_irqs_disabled();
	if (rcu_preempt_depth() > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
	if (rcu_preempt_depth() > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
	    time_after(jiffies, rcu_state.gp_start + HZ))
		t->rcu_read_unlock_special.b.need_qs = true;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so. No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 * Besides, if this function does anything other than just immediately
 * return, there was a bug of some sort. Spewing warnings from this
 * function is like as not to simply obscure important prior warnings.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (unlikely(!list_empty(&current->rcu_node_entry))) {
		rcu_preempt_depth_set(1);
		barrier();
		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
	} else if (unlikely(rcu_preempt_depth())) {
		rcu_preempt_depth_set(1);
	} else {
		return;
	}
	__rcu_read_unlock();
	rcu_preempt_deferred_qs(current);
}

/*
 * Dump the blocked-tasks state, but limit the list dump to the
 * specified number of elements.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	int cpu;
	int i;
	struct list_head *lhp;
	struct rcu_data *rdp;
	struct rcu_node *rnp1;

	raw_lockdep_assert_held_rcu_node(rnp);
	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
		__func__, rnp->grplo, rnp->grphi, rnp->level,
		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
		__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
		READ_ONCE(rnp->exp_tasks));
	pr_info("%s: ->blkd_tasks", __func__);
	i = 0;
	list_for_each(lhp, &rnp->blkd_tasks) {
		pr_cont(" %p", lhp);
		if (++i >= ncheck)
			break;
	}
	pr_cont("\n");
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
			cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
	}
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * If strict grace periods are enabled, and if the calling
 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
 * report that quiescent state and, if requested, spin for a bit.
 */
void rcu_read_unlock_strict(void)
{
	struct rcu_data *rdp;

	if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
		return;
	rdp = this_cpu_ptr(&rcu_data);
	rdp->cpu_no_qs.b.norm = false;
	rcu_report_qs_rdp(rdp);
	udelay(rcu_unlock_delay);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Note a quiescent state for PREEMPTION=n. Because we do not need to know
 * how many quiescent states passed, just if there was at least one since
 * the start of the grace period, this just sets a flag. The caller must
 * have disabled preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}

/*
 * Register an urgently needed quiescent state. If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs, which will in
 * some cases serve for expedited as well as normal grace periods.
 * Either way, register a lightweight quiescent state.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
		return;
	preempt_disable();  // For CONFIG_PREEMPT_COUNT=y kernels
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	rcu_qs();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

/*
 * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_qs();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
		rcu_momentary_dyntick_idle();
out:
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because there is no preemptible RCU, there can be no deferred quiescent
 * states.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}

// Except that we do need to respond to a request by an expedited
// grace period for a quiescent state from this CPU. Note that in
// non-preemptible kernels, there can be no context switches within RCU
// read-side critical sections, which in turn means that the leaf rcu_node
// structure's blocked-tasks list is always empty. There is therefore no
// need to actually check it. Instead, a quiescent state from this CPU
// suffices, and this function is only called from such a quiescent state.
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (READ_ONCE(rdp->cpu_no_qs.b.exp))
		rcu_report_exp_rdp(rdp);
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks. So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state,
 * namely user mode and idle loop.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle()) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt. In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because rcu_qs()
		 * references only CPU-local variables that other CPUs
		 * neither access nor modify, at least not while the
		 * corresponding CPU is online.
		 */
		rcu_qs();
	}
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

/*
 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * If boosting, set rcuc kthreads to realtime priority.
 */
static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
#ifdef CONFIG_RCU_BOOST
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
#endif /* #ifdef CONFIG_RCU_BOOST */

	WRITE_ONCE(rdp->rcuc_activity, jiffies);
}

static bool rcu_is_callbacks_nocb_kthread(struct rcu_data *rdp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return rdp->nocb_cb_kthread == current;
#else
	return false;
#endif
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp)
{
	return rdp->rcu_cpu_kthread_task == current ||
	       rcu_is_callbacks_nocb_kthread(rdp);
}

#ifdef CONFIG_RCU_BOOST

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL)
		tb = rnp->exp_tasks;
	else
		tb = rnp->boost_tasks;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t. We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section. Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority. (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else. We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock. Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
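	 *
	 * Illustrative timeline (added for exposition only): with reader
	 * task t and this boost kthread B:
	 *
	 *	B: rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
	 *	B: rt_mutex_lock(&rnp->boost_mtx);	// blocks, PI-boosts t
	 *	t: rcu_read_unlock();			// outermost unlock
	 *	t:   rcu_read_unlock_special() ->
	 *	t:     rcu_preempt_deferred_qs_irqrestore() ->
	 *	t:       rt_mutex_futex_unlock();	// drops proxy lock,
	 *						// t is deboosted
	 *	B: rt_mutex_lock() returns; rt_mutex_unlock() follows.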
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
	rnp->n_boosts++;

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(READ_ONCE(rnp->boost_tasks) ||
			 READ_ONCE(rnp->exp_tasks));
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_idle(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them. If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (!rnp->boost_kthread_task ||
	    (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld ||
	      IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)))) {
		if (rnp->exp_tasks == NULL)
			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rcu_wake_cond(rnp->boost_kthread_task,
			      READ_ONCE(rnp->boost_kthread_status));
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
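
/*
 * Worked example (illustrative values only, not a kernel default): with
 * HZ=250 and CONFIG_RCU_BOOST_DELAY=500, the macro above evaluates to
 * DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, so boosting of readers
 * blocking a normal grace period normally begins no sooner than half a
 * second after that grace period starts, whereas readers blocking an
 * expedited grace period are boosted immediately, as rcu_initiate_boost()
 * above shows.
 */
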
/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist. We only create this kthread for preemptible RCU.
 */
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - rcu_get_root();
	struct sched_param sp;
	struct task_struct *t;

	if (rnp->boost_kthread_task)
		return;

	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (WARN_ON_ONCE(IS_ERR(t)))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
}

static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->boost_kthread_task);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
}

static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
{
	return NULL;
}
#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
 * RCU_NOCB_CPU CPUs.
 */
static bool rcu_nohz_full_cpu(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress() ||
	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the RCU grace-period kthreads to the housekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	if (!tick_nohz_full_enabled())
		return;
	housekeeping_affine(current, HK_TYPE_RCU);
}