/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include "../locking/rtmutex_common.h"

static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
{
	/*
	 * In order to read the offloaded state of an rdp in a safe
	 * and stable way and prevent its value from being changed
	 * under us, we must either hold the barrier mutex, the cpu
	 * hotplug lock (read or write) or the nocb lock. Local
	 * non-preemptible reads are also safe. NOCB kthreads and
	 * timers have their own means of synchronization against the
	 * offloaded state updaters.
	 */
	RCU_NOCB_LOCKDEP_WARN(
		!(lockdep_is_held(&rcu_state.barrier_mutex) ||
		  (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
		  lockdep_is_held(&rdp->nocb_lock) ||
		  lockdep_is_held(&rcu_state.nocb_mutex) ||
		  (!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) &&
		   rdp == this_cpu_ptr(&rcu_data)) ||
		  rcu_current_is_nocb_kthread(rdp)),
		"Unsafe read of RCU_NOCB offloaded state"
	);

	return rcu_segcblist_is_offloaded(&rdp->cblist);
}
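
/*
 * Illustrative sketch only (not an additional kernel API): per the
 * comment above, one safe way for hypothetical code to sample the
 * offloaded state is to hold the barrier mutex across the check:
 *
 *	mutex_lock(&rcu_state.barrier_mutex);
 *	offloaded = rcu_rdp_is_offloaded(rdp);
 *	... act on the now-stable value ...
 *	mutex_unlock(&rcu_state.barrier_mutex);
 */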

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (qovld != DEFAULT_RCU_QOVLD)
		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug is enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (nohz_full_patience_delay < 0) {
		pr_info("\tRCU NOCB CPU patience negative (%d), resetting to zero.\n", nohz_full_patience_delay);
		nohz_full_patience_delay = 0;
	} else if (nohz_full_patience_delay > 5 * MSEC_PER_SEC) {
		pr_info("\tRCU NOCB CPU patience too large (%d), resetting to %ld.\n", nohz_full_patience_delay, 5 * MSEC_PER_SEC);
		nohz_full_patience_delay = 5 * MSEC_PER_SEC;
	} else if (nohz_full_patience_delay) {
		pr_info("\tRCU NOCB CPU patience set to %d milliseconds.\n", nohz_full_patience_delay);
	}
	nohz_full_patience_delay_jiffies = msecs_to_jiffies(nohz_full_patience_delay);
	if (!use_softirq)
		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

#ifdef CONFIG_PREEMPT_RCU

static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
static void rcu_read_unlock_special(struct task_struct *t);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1
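
/*
 * Worked example (illustrative only): a task preempted on a CPU that is
 * blocking both the current normal and expedited GPs, with other tasks
 * already queued waiting on the normal GP, is classified as
 *
 *	blkd_state == (RCU_GP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD) == 0xb
 *
 * which the decision table below queues at the tail of ->blkd_tasks.
 */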

/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods. The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none). If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements. In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period. Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections. At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves rrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
	/* RCU better not be waiting on newly onlined CPUs! */
	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
		     rdp->grpmask);

	/*
	 * Decide where to queue the newly blocked task. In theory,
	 * this could be an if-statement. In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case RCU_EXP_TASKS:
	case RCU_EXP_TASKS | RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS | RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_BLKD:
	case RCU_GP_BLKD:
	case RCU_GP_BLKD | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting. Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_TASKS | RCU_EXP_BLKD:
	case RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP. Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS | RCU_GP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP. Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task. If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
	}
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP. This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP. (Though we are
	 * still in a quiescent state in any case.)
	 *
	 * Interrupts are disabled, so ->cpu_no_qs.b.exp cannot change.
	 */
	if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	else
		WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
}
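
/*
 * Calling-convention sketch (illustrative): the caller acquires the
 * rcu_node lock with interrupts already disabled, and the function
 * above releases only the lock:
 *
 *	raw_spin_lock_rcu_node(rnp);		// irqs already off
 *	rcu_preempt_ctxt_queue(rnp, rdp);	// drops rnp->lock...
 *	...					// ...irqs still disabled
 */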

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state: Instead, it means that the current
 * grace period need not wait on any RCU read-side critical section that
 * starts later on this CPU. It also means that if the current task is
 * in an RCU read-side critical section, it has already added itself to
 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
 * current task, there might be any number of other tasks blocked while
 * in an RCU read-side critical section.
 *
 * Unlike non-preemptible-RCU, quiescent state reports for expedited
 * grace periods are handled separately via deferred quiescent states
 * and context switch events.
 *
 * Callers to this function must disable preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data.gp_seq),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
	}
}
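
/*
 * Division-of-labor sketch (illustrative): rcu_qs() covers only the
 * normal grace period; expedited reports happen separately, as in the
 * context-switch path below:
 *
 *	rcu_qs();				// normal GP
 *	if (rdp->cpu_no_qs.b.exp)
 *		rcu_report_exp_rdp(rdp);	// expedited GP
 */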

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from. If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section. Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
	WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
	if (rcu_preempt_depth() > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rcu_state.name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gp_seq
				       : rcu_seq_snap(&rnp->gp_seq));
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else {
		rcu_preempt_deferred_qs(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally. Either way, we can now note a quiescent state
	 * for this CPU. Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure. If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->gp_tasks) != NULL;
}

/* limit value for ->rcu_read_lock_nesting. */
#define RCU_NEST_PMAX (INT_MAX / 2)

static void rcu_preempt_read_enter(void)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
}

static int rcu_preempt_read_exit(void)
{
	int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;

	WRITE_ONCE(current->rcu_read_lock_nesting, ret);
	return ret;
}

static void rcu_preempt_depth_set(int val)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, val);
}

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	rcu_preempt_read_enter();
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
	barrier(); /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier(); // critical section before exit code.
	if (rcu_preempt_read_exit() == 0) {
		barrier(); // critical-section exit before .s check.
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
	}
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		int rrln = rcu_preempt_depth();

		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
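
/*
 * Reader-side usage sketch (illustrative; do_something_with() is a
 * hypothetical helper): in CONFIG_PREEMPT_RCU kernels, rcu_read_lock()
 * and rcu_read_unlock() map onto the two functions above:
 *
 *	rcu_read_lock();		// __rcu_read_lock(): ++nesting
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();		// __rcu_read_unlock(): --nesting,
 *					// slow path if ->special.s is set
 */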

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Report deferred quiescent states. The deferral time can
 * be quite short, for example, in the case of the call from
 * rcu_read_unlock_special().
 */
static notrace void
rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited. Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	rdp = this_cpu_ptr(&rcu_data);
	if (!special.s && !rdp->cpu_no_qs.b.exp) {
		local_irq_restore(flags);
		return;
	}
	t->rcu_read_unlock_special.s = 0;
	if (special.b.need_qs) {
		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
			rdp->cpu_no_qs.b.norm = false;
			rcu_report_qs_rdp(rdp);
			udelay(rcu_unlock_delay);
		} else {
			rcu_qs();
		}
	}

	/*
	 * Respond to a request by an expedited grace period for a
	 * quiescent state from this CPU. Note that requests from
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {

		/*
		 * Remove this task from the list it blocked on. The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
			     (!empty_norm || rnp->qsmask));
		empty_exp = sync_rcu_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gp_seq, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			WRITE_ONCE(rnp->gp_tasks, np);
		if (&t->rcu_node_entry == rnp->exp_tasks)
			WRITE_ONCE(rnp->exp_tasks, np);
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				WRITE_ONCE(rnp->boost_tasks, np);
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gp_seq,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rnp, true);

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Is a deferred quiescent-state pending, and are we also not in
 * an RCU read-side critical section? It is the caller's responsibility
 * to ensure it is otherwise safe to report any deferred quiescent
 * states. The reason for this is that it is safe to report a
 * quiescent state during context switch even though preemption
 * is disabled. This function cannot be expected to understand these
 * nuances, so the caller must handle them.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       rcu_preempt_depth() == 0;
}

/*
 * Report a deferred quiescent state if needed and safe to do so.
 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
 * not being in an RCU read-side critical section. The caller must
 * evaluate safety in terms of interrupt, softirq, and preemption
 * disabling.
 */
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}
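
/*
 * Usage sketch (illustrative): a hypothetical path that knows it is
 * outside any RCU read-side critical section, with interrupts,
 * softirqs, and preemption enabled, could flush a deferred quiescent
 * state with:
 *
 *	rcu_preempt_deferred_qs(current); // no-op if nothing deferred
 */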

/*
 * Minimal handler to give the scheduler a chance to re-evaluate.
 */
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;

	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
	rdp->defer_qs_iw_pending = false;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	unsigned long flags;
	bool irqs_were_disabled;
	bool preempt_bh_were_disabled =
			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);
	irqs_were_disabled = irqs_disabled_flags(flags);
	if (preempt_bh_were_disabled || irqs_were_disabled) {
		bool expboost; // Expedited GP in flight or possible boosting.
		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
		struct rcu_node *rnp = rdp->mynode;

		expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
			   (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
			   (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
			    ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) ||
			   (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
			    t->rcu_blocked_node);
		// Need to defer quiescent state until everything is enabled.
		if (use_softirq && (in_hardirq() || (expboost && !irqs_were_disabled))) {
			// Using softirq, safe to awaken, and either the
			// wakeup is free or there is either an expedited
			// GP in flight or a potential need to deboost.
			raise_softirq_irqoff(RCU_SOFTIRQ);
		} else {
			// Enabling BH or preempt does reschedule, so...
			// Also if no expediting and no possible deboosting,
			// slow is OK. Plus nohz_full CPUs eventually get
			// tick enabled.
			set_tsk_need_resched(current);
			set_preempt_need_resched();
			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
			    expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
				// Get scheduler to re-evaluate and call hooks.
				// If !IRQ_WORK, FQS scan will eventually IPI.
				if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
				    IS_ENABLED(CONFIG_PREEMPT_RT))
					rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(
								rcu_preempt_deferred_qs_handler);
				else
					init_irq_work(&rdp->defer_qs_iw,
						      rcu_preempt_deferred_qs_handler);
				rdp->defer_qs_iw_pending = true;
				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
			}
		}
		local_irq_restore(flags);
		return;
	}
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty. It is a serious bug to complete a grace
 * period that still has RCU readers blocked! This function must be
 * invoked -before- updating this rnp's ->gp_seq.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	raw_lockdep_assert_held_rcu_node(rnp);
	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
		dump_blkd_tasks(rnp, 10);
	if (rcu_preempt_has_tasks(rnp) &&
	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gp_seq, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU, including voluntary
 * context switches for Tasks RCU. When a task blocks, the task is
 * recorded in the corresponding CPU's rcu_node structure, which is checked
 * elsewhere, hence this function need only check for quiescent states
 * related to the current CPU, not to those related to tasks.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	struct task_struct *t = current;

	lockdep_assert_irqs_disabled();
	if (rcu_preempt_depth() > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
	if (rcu_preempt_depth() > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
	    time_after(jiffies, rcu_state.gp_start + HZ))
		t->rcu_read_unlock_special.b.need_qs = true;
}
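
/*
 * Call-site sketch (illustrative): this function is invoked from the
 * scheduling-clock interrupt path with interrupts disabled, roughly:
 *
 *	// in rcu_sched_clock_irq(), hardirq context
 *	rcu_flavor_sched_clock_irq(user);
 */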

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so. No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 * Besides, if this function does anything other than just immediately
 * return, there was a bug of some sort. Spewing warnings from this
 * function is as likely as not to simply obscure important prior warnings.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (unlikely(!list_empty(&current->rcu_node_entry))) {
		rcu_preempt_depth_set(1);
		barrier();
		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
	} else if (unlikely(rcu_preempt_depth())) {
		rcu_preempt_depth_set(1);
	} else {
		return;
	}
	__rcu_read_unlock();
	rcu_preempt_deferred_qs(current);
}

/*
 * Dump the blocked-tasks state, but limit the list dump to the
 * specified number of elements.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	int cpu;
	int i;
	struct list_head *lhp;
	struct rcu_data *rdp;
	struct rcu_node *rnp1;

	raw_lockdep_assert_held_rcu_node(rnp);
	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
		__func__, rnp->grplo, rnp->grphi, rnp->level,
		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
		__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
		READ_ONCE(rnp->exp_tasks));
	pr_info("%s: ->blkd_tasks", __func__);
	i = 0;
	list_for_each(lhp, &rnp->blkd_tasks) {
		pr_cont(" %p", lhp);
		if (++i >= ncheck)
			break;
	}
	pr_cont("\n");
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
			cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
	}
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * If strict grace periods are enabled, and if the calling
 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
 * report that quiescent state and, if requested, spin for a bit.
 */
void rcu_read_unlock_strict(void)
{
	struct rcu_data *rdp;

	if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
		return;
	rdp = this_cpu_ptr(&rcu_data);
	rdp->cpu_no_qs.b.norm = false;
	rcu_report_qs_rdp(rdp);
	udelay(rcu_unlock_delay);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);
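
/*
 * Usage sketch (illustrative): with CONFIG_RCU_STRICT_GRACE_PERIOD=y
 * in non-preemptible kernels, rcu_read_unlock() ends up calling the
 * function above, so even an empty reader reports a quiescent state
 * immediately:
 *
 *	rcu_read_lock();
 *	rcu_read_unlock();	// may call rcu_read_unlock_strict()
 */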

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Note a quiescent state for PREEMPTION=n. Because we do not need to know
 * how many quiescent states passed, just if there was at least one since
 * the start of the grace period, this just sets a flag. The caller must
 * have disabled preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}

/*
 * Register an urgently needed quiescent state. If there is an
 * emergency, invoke rcu_momentary_eqs() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs, which will in
 * some cases serve for expedited as well as normal grace periods.
 * Either way, register a lightweight quiescent state.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
		return;
	preempt_disable(); // For CONFIG_PREEMPT_COUNT=y kernels
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_eqs();
		local_irq_restore(flags);
	}
	rcu_qs();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);
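
/*
 * Call-site sketch (illustrative): in non-preemptible kernels,
 * cond_resched() and similar voluntary scheduling points can end up
 * here, for example from a hypothetical long-running loop:
 *
 *	while (more_work_to_do())	// hypothetical
 *		cond_resched();		// may invoke rcu_all_qs()
 */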

/*
 * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_qs();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
		rcu_momentary_eqs();
out:
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because there is no preemptible RCU, there can be no deferred quiescent
 * states.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}

// Except that we do need to respond to a request by an expedited
// grace period for a quiescent state from this CPU. Note that in
// non-preemptible kernels, there can be no context switches within RCU
// read-side critical sections, which in turn means that the leaf rcu_node
// structure's blocked-tasks list is always empty. There is therefore no
// need to actually check it. Instead, a quiescent state from this CPU
// suffices, and this function is only called from such a quiescent state.
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (READ_ONCE(rdp->cpu_no_qs.b.exp))
		rcu_report_exp_rdp(rdp);
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks. So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state,
 * namely user mode and idle loop.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle()) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt. In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because rcu_qs()
		 * references only CPU-local variables that other CPUs
		 * neither access nor modify, at least not while the
		 * corresponding CPU is online.
		 */
		rcu_qs();
	}
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

/*
 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * If boosting, set rcuc kthreads to realtime priority.
 */
static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
#ifdef CONFIG_RCU_BOOST
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
#endif /* #ifdef CONFIG_RCU_BOOST */

	WRITE_ONCE(rdp->rcuc_activity, jiffies);
}

static bool rcu_is_callbacks_nocb_kthread(struct rcu_data *rdp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return rdp->nocb_cb_kthread == current;
#else
	return false;
#endif
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp)
{
	return rdp->rcu_cpu_kthread_task == current ||
	       rcu_is_callbacks_nocb_kthread(rdp);
}

#ifdef CONFIG_RCU_BOOST

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0; /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL)
		tb = rnp->exp_tasks;
	else
		tb = rnp->boost_tasks;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t. We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section. Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority. (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else. We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock. Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
	rnp->n_boosts++;

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}
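
/*
 * The proxy-boost idiom above, in isolation (illustrative sketch):
 *
 *	rt_mutex_init_proxy_locked(&mtx, t);	// t appears to hold mtx
 *	rt_mutex_lock(&mtx);	// blocks, lending our RT priority to t
 *	rt_mutex_unlock(&mtx);	// runs once t has released the mutex
 *
 * Task t releases the mutex from the deferred-quiescent-state path
 * when it leaves its outermost RCU read-side critical section.
 */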

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(READ_ONCE(rnp->boost_tasks) ||
			 READ_ONCE(rnp->exp_tasks));
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_idle(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them. If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (!rnp->boost_kthread_task ||
	    (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld ||
	      IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)))) {
		if (rnp->exp_tasks == NULL)
			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rcu_wake_cond(rnp->boost_kthread_task,
			      READ_ONCE(rnp->boost_kthread_status));
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
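
/*
 * Worked example (illustrative): with CONFIG_RCU_BOOST_DELAY=500 and
 * HZ=1000, boosting is deferred until
 *
 *	RCU_BOOST_DELAY_JIFFIES == DIV_ROUND_UP(500 * 1000, 1000) == 500
 *
 * jiffies (here, 500 ms) past the start of the grace period.
 */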

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist. We only create this kthread for preemptible RCU.
 */
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - rcu_get_root();
	struct sched_param sp;
	struct task_struct *t;

	if (rnp->boost_kthread_task)
		return;

	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (WARN_ON_ONCE(IS_ERR(t)))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
}

static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->boost_kthread_task);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
}

static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
{
	return NULL;
}
#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
 * RCU_NOCB_CPU CPUs.
 */
static bool rcu_nohz_full_cpu(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress() ||
	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the RCU grace-period kthreads to the housekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	if (!tick_nohz_full_enabled())
		return;
	housekeeping_affine(current, HK_TYPE_RCU);
}