/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
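		/*
		 * The loop below stops at the first ancestor that already
		 * had a nonzero ->expmaskinit:  that ancestor's own bit has
		 * already been propagated toward the root, so the remaining
		 * ancestors need no update.
		 */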
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}
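
/*
 * The functions below are thin layers over __rcu_report_exp_rnp():
 * rcu_report_exp_rnp() supplies the lock acquisition,
 * rcu_report_exp_cpu_mult() first clears ->expmask bits for a set of
 * CPUs sharing a leaf rcu_node structure, and rcu_report_exp_rdp()
 * handles the single-CPU case by way of rcu_report_exp_cpu_mult().
 */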

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}
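
/*
 * A worked example of the snapshot arithmetic used by exp_funnel_lock()
 * below, assuming the rcu_seq_*() encoding from kernel/rcu/rcu.h (two
 * low-order grace-period-phase bits, grace-period count in the upper
 * bits):  with ->expedited_sequence idle at 0x4, rcu_exp_gp_seq_snap()
 * returns 0x8, and sync_exp_work_done() returns true once the counter
 * has advanced through 0x5 (grace period started) to 0x8 (grace period
 * ended).  If a grace period is instead already in flight (counter 0x5),
 * the snapshot is 0xc, so one additional full grace period is required.
 * Any request whose snapshot is covered by a grace period that some
 * other task has already started can therefore simply wait for that
 * grace period to complete.
 */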

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(cpu);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
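	/*
	 * Note that at this point mask_ofl_test covers not only CPUs that
	 * went offline, but also CPUs that were observed in an extended
	 * quiescent state and, if its bit was still set, the current CPU.
	 */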
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

static void rcu_exp_sel_wait_wake(unsigned long s);

#ifdef CONFIG_RCU_EXP_KTHREAD
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_exp_par_gp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/*
	 * Use rcu_exp_par_gp_kworker, because flushing a work item from
	 * another work item on the same kthread worker can result in
	 * deadlock.
	 */
	kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
	kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
}
#else /* !CONFIG_RCU_EXP_KTHREAD */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_par_gp_wq);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);

	INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/* If all offline, queue the work on an unbound CPU. */
	if (unlikely(cpu > rnp->grphi - rnp->grplo))
		cpu = WORK_CPU_UNBOUND;
	else
		cpu += rnp->grplo;
	queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
	queue_work(rcu_gp_wq, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
	destroy_work_on_stack(&rew->rew_work);
}
#endif /* CONFIG_RCU_EXP_KTHREAD */
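
/*
 * The two preceding blocks provide identical interfaces for queueing and
 * flushing the per-leaf CPU-selection work and for queueing the top-level
 * grace-period work:  the CONFIG_RCU_EXP_KTHREAD variant runs them on the
 * dedicated rcu_exp_par_gp_kworker and rcu_exp_gp_kworker kthread workers,
 * while the workqueue variant uses rcu_par_gp_wq and rcu_gp_wq.
 */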
531 */ 532 static void sync_rcu_exp_select_cpus(void) 533 { 534 struct rcu_node *rnp; 535 536 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset")); 537 sync_exp_reset_tree(); 538 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select")); 539 540 /* Schedule work for each leaf rcu_node structure. */ 541 rcu_for_each_leaf_node(rnp) { 542 rnp->exp_need_flush = false; 543 if (!READ_ONCE(rnp->expmask)) 544 continue; /* Avoid early boot non-existent wq. */ 545 if (!rcu_gp_par_worker_started() || 546 rcu_scheduler_active != RCU_SCHEDULER_RUNNING || 547 rcu_is_last_leaf_node(rnp)) { 548 /* No worker started yet or last leaf, do direct call. */ 549 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work); 550 continue; 551 } 552 sync_rcu_exp_select_cpus_queue_work(rnp); 553 rnp->exp_need_flush = true; 554 } 555 556 /* Wait for jobs (if any) to complete. */ 557 rcu_for_each_leaf_node(rnp) 558 if (rnp->exp_need_flush) 559 sync_rcu_exp_select_cpus_flush_work(rnp); 560 } 561 562 /* 563 * Wait for the expedited grace period to elapse, within time limit. 564 * If the time limit is exceeded without the grace period elapsing, 565 * return false, otherwise return true. 566 */ 567 static bool synchronize_rcu_expedited_wait_once(long tlimit) 568 { 569 int t; 570 struct rcu_node *rnp_root = rcu_get_root(); 571 572 t = swait_event_timeout_exclusive(rcu_state.expedited_wq, 573 sync_rcu_exp_done_unlocked(rnp_root), 574 tlimit); 575 // Workqueues should not be signaled. 576 if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root)) 577 return true; 578 WARN_ON(t < 0); /* workqueues should not be signaled. */ 579 return false; 580 } 581 582 /* 583 * Wait for the expedited grace period to elapse, issuing any needed 584 * RCU CPU stall warnings along the way. 
585 */ 586 static void synchronize_rcu_expedited_wait(void) 587 { 588 int cpu; 589 unsigned long j; 590 unsigned long jiffies_stall; 591 unsigned long jiffies_start; 592 unsigned long mask; 593 int ndetected; 594 struct rcu_data *rdp; 595 struct rcu_node *rnp; 596 struct rcu_node *rnp_root = rcu_get_root(); 597 unsigned long flags; 598 599 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait")); 600 jiffies_stall = rcu_exp_jiffies_till_stall_check(); 601 jiffies_start = jiffies; 602 if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) { 603 if (synchronize_rcu_expedited_wait_once(1)) 604 return; 605 rcu_for_each_leaf_node(rnp) { 606 raw_spin_lock_irqsave_rcu_node(rnp, flags); 607 mask = READ_ONCE(rnp->expmask); 608 for_each_leaf_node_cpu_mask(rnp, cpu, mask) { 609 rdp = per_cpu_ptr(&rcu_data, cpu); 610 if (rdp->rcu_forced_tick_exp) 611 continue; 612 rdp->rcu_forced_tick_exp = true; 613 if (cpu_online(cpu)) 614 tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP); 615 } 616 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 617 } 618 j = READ_ONCE(jiffies_till_first_fqs); 619 if (synchronize_rcu_expedited_wait_once(j + HZ)) 620 return; 621 } 622 623 for (;;) { 624 if (synchronize_rcu_expedited_wait_once(jiffies_stall)) 625 return; 626 if (rcu_stall_is_suppressed()) 627 continue; 628 trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall")); 629 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {", 630 rcu_state.name); 631 ndetected = 0; 632 rcu_for_each_leaf_node(rnp) { 633 ndetected += rcu_print_task_exp_stall(rnp); 634 for_each_leaf_node_possible_cpu(rnp, cpu) { 635 struct rcu_data *rdp; 636 637 mask = leaf_node_cpu_bit(rnp, cpu); 638 if (!(READ_ONCE(rnp->expmask) & mask)) 639 continue; 640 ndetected++; 641 rdp = per_cpu_ptr(&rcu_data, cpu); 642 pr_cont(" %d-%c%c%c%c", cpu, 643 "O."[!!cpu_online(cpu)], 644 "o."[!!(rdp->grpmask & rnp->expmaskinit)], 645 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)], 646 "D."[!!(rdp->cpu_no_qs.b.exp)]); 647 } 648 } 649 pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n", 650 jiffies - jiffies_start, rcu_state.expedited_sequence, 651 data_race(rnp_root->expmask), 652 ".T"[!!data_race(rnp_root->exp_tasks)]); 653 if (ndetected) { 654 pr_err("blocking rcu_node structures (internal RCU debug):"); 655 rcu_for_each_node_breadth_first(rnp) { 656 if (rnp == rnp_root) 657 continue; /* printed unconditionally */ 658 if (sync_rcu_exp_done_unlocked(rnp)) 659 continue; 660 pr_cont(" l=%u:%d-%d:%#lx/%c", 661 rnp->level, rnp->grplo, rnp->grphi, 662 data_race(rnp->expmask), 663 ".T"[!!data_race(rnp->exp_tasks)]); 664 } 665 pr_cont("\n"); 666 } 667 rcu_for_each_leaf_node(rnp) { 668 for_each_leaf_node_possible_cpu(rnp, cpu) { 669 mask = leaf_node_cpu_bit(rnp, cpu); 670 if (!(READ_ONCE(rnp->expmask) & mask)) 671 continue; 672 preempt_disable(); // For smp_processor_id() in dump_cpu_task(). 673 dump_cpu_task(cpu); 674 preempt_enable(); 675 } 676 rcu_exp_print_detail_task_stall_rnp(rnp); 677 } 678 jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3; 679 panic_on_rcu_stall(); 680 } 681 } 682 683 /* 684 * Wait for the current expedited grace period to complete, and then 685 * wake up everyone who piggybacked on the just-completed expedited 686 * grace period. Also update all the ->exp_seq_rq counters as needed 687 * in order to avoid counter-wrap problems. 
688 */ 689 static void rcu_exp_wait_wake(unsigned long s) 690 { 691 struct rcu_node *rnp; 692 693 synchronize_rcu_expedited_wait(); 694 695 // Switch over to wakeup mode, allowing the next GP to proceed. 696 // End the previous grace period only after acquiring the mutex 697 // to ensure that only one GP runs concurrently with wakeups. 698 mutex_lock(&rcu_state.exp_wake_mutex); 699 rcu_exp_gp_seq_end(); 700 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end")); 701 702 rcu_for_each_node_breadth_first(rnp) { 703 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) { 704 spin_lock(&rnp->exp_lock); 705 /* Recheck, avoid hang in case someone just arrived. */ 706 if (ULONG_CMP_LT(rnp->exp_seq_rq, s)) 707 WRITE_ONCE(rnp->exp_seq_rq, s); 708 spin_unlock(&rnp->exp_lock); 709 } 710 smp_mb(); /* All above changes before wakeup. */ 711 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]); 712 } 713 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake")); 714 mutex_unlock(&rcu_state.exp_wake_mutex); 715 } 716 717 /* 718 * Common code to drive an expedited grace period forward, used by 719 * workqueues and mid-boot-time tasks. 720 */ 721 static void rcu_exp_sel_wait_wake(unsigned long s) 722 { 723 /* Initialize the rcu_node tree in preparation for the wait. */ 724 sync_rcu_exp_select_cpus(); 725 726 /* Wait and clean up, including waking everyone. */ 727 rcu_exp_wait_wake(s); 728 } 729 730 #ifdef CONFIG_PREEMPT_RCU 731 732 /* 733 * Remote handler for smp_call_function_single(). If there is an 734 * RCU read-side critical section in effect, request that the 735 * next rcu_read_unlock() record the quiescent state up the 736 * ->expmask fields in the rcu_node tree. Otherwise, immediately 737 * report the quiescent state. 738 */ 739 static void rcu_exp_handler(void *unused) 740 { 741 int depth = rcu_preempt_depth(); 742 unsigned long flags; 743 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 744 struct rcu_node *rnp = rdp->mynode; 745 struct task_struct *t = current; 746 747 /* 748 * First, the common case of not being in an RCU read-side 749 * critical section. If also enabled or idle, immediately 750 * report the quiescent state, otherwise defer. 751 */ 752 if (!depth) { 753 if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) || 754 rcu_is_cpu_rrupt_from_idle()) { 755 rcu_report_exp_rdp(rdp); 756 } else { 757 WRITE_ONCE(rdp->cpu_no_qs.b.exp, true); 758 set_tsk_need_resched(t); 759 set_preempt_need_resched(); 760 } 761 return; 762 } 763 764 /* 765 * Second, the less-common case of being in an RCU read-side 766 * critical section. In this case we can count on a future 767 * rcu_read_unlock(). However, this rcu_read_unlock() might 768 * execute on some other CPU, but in that case there will be 769 * a future context switch. Either way, if the expedited 770 * grace period is still waiting on this CPU, set ->deferred_qs 771 * so that the eventual quiescent state will be reported. 772 * Note that there is a large group of race conditions that 773 * can have caused this quiescent state to already have been 774 * reported, so we really do need to check ->expmask. 775 */ 776 if (depth > 0) { 777 raw_spin_lock_irqsave_rcu_node(rnp, flags); 778 if (rnp->expmask & rdp->grpmask) { 779 WRITE_ONCE(rdp->cpu_no_qs.b.exp, true); 780 t->rcu_read_unlock_special.b.exp_hint = true; 781 } 782 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 783 return; 784 } 785 786 // Finally, negative nesting depth should not happen. 
	WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rnp->exp_tasks) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, dumping the stack of each that is blocking the current
 * expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_exp_stall_task_details)
		return;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!READ_ONCE(rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}
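
/*
 * Unlike the CONFIG_PREEMPT_RCU handler above, this variant cannot count
 * on a later rcu_read_unlock() to report a deferred quiescent state.
 * When the state cannot be reported immediately, rcu_exp_need_qs() above
 * instead flags the CPU (->cpu_no_qs.b.exp, ->rcu_urgent_qs) and requests
 * a reschedule so that the quiescent state is reported from RCU's
 * scheduler hooks shortly thereafter.
 */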

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    READ_ONCE(rdp->cpu_no_qs.b.exp)) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to print out
 * tasks blocked within RCU read-side critical sections that are blocking
 * the current expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
	unsigned long flags;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp()) {
		// Note well that this code runs with !PREEMPT && !SMP.
		// In addition, all code that advances grace periods runs
		// at process level.  Therefore, this expedited GP overlaps
		// with other expedited GPs only by being fully nested within
		// them, which allows reuse of ->gp_seq_polled_exp_snap.
		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);

		local_irq_save(flags);
		WARN_ON_ONCE(num_online_cpus() > 1);
		rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
		local_irq_restore(flags);
		return;  // Context allows vacuous grace periods.
	}

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu_hurry);
		return;
	}

	/* Take a snapshot of the sequence number. */
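	/*
	 * If some other task has already started an expedited grace period
	 * that covers this snapshot, exp_funnel_lock() waits for it (if
	 * need be) and returns true, in which case there is nothing more
	 * to do here.
	 */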
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(boottime)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		synchronize_rcu_expedited_queue_work(&rew);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Work actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(!boottime))
		synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
	unsigned long flags;
	int i = 0;
	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
	unsigned long s;

	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
	if (s == RCU_GET_STATE_COMPLETED)
		return;
	while (!poll_state_synchronize_rcu(s)) {
		synchronize_rcu_expedited();
		if (i == 10 || i == 20)
			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
		i++;
	}
	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	if (poll_state_synchronize_rcu(s))
		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}

/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long s;

	s = get_state_synchronize_rcu();
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rdp->mynode;
	if (rcu_init_invoked())
		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	if (!poll_state_synchronize_rcu(s)) {
		if (rcu_init_invoked()) {
			rnp->exp_seq_poll_rq = s;
			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
		}
	}
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

	return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
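
/*
 * Illustrative use of the polled expedited API (hypothetical updater, not
 * taken from this file):  unlink the old element, record a cookie, do
 * other work, and wait only if no grace period has elapsed in the
 * meantime:
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	cookie = start_poll_synchronize_rcu_expedited();
 *	...				// other updater work
 *	cond_synchronize_rcu_expedited(cookie);
 *	kfree(old);
 */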

/**
 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
 * @rgosp: Place to put snapshot of grace-period state
 *
 * Places the normal and expedited grace-period states in @rgosp.  This
 * state value can be passed to a later call to cond_synchronize_rcu_full()
 * or poll_state_synchronize_rcu_full() to determine whether or not a
 * grace period (whether normal or expedited) has elapsed in the meantime.
 * If the needed expedited grace period is not already slated to start,
 * initiates that grace period.
 */
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	get_state_synchronize_rcu_full(rgosp);
	(void)start_poll_synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);

/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	if (!poll_state_synchronize_rcu(oldstate))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);

/**
 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
 *
 * If a full RCU grace period has elapsed since the call to
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
 * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
 * to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @rgosp and that returned at the end of
 * this function.
 */
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	if (!poll_state_synchronize_rcu_full(rgosp))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);