/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
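
/*
 * Illustrative sketch (not part of the original source): the
 * snapshot/poll pattern built from rcu_exp_gp_seq_snap() and
 * rcu_exp_gp_seq_done() above, as used by the funnel-locking code
 * later in this file:
 *
 *	unsigned long s;
 *
 *	s = rcu_exp_gp_seq_snap();	// Earliest "done" counter value.
 *	...				// Someone drives an expedited GP.
 *	if (rcu_exp_gp_seq_done(s))
 *		return;			// A full expedited GP has elapsed.
 */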

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake)
				swake_up_one_online(&rcu_state.expedited_wq);

			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}
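
/*
 * Worked example (illustrative, assuming a two-level rcu_node tree with
 * 16-CPU leaves): suppose a leaf rnp covers CPUs 0-15 and CPU 3 is the
 * last holdout.  Once CPU 3's bit is cleared, sync_rcu_exp_done(rnp)
 * becomes true, so __rcu_report_exp_rnp() clears rnp->grpmask from the
 * root's ->expmask.  When the root's ->expmask reaches zero and its
 * ->exp_tasks is NULL, the waiter sleeping on rcu_state.expedited_wq
 * is awakened.
 */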

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		/*
		 * Order GP completion with preceding accesses. Order also GP
		 * completion with post GP update side accesses. Pairs with
		 * rcu_seq_end().
		 */
		smp_mb();
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work,
	 * and otherwise falling through to acquire ->exp_mutex.  The
	 * mapping from CPU to rcu_node structure can be inexact, as it
	 * is just promoting locality and is not strictly needed for
	 * correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
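
/*
 * Worked example (illustrative, assuming the usual two low-order rcu_seq
 * state bits, that is, RCU_SEQ_CTR_SHIFT == 2): a snapshot s == 0x108
 * has rcu_seq_ctr(s) == 0x42, so rcu_seq_ctr(s) & 0x3 == 2 and waiters
 * for that expedited grace period sleep on rnp->exp_wq[2].  Waiters for
 * different recent expedited grace periods thus land on different wait
 * queues, so a given wakeup disturbs only the waiters it is meant for.
 */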

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			/*
			 * Full ordering between remote CPU's post idle accesses
			 * and updater's accesses prior to current GP (and also
			 * the started GP sequence number) is enforced by
			 * rcu_seq_start() implicit barrier, relayed by kworkers
			 * locking and even further by smp_mb__after_unlock_lock()
			 * barriers chained all the way throughout the rnp locking
			 * tree since sync_exp_reset_tree() and up to the current
			 * leaf rnp locking.
			 *
			 * Ordering between remote CPU's pre idle accesses and
			 * post grace period updater's accesses is enforced by the
			 * below acquire semantic.
			 */
			snap = ct_dynticks_cpu_acquire(cpu);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}
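
/*
 * Illustrative note (not from the original source): the IPI failure path
 * above relies on smp_call_function_single() returning zero when the IPI
 * was queued and nonzero (typically -ENXIO) when the target CPU is not
 * online, which is how the race with a concurrent CPU-hotplug operation
 * is detected and resolved.
 */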

static void rcu_exp_sel_wait_wake(unsigned long s);

static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_exp_worker_started(void)
{
	return !!READ_ONCE(rcu_exp_gp_kworker);
}

static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp)
{
	return !!READ_ONCE(rnp->exp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/*
	 * Use the per-leaf rcu_node's ->exp_kworker rather than
	 * rcu_exp_gp_kworker, because flushing a work item from another
	 * work item on the same kthread worker can result in deadlock.
	 */
	kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
	kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!rcu_exp_par_worker_started(rnp) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No worker started yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		sync_rcu_exp_select_cpus_queue_work(rnp);
		rnp->exp_need_flush = true;
	}

	/* Wait for jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			sync_rcu_exp_select_cpus_flush_work(rnp);
}
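
/*
 * Illustrative sketch (not part of the original source) of the
 * kthread_worker pattern used above, with hypothetical names:
 *
 *	struct kthread_work w;
 *
 *	kthread_init_work(&w, my_work_fn);
 *	kthread_queue_work(my_worker, &w);	// Runs my_work_fn() on my_worker.
 *	...
 *	kthread_flush_work(&w);			// Waits for my_work_fn() to finish.
 */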

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	// Workqueues should not be signaled.
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0);  /* workqueues should not be signaled. */
	return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();
	unsigned long flags;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_exp_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			mask = READ_ONCE(rnp->expmask);
			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				if (cpu_online(cpu))
					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
			}
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
	}

	for (;;) {
		unsigned long j;

		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		j = jiffies;
		rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
					"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			j - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures (internal RCU debug):");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				preempt_disable(); // For smp_processor_id() in dump_cpu_task().
				dump_cpu_task(cpu);
				preempt_enable();
			}
			rcu_exp_print_detail_task_stall_rnp(rnp);
		}
		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
		panic_on_rcu_stall();
	}
}
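
/*
 * Decoding key (illustrative, not from the original source) for the
 * per-CPU " %d-%c%c%c%c" entries printed above: for each "X."[cond]
 * position, '.' is printed when cond is nonzero and the letter when it
 * is zero.  Thus 'O' means the CPU is offline, 'o' means its bit is
 * clear in ->expmaskinit, 'N' means its bit is clear in
 * ->expmaskinitnext, and 'D' means ->cpu_no_qs.b.exp is not set.  For
 * example, " 3-...." names CPU 3, online, covered by both masks, with
 * a deferred expedited quiescent state still pending.
 */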

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If preemption and bottom halves are also
	 * enabled, or if this is an interrupt from idle, immediately
	 * report the quiescent state; otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_is_cpu_rrupt_from_idle()) {
			rcu_report_exp_rdp(rdp);
		} else {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}
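
/*
 * Illustrative sketch (not part of the original source): a preemptible-RCU
 * reader on the IPIed CPU.  The handler above sets ->exp_hint, so the
 * matching rcu_read_unlock() takes its slow path and reports the deferred
 * expedited quiescent state:
 *
 *	rcu_read_lock();
 *	...			// <- rcu_exp_handler() IPI arrives here.
 *	rcu_read_unlock();	// Slow path reports the expedited QS.
 */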

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rnp->exp_tasks) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, dumping the stack of each that is blocking the current
 * expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_exp_stall_task_details)
		return;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!READ_ONCE(rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}
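
/*
 * Illustrative sketch (not part of the original source): with
 * CONFIG_PREEMPT_RCU=n there is no rcu_read_unlock() slow path, so when
 * the handler above cannot report a quiescent state immediately it only
 * nudges the CPU to reschedule; the quiescent state is then supplied by
 * the next context switch (or transition to idle or userspace), roughly:
 *
 *	rcu_read_lock();
 *	...
 *	rcu_read_unlock();
 *	schedule();		// Context switch reports the deferred expedited QS.
 */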

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    READ_ONCE(rdp->cpu_no_qs.b.exp)) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to print out
 * tasks blocked within RCU read-side critical sections that are blocking
 * the current expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp()) {
		// Note well that this code runs with !PREEMPT && !SMP.
		// In addition, all code that advances grace periods runs
		// at process level.  Therefore, this expedited GP overlaps
		// with other expedited GPs only by being fully nested within
		// them, which allows reuse of ->gp_seq_polled_exp_snap.
		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);

		local_irq_save(flags);
		WARN_ON_ONCE(num_online_cpus() > 1);
		rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
		local_irq_restore(flags);
		return;  // Context allows vacuous grace periods.
	}

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		synchronize_rcu_normal();
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely((rcu_scheduler_active == RCU_SCHEDULER_INIT) || !rcu_exp_worker_started())) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		synchronize_rcu_expedited_queue_work(&rew);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
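
/*
 * Illustrative sketch (not part of the original source) of the batching
 * advice in the kerneldoc above; gp[], tofree[], n, and mylock are
 * hypothetical, and mylock is held by the updater throughout:
 *
 *	// Poor: one expedited grace period per removed pointer.
 *	for (i = 0; i < n; i++) {
 *		old = rcu_dereference_protected(gp[i], lockdep_is_held(&mylock));
 *		rcu_assign_pointer(gp[i], NULL);
 *		synchronize_rcu_expedited();
 *		kfree(old);
 *	}
 *
 *	// Better: update everything, then wait for one grace period.
 *	for (i = 0; i < n; i++) {
 *		tofree[i] = rcu_dereference_protected(gp[i], lockdep_is_held(&mylock));
 *		rcu_assign_pointer(gp[i], NULL);
 *	}
 *	synchronize_rcu();
 *	for (i = 0; i < n; i++)
 *		kfree(tofree[i]);
 */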

/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
	unsigned long flags;
	int i = 0;
	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
	unsigned long s;

	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
	if (s == RCU_GET_STATE_COMPLETED)
		return;
	while (!poll_state_synchronize_rcu(s)) {
		synchronize_rcu_expedited();
		if (i == 10 || i == 20)
			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
		i++;
	}
	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	if (poll_state_synchronize_rcu(s))
		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}

/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long s;

	s = get_state_synchronize_rcu();
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rdp->mynode;
	if (rcu_init_invoked())
		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	if (!poll_state_synchronize_rcu(s)) {
		if (rcu_init_invoked()) {
			rnp->exp_seq_poll_rq = s;
			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
		}
	}
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

	return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
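
/*
 * Illustrative usage sketch (not part of the original source): start an
 * expedited grace period without waiting, then poll the cookie later:
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_rcu_expedited();
 *	...			// Do other work while the GP runs.
 *	if (poll_state_synchronize_rcu(cookie))
 *		...;		// Grace period already elapsed, proceed.
 *	else
 *		cond_synchronize_rcu_expedited(cookie);	// Wait for it.
 */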

/**
 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
 * @rgosp: Place to put snapshot of grace-period state
 *
 * Places the normal and expedited grace-period states in rgosp.  This
 * state value can be passed to a later call to cond_synchronize_rcu_full()
 * or poll_state_synchronize_rcu_full() to determine whether or not a
 * grace period (whether normal or expedited) has elapsed in the meantime.
 * If the needed expedited grace period is not already slated to start,
 * initiates that grace period.
 */
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	get_state_synchronize_rcu_full(rgosp);
	(void)start_poll_synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);

/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	if (!poll_state_synchronize_rcu(oldstate))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
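
/*
 * Illustrative usage sketch (not part of the original source): take a
 * cookie before an update, then wait only if no grace period has elapsed
 * by the time the wait is actually needed:
 *
 *	unsigned long oldstate;
 *
 *	oldstate = get_state_synchronize_rcu();
 *	...			// Publish the update, do other work.
 *	cond_synchronize_rcu_expedited(oldstate);	// Often returns immediately.
 */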

/**
 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
 *
 * If a full RCU grace period has elapsed since the call to
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
 * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
 * to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @rgosp and that returned at the end of
 * this function.
 */
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	if (!poll_state_synchronize_rcu_full(rgosp))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);
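
/*
 * Illustrative usage sketch (not part of the original source) for the
 * _full variants above, which track the normal and expedited grace
 * periods separately via struct rcu_gp_oldstate:
 *
 *	struct rcu_gp_oldstate rgos;
 *
 *	start_poll_synchronize_rcu_expedited_full(&rgos);
 *	...			// Do other work while the GP runs.
 *	cond_synchronize_rcu_expedited_full(&rgos);	// Wait only if needed.
 */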