1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * RCU CPU stall warnings for normal RCU grace periods 4 * 5 * Copyright IBM Corporation, 2019 6 * 7 * Author: Paul E. McKenney <paulmck@linux.ibm.com> 8 */ 9 10 #include <linux/kvm_para.h> 11 12 ////////////////////////////////////////////////////////////////////////////// 13 // 14 // Controlling CPU stall warnings, including delay calculation. 15 16 /* panic() on RCU Stall sysctl. */ 17 int sysctl_panic_on_rcu_stall __read_mostly; 18 int sysctl_max_rcu_stall_to_panic __read_mostly; 19 20 #ifdef CONFIG_PROVE_RCU 21 #define RCU_STALL_DELAY_DELTA (5 * HZ) 22 #else 23 #define RCU_STALL_DELAY_DELTA 0 24 #endif 25 #define RCU_STALL_MIGHT_DIV 8 26 #define RCU_STALL_MIGHT_MIN (2 * HZ) 27 28 /* Limit-check stall timeouts specified at boottime and runtime. */ 29 int rcu_jiffies_till_stall_check(void) 30 { 31 int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout); 32 33 /* 34 * Limit check must be consistent with the Kconfig limits 35 * for CONFIG_RCU_CPU_STALL_TIMEOUT. 36 */ 37 if (till_stall_check < 3) { 38 WRITE_ONCE(rcu_cpu_stall_timeout, 3); 39 till_stall_check = 3; 40 } else if (till_stall_check > 300) { 41 WRITE_ONCE(rcu_cpu_stall_timeout, 300); 42 till_stall_check = 300; 43 } 44 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; 45 } 46 EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check); 47 48 /** 49 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled? 50 * 51 * Returns @true if the current grace period is sufficiently old that 52 * it is reasonable to assume that it might be stalled. This can be 53 * useful when deciding whether to allocate memory to enable RCU-mediated 54 * freeing on the one hand or just invoking synchronize_rcu() on the other. 55 * The latter is preferable when the grace period is stalled. 56 * 57 * Note that sampling of the .gp_start and .gp_seq fields must be done 58 * carefully to avoid false positives at the beginnings and ends of 59 * grace periods. 60 */ 61 bool rcu_gp_might_be_stalled(void) 62 { 63 unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV; 64 unsigned long j = jiffies; 65 66 if (d < RCU_STALL_MIGHT_MIN) 67 d = RCU_STALL_MIGHT_MIN; 68 smp_mb(); // jiffies before .gp_seq to avoid false positives. 69 if (!rcu_gp_in_progress()) 70 return false; 71 // Long delays at this point avoids false positive, but a delay 72 // of ULONG_MAX/4 jiffies voids your no-false-positive warranty. 73 smp_mb(); // .gp_seq before second .gp_start 74 // And ditto here. 75 return !time_before(j, READ_ONCE(rcu_state.gp_start) + d); 76 } 77 78 /* Don't do RCU CPU stall warnings during long sysrq printouts. */ 79 void rcu_sysrq_start(void) 80 { 81 if (!rcu_cpu_stall_suppress) 82 rcu_cpu_stall_suppress = 2; 83 } 84 85 void rcu_sysrq_end(void) 86 { 87 if (rcu_cpu_stall_suppress == 2) 88 rcu_cpu_stall_suppress = 0; 89 } 90 91 /* Don't print RCU CPU stall warnings during a kernel panic. */ 92 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) 93 { 94 rcu_cpu_stall_suppress = 1; 95 return NOTIFY_DONE; 96 } 97 98 static struct notifier_block rcu_panic_block = { 99 .notifier_call = rcu_panic, 100 }; 101 102 static int __init check_cpu_stall_init(void) 103 { 104 atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); 105 return 0; 106 } 107 early_initcall(check_cpu_stall_init); 108 109 /* If so specified via sysctl, panic, yielding cleaner stall-warning output. */ 110 static void panic_on_rcu_stall(void) 111 { 112 static int cpu_stall; 113 114 if (++cpu_stall < sysctl_max_rcu_stall_to_panic) 115 return; 116 117 if (sysctl_panic_on_rcu_stall) 118 panic("RCU Stall\n"); 119 } 120 121 /** 122 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period 123 * 124 * The caller must disable hard irqs. 125 */ 126 void rcu_cpu_stall_reset(void) 127 { 128 WRITE_ONCE(rcu_state.jiffies_stall, 129 jiffies + rcu_jiffies_till_stall_check()); 130 } 131 132 ////////////////////////////////////////////////////////////////////////////// 133 // 134 // Interaction with RCU grace periods 135 136 /* Start of new grace period, so record stall time (and forcing times). */ 137 static void record_gp_stall_check_time(void) 138 { 139 unsigned long j = jiffies; 140 unsigned long j1; 141 142 WRITE_ONCE(rcu_state.gp_start, j); 143 j1 = rcu_jiffies_till_stall_check(); 144 smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq. 145 WRITE_ONCE(rcu_state.jiffies_stall, j + j1); 146 rcu_state.jiffies_resched = j + j1 / 2; 147 rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs); 148 } 149 150 /* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */ 151 static void zero_cpu_stall_ticks(struct rcu_data *rdp) 152 { 153 rdp->ticks_this_gp = 0; 154 rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id()); 155 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 156 } 157 158 /* 159 * If too much time has passed in the current grace period, and if 160 * so configured, go kick the relevant kthreads. 161 */ 162 static void rcu_stall_kick_kthreads(void) 163 { 164 unsigned long j; 165 166 if (!READ_ONCE(rcu_kick_kthreads)) 167 return; 168 j = READ_ONCE(rcu_state.jiffies_kick_kthreads); 169 if (time_after(jiffies, j) && rcu_state.gp_kthread && 170 (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) { 171 WARN_ONCE(1, "Kicking %s grace-period kthread\n", 172 rcu_state.name); 173 rcu_ftrace_dump(DUMP_ALL); 174 wake_up_process(rcu_state.gp_kthread); 175 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ); 176 } 177 } 178 179 /* 180 * Handler for the irq_work request posted about halfway into the RCU CPU 181 * stall timeout, and used to detect excessive irq disabling. Set state 182 * appropriately, but just complain if there is unexpected state on entry. 183 */ 184 static void rcu_iw_handler(struct irq_work *iwp) 185 { 186 struct rcu_data *rdp; 187 struct rcu_node *rnp; 188 189 rdp = container_of(iwp, struct rcu_data, rcu_iw); 190 rnp = rdp->mynode; 191 raw_spin_lock_rcu_node(rnp); 192 if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) { 193 rdp->rcu_iw_gp_seq = rnp->gp_seq; 194 rdp->rcu_iw_pending = false; 195 } 196 raw_spin_unlock_rcu_node(rnp); 197 } 198 199 ////////////////////////////////////////////////////////////////////////////// 200 // 201 // Printing RCU CPU stall warnings 202 203 #ifdef CONFIG_PREEMPT_RCU 204 205 /* 206 * Dump detailed information for all tasks blocking the current RCU 207 * grace period on the specified rcu_node structure. 208 */ 209 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) 210 { 211 unsigned long flags; 212 struct task_struct *t; 213 214 raw_spin_lock_irqsave_rcu_node(rnp, flags); 215 if (!rcu_preempt_blocked_readers_cgp(rnp)) { 216 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 217 return; 218 } 219 t = list_entry(rnp->gp_tasks->prev, 220 struct task_struct, rcu_node_entry); 221 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { 222 /* 223 * We could be printing a lot while holding a spinlock. 224 * Avoid triggering hard lockup. 225 */ 226 touch_nmi_watchdog(); 227 sched_show_task(t); 228 } 229 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 230 } 231 232 // Communicate task state back to the RCU CPU stall warning request. 233 struct rcu_stall_chk_rdr { 234 int nesting; 235 union rcu_special rs; 236 bool on_blkd_list; 237 }; 238 239 /* 240 * Report out the state of a not-running task that is stalling the 241 * current RCU grace period. 242 */ 243 static int check_slow_task(struct task_struct *t, void *arg) 244 { 245 struct rcu_stall_chk_rdr *rscrp = arg; 246 247 if (task_curr(t)) 248 return -EBUSY; // It is running, so decline to inspect it. 249 rscrp->nesting = t->rcu_read_lock_nesting; 250 rscrp->rs = t->rcu_read_unlock_special; 251 rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry); 252 return 0; 253 } 254 255 /* 256 * Scan the current list of tasks blocked within RCU read-side critical 257 * sections, printing out the tid of each of the first few of them. 258 */ 259 static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags) 260 __releases(rnp->lock) 261 { 262 int i = 0; 263 int ndetected = 0; 264 struct rcu_stall_chk_rdr rscr; 265 struct task_struct *t; 266 struct task_struct *ts[8]; 267 268 lockdep_assert_irqs_disabled(); 269 if (!rcu_preempt_blocked_readers_cgp(rnp)) { 270 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 271 return 0; 272 } 273 pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):", 274 rnp->level, rnp->grplo, rnp->grphi); 275 t = list_entry(rnp->gp_tasks->prev, 276 struct task_struct, rcu_node_entry); 277 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { 278 get_task_struct(t); 279 ts[i++] = t; 280 if (i >= ARRAY_SIZE(ts)) 281 break; 282 } 283 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 284 while (i) { 285 t = ts[--i]; 286 if (task_call_func(t, check_slow_task, &rscr)) 287 pr_cont(" P%d", t->pid); 288 else 289 pr_cont(" P%d/%d:%c%c%c%c", 290 t->pid, rscr.nesting, 291 ".b"[rscr.rs.b.blocked], 292 ".q"[rscr.rs.b.need_qs], 293 ".e"[rscr.rs.b.exp_hint], 294 ".l"[rscr.on_blkd_list]); 295 lockdep_assert_irqs_disabled(); 296 put_task_struct(t); 297 ndetected++; 298 } 299 pr_cont("\n"); 300 return ndetected; 301 } 302 303 #else /* #ifdef CONFIG_PREEMPT_RCU */ 304 305 /* 306 * Because preemptible RCU does not exist, we never have to check for 307 * tasks blocked within RCU read-side critical sections. 308 */ 309 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) 310 { 311 } 312 313 /* 314 * Because preemptible RCU does not exist, we never have to check for 315 * tasks blocked within RCU read-side critical sections. 316 */ 317 static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags) 318 __releases(rnp->lock) 319 { 320 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 321 return 0; 322 } 323 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 324 325 /* 326 * Dump stacks of all tasks running on stalled CPUs. First try using 327 * NMIs, but fall back to manual remote stack tracing on architectures 328 * that don't support NMI-based stack dumps. The NMI-triggered stack 329 * traces are more accurate because they are printed by the target CPU. 330 */ 331 static void rcu_dump_cpu_stacks(void) 332 { 333 int cpu; 334 unsigned long flags; 335 struct rcu_node *rnp; 336 337 rcu_for_each_leaf_node(rnp) { 338 raw_spin_lock_irqsave_rcu_node(rnp, flags); 339 for_each_leaf_node_possible_cpu(rnp, cpu) 340 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) { 341 if (cpu_is_offline(cpu)) 342 pr_err("Offline CPU %d blocking current GP.\n", cpu); 343 else if (!trigger_single_cpu_backtrace(cpu)) 344 dump_cpu_task(cpu); 345 } 346 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 347 } 348 } 349 350 static const char * const gp_state_names[] = { 351 [RCU_GP_IDLE] = "RCU_GP_IDLE", 352 [RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS", 353 [RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS", 354 [RCU_GP_ONOFF] = "RCU_GP_ONOFF", 355 [RCU_GP_INIT] = "RCU_GP_INIT", 356 [RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS", 357 [RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS", 358 [RCU_GP_CLEANUP] = "RCU_GP_CLEANUP", 359 [RCU_GP_CLEANED] = "RCU_GP_CLEANED", 360 }; 361 362 /* 363 * Convert a ->gp_state value to a character string. 364 */ 365 static const char *gp_state_getname(short gs) 366 { 367 if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names)) 368 return "???"; 369 return gp_state_names[gs]; 370 } 371 372 /* Is the RCU grace-period kthread being starved of CPU time? */ 373 static bool rcu_is_gp_kthread_starving(unsigned long *jp) 374 { 375 unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity); 376 377 if (jp) 378 *jp = j; 379 return j > 2 * HZ; 380 } 381 382 /* 383 * Print out diagnostic information for the specified stalled CPU. 384 * 385 * If the specified CPU is aware of the current RCU grace period, then 386 * print the number of scheduling clock interrupts the CPU has taken 387 * during the time that it has been aware. Otherwise, print the number 388 * of RCU grace periods that this CPU is ignorant of, for example, "1" 389 * if the CPU was aware of the previous grace period. 390 * 391 * Also print out idle info. 392 */ 393 static void print_cpu_stall_info(int cpu) 394 { 395 unsigned long delta; 396 bool falsepositive; 397 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 398 char *ticks_title; 399 unsigned long ticks_value; 400 401 /* 402 * We could be printing a lot while holding a spinlock. Avoid 403 * triggering hard lockup. 404 */ 405 touch_nmi_watchdog(); 406 407 ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq); 408 if (ticks_value) { 409 ticks_title = "GPs behind"; 410 } else { 411 ticks_title = "ticks this GP"; 412 ticks_value = rdp->ticks_this_gp; 413 } 414 delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq); 415 falsepositive = rcu_is_gp_kthread_starving(NULL) && 416 rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)); 417 pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n", 418 cpu, 419 "O."[!!cpu_online(cpu)], 420 "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)], 421 "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)], 422 !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' : 423 rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' : 424 "!."[!delta], 425 ticks_value, ticks_title, 426 rcu_dynticks_snap(rdp) & 0xfff, 427 rdp->dynticks_nesting, rdp->dynticks_nmi_nesting, 428 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), 429 data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart, 430 falsepositive ? " (false positive?)" : ""); 431 } 432 433 /* Complain about starvation of grace-period kthread. */ 434 static void rcu_check_gp_kthread_starvation(void) 435 { 436 int cpu; 437 struct task_struct *gpk = rcu_state.gp_kthread; 438 unsigned long j; 439 440 if (rcu_is_gp_kthread_starving(&j)) { 441 cpu = gpk ? task_cpu(gpk) : -1; 442 pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n", 443 rcu_state.name, j, 444 (long)rcu_seq_current(&rcu_state.gp_seq), 445 data_race(READ_ONCE(rcu_state.gp_flags)), 446 gp_state_getname(rcu_state.gp_state), 447 data_race(READ_ONCE(rcu_state.gp_state)), 448 gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu); 449 if (gpk) { 450 pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name); 451 pr_err("RCU grace-period kthread stack dump:\n"); 452 sched_show_task(gpk); 453 if (cpu >= 0) { 454 if (cpu_is_offline(cpu)) { 455 pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu); 456 } else { 457 pr_err("Stack dump where RCU GP kthread last ran:\n"); 458 if (!trigger_single_cpu_backtrace(cpu)) 459 dump_cpu_task(cpu); 460 } 461 } 462 wake_up_process(gpk); 463 } 464 } 465 } 466 467 /* Complain about missing wakeups from expired fqs wait timer */ 468 static void rcu_check_gp_kthread_expired_fqs_timer(void) 469 { 470 struct task_struct *gpk = rcu_state.gp_kthread; 471 short gp_state; 472 unsigned long jiffies_fqs; 473 int cpu; 474 475 /* 476 * Order reads of .gp_state and .jiffies_force_qs. 477 * Matching smp_wmb() is present in rcu_gp_fqs_loop(). 478 */ 479 gp_state = smp_load_acquire(&rcu_state.gp_state); 480 jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs); 481 482 if (gp_state == RCU_GP_WAIT_FQS && 483 time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) && 484 gpk && !READ_ONCE(gpk->on_rq)) { 485 cpu = task_cpu(gpk); 486 pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n", 487 rcu_state.name, (jiffies - jiffies_fqs), 488 (long)rcu_seq_current(&rcu_state.gp_seq), 489 data_race(rcu_state.gp_flags), 490 gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS, 491 data_race(READ_ONCE(gpk->__state))); 492 pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n", 493 cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu)); 494 } 495 } 496 497 static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps) 498 { 499 int cpu; 500 unsigned long flags; 501 unsigned long gpa; 502 unsigned long j; 503 int ndetected = 0; 504 struct rcu_node *rnp; 505 long totqlen = 0; 506 507 lockdep_assert_irqs_disabled(); 508 509 /* Kick and suppress, if so configured. */ 510 rcu_stall_kick_kthreads(); 511 if (rcu_stall_is_suppressed()) 512 return; 513 514 /* 515 * OK, time to rat on our buddy... 516 * See Documentation/RCU/stallwarn.rst for info on how to debug 517 * RCU CPU stall warnings. 518 */ 519 trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected")); 520 pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name); 521 rcu_for_each_leaf_node(rnp) { 522 raw_spin_lock_irqsave_rcu_node(rnp, flags); 523 if (rnp->qsmask != 0) { 524 for_each_leaf_node_possible_cpu(rnp, cpu) 525 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) { 526 print_cpu_stall_info(cpu); 527 ndetected++; 528 } 529 } 530 ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock. 531 lockdep_assert_irqs_disabled(); 532 } 533 534 for_each_possible_cpu(cpu) 535 totqlen += rcu_get_n_cbs_cpu(cpu); 536 pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", 537 smp_processor_id(), (long)(jiffies - gps), 538 (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); 539 if (ndetected) { 540 rcu_dump_cpu_stacks(); 541 542 /* Complain about tasks blocking the grace period. */ 543 rcu_for_each_leaf_node(rnp) 544 rcu_print_detail_task_stall_rnp(rnp); 545 } else { 546 if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) { 547 pr_err("INFO: Stall ended before state dump start\n"); 548 } else { 549 j = jiffies; 550 gpa = data_race(READ_ONCE(rcu_state.gp_activity)); 551 pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n", 552 rcu_state.name, j - gpa, j, gpa, 553 data_race(READ_ONCE(jiffies_till_next_fqs)), 554 data_race(READ_ONCE(rcu_get_root()->qsmask))); 555 } 556 } 557 /* Rewrite if needed in case of slow consoles. */ 558 if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall))) 559 WRITE_ONCE(rcu_state.jiffies_stall, 560 jiffies + 3 * rcu_jiffies_till_stall_check() + 3); 561 562 rcu_check_gp_kthread_expired_fqs_timer(); 563 rcu_check_gp_kthread_starvation(); 564 565 panic_on_rcu_stall(); 566 567 rcu_force_quiescent_state(); /* Kick them all. */ 568 } 569 570 static void print_cpu_stall(unsigned long gps) 571 { 572 int cpu; 573 unsigned long flags; 574 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 575 struct rcu_node *rnp = rcu_get_root(); 576 long totqlen = 0; 577 578 lockdep_assert_irqs_disabled(); 579 580 /* Kick and suppress, if so configured. */ 581 rcu_stall_kick_kthreads(); 582 if (rcu_stall_is_suppressed()) 583 return; 584 585 /* 586 * OK, time to rat on ourselves... 587 * See Documentation/RCU/stallwarn.rst for info on how to debug 588 * RCU CPU stall warnings. 589 */ 590 trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected")); 591 pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name); 592 raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); 593 print_cpu_stall_info(smp_processor_id()); 594 raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); 595 for_each_possible_cpu(cpu) 596 totqlen += rcu_get_n_cbs_cpu(cpu); 597 pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n", 598 jiffies - gps, 599 (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); 600 601 rcu_check_gp_kthread_expired_fqs_timer(); 602 rcu_check_gp_kthread_starvation(); 603 604 rcu_dump_cpu_stacks(); 605 606 raw_spin_lock_irqsave_rcu_node(rnp, flags); 607 /* Rewrite if needed in case of slow consoles. */ 608 if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall))) 609 WRITE_ONCE(rcu_state.jiffies_stall, 610 jiffies + 3 * rcu_jiffies_till_stall_check() + 3); 611 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 612 613 panic_on_rcu_stall(); 614 615 /* 616 * Attempt to revive the RCU machinery by forcing a context switch. 617 * 618 * A context switch would normally allow the RCU state machine to make 619 * progress and it could be we're stuck in kernel space without context 620 * switches for an entirely unreasonable amount of time. 621 */ 622 set_tsk_need_resched(current); 623 set_preempt_need_resched(); 624 } 625 626 static void check_cpu_stall(struct rcu_data *rdp) 627 { 628 bool didstall = false; 629 unsigned long gs1; 630 unsigned long gs2; 631 unsigned long gps; 632 unsigned long j; 633 unsigned long jn; 634 unsigned long js; 635 struct rcu_node *rnp; 636 637 lockdep_assert_irqs_disabled(); 638 if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) || 639 !rcu_gp_in_progress()) 640 return; 641 rcu_stall_kick_kthreads(); 642 j = jiffies; 643 644 /* 645 * Lots of memory barriers to reject false positives. 646 * 647 * The idea is to pick up rcu_state.gp_seq, then 648 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally 649 * another copy of rcu_state.gp_seq. These values are updated in 650 * the opposite order with memory barriers (or equivalent) during 651 * grace-period initialization and cleanup. Now, a false positive 652 * can occur if we get an new value of rcu_state.gp_start and a old 653 * value of rcu_state.jiffies_stall. But given the memory barriers, 654 * the only way that this can happen is if one grace period ends 655 * and another starts between these two fetches. This is detected 656 * by comparing the second fetch of rcu_state.gp_seq with the 657 * previous fetch from rcu_state.gp_seq. 658 * 659 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall, 660 * and rcu_state.gp_start suffice to forestall false positives. 661 */ 662 gs1 = READ_ONCE(rcu_state.gp_seq); 663 smp_rmb(); /* Pick up ->gp_seq first... */ 664 js = READ_ONCE(rcu_state.jiffies_stall); 665 smp_rmb(); /* ...then ->jiffies_stall before the rest... */ 666 gps = READ_ONCE(rcu_state.gp_start); 667 smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */ 668 gs2 = READ_ONCE(rcu_state.gp_seq); 669 if (gs1 != gs2 || 670 ULONG_CMP_LT(j, js) || 671 ULONG_CMP_GE(gps, js)) 672 return; /* No stall or GP completed since entering function. */ 673 rnp = rdp->mynode; 674 jn = jiffies + ULONG_MAX / 2; 675 if (rcu_gp_in_progress() && 676 (READ_ONCE(rnp->qsmask) & rdp->grpmask) && 677 cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) { 678 679 /* 680 * If a virtual machine is stopped by the host it can look to 681 * the watchdog like an RCU stall. Check to see if the host 682 * stopped the vm. 683 */ 684 if (kvm_check_and_clear_guest_paused()) 685 return; 686 687 /* We haven't checked in, so go dump stack. */ 688 print_cpu_stall(gps); 689 if (READ_ONCE(rcu_cpu_stall_ftrace_dump)) 690 rcu_ftrace_dump(DUMP_ALL); 691 didstall = true; 692 693 } else if (rcu_gp_in_progress() && 694 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) && 695 cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) { 696 697 /* 698 * If a virtual machine is stopped by the host it can look to 699 * the watchdog like an RCU stall. Check to see if the host 700 * stopped the vm. 701 */ 702 if (kvm_check_and_clear_guest_paused()) 703 return; 704 705 /* They had a few time units to dump stack, so complain. */ 706 print_other_cpu_stall(gs2, gps); 707 if (READ_ONCE(rcu_cpu_stall_ftrace_dump)) 708 rcu_ftrace_dump(DUMP_ALL); 709 didstall = true; 710 } 711 if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) { 712 jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3; 713 WRITE_ONCE(rcu_state.jiffies_stall, jn); 714 } 715 } 716 717 ////////////////////////////////////////////////////////////////////////////// 718 // 719 // RCU forward-progress mechanisms, including of callback invocation. 720 721 722 /* 723 * Check to see if a failure to end RCU priority inversion was due to 724 * a CPU not passing through a quiescent state. When this happens, there 725 * is nothing that RCU priority boosting can do to help, so we shouldn't 726 * count this as an RCU priority boosting failure. A return of true says 727 * RCU priority boosting is to blame, and false says otherwise. If false 728 * is returned, the first of the CPUs to blame is stored through cpup. 729 * If there was no CPU blocking the current grace period, but also nothing 730 * in need of being boosted, *cpup is set to -1. This can happen in case 731 * of vCPU preemption while the last CPU is reporting its quiscent state, 732 * for example. 733 * 734 * If cpup is NULL, then a lockless quick check is carried out, suitable 735 * for high-rate usage. On the other hand, if cpup is non-NULL, each 736 * rcu_node structure's ->lock is acquired, ruling out high-rate usage. 737 */ 738 bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) 739 { 740 bool atb = false; 741 int cpu; 742 unsigned long flags; 743 struct rcu_node *rnp; 744 745 rcu_for_each_leaf_node(rnp) { 746 if (!cpup) { 747 if (data_race(READ_ONCE(rnp->qsmask))) { 748 return false; 749 } else { 750 if (READ_ONCE(rnp->gp_tasks)) 751 atb = true; 752 continue; 753 } 754 } 755 *cpup = -1; 756 raw_spin_lock_irqsave_rcu_node(rnp, flags); 757 if (rnp->gp_tasks) 758 atb = true; 759 if (!rnp->qsmask) { 760 // No CPUs without quiescent states for this rnp. 761 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 762 continue; 763 } 764 // Find the first holdout CPU. 765 for_each_leaf_node_possible_cpu(rnp, cpu) { 766 if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) { 767 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 768 *cpup = cpu; 769 return false; 770 } 771 } 772 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 773 } 774 // Can't blame CPUs, so must blame RCU priority boosting. 775 return atb; 776 } 777 EXPORT_SYMBOL_GPL(rcu_check_boost_fail); 778 779 /* 780 * Show the state of the grace-period kthreads. 781 */ 782 void show_rcu_gp_kthreads(void) 783 { 784 unsigned long cbs = 0; 785 int cpu; 786 unsigned long j; 787 unsigned long ja; 788 unsigned long jr; 789 unsigned long js; 790 unsigned long jw; 791 struct rcu_data *rdp; 792 struct rcu_node *rnp; 793 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread); 794 795 j = jiffies; 796 ja = j - data_race(READ_ONCE(rcu_state.gp_activity)); 797 jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity)); 798 js = j - data_race(READ_ONCE(rcu_state.gp_start)); 799 jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time)); 800 pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n", 801 rcu_state.name, gp_state_getname(rcu_state.gp_state), 802 data_race(READ_ONCE(rcu_state.gp_state)), 803 t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU, 804 js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)), 805 (long)data_race(READ_ONCE(rcu_state.gp_seq)), 806 (long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)), 807 data_race(READ_ONCE(rcu_state.gp_max)), 808 data_race(READ_ONCE(rcu_state.gp_flags))); 809 rcu_for_each_node_breadth_first(rnp) { 810 if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) && 811 !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) && 812 !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks))) 813 continue; 814 pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n", 815 rnp->grplo, rnp->grphi, 816 (long)data_race(READ_ONCE(rnp->gp_seq)), 817 (long)data_race(READ_ONCE(rnp->gp_seq_needed)), 818 data_race(READ_ONCE(rnp->qsmask)), 819 ".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))], 820 ".B"[!!data_race(READ_ONCE(rnp->boost_tasks))], 821 ".E"[!!data_race(READ_ONCE(rnp->exp_tasks))], 822 ".G"[!!data_race(READ_ONCE(rnp->gp_tasks))], 823 data_race(READ_ONCE(rnp->n_boosts))); 824 if (!rcu_is_leaf_node(rnp)) 825 continue; 826 for_each_leaf_node_possible_cpu(rnp, cpu) { 827 rdp = per_cpu_ptr(&rcu_data, cpu); 828 if (READ_ONCE(rdp->gpwrap) || 829 ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), 830 READ_ONCE(rdp->gp_seq_needed))) 831 continue; 832 pr_info("\tcpu %d ->gp_seq_needed %ld\n", 833 cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed))); 834 } 835 } 836 for_each_possible_cpu(cpu) { 837 rdp = per_cpu_ptr(&rcu_data, cpu); 838 cbs += data_race(READ_ONCE(rdp->n_cbs_invoked)); 839 if (rcu_segcblist_is_offloaded(&rdp->cblist)) 840 show_rcu_nocb_state(rdp); 841 } 842 pr_info("RCU callbacks invoked since boot: %lu\n", cbs); 843 show_rcu_tasks_gp_kthreads(); 844 } 845 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); 846 847 /* 848 * This function checks for grace-period requests that fail to motivate 849 * RCU to come out of its idle mode. 850 */ 851 static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp, 852 const unsigned long gpssdelay) 853 { 854 unsigned long flags; 855 unsigned long j; 856 struct rcu_node *rnp_root = rcu_get_root(); 857 static atomic_t warned = ATOMIC_INIT(0); 858 859 if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() || 860 ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq), 861 READ_ONCE(rnp_root->gp_seq_needed)) || 862 !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread. 863 return; 864 j = jiffies; /* Expensive access, and in common case don't get here. */ 865 if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) || 866 time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) || 867 atomic_read(&warned)) 868 return; 869 870 raw_spin_lock_irqsave_rcu_node(rnp, flags); 871 j = jiffies; 872 if (rcu_gp_in_progress() || 873 ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq), 874 READ_ONCE(rnp_root->gp_seq_needed)) || 875 time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) || 876 time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) || 877 atomic_read(&warned)) { 878 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 879 return; 880 } 881 /* Hold onto the leaf lock to make others see warned==1. */ 882 883 if (rnp_root != rnp) 884 raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */ 885 j = jiffies; 886 if (rcu_gp_in_progress() || 887 ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq), 888 READ_ONCE(rnp_root->gp_seq_needed)) || 889 time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) || 890 time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) || 891 atomic_xchg(&warned, 1)) { 892 if (rnp_root != rnp) 893 /* irqs remain disabled. */ 894 raw_spin_unlock_rcu_node(rnp_root); 895 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 896 return; 897 } 898 WARN_ON(1); 899 if (rnp_root != rnp) 900 raw_spin_unlock_rcu_node(rnp_root); 901 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 902 show_rcu_gp_kthreads(); 903 } 904 905 /* 906 * Do a forward-progress check for rcutorture. This is normally invoked 907 * due to an OOM event. The argument "j" gives the time period during 908 * which rcutorture would like progress to have been made. 909 */ 910 void rcu_fwd_progress_check(unsigned long j) 911 { 912 unsigned long cbs; 913 int cpu; 914 unsigned long max_cbs = 0; 915 int max_cpu = -1; 916 struct rcu_data *rdp; 917 918 if (rcu_gp_in_progress()) { 919 pr_info("%s: GP age %lu jiffies\n", 920 __func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start))); 921 show_rcu_gp_kthreads(); 922 } else { 923 pr_info("%s: Last GP end %lu jiffies ago\n", 924 __func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end))); 925 preempt_disable(); 926 rdp = this_cpu_ptr(&rcu_data); 927 rcu_check_gp_start_stall(rdp->mynode, rdp, j); 928 preempt_enable(); 929 } 930 for_each_possible_cpu(cpu) { 931 cbs = rcu_get_n_cbs_cpu(cpu); 932 if (!cbs) 933 continue; 934 if (max_cpu < 0) 935 pr_info("%s: callbacks", __func__); 936 pr_cont(" %d: %lu", cpu, cbs); 937 if (cbs <= max_cbs) 938 continue; 939 max_cbs = cbs; 940 max_cpu = cpu; 941 } 942 if (max_cpu >= 0) 943 pr_cont("\n"); 944 } 945 EXPORT_SYMBOL_GPL(rcu_fwd_progress_check); 946 947 /* Commandeer a sysrq key to dump RCU's tree. */ 948 static bool sysrq_rcu; 949 module_param(sysrq_rcu, bool, 0444); 950 951 /* Dump grace-period-request information due to commandeered sysrq. */ 952 static void sysrq_show_rcu(int key) 953 { 954 show_rcu_gp_kthreads(); 955 } 956 957 static const struct sysrq_key_op sysrq_rcudump_op = { 958 .handler = sysrq_show_rcu, 959 .help_msg = "show-rcu(y)", 960 .action_msg = "Show RCU tree", 961 .enable_mask = SYSRQ_ENABLE_DUMP, 962 }; 963 964 static int __init rcu_sysrq_init(void) 965 { 966 if (sysrq_rcu) 967 return register_sysrq_key('y', &sysrq_rcudump_op); 968 return 0; 969 } 970 early_initcall(rcu_sysrq_init); 971