Lines Matching +full:lock-detect-precision-6-ns-enable

1 // SPDX-License-Identifier: GPL-2.0
3 * Detect hard and soft lockups on a system
9 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
63 * Should we panic when a soft-lockup or hard-lockup occurs:
91 * We may not want to enable hard lockup detection by default in all cases,
205 * Prevent multiple hard-lockup reports if one cpu is already in watchdog_hardlockup_check()
217 * other CPU somehow locked up while holding the lock associated in watchdog_hardlockup_check()
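The two comments above (file lines 205 and 217) describe serializing the hard-lockup report so that only one CPU dumps backtraces, even if another CPU is wedged while holding a printk-related lock. The kernel gates this with a test-and-set style bit; below is a minimal userspace sketch of the same one-shot guard idea using C11 atomics, with illustrative names rather than the kernel's mechanism:

	#include <stdatomic.h>
	#include <stdio.h>

	/* One-shot guard: only the first caller wins the right to report. */
	static atomic_flag hard_lockup_warned = ATOMIC_FLAG_INIT;

	static void report_hardlockup(int cpu)
	{
		if (atomic_flag_test_and_set(&hard_lockup_warned))
			return;		/* another CPU already reported */
		printf("hard lockup reported once, by CPU#%d\n", cpu);
	}

	int main(void)
	{
		report_hardlockup(1);
		report_hardlockup(2);	/* suppressed by the guard */
		return 0;
	}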
270 * Watchdog-detector specific API.
278 return -ENODEV; in watchdog_hardlockup_probe()
282 * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
292 * watchdog_hardlockup_start - Start the watchdog after reconfiguration
298 * - watchdog_enabled
299 * - watchdog_thresh
300 * - watchdog_cpumask
305 * lockup_detector_update_enable - Update the sysctl enable bit
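File lines 282-305 point at the reconfiguration API: the detector is stopped, the sysctl-visible values (watchdog_enabled, watchdog_thresh, watchdog_cpumask) are updated, and the detector is started again. A heavily simplified sketch of that stop/update/start pattern follows; names and control flow are assumptions, not the kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	static bool detector_running;
	static unsigned int watchdog_thresh = 10;	/* seconds */

	static void watchdog_stop(void)  { detector_running = false; }
	static void watchdog_start(void) { detector_running = true; }

	/* Nothing may fire while the tunables change underneath the detector. */
	static void reconfigure_thresh(unsigned int new_thresh)
	{
		watchdog_stop();
		watchdog_thresh = new_thresh;
		watchdog_start();
	}

	int main(void)
	{
		watchdog_start();
		reconfigure_thresh(20);
		printf("running=%d thresh=%u\n", detector_running, watchdog_thresh);
		return 0;
	}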
422 * sufficient for our precision, allowing us to use u16 to store
424 * 2^24 ~= 16 * 10^6
429 * 2^24ns ~= 16.8ms in get_16bit_precision()
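File lines 422-429 are the 16-bit precision trick: shifting a nanosecond value right by 24 bits leaves units of roughly 16.8 ms (2^24 ns), small enough to track in a u16. A standalone sketch of that conversion, with an illustrative function name:

	#include <stdint.h>
	#include <stdio.h>

	/* Keep only the top bits of a ns value; one unit is ~16.8 ms. */
	static uint16_t to_16bit_precision(uint64_t data_ns)
	{
		return (uint16_t)(data_ns >> 24);	/* 2^24 ns ~= 16.8 ms */
	}

	int main(void)
	{
		uint64_t one_second_ns = 1000000000ULL;

		/* 1 s is about 59 such units. */
		printf("%u\n", (unsigned int)to_16bit_precision(one_second_ns));
		return 0;
	}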
450 util = DIV_ROUND_UP(100 * (new_stat - old_stat), sample_period_16); in update_cpustat()
452 * Since we use 16-bit precision, the raw data will undergo in update_cpustat()
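File line 450 turns the hardirq time delta into a percentage of the sample period, with both values already reduced to the 16-bit units above, and rounds the result up. A small sketch of that arithmetic using a plain-C stand-in for the kernel's DIV_ROUND_UP(); the sample numbers are made up:

	#include <stdint.h>
	#include <stdio.h>

	/* Round-up integer division, as DIV_ROUND_UP() does in the kernel. */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		uint16_t old_stat = 100, new_stat = 130;	/* hardirq time, 16-bit units */
		uint16_t sample_period_16 = 238;		/* ~4 s in the same units */
		unsigned int util;

		util = DIV_ROUND_UP(100U * (new_stat - old_stat), sample_period_16);
		printf("hardirq utilization: %u%%\n", util);	/* prints about 13% */
		return 0;
	}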
524 tail = (tail + NUM_HARDIRQ_REPORT - 1) % NUM_HARDIRQ_REPORT; in need_counting_irqs()
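File line 524 steps one slot backwards through a small circular history: adding NUM_HARDIRQ_REPORT - 1 before taking the modulo is the wrap-safe way to subtract one from an unsigned index. A tiny sketch with an assumed slot count:

	#include <stdio.h>

	#define NUM_SLOTS	5	/* stands in for NUM_HARDIRQ_REPORT */

	/* Previous index in a ring of NUM_SLOTS entries; 0 wraps to NUM_SLOTS - 1. */
	static unsigned int prev_slot(unsigned int tail)
	{
		return (tail + NUM_SLOTS - 1) % NUM_SLOTS;
	}

	int main(void)
	{
		for (unsigned int i = 0; i < NUM_SLOTS; i++)
			printf("prev of %u is %u\n", i, prev_slot(i));
		return 0;
	}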
546 {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0} in print_irq_counts()
560 printk(KERN_CRIT "CPU#%d Detect HardIRQ Time exceeds %d%%. Most frequent HardIRQs:\n", in print_irq_counts()
564 if (irq_counts_sorted[i].irq == -1) in print_irq_counts()
567 printk(KERN_CRIT "\t#%u: %-10u\tirq#%d\n", in print_irq_counts()
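File lines 546-567 belong to the interrupt-storm report: an array of {irq, count} pairs is pre-filled with {-1, 0} sentinels, and printing stops at the first unused slot. A self-contained sketch of that report loop with invented sample data:

	#include <stdio.h>

	#define NUM_HARDIRQ_REPORT	5

	struct irq_count { int irq; unsigned int count; };

	int main(void)
	{
		/* irq == -1 marks an empty slot, as in the sentinel initializer above. */
		struct irq_count top[NUM_HARDIRQ_REPORT] = {
			{30, 1200}, {41, 800}, {-1, 0}, {-1, 0}, {-1, 0}
		};

		printf("Most frequent HardIRQs:\n");
		for (unsigned int i = 0; i < NUM_HARDIRQ_REPORT; i++) {
			if (top[i].irq == -1)
				break;	/* remaining slots are unused */
			printf("\t#%u: %-10u\tirq#%d\n", i + 1, top[i].count, top[i].irq);
		}
		return 0;
	}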
595 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
609 * 2^30ns == 1.074s.
619 * convert watchdog_thresh from seconds to ns in set_sample_period()
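File lines 595-619 derive the hrtimer sample period from watchdog_thresh: the soft-lockup threshold (twice watchdog_thresh, per the comment at line 731) is converted from seconds to nanoseconds and then divided so the timer gets several chances to fire before a warning is due. A hedged sketch of that arithmetic; the divisor of 5 is an assumption taken from the comment's intent:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL

	int main(void)
	{
		unsigned int watchdog_thresh = 10;		/* default, seconds */
		uint64_t softlockup_thresh_ns = 2ULL * watchdog_thresh * NSEC_PER_SEC;
		uint64_t sample_period = softlockup_thresh_ns / 5;

		/* With the defaults this comes out to 4 s between samples. */
		printf("sample period: %llu ns\n", (unsigned long long)sample_period);
		return 0;
	}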
642 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
645 * preventing the watchdog task from executing - e.g. the scheduler
708 * A poorly behaving BPF scheduler can live-lock the system into in is_softlockup()
713 scx_softlockup(now - touch_ts); in is_softlockup()
717 return now - touch_ts; in is_softlockup()
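File lines 708-717 are from is_softlockup(): it reports how long the CPU has gone without touching its watchdog timestamp, returning 0 while the CPU is still inside the threshold (the scx_softlockup() call is specific to the sched_ext/BPF scheduler integration). A simplified standalone check with an assumed fixed threshold:

	#include <stdint.h>
	#include <stdio.h>

	#define SOFTLOCKUP_THRESH	20	/* seconds; assumes 2 * watchdog_thresh at the default of 10 */

	/* Return the stall duration if it exceeds the threshold, else 0. */
	static uint64_t is_softlockup(uint64_t now, uint64_t touch_ts)
	{
		if (now - touch_ts > SOFTLOCKUP_THRESH)
			return now - touch_ts;
		return 0;
	}

	int main(void)
	{
		printf("%llu\n", (unsigned long long)is_softlockup(125, 100));	/* 25: stalled */
		printf("%llu\n", (unsigned long long)is_softlockup(110, 100));	/* 0: still healthy */
		return 0;
	}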
727 * The watchdog feed function - touches the timestamp.
731 * for more than 2*watchdog_thresh seconds then the debug-printout
818 * Prevent multiple soft-lockup reports if one cpu is already in watchdog_timer_fn()
830 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", in watchdog_timer_fn()
832 current->comm, task_pid_nr(current)); in watchdog_timer_fn()
876 /* Enable the hardlockup detector */ in watchdog_enable()
1018 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
1041 * caller | table->data points to | 'which'
1042 * -------------------|----------------------------------|-------------------------------
1045 * -------------------|----------------------------------|-------------------------------
1047 * -------------------|----------------------------------|-------------------------------
1053 int err, old, *param = table->data; in proc_watchdog_common()
1093 return -ENOTSUPP; in proc_nmi_watchdog()
1273 if (ret == -ENODEV) in lockup_detector_delay_init()
1288 * lockup_detector_retry_init - retry init lockup detector if possible.
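File lines 1273-1288 cover delayed initialization: if the hard-lockup probe reports -ENODEV (for example the PMU is not ready yet), initialization is retried later instead of giving up. A compact userspace sketch of that retry-on-ENODEV pattern; the second direct call stands in for the re-queued work item:

	#include <errno.h>
	#include <stdio.h>

	/* Illustrative probe that only succeeds on the second attempt. */
	static int probe_attempts;
	static int hardlockup_probe(void)
	{
		return (++probe_attempts < 2) ? -ENODEV : 0;
	}

	static void delayed_detector_init(void)
	{
		int ret = hardlockup_probe();

		if (ret == -ENODEV) {
			printf("detector not ready, retrying\n");
			ret = hardlockup_probe();
		}
		printf("init %s\n", ret ? "failed" : "succeeded");
	}

	int main(void)
	{
		delayed_detector_init();
		return 0;
	}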