xref: /linux/kernel/watchdog.c (revision 077ba03600faea5f2aa15afbb83580878cc8b500)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Detect hard and soft lockups on a system
4  *
5  * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
6  *
7  * Note: Most of this code is borrowed heavily from the original softlockup
8  * detector, so thanks to Ingo for the initial implementation.
9  * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
10  * to those contributors as well.
11  */
12 
13 #define pr_fmt(fmt) "watchdog: " fmt
14 
15 #include <linux/cpu.h>
16 #include <linux/init.h>
17 #include <linux/irq.h>
18 #include <linux/irqdesc.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/kvm_para.h>
21 #include <linux/math64.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/nmi.h>
25 #include <linux/stop_machine.h>
26 #include <linux/sysctl.h>
27 #include <linux/tick.h>
28 #include <linux/sys_info.h>
29 
30 #include <linux/sched/clock.h>
31 #include <linux/sched/debug.h>
32 #include <linux/sched/isolation.h>
33 
34 #include <asm/irq_regs.h>
35 
36 static DEFINE_MUTEX(watchdog_mutex);
37 
38 #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
39 # define WATCHDOG_HARDLOCKUP_DEFAULT	1
40 #else
41 # define WATCHDOG_HARDLOCKUP_DEFAULT	0
42 #endif
43 
44 #define NUM_SAMPLE_PERIODS	5
45 
46 unsigned long __read_mostly watchdog_enabled;
47 int __read_mostly watchdog_user_enabled = 1;
48 static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
49 static int __read_mostly watchdog_softlockup_user_enabled = 1;
50 int __read_mostly watchdog_thresh = 10;
51 static int __read_mostly watchdog_thresh_next;
52 static int __read_mostly watchdog_hardlockup_available;
53 
54 struct cpumask watchdog_cpumask __read_mostly;
55 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
56 
57 #ifdef CONFIG_HARDLOCKUP_DETECTOR
58 
59 # ifdef CONFIG_SMP
60 int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
61 # endif /* CONFIG_SMP */
62 
63 /*
64  * Number of consecutive missed interrupts before declaring a lockup.
65  * Default to 1 (immediate) for NMI/Perf. Buddy will overwrite this to 3.
66  */
67 int __read_mostly watchdog_hardlockup_miss_thresh = 1;
68 EXPORT_SYMBOL_GPL(watchdog_hardlockup_miss_thresh);
69 
70 /*
71  * Should we panic when a soft-lockup or hard-lockup occurs:
72  */
73 unsigned int __read_mostly hardlockup_panic =
74 			IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
75 
76 /*
77  * bitmasks to control what kinds of system info to be printed when
78  * hard lockup is detected, it could be task, memory, lock etc.
79  * Refer include/linux/sys_info.h for detailed bit definition.
80  */
81 unsigned long hardlockup_si_mask;
82 
83 #ifdef CONFIG_SYSFS
84 
85 static unsigned int hardlockup_count;
86 
/* sysfs read handler: report how many hard lockups have been detected. */
static ssize_t hardlockup_count_show(struct kobject *kobj, struct kobj_attribute *attr,
				     char *page)
{
	return sysfs_emit(page, "%u\n", hardlockup_count);
}
92 
93 static struct kobj_attribute hardlockup_count_attr = __ATTR_RO(hardlockup_count);
94 
/* Expose /sys/kernel/hardlockup_count; registration failure is best-effort. */
static __init int kernel_hardlockup_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &hardlockup_count_attr.attr, NULL);
	return 0;
}
100 
101 late_initcall(kernel_hardlockup_sysfs_init);
102 
103 #endif // CONFIG_SYSFS
104 
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	/* Picked up later by lockup_detector_update_enable(). */
	watchdog_hardlockup_user_enabled = 0;
}
117 
/*
 * Parse the "nmi_watchdog=" boot parameter: a comma-separated list of
 * "panic", "nopanic", "0" (disable), "1" (enable) and "r<event>"
 * (raw PMU event for the perf-based detector).
 */
static int __init hardlockup_panic_setup(char *str)
{
next:
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_hardlockup_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		watchdog_hardlockup_user_enabled = 1;
	else if (!strncmp(str, "r", 1))
		hardlockup_config_perf_event(str + 1);
	/* Advance past the next ',' (if any) and parse the next token. */
	while (*(str++)) {
		if (*str == ',') {
			str++;
			goto next;
		}
	}
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
140 
141 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
142 
143 #if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
144 
145 static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
146 static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
147 static DEFINE_PER_CPU(int, hrtimer_interrupts_missed);
148 static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
149 static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
150 static unsigned long hard_lockup_nmi_warn;
151 
/*
 * Suppress the next hardlockup check on the current CPU and force a
 * resync of its interrupt counters.
 */
notrace void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_hardlockup_touched, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
164 
/* Suppress the next hardlockup check for @cpu (callable from another CPU). */
void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{
	per_cpu(watchdog_hardlockup_touched, cpu) = true;
}
169 
/* Resync the saved interrupt count and clear the miss counter for @cpu. */
static void watchdog_hardlockup_update_reset(unsigned int cpu)
{
	int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));

	/*
	 * NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
	 * for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
	 * written/read by a single CPU.
	 */
	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
	per_cpu(hrtimer_interrupts_missed, cpu) = 0;
}
182 
/*
 * Return true when @cpu's hrtimer interrupt count has failed to advance
 * for watchdog_hardlockup_miss_thresh consecutive checks.
 */
static bool is_hardlockup(unsigned int cpu)
{
	int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));

	/* Progress was made: resync the saved count and clear the misses. */
	if (per_cpu(hrtimer_interrupts_saved, cpu) != hrint) {
		watchdog_hardlockup_update_reset(cpu);
		return false;
	}

	/*
	 * The "%" makes this fire on every miss_thresh-th consecutive miss,
	 * i.e. repeatedly while the CPU remains stuck.
	 */
	per_cpu(hrtimer_interrupts_missed, cpu)++;
	if (per_cpu(hrtimer_interrupts_missed, cpu) % watchdog_hardlockup_miss_thresh)
		return false;

	return true;
}
198 
/*
 * Count a watchdog hrtimer tick on this CPU and give the buddy detector
 * a chance to check its partner CPU.
 */
static void watchdog_hardlockup_kick(void)
{
	int new_interrupts;

	new_interrupts = atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
	watchdog_buddy_check_hardlockup(new_interrupts);
}
206 
/*
 * watchdog_hardlockup_check - check one CPU for a hard lockup and report it
 * @cpu:  CPU to check; may differ from the executing CPU (buddy detector)
 * @regs: register state when checking the local CPU, or NULL
 *
 * Called from the hardlockup detector (NMI or buddy). Reports at most
 * once per stuck episode per CPU and panics when hardlockup_panic is set.
 */
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{
	int hardlockup_all_cpu_backtrace;
	unsigned int this_cpu;
	unsigned long flags;

	/* A touched CPU skips this check once and resyncs its counters. */
	if (per_cpu(watchdog_hardlockup_touched, cpu)) {
		watchdog_hardlockup_update_reset(cpu);
		per_cpu(watchdog_hardlockup_touched, cpu) = false;
		return;
	}

	hardlockup_all_cpu_backtrace = (hardlockup_si_mask & SYS_INFO_ALL_BT) ?
					1 : sysctl_hardlockup_all_cpu_backtrace;
	/*
	 * Check for a hardlockup by making sure the CPU's timer
	 * interrupt is incrementing. The timer interrupt should have
	 * fired multiple times before we overflow'd. If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (!is_hardlockup(cpu)) {
		per_cpu(watchdog_hardlockup_warned, cpu) = false;
		return;
	}

#ifdef CONFIG_SYSFS
	++hardlockup_count;
#endif
	/*
	 * A poorly behaving BPF scheduler can trigger hard lockup by
	 * e.g. putting numerous affinitized tasks in a single queue and
	 * directing all CPUs at it. The following call can return true
	 * only once when sched_ext is enabled and will immediately
	 * abort the BPF scheduler and print out a warning message.
	 */
	if (scx_hardlockup(cpu))
		return;

	/* Only print hardlockups once. */
	if (per_cpu(watchdog_hardlockup_warned, cpu))
		return;

	/*
	 * Prevent multiple hard-lockup reports if one cpu is already
	 * engaged in dumping all cpu back traces.
	 */
	if (hardlockup_all_cpu_backtrace) {
		if (test_and_set_bit_lock(0, &hard_lockup_nmi_warn))
			return;
	}

	/*
	 * NOTE: we call printk_cpu_sync_get_irqsave() after printing
	 * the lockup message. While it would be nice to serialize
	 * that printout, we really want to make sure that if some
	 * other CPU somehow locked up while holding the lock associated
	 * with printk_cpu_sync_get_irqsave() that we can still at least
	 * get the message about the lockup out.
	 */
	this_cpu = smp_processor_id();
	pr_emerg("CPU%u: Watchdog detected hard LOCKUP on cpu %u\n", this_cpu, cpu);
	printk_cpu_sync_get_irqsave(flags);

	print_modules();
	print_irqtrace_events(current);
	if (cpu == this_cpu) {
		if (regs)
			show_regs(regs);
		else
			dump_stack();
		printk_cpu_sync_put_irqrestore(flags);
	} else {
		/* Remote CPU: drop the sync lock before requesting its trace. */
		printk_cpu_sync_put_irqrestore(flags);
		trigger_single_cpu_backtrace(cpu);
	}

	if (hardlockup_all_cpu_backtrace) {
		trigger_allbutcpu_cpu_backtrace(cpu);
		if (!hardlockup_panic)
			clear_bit_unlock(0, &hard_lockup_nmi_warn);
	}

	sys_info(hardlockup_si_mask & ~SYS_INFO_ALL_BT);
	if (hardlockup_panic)
		nmi_panic(regs, "Hard LOCKUP");

	per_cpu(watchdog_hardlockup_warned, cpu) = true;
}
295 
296 #else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
297 
298 static inline void watchdog_hardlockup_kick(void) { }
299 
300 #endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
301 
/*
 * These functions can be overridden based on the configured hardlockup detector.
 *
 * watchdog_hardlockup_enable/disable can be implemented to start and stop when
 * softlockup watchdog start and stop. The detector must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
void __weak watchdog_hardlockup_enable(unsigned int cpu) { }

void __weak watchdog_hardlockup_disable(unsigned int cpu) { }
312 
/*
 * Watchdog-detector specific API.
 *
 * Return 0 when hardlockup watchdog is available, negative value otherwise.
 * Note that the negative value means that a delayed probe might
 * succeed later.
 */
int __weak __init watchdog_hardlockup_probe(void)
{
	/* Default: no arch-specific hardlockup detector present. */
	return -ENODEV;
}
324 
/**
 * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_hardlockup_stop();
 * update_variables();
 * watchdog_hardlockup_start();
 *
 * The default (weak) implementation is a no-op.
 */
void __weak watchdog_hardlockup_stop(void) { }
334 
/**
 * watchdog_hardlockup_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_hardlockup_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 *
 * The default (weak) implementation is a no-op.
 */
void __weak watchdog_hardlockup_start(void) { }
347 
/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the hard watchdogs are off, so this
 * can't race with watchdog_hardlockup_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	/* The global user switch gates both detectors. */
	if (!watchdog_user_enabled)
		return;
	if (watchdog_hardlockup_available && watchdog_hardlockup_user_enabled)
		watchdog_enabled |= WATCHDOG_HARDLOCKUP_ENABLED;
	if (watchdog_softlockup_user_enabled)
		watchdog_enabled |= WATCHDOG_SOFTOCKUP_ENABLED;
}
364 
365 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
366 
/*
 * Delay the softlockup report when running known slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
371 #define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX
372 
373 #ifdef CONFIG_SMP
374 int __read_mostly sysctl_softlockup_all_cpu_backtrace;
375 #endif
376 
377 /*
378  * bitmasks to control what kinds of system info to be printed when
379  * soft lockup is detected, it could be task, memory, lock etc.
380  * Refer include/linux/sys_info.h for detailed bit definition.
381  */
382 static unsigned long softlockup_si_mask;
383 
384 static struct cpumask watchdog_allowed_mask __read_mostly;
385 
386 /* Global variables, exported for sysctl */
387 unsigned int __read_mostly softlockup_panic =
388 			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC;
389 
390 static bool softlockup_initialized __read_mostly;
391 static u64 __read_mostly sample_period;
392 
393 #ifdef CONFIG_SYSFS
394 
395 static unsigned int softlockup_count;
396 
/* sysfs read handler: report how many soft lockups have been detected. */
static ssize_t softlockup_count_show(struct kobject *kobj, struct kobj_attribute *attr,
				     char *page)
{
	return sysfs_emit(page, "%u\n", softlockup_count);
}
402 
403 static struct kobj_attribute softlockup_count_attr = __ATTR_RO(softlockup_count);
404 
/* Expose /sys/kernel/softlockup_count; registration failure is best-effort. */
static __init int kernel_softlockup_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &softlockup_count_attr.attr, NULL);
	return 0;
}
410 
411 late_initcall(kernel_softlockup_sysfs_init);
412 
413 #endif // CONFIG_SYSFS
414 
415 /* Timestamp taken after the last successful reschedule. */
416 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
417 /* Timestamp of the last softlockup report. */
418 static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
419 static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
420 static DEFINE_PER_CPU(bool, softlockup_touch_sync);
421 static unsigned long soft_lockup_nmi_warn;
422 
/* Parse "softlockup_panic=" boot parameter (0 disables panicking). */
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
429 
/* "nowatchdog": disable both the soft and hard lockup detectors. */
static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);
436 
/* "nosoftlockup": disable only the softlockup detector. */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_softlockup_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
443 
/* Parse "watchdog_thresh=" boot parameter (threshold in seconds). */
static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
450 
451 #ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM
452 enum stats_per_group {
453 	STATS_SYSTEM,
454 	STATS_SOFTIRQ,
455 	STATS_HARDIRQ,
456 	STATS_IDLE,
457 	NUM_STATS_PER_GROUP,
458 };
459 
460 static const enum cpu_usage_stat tracked_stats[NUM_STATS_PER_GROUP] = {
461 	CPUTIME_SYSTEM,
462 	CPUTIME_SOFTIRQ,
463 	CPUTIME_IRQ,
464 	CPUTIME_IDLE,
465 };
466 
467 static DEFINE_PER_CPU(u16, cpustat_old[NUM_STATS_PER_GROUP]);
468 static DEFINE_PER_CPU(u8, cpustat_util[NUM_SAMPLE_PERIODS][NUM_STATS_PER_GROUP]);
469 static DEFINE_PER_CPU(u8, cpustat_tail);
470 
/*
 * Nanosecond resolution is not needed here. Quantizing to 2^24 ns
 * (~16.8 ms) buckets is precise enough and lets a u16 hold a cpustat
 * sample, rolling over roughly every ~1000 seconds (2^24 ~= 16 * 10^6).
 */
static u16 get_16bit_precision(u64 data_ns)
{
	/* Adding half a bucket makes the shift round to nearest. */
	const u64 half_bucket = 1ULL << 23;

	return (u16)((data_ns + half_bucket) >> 24);
}
485 
/*
 * Sample this CPU's cputime counters and record the utilization (in
 * percent of one sample_period) of each tracked stat into the
 * cpustat_util ring buffer. Runs from the watchdog hrtimer.
 */
static void update_cpustat(void)
{
	int i;
	u8 util;
	u16 old_stat, new_stat;
	struct kernel_cpustat kcpustat;
	u64 *cpustat = kcpustat.cpustat;
	u8 tail = __this_cpu_read(cpustat_tail);
	u16 sample_period_16 = get_16bit_precision(sample_period);

	kcpustat_cpu_fetch(&kcpustat, smp_processor_id());

	for (i = 0; i < NUM_STATS_PER_GROUP; i++) {
		old_stat = __this_cpu_read(cpustat_old[i]);
		new_stat = get_16bit_precision(cpustat[tracked_stats[i]]);
		util = DIV_ROUND_UP(100 * (new_stat - old_stat), sample_period_16);
		/*
		 * Since we use 16-bit precision, the raw data will undergo
		 * integer division, which may sometimes result in data loss,
		 * and then result might exceed 100%. To avoid confusion,
		 * we enforce a 100% display cap when calculations exceed this threshold.
		 */
		if (util > 100)
			util = 100;
		__this_cpu_write(cpustat_util[tail][i], util);
		__this_cpu_write(cpustat_old[i], new_stat);
	}

	/* Advance the ring buffer write position. */
	__this_cpu_write(cpustat_tail, (tail + 1) % NUM_SAMPLE_PERIODS);
}
516 
/*
 * Print the per-sample-period utilization history recorded in
 * cpustat_util for this CPU, oldest sample first.
 */
static void print_cpustat(void)
{
	int i, group;
	u8 tail = __this_cpu_read(cpustat_tail);
	u64 sample_period_msecond = sample_period;

	do_div(sample_period_msecond, NSEC_PER_MSEC);

	/*
	 * Outputting the "watchdog" prefix on every line is redundant and not
	 * concise, and the original alarm information is sufficient for
	 * positioning in logs, hence here printk() is used instead of pr_crit().
	 */
	printk(KERN_CRIT "CPU#%d Utilization every %llums during lockup:\n",
	       smp_processor_id(), sample_period_msecond);

	/* cpustat_tail is the next slot to write, i.e. the oldest entry. */
	for (i = 0; i < NUM_SAMPLE_PERIODS; i++) {
		group = (tail + i) % NUM_SAMPLE_PERIODS;
		printk(KERN_CRIT "\t#%d: %3u%% system,\t%3u%% softirq,\t"
			"%3u%% hardirq,\t%3u%% idle\n", i + 1,
			__this_cpu_read(cpustat_util[group][STATS_SYSTEM]),
			__this_cpu_read(cpustat_util[group][STATS_SOFTIRQ]),
			__this_cpu_read(cpustat_util[group][STATS_HARDIRQ]),
			__this_cpu_read(cpustat_util[group][STATS_IDLE]));
	}
}
543 
544 #define HARDIRQ_PERCENT_THRESH          50
545 #define NUM_HARDIRQ_REPORT              5
546 struct irq_counts {
547 	int irq;
548 	u32 counts;
549 };
550 
551 static DEFINE_PER_CPU(bool, snapshot_taken);
552 
/*
 * Tabulate the most frequent interrupts.
 *
 * Single bubbling pass over the top-@rank table: once the candidate's
 * count beats an entry, the candidate is swapped in and the displaced
 * entry continues bubbling towards the tail. NOTE(review): this relies
 * on the table staying sorted by count, descending, which the insertion
 * itself preserves.
 */
static void tabulate_irq_count(struct irq_counts *irq_counts, int irq, u32 counts, int rank)
{
	int i;
	struct irq_counts new_count = {irq, counts};

	for (i = 0; i < rank; i++) {
		if (counts > irq_counts[i].counts)
			swap(new_count, irq_counts[i]);
	}
}
564 
/*
 * If the hardirq time exceeds HARDIRQ_PERCENT_THRESH% of the sample_period,
 * then the cause of softlockup might be interrupt storm. In this case, it
 * would be useful to start interrupt counting.
 */
static bool need_counting_irqs(void)
{
	u8 util;
	int tail = __this_cpu_read(cpustat_tail);

	/* cpustat_tail is the next slot to fill; step back to the latest sample. */
	tail = (tail + NUM_SAMPLE_PERIODS - 1) % NUM_SAMPLE_PERIODS;
	util = __this_cpu_read(cpustat_util[tail][STATS_HARDIRQ]);
	return util > HARDIRQ_PERCENT_THRESH;
}
579 
580 static void start_counting_irqs(void)
581 {
582 	if (!__this_cpu_read(snapshot_taken)) {
583 		kstat_snapshot_irqs();
584 		__this_cpu_write(snapshot_taken, true);
585 	}
586 }
587 
/* Re-arm interrupt counting; the next storm will take a fresh snapshot. */
static void stop_counting_irqs(void)
{
	__this_cpu_write(snapshot_taken, false);
}
592 
/*
 * Print the NUM_HARDIRQ_REPORT most frequent hardirqs since the
 * snapshot taken by start_counting_irqs(), then stop counting when the
 * interrupt storm appears to be subsiding.
 */
static void print_irq_counts(void)
{
	unsigned int i, count;
	/* Top-N table, sorted by count descending; irq == -1 marks empty. */
	struct irq_counts irq_counts_sorted[NUM_HARDIRQ_REPORT] = {
		{-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}
	};

	if (__this_cpu_read(snapshot_taken)) {
		for_each_active_irq(i) {
			count = kstat_get_irq_since_snapshot(i);
			tabulate_irq_count(irq_counts_sorted, i, count, NUM_HARDIRQ_REPORT);
		}

		/*
		 * Outputting the "watchdog" prefix on every line is redundant and not
		 * concise, and the original alarm information is sufficient for
		 * positioning in logs, hence here printk() is used instead of pr_crit().
		 */
		printk(KERN_CRIT "CPU#%d Detect HardIRQ Time exceeds %d%%. Most frequent HardIRQs:\n",
		       smp_processor_id(), HARDIRQ_PERCENT_THRESH);

		for (i = 0; i < NUM_HARDIRQ_REPORT; i++) {
			if (irq_counts_sorted[i].irq == -1)
				break;

			printk(KERN_CRIT "\t#%u: %-10u\tirq#%d\n",
			       i + 1, irq_counts_sorted[i].counts,
			       irq_counts_sorted[i].irq);
		}

		/*
		 * If the hardirq time is less than HARDIRQ_PERCENT_THRESH% in the last
		 * sample_period, then we suspect the interrupt storm might be subsiding.
		 */
		if (!need_counting_irqs())
			stop_counting_irqs();
	}
}
631 
/* Emit the utilization history and, if counting, the top hardirqs. */
static void report_cpu_status(void)
{
	print_cpustat();
	print_irq_counts();
}
#else
/* CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM=n: storm tracking compiles out. */
static inline void update_cpustat(void) { }
static inline void report_cpu_status(void) { }
static inline bool need_counting_irqs(void) { return false; }
static inline void start_counting_irqs(void) { }
static inline void stop_counting_irqs(void) { }
#endif
644 
645 /*
646  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
647  * lockups can have false positives under extreme conditions. So we generally
648  * want a higher threshold for soft lockups than for hard lockups. So we couple
649  * the thresholds with a factor: we make the soft threshold twice the amount of
650  * time the hard threshold is.
651  */
652 static int get_softlockup_thresh(void)
653 {
654 	return watchdog_thresh * 2;
655 }
656 
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 *
 * Feeds both watchdog_touch_ts and watchdog_report_ts.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
666 
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by NUM_SAMPLE_PERIODS is to give the hrtimer several
	 * chances (two or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / NUM_SAMPLE_PERIODS);
	watchdog_update_hrtimer_threshold(sample_period);
}
679 
/* Start a new softlockup report period on this CPU. */
static void update_report_ts(void)
{
	__this_cpu_write(watchdog_report_ts, get_timestamp());
}

/*
 * Commands for resetting the watchdog: record a successful reschedule
 * and restart the report period.
 */
static void update_touch_ts(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
	update_report_ts();
}
691 
/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's watchdog
	 * report period gets restarted here, so use the raw_ operation.
	 * SOFTLOCKUP_DELAY_REPORT postpones the report without affecting
	 * watchdog_touch_ts, the reschedule timestamp.
	 */
	raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
708 
/* Touch the softlockup and workqueue watchdogs on the current CPU. */
notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
715 
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask) {
		per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
		wq_watchdog_touch(cpu);
	}
}
734 
/*
 * Touch the watchdog and request a sched_clock_tick() resync from the
 * next watchdog_timer_fn() run on this CPU.
 */
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
740 
/*
 * Return the stall duration (@now - @touch_ts, in ~seconds) once the
 * report period @period_ts has been exceeded by more than the softlockup
 * threshold, or 0 when nothing is to be reported yet. Also arms the
 * interrupt-storm counting and sched_ext ejection as the deadline nears.
 */
static int is_softlockup(unsigned long touch_ts,
			 unsigned long period_ts,
			 unsigned long now)
{
	if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
		/*
		 * If period_ts has not been updated during a sample_period, then
		 * in the subsequent few sample_periods, period_ts might also not
		 * be updated, which could indicate a potential softlockup. In
		 * this case, if we suspect the cause of the potential softlockup
		 * might be interrupt storm, then we need to count the interrupts
		 * to find which interrupt is storming.
		 */
		if (time_after_eq(now, period_ts + get_softlockup_thresh() / NUM_SAMPLE_PERIODS) &&
		    need_counting_irqs())
			start_counting_irqs();

		/*
		 * A poorly behaving BPF scheduler can live-lock the system into
		 * soft lockups. Tell sched_ext to try ejecting the BPF
		 * scheduler when close to a soft lockup.
		 */
		if (time_after_eq(now, period_ts + get_softlockup_thresh() * 3 / 4))
			scx_softlockup(now - touch_ts);

		/* Warn about unreasonable delays. */
		if (time_after(now, period_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
772 
773 /* watchdog detector functions */
774 static DEFINE_PER_CPU(struct completion, softlockup_completion);
775 static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
776 
/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	update_touch_ts();
	stop_counting_irqs();
	/* Let watchdog_timer_fn() queue the next feeder run. */
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}
793 
/* watchdog kicker functions */
/*
 * Per-CPU hrtimer callback: kicks the hardlockup counter, re-arms the
 * softlockup feeder task, and reports a soft lockup when the report
 * period has not been refreshed within the threshold.
 */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts, period_ts, now;
	struct pt_regs *regs = get_irq_regs();
	int softlockup_all_cpu_backtrace;
	int duration, thresh_count;
	unsigned long flags;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/*
	 * pass the buddy check if a panic is in process
	 */
	if (panic_in_progress())
		return HRTIMER_NORESTART;

	softlockup_all_cpu_backtrace = (softlockup_si_mask & SYS_INFO_ALL_BT) ?
					1 : sysctl_softlockup_all_cpu_backtrace;

	watchdog_hardlockup_kick();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	/*
	 * Read the current timestamp first. It might become invalid anytime
	 * when a virtual machine is stopped by the host or when the watchdog
	 * is touched from NMI.
	 */
	now = get_timestamp();
	/*
	 * If a virtual machine is stopped by the host it can look to
	 * the watchdog like a soft lockup. This function touches the watchdog.
	 */
	kvm_check_and_clear_guest_paused();
	/*
	 * The stored timestamp is comparable with @now only when not touched.
	 * It might get touched anytime from NMI. Make sure that is_softlockup()
	 * uses the same (valid) value.
	 */
	period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));

	update_cpustat();

	/* Reset the interval when touched by known problematic code. */
	if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		update_report_ts();
		return HRTIMER_RESTART;
	}

	/* Check for a softlockup. */
	touch_ts = __this_cpu_read(watchdog_touch_ts);
	duration = is_softlockup(touch_ts, period_ts, now);
	if (unlikely(duration)) {
#ifdef CONFIG_SYSFS
		++softlockup_count;
#endif

		/*
		 * Prevent multiple soft-lockup reports if one cpu is already
		 * engaged in dumping all cpu back traces.
		 */
		if (softlockup_all_cpu_backtrace) {
			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
				return HRTIMER_RESTART;
		}

		/* Start period for the next softlockup warning. */
		update_report_ts();

		printk_cpu_sync_get_irqsave(flags);
		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		report_cpu_status();
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();
		printk_cpu_sync_put_irqrestore(flags);

		if (softlockup_all_cpu_backtrace) {
			trigger_allbutcpu_cpu_backtrace(smp_processor_id());
			if (!softlockup_panic)
				clear_bit_unlock(0, &soft_lockup_nmi_warn);
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		sys_info(softlockup_si_mask & ~SYS_INFO_ALL_BT);
		/* Number of whole soft thresholds the CPU has been stuck for. */
		thresh_count = duration / get_softlockup_thresh();

		if (softlockup_panic && thresh_count >= softlockup_panic)
			panic("softlockup: hung tasks");
	}

	return HRTIMER_RESTART;
}
912 
/*
 * Start the softlockup hrtimer on this CPU and, when configured, the
 * hardlockup detector. Must run on @cpu itself.
 */
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/* Mark the feeder completion done so the timer can re-arm it. */
	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the hardlockup watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_setup(hrtimer, watchdog_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	update_touch_ts();
	/* Enable the hardlockup detector */
	if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
		watchdog_hardlockup_enable(cpu);
}
937 
/* Stop both detectors on this CPU. Must run on @cpu itself. */
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the hardlockup detector first. That prevents that a large
	 * delay between disabling the timer and disabling the hardlockup
	 * detector causes a false positive.
	 */
	watchdog_hardlockup_disable(cpu);
	hrtimer_cancel(hrtimer);
	/* Wait for an in-flight softlockup_fn() feeder to finish. */
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
953 
/* smp_call_on_cpu() callback: disable the watchdog on the executing CPU. */
static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

/* Stop the watchdog on every allowed CPU and clear the allowed mask. */
static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}
972 
/* smp_call_on_cpu() callback: enable the watchdog on the executing CPU. */
static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

/* Start the watchdog on every CPU allowed by watchdog_cpumask. */
static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}
987 
988 int lockup_detector_online_cpu(unsigned int cpu)
989 {
990 	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
991 		watchdog_enable(cpu);
992 	return 0;
993 }
994 
995 int lockup_detector_offline_cpu(unsigned int cpu)
996 {
997 	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
998 		watchdog_disable(cpu);
999 	return 0;
1000 }
1001 
/*
 * Tear down and restart the detectors with the current configuration
 * (enable bits, threshold, cpumask).
 *
 * All visible callers hold watchdog_mutex around this function.
 */
static void __lockup_detector_reconfigure(bool thresh_changed)
{
	cpus_read_lock();
	/* Keep the hardlockup detector quiet while reconfiguring. */
	watchdog_hardlockup_stop();

	softlockup_stop_all();
	/*
	 * To prevent watchdog_timer_fn from using the old interval and
	 * the new watchdog_thresh at the same time, which could lead to
	 * false softlockup reports, it is necessary to update the
	 * watchdog_thresh after the softlockup is completed.
	 */
	if (thresh_changed)
		watchdog_thresh = READ_ONCE(watchdog_thresh_next);
	set_sample_period();
	lockup_detector_update_enable();
	/* Restart only when both the enable bits and the threshold are set. */
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_hardlockup_start();
	cpus_read_unlock();
}
1024 
/* Public entry point: reconfigure the detectors, no threshold change. */
void lockup_detector_reconfigure(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure(false);
	mutex_unlock(&watchdog_mutex);
}
1031 
/*
 * Create the watchdog infrastructure and configure the detector(s).
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure(false);
	/* From now on softlockup_stop_all() is allowed to do real work. */
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}
1052 
1053 #else /* CONFIG_SOFTLOCKUP_DETECTOR */
/*
 * Without CONFIG_SOFTLOCKUP_DETECTOR only the hardlockup detector needs
 * to be cycled; there are no per-CPU softlockup timers to stop/restart.
 */
static void __lockup_detector_reconfigure(bool thresh_changed)
{
	cpus_read_lock();
	watchdog_hardlockup_stop();
	if (thresh_changed)
		watchdog_thresh = READ_ONCE(watchdog_thresh_next);
	lockup_detector_update_enable();
	watchdog_hardlockup_start();
	cpus_read_unlock();
}
/*
 * NOTE(review): unlike the CONFIG_SOFTLOCKUP_DETECTOR variant, no
 * watchdog_mutex is taken here — confirm callers serialize, or that
 * serialization is unnecessary in this configuration.
 */
void lockup_detector_reconfigure(void)
{
	__lockup_detector_reconfigure(false);
}
/* Boot-time setup: just apply the current configuration. */
static inline void lockup_detector_setup(void)
{
	__lockup_detector_reconfigure(false);
}
1072 #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
1073 
/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	/* Plain store, no locking or teardown: only used on the poweroff path. */
	watchdog_enabled = 0;
}
1084 
1085 #ifdef CONFIG_SYSCTL
1086 
1087 /* Propagate any changes to the watchdog infrastructure */
1088 static void proc_watchdog_update(bool thresh_changed)
1089 {
1090 	/* Remove impossible cpus to keep sysctl output clean. */
1091 	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
1092 	__lockup_detector_reconfigure(thresh_changed);
1093 }
1094 
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to            | 'which'
 * -------------------|----------------------------------|-------------------------------
 * proc_watchdog      | watchdog_user_enabled            | WATCHDOG_HARDLOCKUP_ENABLED |
 *                    |                                  | WATCHDOG_SOFTOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_nmi_watchdog  | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
 */
static int proc_watchdog_common(int which, const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	/* *param aliases the user-visible knob named in the table above. */
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	old = *param;
	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		/* Restore the stored value; *param was only a scratch copy. */
		*param = old;
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		/* Only reconfigure when the write actually changed the value. */
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update(false);
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}
1131 
1132 /*
1133  * /proc/sys/kernel/watchdog
1134  */
1135 static int proc_watchdog(const struct ctl_table *table, int write,
1136 			 void *buffer, size_t *lenp, loff_t *ppos)
1137 {
1138 	return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
1139 				    WATCHDOG_SOFTOCKUP_ENABLED,
1140 				    table, write, buffer, lenp, ppos);
1141 }
1142 
1143 /*
1144  * /proc/sys/kernel/nmi_watchdog
1145  */
1146 static int proc_nmi_watchdog(const struct ctl_table *table, int write,
1147 			     void *buffer, size_t *lenp, loff_t *ppos)
1148 {
1149 	if (!watchdog_hardlockup_available && write)
1150 		return -ENOTSUPP;
1151 	return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED,
1152 				    table, write, buffer, lenp, ppos);
1153 }
1154 
1155 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
1156 /*
1157  * /proc/sys/kernel/soft_watchdog
1158  */
1159 static int proc_soft_watchdog(const struct ctl_table *table, int write,
1160 			      void *buffer, size_t *lenp, loff_t *ppos)
1161 {
1162 	return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
1163 				    table, write, buffer, lenp, ppos);
1164 }
1165 #endif
1166 
/*
 * /proc/sys/kernel/watchdog_thresh
 *
 * Writes land in watchdog_thresh_next; the live watchdog_thresh is only
 * updated from it inside __lockup_detector_reconfigure(), after the
 * softlockup timers have been stopped (see the comment there).
 */
static int proc_watchdog_thresh(const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	/* Resync so reads report the currently active threshold. */
	watchdog_thresh_next = READ_ONCE(watchdog_thresh);

	old = watchdog_thresh_next;
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	/* Only reconfigure on a successful write that changed the value. */
	if (!err && write && old != READ_ONCE(watchdog_thresh_next))
		proc_watchdog_update(true);

	mutex_unlock(&watchdog_mutex);
	return err;
}
1188 
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
static int proc_watchdog_cpumask(const struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	/* Any successful write triggers a full reconfigure. */
	if (!err && write)
		proc_watchdog_update(false);

	mutex_unlock(&watchdog_mutex);
	return err;
}
1209 
/* Upper bound for watchdog_thresh (seconds); see watchdog_sysctls[]. */
static const int sixty = 60;
1211 
/*
 * sysctl knobs registered under /proc/sys/kernel by
 * watchdog_sysctl_init(). Entries whose handler is one of the
 * proc_*watchdog* functions above reconfigure the detectors on write;
 * the plain proc_dointvec_minmax entries are read directly by the
 * detector code.
 */
static const struct ctl_table watchdog_sysctls[] = {
	{
		.procname       = "watchdog",
		.data		= &watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler   = proc_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "watchdog_thresh",
		.data		= &watchdog_thresh_next,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog_thresh,
		.extra1		= SYSCTL_ZERO,
		.extra2		= (void *)&sixty,
	},
	{
		.procname	= "watchdog_cpumask",
		.data		= &watchdog_cpumask_bits,
		.maxlen		= NR_CPUS,
		.mode		= 0644,
		.proc_handler	= proc_watchdog_cpumask,
	},
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
	{
		.procname       = "soft_watchdog",
		.data		= &watchdog_softlockup_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler   = proc_soft_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "softlockup_panic",
		.data		= &softlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_INT_MAX,
	},
	{
		.procname	= "softlockup_sys_info",
		.data		= &softlockup_si_mask,
		.maxlen         = sizeof(softlockup_si_mask),
		.mode		= 0644,
		.proc_handler	= sysctl_sys_info_handler,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "softlockup_all_cpu_backtrace",
		.data		= &sysctl_softlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
	{
		.procname	= "hardlockup_panic",
		.data		= &hardlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "hardlockup_sys_info",
		.data		= &hardlockup_si_mask,
		.maxlen         = sizeof(hardlockup_si_mask),
		.mode		= 0644,
		.proc_handler	= sysctl_sys_info_handler,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "hardlockup_all_cpu_backtrace",
		.data		= &sysctl_hardlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif
	{
		.procname       = "nmi_watchdog",
		.data		= &watchdog_hardlockup_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler   = proc_nmi_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};
1315 
/* Register the watchdog sysctl table under /proc/sys/kernel. */
static void __init watchdog_sysctl_init(void)
{
	register_sysctl_init("kernel", watchdog_sysctls);
}
1320 
1321 #else
1322 #define watchdog_sysctl_init() do { } while (0)
1323 #endif /* CONFIG_SYSCTL */
1324 
static void __init lockup_detector_delay_init(struct work_struct *work);
/* True while a retry of the hardlockup probe is still permitted (init only). */
static bool allow_lockup_detector_init_retry __initdata;

/* Work item used to rerun the hardlockup probe from process context. */
static struct work_struct detector_work __initdata =
		__WORK_INITIALIZER(detector_work, lockup_detector_delay_init);
1330 
/*
 * Deferred retry of the hardlockup detector probe (runs from
 * detector_work). On success the detector infrastructure is set up; on
 * failure the hard watchdog stays permanently disabled.
 */
static void __init lockup_detector_delay_init(struct work_struct *work)
{
	int ret;

	ret = watchdog_hardlockup_probe();
	if (ret) {
		if (ret == -ENODEV)
			pr_info("NMI not fully supported\n");
		else
			pr_info("Delayed init of the lockup detector failed: %d\n", ret);
		pr_info("Hard watchdog permanently disabled\n");
		return;
	}

	/* Probe succeeded: no further retries are needed. */
	allow_lockup_detector_init_retry = false;

	watchdog_hardlockup_available = true;
	lockup_detector_setup();
}
1350 
/*
 * lockup_detector_retry_init - retry init lockup detector if possible.
 *
 * Retry hardlockup detector init. It is useful when it requires some
 * functionality that has to be initialized later on a particular
 * platform.
 */
void __init lockup_detector_retry_init(void)
{
	/* Must be called before late init calls */
	if (!allow_lockup_detector_init_retry)
		return;

	schedule_work(&detector_work);
}
1366 
1367 /*
1368  * Ensure that optional delayed hardlockup init is proceed before
1369  * the init code and memory is freed.
1370  */
1371 static int __init lockup_detector_check(void)
1372 {
1373 	/* Prevent any later retry. */
1374 	allow_lockup_detector_init_retry = false;
1375 
1376 	/* Make sure no work is pending. */
1377 	flush_work(&detector_work);
1378 
1379 	watchdog_sysctl_init();
1380 
1381 	return 0;
1382 
1383 }
1384 late_initcall_sync(lockup_detector_check);
1385 
/* Boot-time entry point: pick the default cpumask, probe hardware, set up. */
void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	/* Default to the housekeeping (timer) CPUs only. */
	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_TYPE_TIMER));

	if (!watchdog_hardlockup_probe())
		watchdog_hardlockup_available = true;
	else
		/* Probe failed: allow lockup_detector_retry_init() later. */
		allow_lockup_detector_init_retry = true;

	lockup_detector_setup();
}
1401