xref: /linux/kernel/watchdog.c (revision 7f8998c7aef3ac9c5f3f2943e083dfa6302e90d0)
/*
 * Detect hard and soft lockups on a system.
 *
 * Started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks were also taken from the old x86-specific NMI watchdog code;
 * thanks to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif

static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

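/*
 * Theory of operation (a summary of the mechanisms below):
 *
 * A per-CPU hrtimer (watchdog_timer_fn) fires every sample_period,
 * increments hrtimer_interrupts and wakes a high-priority (SCHED_FIFO)
 * per-CPU thread whose job is to refresh watchdog_touch_ts.
 *
 * Soft lockup: if the hrtimer observes that the thread has not managed
 * to refresh the timestamp for 2*watchdog_thresh seconds, some task is
 * monopolizing the CPU and keeping even an RT thread from running.
 *
 * Hard lockup (CONFIG_HARDLOCKUP_DETECTOR): a perf NMI fires roughly
 * every watchdog_thresh seconds; if hrtimer_interrupts has not moved
 * since the previous NMI, interrupts themselves are no longer being
 * serviced on that CPU.
 */
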
/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_user_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
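
/*
 * Example kernel command lines handled by the __setup() hooks in this
 * file (values taken straight from the parsers above and below):
 *
 *   nmi_watchdog=panic                panic on a hard lockup
 *   nmi_watchdog=nopanic              warn only
 *   nmi_watchdog=0                    disable the watchdog entirely
 *   softlockup_panic=1                panic on a soft lockup
 *   nowatchdog                        disable the watchdog entirely
 *   nosoftlockup                      ditto (deprecated spelling)
 *   softlockup_all_cpu_backtrace=1    dump all CPUs' stacks on soft lockup
 */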

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds.
 * Soft-lockups can have false positives under extreme conditions, so we
 * generally want a higher threshold for soft lockups than for hard
 * lockups.  The two thresholds are therefore coupled by a factor: the
 * soft threshold is twice the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return local_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
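
/*
 * Worked example of the shift above: local_clock() == 5 000 000 000 ns
 * gives 5e9 >> 30 == 4, i.e. each unit is ~1.074 real seconds.  The
 * ~7% error against a true divide by 10^9 is irrelevant for deciding
 * whether a CPU has been stuck for tens of seconds.
 */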

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 *
	 * The divide by 5 gives the hrtimer several chances (two or
	 * three, with the current relation between the soft and hard
	 * thresholds) to increment before the hardlockup detector
	 * generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
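
/*
 * With the default watchdog_thresh of 10, the numbers work out as:
 *
 *   get_softlockup_thresh() = 10 * 2           = 20 s
 *   sample_period           = 20 * (1e9 / 5)   = 4e9 ns = 4 s
 *
 * so the hrtimer fires every 4 seconds, the softlockup check trips
 * after 20 seconds, and the hardlockup NMI (roughly every 10 seconds)
 * normally sees two or three hrtimer ticks between samples.
 */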

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done locklessly.  Do we care if a 0 races with a
	 * timestamp?  All it means is that the softlockup check starts
	 * one cycle later.
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	__raw_get_cpu_var(watchdog_nmi_touch) = true;
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}
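
/*
 * Note on the _sync variant above: callers use it when the scheduler
 * clock itself may be stale (e.g. after a debugger held the CPU).
 * Setting softlockup_touch_sync makes watchdog_timer_fn() call
 * sched_clock_tick() to resync the clock before rearming the check.
 */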

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif
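
/*
 * How is_hardlockup() works: it runs from the perf NMI, which fires
 * about every watchdog_thresh seconds, while the hrtimer fires every
 * sample_period (two to three times as often).  If hrtimer_interrupts
 * has not advanced since the previous NMI, the hrtimer has been
 * starved for a whole NMI period, i.e. interrupts are stuck off.
 */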

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,	/* count CPU cycles */
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,	/* keep the event on the PMU permanently */
	.disabled	= 1,	/* created disabled, enabled later */
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch)) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup.  This is done by making sure our timer
	 * interrupt is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflowed.  If it hasn't,
	 * then this is a good indication the CPU is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn))
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d",
			      this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup.  This is done by making sure a
	 * high-priority task is being scheduled.  The task touches the
	 * watchdog to indicate it is getting CPU time.  If it hasn't,
	 * then this is a good indication some task is hogging the CPU.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup; check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn)) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one CPU is
			 * already engaged in dumping CPU back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current,
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Boost our priority and initialize the timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}
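
/*
 * watchdog_should_run() gates the smpboot thread: the thread runs only
 * when hrtimer_interrupts has moved past soft_lockup_hrtimer_cnt, i.e.
 * the hrtimer has fired since the last time the thread ran.  watchdog()
 * below then syncs the two counters, so the thread goes back to sleep
 * until the next hrtimer tick.
 */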

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by default) to
 * reset the softlockup timestamp.  If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple, clean per-CPU info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
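
	/*
	 * Note on the sample period: hw_nmi_get_sample_period() is
	 * arch-specific.  On x86 it works out to roughly
	 * cpu_khz * 1000 * watchdog_thresh, i.e. the number of CPU
	 * cycles executed in watchdog_thresh seconds, so the counter
	 * overflows (and the NMI fires) about once per threshold period.
	 */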

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						 watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0, or when cpu0 failed (so this differs) */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
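
/*
 * smpboot_register_percpu_thread() spawns one "watchdog/%u" thread per
 * online CPU and calls .setup on each before the thread first runs.
 * On CPU hotplug the thread is parked/unparked rather than destroyed,
 * so .park/.unpark tear down and re-arm the hrtimer and perf event.
 */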

static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart the hrtimer if it is currently
	 * executing, because it will reprogram itself with the new period
	 * now.  We should never see it unqueued here because we are running
	 * per-cpu with interrupts disabled.  (hrtimer_try_to_cancel()
	 * returns 1 only when the timer was queued and is now cancelled.)
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
				HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
	/*
	 * Make sure the perf event counter will adapt to the new
	 * sampling period.  Updating the sampling period directly would
	 * be much nicer, but we do not have an API for that now, so
	 * let's use a big hammer.
	 * The hrtimer will adopt the new period on the next tick, but
	 * this might already be too late, so we have to restart the
	 * timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		update_timers(cpu);
	put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else if (sample_period_changed) {
		update_timers_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and
 * /proc/sys/kernel/watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old_thresh, old_enabled;
	static DEFINE_MUTEX(watchdog_proc_mutex);

	mutex_lock(&watchdog_proc_mutex);
	old_thresh = ACCESS_ONCE(watchdog_thresh);
	old_enabled = ACCESS_ONCE(watchdog_user_enabled);

	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (err || !write)
		goto out;

	set_sample_period();
	/*
	 * Calling these unconditionally is safe: the 'watchdog_running'
	 * check in watchdog_*_all_cpus() makes enabling and disabling
	 * idempotent, so already-running threads are not started twice
	 * and already-stopped ones are not stopped twice.
	 */
	if (watchdog_user_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
	else
		watchdog_disable_all_cpus();

	/* Restore old values on failure */
	if (err) {
		watchdog_thresh = old_thresh;
		watchdog_user_enabled = old_enabled;
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */
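
/*
 * Example of poking this handler from userspace (assuming the usual
 * sysctl names are wired up to proc_dowatchdog in kernel/sysctl.c):
 *
 *   # raise the hard threshold to 20s (the soft threshold becomes 40s):
 *   sysctl -w kernel.watchdog_thresh=20
 *
 *   # turn the watchdog off entirely:
 *   echo 0 > /proc/sys/kernel/nmi_watchdog
 */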

void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_user_enabled)
		watchdog_enable_all_cpus(false);
}
668