1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2e5553a6dSDavid S. Miller /* Pseudo NMI support on sparc64 systems.
3e5553a6dSDavid S. Miller *
4e5553a6dSDavid S. Miller * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
5e5553a6dSDavid S. Miller *
6e5553a6dSDavid S. Miller * The NMI watchdog support and infrastructure is based almost
7e5553a6dSDavid S. Miller * entirely upon the x86 NMI support code.
8e5553a6dSDavid S. Miller */
9e5553a6dSDavid S. Miller #include <linux/kernel.h>
10e5553a6dSDavid S. Miller #include <linux/param.h>
11e5553a6dSDavid S. Miller #include <linux/init.h>
12e5553a6dSDavid S. Miller #include <linux/percpu.h>
13e5553a6dSDavid S. Miller #include <linux/nmi.h>
14066bcacaSPaul Gortmaker #include <linux/export.h>
15e5553a6dSDavid S. Miller #include <linux/kprobes.h>
16e5553a6dSDavid S. Miller #include <linux/kernel_stat.h>
17ffaba674SDavid S. Miller #include <linux/reboot.h>
18e5553a6dSDavid S. Miller #include <linux/slab.h>
19e5553a6dSDavid S. Miller #include <linux/kdebug.h>
20e5553a6dSDavid S. Miller #include <linux/delay.h>
21e5553a6dSDavid S. Miller #include <linux/smp.h>
22e5553a6dSDavid S. Miller
23cdd6c482SIngo Molnar #include <asm/perf_event.h>
24e5553a6dSDavid S. Miller #include <asm/ptrace.h>
25e5553a6dSDavid S. Miller #include <asm/pcr.h>
26e5553a6dSDavid S. Miller
27ec687886SDavid S. Miller #include "kstack.h"
28ec687886SDavid S. Miller
29e5553a6dSDavid S. Miller /* We don't have a real NMI on sparc64, but we can fake one
30e5553a6dSDavid S. Miller * up using profiling counter overflow interrupts and interrupt
31e5553a6dSDavid S. Miller * levels.
32e5553a6dSDavid S. Miller *
33e5553a6dSDavid S. Miller * The profile overflow interrupts at level 15, so we use
34e5553a6dSDavid S. Miller * level 14 as our IRQ off level.
35e5553a6dSDavid S. Miller */
36e5553a6dSDavid S. Miller
/* Set by "nmi_watchdog=panic" on the kernel command line. */
static int panic_on_timeout;

/* nmi_active:
 * >0: the NMI watchdog is active, but can be disabled
 * <0: the NMI watchdog has not been set up, and cannot be enabled
 *  0: the NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
EXPORT_SYMBOL(nmi_active);
/* Nonzero once nmi_init() has completed; gates hardlockup enable requests. */
static int nmi_init_done;
/* Watchdog NMI rate in interrupts per second; lowered once the
 * init-time test in check_nmi_watchdog() passes.
 */
static unsigned int nmi_hz = HZ;
/* Per-cpu flag: nonzero when this cpu's watchdog counter is armed. */
static DEFINE_PER_CPU(short, wd_enabled);
/* Set to release the nmi_cpu_busy() spinners used by the init-time test. */
static int endflag __initdata;

/* Snapshot of irq0_irqs taken at the previous NMI on this cpu. */
static DEFINE_PER_CPU(unsigned int, last_irq_sum);
/* Consecutive NMIs observed with no forward progress on this cpu. */
static DEFINE_PER_CPU(long, alert_counter);
/* Set by arch_touch_nmi_watchdog() to suppress the next lockup check. */
static DEFINE_PER_CPU(int, nmi_touch);
54e5553a6dSDavid S. Miller
arch_touch_nmi_watchdog(void)55f2e0cff8SNicholas Piggin void arch_touch_nmi_watchdog(void)
56e5553a6dSDavid S. Miller {
57d89be56bSDavid S. Miller if (atomic_read(&nmi_active)) {
58e5553a6dSDavid S. Miller int cpu;
59e5553a6dSDavid S. Miller
60e5553a6dSDavid S. Miller for_each_present_cpu(cpu) {
61e5553a6dSDavid S. Miller if (per_cpu(nmi_touch, cpu) != 1)
62e5553a6dSDavid S. Miller per_cpu(nmi_touch, cpu) = 1;
63e5553a6dSDavid S. Miller }
64e5553a6dSDavid S. Miller }
65e5553a6dSDavid S. Miller }
66f2e0cff8SNicholas Piggin EXPORT_SYMBOL(arch_touch_nmi_watchdog);
67e5553a6dSDavid S. Miller
/* Generic hardlockup-detector probe hook.  The sparc64 NMI machinery is
 * brought up separately in nmi_init(), so simply report success here.
 */
int __init watchdog_hardlockup_probe(void)
{
	return 0;
}
726426e8d1SDouglas Anderson
die_nmi(const char * str,struct pt_regs * regs,int do_panic)73e5553a6dSDavid S. Miller static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
74e5553a6dSDavid S. Miller {
7516ce8a30SDavid S. Miller int this_cpu = smp_processor_id();
7616ce8a30SDavid S. Miller
77e5553a6dSDavid S. Miller if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
78e5553a6dSDavid S. Miller pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
79e5553a6dSDavid S. Miller return;
80e5553a6dSDavid S. Miller
81e5553a6dSDavid S. Miller if (do_panic || panic_on_oops)
8216ce8a30SDavid S. Miller panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
8316ce8a30SDavid S. Miller else
8416ce8a30SDavid S. Miller WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
85e5553a6dSDavid S. Miller }
86e5553a6dSDavid S. Miller
/* The level-15 profile-counter overflow interrupt: sparc64's pseudo-NMI.
 * Checks whether this cpu has made forward progress (its timer interrupt
 * count advanced) since the previous NMI, and calls die_nmi() once
 * 30 * nmi_hz consecutive NMIs see no progress.
 */
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
	unsigned int sum, touched = 0;
	void *orig_sp;

	clear_softint(1 << irq);

	local_cpu_data().__nmi_count++;

	nmi_enter();

	/* Switch to the hard-irq stack; restored before nmi_exit(). */
	orig_sp = set_hardirq_stack();

	if (notify_die(DIE_NMI, "nmi", regs, 0,
		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
		touched = 1;	/* a notifier consumed this NMI; skip the check */
	else
		pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);

	/* irq0_irqs advances with normal timer interrupts; if it has not
	 * moved, this cpu has been stuck with interrupts disabled.
	 */
	sum = local_cpu_data().irq0_irqs;
	if (__this_cpu_read(nmi_touch)) {
		__this_cpu_write(nmi_touch, 0);
		touched = 1;
	}
	if (!touched && __this_cpu_read(last_irq_sum) == sum) {
		__this_cpu_inc(alert_counter);
		/* 30 * nmi_hz NMIs with no progress == roughly 30 seconds. */
		if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		/* Progress (or a touch) seen: reset the stall counter. */
		__this_cpu_write(last_irq_sum, sum);
		__this_cpu_write(alert_counter, 0);
	}
	if (__this_cpu_read(wd_enabled)) {
		/* Re-arm the counter so it overflows again in 1/nmi_hz sec. */
		pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
		pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
	}

	restore_hardirq_stack(orig_sp);

	nmi_exit();
}
129e5553a6dSDavid S. Miller
/* Number of pseudo-NMIs the given cpu has taken so far. */
static inline unsigned int get_nmi_count(int cpu)
{
	return cpu_data(cpu).__nmi_count;
}
134e5553a6dSDavid S. Miller
nmi_cpu_busy(void * data)135e5553a6dSDavid S. Miller static __init void nmi_cpu_busy(void *data)
136e5553a6dSDavid S. Miller {
137e5553a6dSDavid S. Miller while (endflag == 0)
138e5553a6dSDavid S. Miller mb();
139e5553a6dSDavid S. Miller }
140e5553a6dSDavid S. Miller
/* The init-time test saw no (or too few) NMIs on this cpu: warn the
 * user, then disable the watchdog on that cpu.
 */
static void report_broken_nmi(int cpu, int *prev_nmi_count)
{
	printk(KERN_CONT "\n");

	printk(KERN_WARNING
		"WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			cpu, prev_nmi_count[cpu], get_nmi_count(cpu));

	printk(KERN_WARNING
		"Please report this to bugzilla.kernel.org,\n");
	printk(KERN_WARNING
		"and attach the output of the 'dmesg' command.\n");

	/* Mark this cpu's watchdog dead and drop it from the active count. */
	per_cpu(wd_enabled, cpu) = 0;
	atomic_dec(&nmi_active);
}
157e5553a6dSDavid S. Miller
/* Disarm the watchdog on the calling cpu (invoked via on_each_cpu() or
 * smp_call_function_single()).  No-op if already disabled here.
 */
void stop_nmi_watchdog(void *unused)
{
	if (!__this_cpu_read(wd_enabled))
		return;
	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
	__this_cpu_write(wd_enabled, 0);
	atomic_dec(&nmi_active);
}
166e5553a6dSDavid S. Miller
/* Init-time sanity test: keep the other cpus busy, wait ~20 watchdog
 * ticks, and verify that each enabled cpu's NMI count advanced.  Cpus
 * whose counters appear stuck get their watchdog disabled; if none
 * survive, the whole facility is marked unavailable (nmi_active = -1).
 */
static int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu, err;

	if (!atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc_array(nr_cpu_ids, sizeof(unsigned int),
				       GFP_KERNEL);
	if (!prev_nmi_count) {
		err = -ENOMEM;
		goto error;
	}

	printk(KERN_INFO "Testing NMI watchdog ... ");

	/* Spin the other cpus in nmi_cpu_busy() until endflag is set. */
	smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);
	local_irq_enable();
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		/* Fewer than ~5 NMIs in 20 ticks: counter looks stuck. */
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
			report_broken_nmi(cpu, prev_nmi_count);
	}
	/* Release the nmi_cpu_busy() spinners. */
	endflag = 1;
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		/* Every cpu failed: watchdog permanently unavailable. */
		atomic_set(&nmi_active, -1);
		err = -ENODEV;
		goto error;
	}
	printk("OK.\n");

	/* Once verified, one NMI per second is enough for lockup detection. */
	nmi_hz = 1;

	kfree(prev_nmi_count);
	return 0;
error:
	on_each_cpu(stop_nmi_watchdog, NULL, 1);
	return err;
}
214e5553a6dSDavid S. Miller
/* Arm the watchdog on the calling cpu (invoked via on_each_cpu() or
 * smp_call_function_single()).  No-op if already enabled here.
 */
void start_nmi_watchdog(void *unused)
{
	if (__this_cpu_read(wd_enabled))
		return;

	__this_cpu_write(wd_enabled, 1);
	atomic_inc(&nmi_active);

	/* Quiesce the counter, program the overflow period, then enable. */
	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
	pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));

	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
}
228a8f22264SDavid S. Miller
/* Reprogram the calling cpu's counter for the current nmi_hz rate,
 * if its watchdog is armed.
 */
static void nmi_adjust_hz_one(void *unused)
{
	if (!__this_cpu_read(wd_enabled))
		return;

	/* Quiesce the counter, reload the overflow period, then re-enable. */
	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
	pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));

	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
}
239e5553a6dSDavid S. Miller
/* Change the watchdog NMI rate to new_hz interrupts per second on all cpus. */
void nmi_adjust_hz(unsigned int new_hz)
{
	nmi_hz = new_hz;
	on_each_cpu(nmi_adjust_hz_one, NULL, 1);
}
EXPORT_SYMBOL_GPL(nmi_adjust_hz);
246e5553a6dSDavid S. Miller
/* Reboot notifier: disarm the watchdog on every cpu before restart, so
 * stray counter overflows cannot fire during shutdown.
 */
static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
{
	on_each_cpu(stop_nmi_watchdog, NULL, 1);
	return 0;
}

static struct notifier_block nmi_reboot_notifier = {
	.notifier_call = nmi_shutdown,
};
256ffaba674SDavid S. Miller
/* Bring up the NMI watchdog on all cpus, verify it actually ticks, and
 * register the reboot-time teardown.  On any failure the watchdog is
 * stopped everywhere; a notifier-registration failure additionally
 * marks the facility permanently unavailable (nmi_active = -1).
 */
int __init nmi_init(void)
{
	int err;

	on_each_cpu(start_nmi_watchdog, NULL, 1);

	err = check_nmi_watchdog();
	if (!err) {
		err = register_reboot_notifier(&nmi_reboot_notifier);
		if (err) {
			on_each_cpu(stop_nmi_watchdog, NULL, 1);
			atomic_set(&nmi_active, -1);
		}
	}

	/* Let watchdog_hardlockup_enable() act from now on. */
	nmi_init_done = 1;

	return err;
}
276e5553a6dSDavid S. Miller
setup_nmi_watchdog(char * str)277e5553a6dSDavid S. Miller static int __init setup_nmi_watchdog(char *str)
278e5553a6dSDavid S. Miller {
279e5553a6dSDavid S. Miller if (!strncmp(str, "panic", 5))
280e5553a6dSDavid S. Miller panic_on_timeout = 1;
281e5553a6dSDavid S. Miller
282*3ed7c61eSRandy Dunlap return 1;
283e5553a6dSDavid S. Miller }
284e5553a6dSDavid S. Miller __setup("nmi_watchdog=", setup_nmi_watchdog);
2857a5c8b57SBabu Moger
2867a5c8b57SBabu Moger /*
2877a5c8b57SBabu Moger * sparc specific NMI watchdog enable function.
2887a5c8b57SBabu Moger * Enables watchdog if it is not enabled already.
2897a5c8b57SBabu Moger */
/* Generic hardlockup-detector hook: arm the sparc watchdog on @cpu,
 * unless the facility is unavailable or nmi_init() has not run yet.
 */
void watchdog_hardlockup_enable(unsigned int cpu)
{
	/* nmi_active == -1 means setup failed permanently. */
	if (atomic_read(&nmi_active) == -1) {
		pr_warn("NMI watchdog cannot be enabled or disabled\n");
		return;
	}

	/*
	 * watchdog thread could start even before nmi_init is called.
	 * Just Return in that case. Let nmi_init finish the init
	 * process first.
	 */
	if (!nmi_init_done)
		return;

	smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1);
}
3077a5c8b57SBabu Moger /*
3087a5c8b57SBabu Moger * sparc specific NMI watchdog disable function.
3097a5c8b57SBabu Moger * Disables watchdog if it is not disabled already.
3107a5c8b57SBabu Moger */
watchdog_hardlockup_disable(unsigned int cpu)311df95d308SDouglas Anderson void watchdog_hardlockup_disable(unsigned int cpu)
3127a5c8b57SBabu Moger {
3137a5c8b57SBabu Moger if (atomic_read(&nmi_active) == -1)
3147a5c8b57SBabu Moger pr_warn_once("NMI watchdog cannot be enabled or disabled\n");
3157a5c8b57SBabu Moger else
3167a5c8b57SBabu Moger smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1);
3177a5c8b57SBabu Moger }
318