xref: /linux/arch/x86/kernel/nmi.c (revision fd639726bf15fca8ee1a00dce8e0096d0ad9bd18)
1 /*
2  *  Copyright (C) 1991, 1992  Linus Torvalds
3  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4  *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
5  *
6  *  Pentium III FXSR, SSE support
7  *	Gareth Hughes <gareth@valinux.com>, May 2000
8  */
9 
10 /*
11  * Handle hardware traps and faults.
12  */
13 #include <linux/spinlock.h>
14 #include <linux/kprobes.h>
15 #include <linux/kdebug.h>
16 #include <linux/sched/debug.h>
17 #include <linux/nmi.h>
18 #include <linux/debugfs.h>
19 #include <linux/delay.h>
20 #include <linux/hardirq.h>
21 #include <linux/ratelimit.h>
22 #include <linux/slab.h>
23 #include <linux/export.h>
24 #include <linux/sched/clock.h>
25 
26 #if defined(CONFIG_EDAC)
27 #include <linux/edac.h>
28 #endif
29 
30 #include <linux/atomic.h>
31 #include <asm/traps.h>
32 #include <asm/mach_traps.h>
33 #include <asm/nmi.h>
34 #include <asm/x86_init.h>
35 #include <asm/reboot.h>
36 #include <asm/cache.h>
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/nmi.h>
40 
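/*
 * One descriptor per NMI type (the NMI_* values from <asm/nmi.h>, NMI_MAX
 * in total).  The spinlock only serializes handler registration and
 * removal; nmi_handle() walks the handler list under RCU instead.
 */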
41 struct nmi_desc {
42 	raw_spinlock_t lock;
43 	struct list_head head;
44 };
45 
46 static struct nmi_desc nmi_desc[NMI_MAX] =
47 {
48 	{
49 		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
50 		.head = LIST_HEAD_INIT(nmi_desc[0].head),
51 	},
52 	{
53 		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
54 		.head = LIST_HEAD_INIT(nmi_desc[1].head),
55 	},
56 	{
57 		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
58 		.head = LIST_HEAD_INIT(nmi_desc[2].head),
59 	},
60 	{
61 		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
62 		.head = LIST_HEAD_INIT(nmi_desc[3].head),
63 	},
64 
65 };
66 
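/* Per-CPU counts of how incoming NMIs were classified by default_do_nmi(). */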
67 struct nmi_stats {
68 	unsigned int normal;
69 	unsigned int unknown;
70 	unsigned int external;
71 	unsigned int swallow;
72 };
73 
74 static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
75 
76 static int ignore_nmis __read_mostly;
77 
78 int unknown_nmi_panic;
79 /*
80  * Prevent the NMI reason port (0x61) from being accessed simultaneously;
81  * this lock may only be taken from the NMI handler.
82  */
83 static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
84 
85 static int __init setup_unknown_nmi_panic(char *str)
86 {
87 	unknown_nmi_panic = 1;
88 	return 1;
89 }
90 __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
91 
92 #define nmi_to_desc(type) (&nmi_desc[type])
93 
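/*
 * Handlers that run longer than this (default 1ms) are reported via
 * nmi_max_handler().  The threshold is tunable at run time through the
 * debugfs file created below (normally <debugfs>/x86/nmi_longest_ns).
 */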
94 static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
95 
96 static int __init nmi_warning_debugfs(void)
97 {
98 	debugfs_create_u64("nmi_longest_ns", 0644,
99 			arch_debugfs_dir, &nmi_longest_ns);
100 	return 0;
101 }
102 fs_initcall(nmi_warning_debugfs);
103 
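/*
 * Reporting a slow handler is deferred to irq_work context: printk() is not
 * safe to call from the NMI itself, so nmi_handle() only records the new
 * maximum duration and queues this worker to print the message later.
 */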
104 static void nmi_max_handler(struct irq_work *w)
105 {
106 	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
107 	int remainder_ns, decimal_msecs;
108 	u64 whole_msecs = READ_ONCE(a->max_duration);
109 
110 	remainder_ns = do_div(whole_msecs, (1000 * 1000));
111 	decimal_msecs = remainder_ns / 1000;
112 
113 	printk_ratelimited(KERN_INFO
114 		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
115 		a->handler, whole_msecs, decimal_msecs);
116 }
117 
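/*
 * Run every handler registered for @type and return the total number of
 * events the handlers claim to have handled.  A handler that exceeds
 * nmi_longest_ns (or its own previous maximum) gets reported asynchronously.
 */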
118 static int nmi_handle(unsigned int type, struct pt_regs *regs)
119 {
120 	struct nmi_desc *desc = nmi_to_desc(type);
121 	struct nmiaction *a;
122 	int handled = 0;
123 
124 	rcu_read_lock();
125 
126 	/*
127 	 * NMIs are edge-triggered, which means if you have enough
128 	 * of them concurrently, you can lose some because only one
129 	 * can be latched at any given time.  Walk the whole list
130 	 * to handle those situations.
131 	 */
132 	list_for_each_entry_rcu(a, &desc->head, list) {
133 		int thishandled;
134 		u64 delta;
135 
136 		delta = sched_clock();
137 		thishandled = a->handler(type, regs);
138 		handled += thishandled;
139 		delta = sched_clock() - delta;
140 		trace_nmi_handler(a->handler, (int)delta, thishandled);
141 
142 		if (delta < nmi_longest_ns || delta < a->max_duration)
143 			continue;
144 
145 		a->max_duration = delta;
146 		irq_work_queue(&a->irq_work);
147 	}
148 
149 	rcu_read_unlock();
150 
151 	/* return total number of NMI events handled */
152 	return handled;
153 }
154 NOKPROBE_SYMBOL(nmi_handle);
155 
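/*
 * Most callers do not use this directly but go through the
 * register_nmi_handler() wrapper in <asm/nmi.h>, which builds the
 * struct nmiaction for them.  A minimal sketch, with hypothetical
 * handler and helper names:
 *
 *	static int my_nmi(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_device_raised_nmi())
 *			return NMI_DONE;
 *		return NMI_HANDLED;
 *	}
 *
 *	register_nmi_handler(NMI_LOCAL, my_nmi, 0, "my_nmi");
 *
 * NMI_DONE means "not mine, keep walking the chain"; the non-zero return
 * from NMI_HANDLED is what nmi_handle() accumulates into 'handled'.
 */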
156 int __register_nmi_handler(unsigned int type, struct nmiaction *action)
157 {
158 	struct nmi_desc *desc = nmi_to_desc(type);
159 	unsigned long flags;
160 
161 	if (!action->handler)
162 		return -EINVAL;
163 
164 	init_irq_work(&action->irq_work, nmi_max_handler);
165 
166 	raw_spin_lock_irqsave(&desc->lock, flags);
167 
168 	/*
169 	 * Warn if there are multiple registrations on the
170 	 * internal NMI handler call chains (SERR and IO_CHECK).
171 	 */
172 	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
173 	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
174 
175 	/*
176 	 * Some handlers need to be executed first, otherwise a fake
177 	 * event confuses some handlers (kdump uses this flag).
178 	 */
179 	if (action->flags & NMI_FLAG_FIRST)
180 		list_add_rcu(&action->list, &desc->head);
181 	else
182 		list_add_tail_rcu(&action->list, &desc->head);
183 
184 	raw_spin_unlock_irqrestore(&desc->lock, flags);
185 	return 0;
186 }
187 EXPORT_SYMBOL(__register_nmi_handler);
188 
189 void unregister_nmi_handler(unsigned int type, const char *name)
190 {
191 	struct nmi_desc *desc = nmi_to_desc(type);
192 	struct nmiaction *n;
193 	unsigned long flags;
194 
195 	raw_spin_lock_irqsave(&desc->lock, flags);
196 
197 	list_for_each_entry_rcu(n, &desc->head, list) {
198 		/*
199 		 * the name passed in to describe the nmi handler
200 		 * is used as the lookup key
201 		 */
202 		if (!strcmp(n->name, name)) {
203 			WARN(in_nmi(),
204 				"Trying to free NMI (%s) from NMI context!\n", n->name);
205 			list_del_rcu(&n->list);
206 			break;
207 		}
208 	}
209 
210 	raw_spin_unlock_irqrestore(&desc->lock, flags);
211 	synchronize_rcu();
212 }
213 EXPORT_SYMBOL_GPL(unregister_nmi_handler);
214 
215 static void
216 pci_serr_error(unsigned char reason, struct pt_regs *regs)
217 {
218 	/* check to see if anyone registered against these types of errors */
219 	if (nmi_handle(NMI_SERR, regs))
220 		return;
221 
222 	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
223 		 reason, smp_processor_id());
224 
225 	if (panic_on_unrecovered_nmi)
226 		nmi_panic(regs, "NMI: Not continuing");
227 
228 	pr_emerg("Dazed and confused, but trying to continue\n");
229 
230 	/* Clear and disable the PCI SERR error line. */
231 	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
232 	outb(reason, NMI_REASON_PORT);
233 }
234 NOKPROBE_SYMBOL(pci_serr_error);
235 
236 static void
237 io_check_error(unsigned char reason, struct pt_regs *regs)
238 {
239 	unsigned long i;
240 
241 	/* check to see if anyone registered against these types of errors */
242 	if (nmi_handle(NMI_IO_CHECK, regs))
243 		return;
244 
245 	pr_emerg(
246 	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
247 		 reason, smp_processor_id());
248 	show_regs(regs);
249 
250 	if (panic_on_io_nmi) {
251 		nmi_panic(regs, "NMI IOCK error: Not continuing");
252 
253 		/*
254 		 * If we end up here, it means we have received an NMI while
255 		 * processing panic(). Simply return without delaying and
256 		 * re-enabling NMIs.
257 		 */
258 		return;
259 	}
260 
261 	/* Re-enable the IOCK line, wait for a few seconds */
262 	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
263 	outb(reason, NMI_REASON_PORT);
264 
265 	i = 20000;
266 	while (--i) {
267 		touch_nmi_watchdog();
268 		udelay(100);
269 	}
270 
271 	reason &= ~NMI_REASON_CLEAR_IOCHK;
272 	outb(reason, NMI_REASON_PORT);
273 }
274 NOKPROBE_SYMBOL(io_check_error);
275 
276 static void
277 unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
278 {
279 	int handled;
280 
281 	/*
282 	 * Back-to-back NMIs are dealt with one level up.  Of course this
283 	 * makes having multiple 'unknown' handlers useless, as only the
284 	 * first one is ever run (unless it can actually determine
285 	 * if it caused the NMI).
286 	 */
287 	handled = nmi_handle(NMI_UNKNOWN, regs);
288 	if (handled) {
289 		__this_cpu_add(nmi_stats.unknown, handled);
290 		return;
291 	}
292 
293 	__this_cpu_add(nmi_stats.unknown, 1);
294 
295 	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
296 		 reason, smp_processor_id());
297 
298 	pr_emerg("Do you have a strange power saving mode enabled?\n");
299 	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
300 		nmi_panic(regs, "NMI: Not continuing");
301 
302 	pr_emerg("Dazed and confused, but trying to continue\n");
303 }
304 NOKPROBE_SYMBOL(unknown_nmi_error);
305 
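/*
 * State for the back-to-back NMI detection and "swallow" heuristic used by
 * default_do_nmi() below.  last_nmi_rip is cleared by local_touch_nmi() to
 * reset the logic.
 */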
306 static DEFINE_PER_CPU(bool, swallow_nmi);
307 static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
308 
309 static void default_do_nmi(struct pt_regs *regs)
310 {
311 	unsigned char reason = 0;
312 	int handled;
313 	bool b2b = false;
314 
315 	/*
316 	 * CPU-specific NMI must be processed before non-CPU-specific
317 	 * NMI, otherwise we may lose it, because the CPU-specific
318 	 * NMI can not be detected/processed on other CPUs.
319 	 */
320 
321 	/*
322 	 * Back-to-back NMIs are interesting because they can either
323 	 * be two NMIs or more than two NMIs (anything over two is dropped
324 	 * due to NMIs being edge-triggered).  If this is the second half
325 	 * of a back-to-back NMI, assume we dropped things and process
326 	 * more handlers.  Otherwise, reset the 'swallow' NMI behaviour.
327 	 */
328 	if (regs->ip == __this_cpu_read(last_nmi_rip))
329 		b2b = true;
330 	else
331 		__this_cpu_write(swallow_nmi, false);
332 
333 	__this_cpu_write(last_nmi_rip, regs->ip);
334 
335 	handled = nmi_handle(NMI_LOCAL, regs);
336 	__this_cpu_add(nmi_stats.normal, handled);
337 	if (handled) {
338 		/*
339 		 * There are cases when an NMI handler handles multiple
340 		 * events in the current NMI.  One of these events may
341 		 * also be latched as the next NMI.  Because the event is
342 		 * already handled, that next NMI will result in an unknown
343 		 * NMI.  Instead, let's flag this for a potential NMI to
344 		 * swallow.
345 		 */
346 		if (handled > 1)
347 			__this_cpu_write(swallow_nmi, true);
348 		return;
349 	}
350 
351 	/*
352 	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
353 	 *
354 	 * Another CPU may be processing panic routines while holding
355 	 * nmi_reason_lock. Check if that CPU issued the NMI IPI for crash dumping,
356 	 * and if so, call its callback directly.  If there is no CPU preparing a
357 	 * crash dump, we simply loop here.
358 	 */
359 	while (!raw_spin_trylock(&nmi_reason_lock)) {
360 		run_crash_ipi_callback(regs);
361 		cpu_relax();
362 	}
363 
364 	reason = x86_platform.get_nmi_reason();
365 
366 	if (reason & NMI_REASON_MASK) {
367 		if (reason & NMI_REASON_SERR)
368 			pci_serr_error(reason, regs);
369 		else if (reason & NMI_REASON_IOCHK)
370 			io_check_error(reason, regs);
371 #ifdef CONFIG_X86_32
372 		/*
373 		 * Reassert NMI in case it became active
374 		 * meanwhile as it's edge-triggered:
375 		 */
376 		reassert_nmi();
377 #endif
378 		__this_cpu_add(nmi_stats.external, 1);
379 		raw_spin_unlock(&nmi_reason_lock);
380 		return;
381 	}
382 	raw_spin_unlock(&nmi_reason_lock);
383 
384 	/*
385 	 * Only one NMI can be latched at a time.  To handle
386 	 * this we may process multiple nmi handlers at once to
387 	 * cover the case where an NMI is dropped.  The downside
388 	 * to this approach is we may process an NMI prematurely,
389 	 * while its real NMI is sitting latched.  This will cause
390 	 * an unknown NMI on the next run of the NMI processing.
391 	 *
392 	 * We tried to flag that condition above, by setting the
393 	 * swallow_nmi flag when we process more than one event.
394 	 * This condition is also only present on the second half
395 	 * of a back-to-back NMI, so we flag that condition too.
396 	 *
397 	 * If both are true, we assume we already processed this
398 	 * NMI previously and we swallow it.  Otherwise we reset
399 	 * the logic.
400 	 *
401 	 * There are scenarios where we may accidentally swallow
402 	 * a 'real' unknown NMI.  For example, while processing
403 	 * a perf NMI another perf NMI comes in along with a
404 	 * 'real' unknown NMI.  These two NMIs get combined into
405 	 * one (as described above).  When the next NMI gets
406 	 * processed, it will be flagged by perf as handled, but
407 	 * no one will know that there was a 'real' unknown NMI sent
408 	 * also.  As a result it gets swallowed.  Or if the first
409 	 * perf NMI returns two events handled then the second
410 	 * NMI will get eaten by the logic below, again losing a
411 	 * 'real' unknown NMI.  But this is the best we can do
412 	 * for now.
413 	 */
414 	if (b2b && __this_cpu_read(swallow_nmi))
415 		__this_cpu_add(nmi_stats.swallow, 1);
416 	else
417 		unknown_nmi_error(reason, regs);
418 }
419 NOKPROBE_SYMBOL(default_do_nmi);
420 
421 /*
422  * An NMI can page fault or hit a breakpoint, which will cause it to lose
423  * its NMI context with the CPU when the breakpoint or page fault does an IRET.
424  *
425  * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
426  * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
427  * if the outer NMI came from kernel mode, but we can still nest if the
428  * outer NMI came from user mode.
429  *
430  * To handle these nested NMIs, we have three states:
431  *
432  *  1) not running
433  *  2) executing
434  *  3) latched
435  *
436  * When no NMI is in progress, it is in the "not running" state.
437  * When an NMI comes in, it goes into the "executing" state.
438  * Normally, if another NMI is triggered, it does not interrupt
439  * the running NMI and the HW will simply latch it so that when
440  * the first NMI finishes, it will restart the second NMI.
441  * (Note, the latch is binary, thus multiple NMIs triggering,
442  *  when one is running, are ignored. Only one NMI is restarted.)
443  *
444  * If an NMI executes an iret, another NMI can preempt it. We do not
445  * want to allow this new NMI to run, but we want to execute it when the
446  * first one finishes.  We set the state to "latched", and the exit of
447  * the first NMI will perform a dec_return; if the result is zero
448  * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
449  * dec_return would have set the state to NMI_EXECUTING (what we want it
450  * to be when we are running). In this case, we simply jump back to
451  * rerun the NMI handler again, and restart the 'latched' NMI.
452  *
453  * No trap (breakpoint or page fault) should be hit before nmi_restart,
454  * thus there is no race between the first check of state for NOT_RUNNING
455  * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
456  * at this point.
457  *
458  * In case the NMI takes a page fault, we need to save off the CR2
459  * because the NMI could have preempted another page fault and would corrupt
460  * the CR2 that is about to be read. As nested NMIs must be restarted
461  * and they cannot take breakpoints or page faults, the update of the
462  * CR2 must be done before converting the nmi state back to NOT_RUNNING.
463  * Otherwise, there would be a race of another nested NMI coming in
464  * after setting state to NOT_RUNNING but before updating the nmi_cr2.
465  */
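/*
 * In short: entry moves NOT_RUNNING -> EXECUTING; a nested NMI only bumps
 * the state to LATCHED; the dec_return on exit then lands back on
 * EXECUTING (non-zero) and jumps to nmi_restart instead of returning.
 */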
466 enum nmi_states {
467 	NMI_NOT_RUNNING = 0,
468 	NMI_EXECUTING,
469 	NMI_LATCHED,
470 };
471 static DEFINE_PER_CPU(enum nmi_states, nmi_state);
472 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
473 
474 #ifdef CONFIG_X86_64
475 /*
476  * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
477  * some care, the inner breakpoint will clobber the outer breakpoint's
478  * stack.
479  *
480  * If a breakpoint is being processed and the debug stack is in use,
481  * and an NMI comes in and also hits a breakpoint, the stack
482  * pointer will be set to the same fixed address as that of the breakpoint
483  * that was interrupted, causing that stack to be corrupted. To handle this
484  * case, check if the stack that was interrupted is the debug stack, and
485  * if so, change the IDT so that new breakpoints will use the current
486  * stack and not switch to the fixed address. On return of the NMI,
487  * switch back to the original IDT.
488  */
489 static DEFINE_PER_CPU(int, update_debug_stack);
490 #endif
491 
492 dotraplinkage notrace void
493 do_nmi(struct pt_regs *regs, long error_code)
494 {
495 	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
496 		this_cpu_write(nmi_state, NMI_LATCHED);
497 		return;
498 	}
499 	this_cpu_write(nmi_state, NMI_EXECUTING);
500 	this_cpu_write(nmi_cr2, read_cr2());
501 nmi_restart:
502 
503 #ifdef CONFIG_X86_64
504 	/*
505 	 * If we interrupted a breakpoint, it is possible that
506 	 * the nmi handler will have breakpoints too. We need to
507 	 * change the IDT such that breakpoints that happen here
508 	 * continue to use the NMI stack.
509 	 */
510 	if (unlikely(is_debug_stack(regs->sp))) {
511 		debug_stack_set_zero();
512 		this_cpu_write(update_debug_stack, 1);
513 	}
514 #endif
515 
516 	nmi_enter();
517 
518 	inc_irq_stat(__nmi_count);
519 
520 	if (!ignore_nmis)
521 		default_do_nmi(regs);
522 
523 	nmi_exit();
524 
525 #ifdef CONFIG_X86_64
526 	if (unlikely(this_cpu_read(update_debug_stack))) {
527 		debug_stack_reset();
528 		this_cpu_write(update_debug_stack, 0);
529 	}
530 #endif
531 
532 	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
533 		write_cr2(this_cpu_read(nmi_cr2));
534 	if (this_cpu_dec_return(nmi_state))
535 		goto nmi_restart;
536 }
537 NOKPROBE_SYMBOL(do_nmi);
538 
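/*
 * stop_nmi()/restart_nmi() nest: while ignore_nmis is non-zero, do_nmi()
 * still runs and accounts the NMI, but skips default_do_nmi().
 */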
539 void stop_nmi(void)
540 {
541 	ignore_nmis++;
542 }
543 
544 void restart_nmi(void)
545 {
546 	ignore_nmis--;
547 }
548 
549 /* reset the back-to-back NMI logic */
550 void local_touch_nmi(void)
551 {
552 	__this_cpu_write(last_nmi_rip, 0);
553 }
554 EXPORT_SYMBOL_GPL(local_touch_nmi);
555