xref: /linux/arch/x86/kernel/nmi.c (revision 3f07c0144132e4f59d88055ac8ff3e691a5fa2b8)
1 /*
2  *  Copyright (C) 1991, 1992  Linus Torvalds
3  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4  *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
5  *
6  *  Pentium III FXSR, SSE support
7  *	Gareth Hughes <gareth@valinux.com>, May 2000
8  */
9 
10 /*
11  * Handle hardware traps and faults.
12  */
13 #include <linux/spinlock.h>
14 #include <linux/kprobes.h>
15 #include <linux/kdebug.h>
16 #include <linux/nmi.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/hardirq.h>
20 #include <linux/ratelimit.h>
21 #include <linux/slab.h>
22 #include <linux/export.h>
23 #include <linux/sched/clock.h>
24 
25 #if defined(CONFIG_EDAC)
26 #include <linux/edac.h>
27 #endif
28 
29 #include <linux/atomic.h>
30 #include <asm/traps.h>
31 #include <asm/mach_traps.h>
32 #include <asm/nmi.h>
33 #include <asm/x86_init.h>
34 #include <asm/reboot.h>
35 #include <asm/cache.h>
36 
37 #define CREATE_TRACE_POINTS
38 #include <trace/events/nmi.h>
39 
40 struct nmi_desc {
41 	spinlock_t lock;
42 	struct list_head head;
43 };
44 
45 static struct nmi_desc nmi_desc[NMI_MAX] =
46 {
47 	{
48 		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
49 		.head = LIST_HEAD_INIT(nmi_desc[0].head),
50 	},
51 	{
52 		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
53 		.head = LIST_HEAD_INIT(nmi_desc[1].head),
54 	},
55 	{
56 		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
57 		.head = LIST_HEAD_INIT(nmi_desc[2].head),
58 	},
59 	{
60 		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
61 		.head = LIST_HEAD_INIT(nmi_desc[3].head),
62 	},
63 
64 };
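/*
 * Note (added for clarity): the four entries above are indexed by NMI type
 * as defined in <asm/nmi.h>: NMI_LOCAL, NMI_UNKNOWN, NMI_SERR and
 * NMI_IO_CHECK (NMI_MAX == 4).
 */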
65 
66 struct nmi_stats {
67 	unsigned int normal;
68 	unsigned int unknown;
69 	unsigned int external;
70 	unsigned int swallow;
71 };
72 
73 static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
74 
75 static int ignore_nmis __read_mostly;
76 
77 int unknown_nmi_panic;
78 /*
79  * Prevent the NMI reason port (0x61) from being accessed simultaneously;
80  * this lock may only be taken from within the NMI handler.
81  */
82 static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
83 
84 static int __init setup_unknown_nmi_panic(char *str)
85 {
86 	unknown_nmi_panic = 1;
87 	return 1;
88 }
89 __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
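/*
 * Booting with "unknown_nmi_panic" on the kernel command line makes an NMI
 * with no identified cause fatal; the same behaviour can also be enabled at
 * run time through the kernel.unknown_nmi_panic sysctl.
 */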
90 
91 #define nmi_to_desc(type) (&nmi_desc[type])
92 
93 static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
94 
95 static int __init nmi_warning_debugfs(void)
96 {
97 	debugfs_create_u64("nmi_longest_ns", 0644,
98 			arch_debugfs_dir, &nmi_longest_ns);
99 	return 0;
100 }
101 fs_initcall(nmi_warning_debugfs);
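/*
 * The nmi_longest_ns threshold above is exposed through debugfs and can be
 * tuned at run time, e.g. (assuming debugfs is mounted at /sys/kernel/debug,
 * where arch_debugfs_dir is the "x86" directory):
 *
 *	echo 2000000 > /sys/kernel/debug/x86/nmi_longest_ns
 */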
102 
103 static void nmi_max_handler(struct irq_work *w)
104 {
105 	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
106 	int remainder_ns, decimal_msecs;
107 	u64 whole_msecs = ACCESS_ONCE(a->max_duration);
108 
109 	remainder_ns = do_div(whole_msecs, (1000 * 1000));
110 	decimal_msecs = remainder_ns / 1000;
111 
112 	printk_ratelimited(KERN_INFO
113 		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
114 		a->handler, whole_msecs, decimal_msecs);
115 }
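/*
 * Worked example of the split above: a max_duration of 2345678 ns leaves
 * whole_msecs == 2 and remainder_ns == 345678 after do_div(), so
 * decimal_msecs == 345 and the message reads "... took too long to run:
 * 2.345 msecs".
 */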
116 
117 static int nmi_handle(unsigned int type, struct pt_regs *regs)
118 {
119 	struct nmi_desc *desc = nmi_to_desc(type);
120 	struct nmiaction *a;
121 	int handled = 0;
122 
123 	rcu_read_lock();
124 
125 	/*
126 	 * NMIs are edge-triggered, which means if you have enough
127 	 * of them concurrently, you can lose some because only one
128 	 * can be latched at any given time.  Walk the whole list
129 	 * to handle those situations.
130 	 */
131 	list_for_each_entry_rcu(a, &desc->head, list) {
132 		int thishandled;
133 		u64 delta;
134 
135 		delta = sched_clock();
136 		thishandled = a->handler(type, regs);
137 		handled += thishandled;
138 		delta = sched_clock() - delta;
139 		trace_nmi_handler(a->handler, (int)delta, thishandled);
140 
141 		if (delta < nmi_longest_ns || delta < a->max_duration)
142 			continue;
143 
144 		a->max_duration = delta;
145 		irq_work_queue(&a->irq_work);
146 	}
147 
148 	rcu_read_unlock();
149 
150 	/* return total number of NMI events handled */
151 	return handled;
152 }
153 NOKPROBE_SYMBOL(nmi_handle);
154 
155 int __register_nmi_handler(unsigned int type, struct nmiaction *action)
156 {
157 	struct nmi_desc *desc = nmi_to_desc(type);
158 	unsigned long flags;
159 
160 	if (!action->handler)
161 		return -EINVAL;
162 
163 	init_irq_work(&action->irq_work, nmi_max_handler);
164 
165 	spin_lock_irqsave(&desc->lock, flags);
166 
167 	/*
168 	 * Most handlers of type NMI_UNKNOWN claim every NMI they see (or
169 	 * panic and never return), so registering more than one makes little
170 	 * sense.  This is just a sanity check to manage expectations.
171 	 */
172 	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
173 	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
174 	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
175 
176 	/*
177 	 * some handlers need to be executed first otherwise a fake
178 	 * event confuses some handlers (kdump uses this flag)
179 	 */
180 	if (action->flags & NMI_FLAG_FIRST)
181 		list_add_rcu(&action->list, &desc->head);
182 	else
183 		list_add_tail_rcu(&action->list, &desc->head);
184 
185 	spin_unlock_irqrestore(&desc->lock, flags);
186 	return 0;
187 }
188 EXPORT_SYMBOL(__register_nmi_handler);
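/*
 * Illustrative sketch (not part of this file): callers normally use the
 * register_nmi_handler() wrapper from <asm/nmi.h>, which fills in a
 * struct nmiaction and calls __register_nmi_handler().  A handler returns
 * NMI_HANDLED when it claimed the event and NMI_DONE otherwise.  The names
 * "example_nmi_handler", "example_device_raised_nmi" and "example_nmi"
 * below are made up for illustration:
 *
 *	static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!example_device_raised_nmi())
 *			return NMI_DONE;
 *		return NMI_HANDLED;
 *	}
 *
 *	register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0, "example_nmi");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "example_nmi");
 */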
189 
190 void unregister_nmi_handler(unsigned int type, const char *name)
191 {
192 	struct nmi_desc *desc = nmi_to_desc(type);
193 	struct nmiaction *n;
194 	unsigned long flags;
195 
196 	spin_lock_irqsave(&desc->lock, flags);
197 
198 	list_for_each_entry_rcu(n, &desc->head, list) {
199 		/*
200 		 * the name passed in to describe the nmi handler
201 		 * is used as the lookup key
202 		 */
203 		if (!strcmp(n->name, name)) {
204 			WARN(in_nmi(),
205 				"Trying to free NMI (%s) from NMI context!\n", n->name);
206 			list_del_rcu(&n->list);
207 			break;
208 		}
209 	}
210 
211 	spin_unlock_irqrestore(&desc->lock, flags);
212 	synchronize_rcu();
213 }
214 EXPORT_SYMBOL_GPL(unregister_nmi_handler);
215 
216 static void
217 pci_serr_error(unsigned char reason, struct pt_regs *regs)
218 {
219 	/* check to see if anyone registered against these types of errors */
220 	if (nmi_handle(NMI_SERR, regs))
221 		return;
222 
223 	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
224 		 reason, smp_processor_id());
225 
226 	/*
227 	 * On some machines, the PCI SERR line is used to report memory
228 	 * errors.  EDAC makes use of it.
229 	 */
230 #if defined(CONFIG_EDAC)
231 	if (edac_handler_set()) {
232 		edac_atomic_assert_error();
233 		return;
234 	}
235 #endif
236 
237 	if (panic_on_unrecovered_nmi)
238 		nmi_panic(regs, "NMI: Not continuing");
239 
240 	pr_emerg("Dazed and confused, but trying to continue\n");
241 
242 	/* Clear and disable the PCI SERR error line. */
243 	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
244 	outb(reason, NMI_REASON_PORT);
245 }
246 NOKPROBE_SYMBOL(pci_serr_error);
247 
248 static void
249 io_check_error(unsigned char reason, struct pt_regs *regs)
250 {
251 	unsigned long i;
252 
253 	/* check to see if anyone registered against these types of errors */
254 	if (nmi_handle(NMI_IO_CHECK, regs))
255 		return;
256 
257 	pr_emerg(
258 	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
259 		 reason, smp_processor_id());
260 	show_regs(regs);
261 
262 	if (panic_on_io_nmi) {
263 		nmi_panic(regs, "NMI IOCK error: Not continuing");
264 
265 		/*
266 		 * If we end up here, it means we have received an NMI while
267 		 * processing panic(). Simply return without delaying and
268 		 * re-enabling NMIs.
269 		 */
270 		return;
271 	}
272 
273 	/* Re-enable the IOCK line, wait ~2 seconds (20000 * 100us) */
274 	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
275 	outb(reason, NMI_REASON_PORT);
276 
277 	i = 20000;
278 	while (--i) {
279 		touch_nmi_watchdog();
280 		udelay(100);
281 	}
282 
283 	reason &= ~NMI_REASON_CLEAR_IOCHK;
284 	outb(reason, NMI_REASON_PORT);
285 }
286 NOKPROBE_SYMBOL(io_check_error);
287 
288 static void
289 unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
290 {
291 	int handled;
292 
293 	/*
294 	 * Back-to-back NMIs are dealt with one level up, in default_do_nmi().
295 	 * Of course this makes having multiple 'unknown' handlers useless,
296 	 * as only the first one is ever run (unless it can actually determine
297 	 * if it caused the NMI).
298 	 */
299 	handled = nmi_handle(NMI_UNKNOWN, regs);
300 	if (handled) {
301 		__this_cpu_add(nmi_stats.unknown, handled);
302 		return;
303 	}
304 
305 	__this_cpu_add(nmi_stats.unknown, 1);
306 
307 	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
308 		 reason, smp_processor_id());
309 
310 	pr_emerg("Do you have a strange power saving mode enabled?\n");
311 	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
312 		nmi_panic(regs, "NMI: Not continuing");
313 
314 	pr_emerg("Dazed and confused, but trying to continue\n");
315 }
316 NOKPROBE_SYMBOL(unknown_nmi_error);
317 
318 static DEFINE_PER_CPU(bool, swallow_nmi);
319 static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
320 
321 static void default_do_nmi(struct pt_regs *regs)
322 {
323 	unsigned char reason = 0;
324 	int handled;
325 	bool b2b = false;
326 
327 	/*
328 	 * A CPU-specific NMI must be processed before a non-CPU-specific
329 	 * NMI, otherwise we may lose it, because the CPU-specific
330 	 * NMI cannot be detected/processed on other CPUs.
331 	 */
332 
333 	/*
334 	 * Back-to-back NMIs are interesting because they can either
335 	 * be two NMIs or more than two NMIs (anything over two is dropped
336 	 * due to NMIs being edge-triggered).  If this is the second half
337 	 * of a back-to-back NMI, assume we dropped things and process
338 	 * more handlers.  Otherwise, reset the 'swallow' NMI behaviour.
339 	 */
340 	if (regs->ip == __this_cpu_read(last_nmi_rip))
341 		b2b = true;
342 	else
343 		__this_cpu_write(swallow_nmi, false);
344 
345 	__this_cpu_write(last_nmi_rip, regs->ip);
346 
347 	handled = nmi_handle(NMI_LOCAL, regs);
348 	__this_cpu_add(nmi_stats.normal, handled);
349 	if (handled) {
350 		/*
351 		 * There are cases when an NMI handler handles multiple
352 		 * events in the current NMI.  One of these events may
353 		 * be queued to arrive as the next NMI.  Because the event is
354 		 * already handled, the next NMI will result in an unknown
355 		 * NMI.  Instead, let's flag this for a potential NMI to
356 		 * swallow.
357 		 */
358 		if (handled > 1)
359 			__this_cpu_write(swallow_nmi, true);
360 		return;
361 	}
362 
363 	/*
364 	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
365 	 *
366 	 * Another CPU may be processing panic routines while holding
367 	 * nmi_reason_lock. Check whether that CPU issued the IPI for crash
368 	 * dumping, and if so, call its callback directly.  If no CPU is
369 	 * preparing a crash dump, we simply spin here.
370 	 */
371 	while (!raw_spin_trylock(&nmi_reason_lock)) {
372 		run_crash_ipi_callback(regs);
373 		cpu_relax();
374 	}
375 
376 	reason = x86_platform.get_nmi_reason();
377 
378 	if (reason & NMI_REASON_MASK) {
379 		if (reason & NMI_REASON_SERR)
380 			pci_serr_error(reason, regs);
381 		else if (reason & NMI_REASON_IOCHK)
382 			io_check_error(reason, regs);
383 #ifdef CONFIG_X86_32
384 		/*
385 		 * Reassert NMI in case it became active
386 		 * meanwhile as it's edge-triggered:
387 		 */
388 		reassert_nmi();
389 #endif
390 		__this_cpu_add(nmi_stats.external, 1);
391 		raw_spin_unlock(&nmi_reason_lock);
392 		return;
393 	}
394 	raw_spin_unlock(&nmi_reason_lock);
395 
396 	/*
397 	 * Only one NMI can be latched at a time.  To handle
398 	 * this we may process multiple NMI handlers at once to
399 	 * cover the case where an NMI is dropped.  The downside
400 	 * to this approach is we may process an NMI prematurely,
401 	 * while its real NMI is sitting latched.  This will cause
402 	 * an unknown NMI on the next run of the NMI processing.
403 	 *
404 	 * We tried to flag that condition above, by setting the
405 	 * swallow_nmi flag when we process more than one event.
406 	 * This condition is also only present on the second half
407 	 * of a back-to-back NMI, so we flag that condition too.
408 	 *
409 	 * If both are true, we assume we already processed this
410 	 * NMI previously and we swallow it.  Otherwise we reset
411 	 * the logic.
412 	 *
413 	 * There are scenarios where we may accidentally swallow
414 	 * a 'real' unknown NMI.  For example, while processing
415 	 * a perf NMI another perf NMI comes in along with a
416 	 * 'real' unknown NMI.  These two NMIs get combined into
417 	 * one (as described above).  When the next NMI gets
418 	 * processed, it will be flagged by perf as handled, but
419 	 * no one will know that there was a 'real' unknown NMI sent
420 	 * also.  As a result it gets swallowed.  Or if the first
421 	 * perf NMI returns two events handled then the second
422 	 * NMI will get eaten by the logic below, again losing a
423 	 * 'real' unknown NMI.  But this is the best we can do
424 	 * for now.
425 	 */
426 	if (b2b && __this_cpu_read(swallow_nmi))
427 		__this_cpu_add(nmi_stats.swallow, 1);
428 	else
429 		unknown_nmi_error(reason, regs);
430 }
431 NOKPROBE_SYMBOL(default_do_nmi);
432 
433 /*
434  * An NMI can page fault or hit a breakpoint, which will cause it to lose
435  * its NMI context with the CPU when the breakpoint or page fault does an IRET.
436  *
437  * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
438  * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
439  * if the outer NMI came from kernel mode, but we can still nest if the
440  * outer NMI came from user mode.
441  *
442  * To handle these nested NMIs, we have three states:
443  *
444  *  1) not running
445  *  2) executing
446  *  3) latched
447  *
448  * When no NMI is in progress, it is in the "not running" state.
449  * When an NMI comes in, it goes into the "executing" state.
450  * Normally, if another NMI is triggered, it does not interrupt
451  * the running NMI and the HW will simply latch it so that when
452  * the first NMI finishes, it will restart the second NMI.
453  * (Note, the latch is binary, thus multiple NMIs triggering,
454  *  when one is running, are ignored. Only one NMI is restarted.)
455  *
456  * If an NMI executes an iret, another NMI can preempt it. We do not
457  * want to allow this new NMI to run, but we want to execute it when the
458  * first one finishes.  We set the state to "latched", and the exit of
459  * the first NMI will perform a dec_return; if the result is zero
460  * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
461  * dec_return would have set the state to NMI_EXECUTING (what we want it
462  * to be when we are running). In this case, we simply jump back to
463  * rerun the NMI handler again, and restart the 'latched' NMI.
464  *
465  * No trap (breakpoint or page fault) should be hit before nmi_restart,
466  * thus there is no race between the first check of state for NOT_RUNNING
467  * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
468  * at this point.
469  *
470  * In case the NMI takes a page fault, we need to save off the CR2
471  * because the NMI could have preempted another page fault and corrupted
472  * the CR2 that is about to be read. As nested NMIs must be restarted
473  * and they cannot take breakpoints or page faults, the update of the
474  * CR2 must be done before converting the nmi state back to NOT_RUNNING.
475  * Otherwise, there would be a race of another nested NMI coming in
476  * after setting state to NOT_RUNNING but before updating the nmi_cr2.
477  */
478 enum nmi_states {
479 	NMI_NOT_RUNNING = 0,
480 	NMI_EXECUTING,
481 	NMI_LATCHED,
482 };
483 static DEFINE_PER_CPU(enum nmi_states, nmi_state);
484 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
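/*
 * Compact summary of how do_nmi() below drives this state machine
 * (see the big comment above for the full story):
 *
 *	entry, nmi_state == NMI_NOT_RUNNING -> nmi_state = NMI_EXECUTING, run
 *	entry, nmi_state != NMI_NOT_RUNNING -> nmi_state = NMI_LATCHED, return
 *	exit:  this_cpu_dec_return(nmi_state)
 *		NMI_EXECUTING (1) -> 0 == NMI_NOT_RUNNING: done
 *		NMI_LATCHED   (2) -> 1 == NMI_EXECUTING:   goto nmi_restart
 */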
485 
486 #ifdef CONFIG_X86_64
487 /*
488  * On x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
489  * some care, the inner breakpoint will clobber the outer breakpoint's
490  * stack.
491  *
492  * If a breakpoint is being processed and the debug stack is in use,
493  * and an NMI comes in and also hits a breakpoint, the stack
494  * pointer will be set to the same fixed address as that of the breakpoint
495  * that was interrupted, causing that stack to be corrupted. To handle this
496  * case, check if the stack that was interrupted is the debug stack, and
497  * if so, change the IDT so that new breakpoints will use the current
498  * stack and not switch to the fixed address. On return of the NMI,
499  * switch back to the original IDT.
500  */
501 static DEFINE_PER_CPU(int, update_debug_stack);
502 #endif
503 
504 dotraplinkage notrace void
505 do_nmi(struct pt_regs *regs, long error_code)
506 {
507 	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
508 		this_cpu_write(nmi_state, NMI_LATCHED);
509 		return;
510 	}
511 	this_cpu_write(nmi_state, NMI_EXECUTING);
512 	this_cpu_write(nmi_cr2, read_cr2());
513 nmi_restart:
514 
515 #ifdef CONFIG_X86_64
516 	/*
517 	 * If we interrupted a breakpoint, it is possible that
518 	 * the NMI handler will hit breakpoints too. We need to
519 	 * change the IDT such that breakpoints that happen here
520 	 * continue to use the NMI stack.
521 	 */
522 	if (unlikely(is_debug_stack(regs->sp))) {
523 		debug_stack_set_zero();
524 		this_cpu_write(update_debug_stack, 1);
525 	}
526 #endif
527 
528 	nmi_enter();
529 
530 	inc_irq_stat(__nmi_count);
531 
532 	if (!ignore_nmis)
533 		default_do_nmi(regs);
534 
535 	nmi_exit();
536 
537 #ifdef CONFIG_X86_64
538 	if (unlikely(this_cpu_read(update_debug_stack))) {
539 		debug_stack_reset();
540 		this_cpu_write(update_debug_stack, 0);
541 	}
542 #endif
543 
544 	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
545 		write_cr2(this_cpu_read(nmi_cr2));
546 	if (this_cpu_dec_return(nmi_state))
547 		goto nmi_restart;
548 }
549 NOKPROBE_SYMBOL(do_nmi);
550 
551 void stop_nmi(void)
552 {
553 	ignore_nmis++;
554 }
555 
556 void restart_nmi(void)
557 {
558 	ignore_nmis--;
559 }
560 
561 /* reset the back-to-back NMI logic */
562 void local_touch_nmi(void)
563 {
564 	__this_cpu_write(last_nmi_rip, 0);
565 }
566 EXPORT_SYMBOL_GPL(local_touch_nmi);
567