/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2011 Don Zickus Red Hat, Inc.
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/export.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},

};

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed simultaneously;
 * this lock may only be taken from within an NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);
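
/*
 * Note: nmi_longest_ns is the threshold, in nanoseconds, above which
 * nmi_handle() reports a slow handler via nmi_max_handler().  The debugfs
 * file created above makes it tunable at run time; with debugfs mounted in
 * the usual place that is /sys/kernel/debug/x86/nmi_longest_ns (the exact
 * path depends on where debugfs is mounted).
 */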

static void nmi_max_handler(struct irq_work *w)
{
	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
	int remainder_ns, decimal_msecs;
	u64 whole_msecs = ACCESS_ONCE(a->max_duration);

	remainder_ns = do_div(whole_msecs, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		a->handler, whole_msecs, decimal_msecs);
}

static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		if (delta < nmi_longest_ns || delta < a->max_duration)
			continue;

		a->max_duration = delta;
		irq_work_queue(&a->irq_work);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	init_irq_work(&action->irq_work, nmi_max_handler);

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Most handlers of type NMI_UNKNOWN never return NMI_DONE because
	 * they just assume the NMI is theirs, so registering more than one
	 * of them rarely makes sense.  This is just a sanity check to
	 * manage expectations.
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * Some handlers need to be executed first, otherwise a fake
	 * event confuses some handlers (kdump uses this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * The name passed in to describe the nmi handler
		 * is used as the lookup key.
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
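
/*
 * Usage sketch (illustrative only; this handler does not exist in the
 * kernel).  Callers normally go through the register_nmi_handler() wrapper
 * in <asm/nmi.h>, which builds the struct nmiaction and calls
 * __register_nmi_handler().  A handler returns NMI_HANDLED (1) when the
 * event was its own and NMI_DONE (0) otherwise.  my_nmi_handler(),
 * my_device_raised_nmi(), my_device_ack_nmi() and the "my_nmi" name are
 * hypothetical:
 *
 *	static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_device_raised_nmi())
 *			return NMI_DONE;
 *		my_device_ack_nmi();
 *		return NMI_HANDLED;
 *	}
 *
 *	err = register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_nmi");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
 */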

static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs, false))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);

static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs, false))
		return;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);

static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' as back-to-back NMIs are dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI).
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs, false);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
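
/*
 * Illustrative scenario for the back-to-back/swallow logic implemented
 * below (a sketch, not an additional code path): while a perf NMI is being
 * serviced, a second PMU overflow and its NMI arrive, but only one NMI can
 * stay latched.  The running handler ends up reporting two events
 * (handled == 2), so swallow_nmi is set.  When the latched NMI is finally
 * delivered it interrupts at the same instruction pointer, is treated as
 * back-to-back, finds no new work, and is counted in nmi_stats.swallow
 * instead of being reported as an unknown NMI.
 */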

static void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is dropped
	 * due to NMI being edge-triggered).  If this is the second half
	 * of the back-to-back NMI, assume we dropped things and process
	 * more handlers.  Otherwise reset the 'swallow' NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs, b2b);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued to arrive in the next NMI.  Because the event
		 * is already handled, the next NMI will result in an
		 * unknown NMI.  Instead let's flag this for a potential
		 * NMI to swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * as well.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);

/*
 * NMIs can hit breakpoints, which will cause the CPU to lose its
 * NMI context when the breakpoint handler does an iret.
 */
#ifdef CONFIG_X86_32
/*
 * For i386, NMIs use the same stack as the kernel, and we can
 * add a workaround to the iret problem in C (preventing nested
 * NMIs if an NMI takes a trap).  Simply have 3 states the NMI
 * can be in:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored.  Only one NMI is restarted.)
 *
 * If an NMI hits a breakpoint that executes an iret, another
 * NMI can preempt it.  We do not want to allow this new NMI
 * to run, but we want to execute it when the first one finishes.
 * We set the state to "latched", and the exit of the first NMI will
 * perform a dec_return; if the result is zero (NOT_RUNNING), then
 * it will simply exit the NMI handler.  If not, the dec_return
 * would have set the state to NMI_EXECUTING (what we want it to
 * be when we are running).  In this case, we simply jump back
 * to rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING.  The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2,
 * because the NMI could have preempted another page fault and corrupt
 * the CR2 that is about to be read.  As nested NMIs must be restarted
 * and they can not take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);

#define nmi_nesting_preprocess(regs)					\
	do {								\
		if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {	\
			this_cpu_write(nmi_state, NMI_LATCHED);		\
			return;						\
		}							\
		this_cpu_write(nmi_state, NMI_EXECUTING);		\
		this_cpu_write(nmi_cr2, read_cr2());			\
	} while (0);							\
	nmi_restart:

#define nmi_nesting_postprocess()					\
	do {								\
		if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))	\
			write_cr2(this_cpu_read(nmi_cr2));		\
		if (this_cpu_dec_return(nmi_state))			\
			goto nmi_restart;				\
	} while (0)
#else /* x86_64 */
/*
 * In x86_64 things are a bit more difficult.  This has the same problem
 * where an NMI hitting a breakpoint that calls iret will remove the
 * NMI context, allowing a nested NMI to enter.  What makes this more
 * difficult is that both NMIs and breakpoints have their own stack.
 * When a new NMI or breakpoint is executed, the stack is set to a fixed
 * point.  If an NMI is nested, it will have its stack set at that same
 * fixed address that the first NMI had, and will start corrupting the
 * stack.  This is handled in entry_64.S, but the same problem exists with
 * the breakpoint stack.
 *
 * If a breakpoint is being processed, and the debug stack is being used,
 * if an NMI comes in and also hits a breakpoint, the stack pointer
 * will be set to the same fixed address as the breakpoint that was
 * interrupted, causing that stack to be corrupted.  To handle this case,
 * check if the stack that was interrupted is the debug stack, and if
 * so, change the IDT so that new breakpoints will use the current stack
 * and not switch to the fixed address.  On return of the NMI, switch back
 * to the original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);

static inline void nmi_nesting_preprocess(struct pt_regs *regs)
{
	/*
	 * If we interrupted a breakpoint, it is possible that
	 * the nmi handler will have breakpoints too.  We need to
	 * change the IDT such that breakpoints that happen here
	 * continue to use the NMI stack.
	 */
	if (unlikely(is_debug_stack(regs->sp))) {
		debug_stack_set_zero();
		this_cpu_write(update_debug_stack, 1);
	}
}

static inline void nmi_nesting_postprocess(void)
{
	if (unlikely(this_cpu_read(update_debug_stack))) {
		debug_stack_reset();
		this_cpu_write(update_debug_stack, 0);
	}
}
#endif

dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_nesting_preprocess(regs);

	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();

	/* On i386, may loop back to preprocess */
	nmi_nesting_postprocess();
}
NOKPROBE_SYMBOL(do_nmi);

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);