1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs 4 * 5 * Pentium III FXSR, SSE support 6 * Gareth Hughes <gareth@valinux.com>, May 2000 7 */ 8 9 /* 10 * Handle hardware traps and faults. 11 */ 12 #include <linux/interrupt.h> 13 #include <linux/kallsyms.h> 14 #include <linux/spinlock.h> 15 #include <linux/kprobes.h> 16 #include <linux/uaccess.h> 17 #include <linux/kdebug.h> 18 #include <linux/kernel.h> 19 #include <linux/module.h> 20 #include <linux/ptrace.h> 21 #include <linux/string.h> 22 #include <linux/delay.h> 23 #include <linux/errno.h> 24 #include <linux/kexec.h> 25 #include <linux/sched.h> 26 #include <linux/timer.h> 27 #include <linux/init.h> 28 #include <linux/bug.h> 29 #include <linux/nmi.h> 30 #include <linux/mm.h> 31 #include <linux/smp.h> 32 #include <linux/io.h> 33 34 #ifdef CONFIG_EISA 35 #include <linux/ioport.h> 36 #include <linux/eisa.h> 37 #endif 38 39 #ifdef CONFIG_MCA 40 #include <linux/mca.h> 41 #endif 42 43 #if defined(CONFIG_EDAC) 44 #include <linux/edac.h> 45 #endif 46 47 #include <asm/kmemcheck.h> 48 #include <asm/stacktrace.h> 49 #include <asm/processor.h> 50 #include <asm/debugreg.h> 51 #include <asm/atomic.h> 52 #include <asm/system.h> 53 #include <asm/traps.h> 54 #include <asm/desc.h> 55 #include <asm/i387.h> 56 #include <asm/mce.h> 57 58 #include <asm/mach_traps.h> 59 60 #ifdef CONFIG_X86_64 61 #include <asm/x86_init.h> 62 #include <asm/pgalloc.h> 63 #include <asm/proto.h> 64 #else 65 #include <asm/processor-flags.h> 66 #include <asm/setup.h> 67 68 asmlinkage int system_call(void); 69 70 /* Do we ignore FPU interrupts ? */ 71 char ignore_fpu_irq; 72 73 /* 74 * The IDT has to be page-aligned to simplify the Pentium 75 * F0 0F bug workaround.. We have a special link segment 76 * for this. 77 */ 78 gate_desc idt_table[NR_VECTORS] 79 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; 80 #endif 81 82 DECLARE_BITMAP(used_vectors, NR_VECTORS); 83 EXPORT_SYMBOL_GPL(used_vectors); 84 85 static int ignore_nmis; 86 87 static inline void conditional_sti(struct pt_regs *regs) 88 { 89 if (regs->flags & X86_EFLAGS_IF) 90 local_irq_enable(); 91 } 92 93 static inline void preempt_conditional_sti(struct pt_regs *regs) 94 { 95 inc_preempt_count(); 96 if (regs->flags & X86_EFLAGS_IF) 97 local_irq_enable(); 98 } 99 100 static inline void conditional_cli(struct pt_regs *regs) 101 { 102 if (regs->flags & X86_EFLAGS_IF) 103 local_irq_disable(); 104 } 105 106 static inline void preempt_conditional_cli(struct pt_regs *regs) 107 { 108 if (regs->flags & X86_EFLAGS_IF) 109 local_irq_disable(); 110 dec_preempt_count(); 111 } 112 113 #ifdef CONFIG_X86_32 114 static inline void 115 die_if_kernel(const char *str, struct pt_regs *regs, long err) 116 { 117 if (!user_mode_vm(regs)) 118 die(str, regs, err); 119 } 120 #endif 121 122 static void __kprobes 123 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, 124 long error_code, siginfo_t *info) 125 { 126 struct task_struct *tsk = current; 127 128 #ifdef CONFIG_X86_32 129 if (regs->flags & X86_VM_MASK) { 130 /* 131 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. 132 * On nmi (interrupt 2), do_trap should not be called. 133 */ 134 if (trapnr < 6) 135 goto vm86_trap; 136 goto trap_signal; 137 } 138 #endif 139 140 if (!user_mode(regs)) 141 goto kernel_trap; 142 143 #ifdef CONFIG_X86_32 144 trap_signal: 145 #endif 146 /* 147 * We want error_code and trap_no set for userspace faults and 148 * kernelspace faults which result in die(), but not 149 * kernelspace faults which are fixed up. die() gives the 150 * process no chance to handle the signal and notice the 151 * kernel fault information, so that won't result in polluting 152 * the information about previously queued, but not yet 153 * delivered, faults. See also do_general_protection below. 154 */ 155 tsk->thread.error_code = error_code; 156 tsk->thread.trap_no = trapnr; 157 158 #ifdef CONFIG_X86_64 159 if (show_unhandled_signals && unhandled_signal(tsk, signr) && 160 printk_ratelimit()) { 161 printk(KERN_INFO 162 "%s[%d] trap %s ip:%lx sp:%lx error:%lx", 163 tsk->comm, tsk->pid, str, 164 regs->ip, regs->sp, error_code); 165 print_vma_addr(" in ", regs->ip); 166 printk("\n"); 167 } 168 #endif 169 170 if (info) 171 force_sig_info(signr, info, tsk); 172 else 173 force_sig(signr, tsk); 174 return; 175 176 kernel_trap: 177 if (!fixup_exception(regs)) { 178 tsk->thread.error_code = error_code; 179 tsk->thread.trap_no = trapnr; 180 die(str, regs, error_code); 181 } 182 return; 183 184 #ifdef CONFIG_X86_32 185 vm86_trap: 186 if (handle_vm86_trap((struct kernel_vm86_regs *) regs, 187 error_code, trapnr)) 188 goto trap_signal; 189 return; 190 #endif 191 } 192 193 #define DO_ERROR(trapnr, signr, str, name) \ 194 dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 195 { \ 196 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 197 == NOTIFY_STOP) \ 198 return; \ 199 conditional_sti(regs); \ 200 do_trap(trapnr, signr, str, regs, error_code, NULL); \ 201 } 202 203 #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 204 dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 205 { \ 206 siginfo_t info; \ 207 info.si_signo = signr; \ 208 info.si_errno = 0; \ 209 info.si_code = sicode; \ 210 info.si_addr = (void __user *)siaddr; \ 211 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 212 == NOTIFY_STOP) \ 213 return; \ 214 conditional_sti(regs); \ 215 do_trap(trapnr, signr, str, regs, error_code, &info); \ 216 } 217 218 DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) 219 DO_ERROR(4, SIGSEGV, "overflow", overflow) 220 DO_ERROR(5, SIGSEGV, "bounds", bounds) 221 DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) 222 DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 223 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 224 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 225 #ifdef CONFIG_X86_32 226 DO_ERROR(12, SIGBUS, "stack segment", stack_segment) 227 #endif 228 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) 229 230 #ifdef CONFIG_X86_64 231 /* Runs on IST stack */ 232 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 233 { 234 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 235 12, SIGBUS) == NOTIFY_STOP) 236 return; 237 preempt_conditional_sti(regs); 238 do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL); 239 preempt_conditional_cli(regs); 240 } 241 242 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) 243 { 244 static const char str[] = "double fault"; 245 struct task_struct *tsk = current; 246 247 /* Return not checked because double check cannot be ignored */ 248 notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV); 249 250 tsk->thread.error_code = error_code; 251 tsk->thread.trap_no = 8; 252 253 /* 254 * This is always a kernel trap and never fixable (and thus must 255 * never return). 256 */ 257 for (;;) 258 die(str, regs, error_code); 259 } 260 #endif 261 262 dotraplinkage void __kprobes 263 do_general_protection(struct pt_regs *regs, long error_code) 264 { 265 struct task_struct *tsk; 266 267 conditional_sti(regs); 268 269 #ifdef CONFIG_X86_32 270 if (regs->flags & X86_VM_MASK) 271 goto gp_in_vm86; 272 #endif 273 274 tsk = current; 275 if (!user_mode(regs)) 276 goto gp_in_kernel; 277 278 tsk->thread.error_code = error_code; 279 tsk->thread.trap_no = 13; 280 281 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 282 printk_ratelimit()) { 283 printk(KERN_INFO 284 "%s[%d] general protection ip:%lx sp:%lx error:%lx", 285 tsk->comm, task_pid_nr(tsk), 286 regs->ip, regs->sp, error_code); 287 print_vma_addr(" in ", regs->ip); 288 printk("\n"); 289 } 290 291 force_sig(SIGSEGV, tsk); 292 return; 293 294 #ifdef CONFIG_X86_32 295 gp_in_vm86: 296 local_irq_enable(); 297 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); 298 return; 299 #endif 300 301 gp_in_kernel: 302 if (fixup_exception(regs)) 303 return; 304 305 tsk->thread.error_code = error_code; 306 tsk->thread.trap_no = 13; 307 if (notify_die(DIE_GPF, "general protection fault", regs, 308 error_code, 13, SIGSEGV) == NOTIFY_STOP) 309 return; 310 die("general protection fault", regs, error_code); 311 } 312 313 static notrace __kprobes void 314 mem_parity_error(unsigned char reason, struct pt_regs *regs) 315 { 316 printk(KERN_EMERG 317 "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", 318 reason, smp_processor_id()); 319 320 printk(KERN_EMERG 321 "You have some hardware problem, likely on the PCI bus.\n"); 322 323 #if defined(CONFIG_EDAC) 324 if (edac_handler_set()) { 325 edac_atomic_assert_error(); 326 return; 327 } 328 #endif 329 330 if (panic_on_unrecovered_nmi) 331 panic("NMI: Not continuing"); 332 333 printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); 334 335 /* Clear and disable the memory parity error line. */ 336 reason = (reason & 0xf) | 4; 337 outb(reason, 0x61); 338 } 339 340 static notrace __kprobes void 341 io_check_error(unsigned char reason, struct pt_regs *regs) 342 { 343 unsigned long i; 344 345 printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n"); 346 show_registers(regs); 347 348 if (panic_on_io_nmi) 349 panic("NMI IOCK error: Not continuing"); 350 351 /* Re-enable the IOCK line, wait for a few seconds */ 352 reason = (reason & 0xf) | 8; 353 outb(reason, 0x61); 354 355 i = 2000; 356 while (--i) 357 udelay(1000); 358 359 reason &= ~8; 360 outb(reason, 0x61); 361 } 362 363 static notrace __kprobes void 364 unknown_nmi_error(unsigned char reason, struct pt_regs *regs) 365 { 366 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == 367 NOTIFY_STOP) 368 return; 369 #ifdef CONFIG_MCA 370 /* 371 * Might actually be able to figure out what the guilty party 372 * is: 373 */ 374 if (MCA_bus) { 375 mca_handle_nmi(); 376 return; 377 } 378 #endif 379 printk(KERN_EMERG 380 "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", 381 reason, smp_processor_id()); 382 383 printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); 384 if (panic_on_unrecovered_nmi) 385 panic("NMI: Not continuing"); 386 387 printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); 388 } 389 390 static notrace __kprobes void default_do_nmi(struct pt_regs *regs) 391 { 392 unsigned char reason = 0; 393 int cpu; 394 395 cpu = smp_processor_id(); 396 397 /* Only the BSP gets external NMIs from the system. */ 398 if (!cpu) 399 reason = get_nmi_reason(); 400 401 if (!(reason & 0xc0)) { 402 if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) 403 == NOTIFY_STOP) 404 return; 405 #ifdef CONFIG_X86_LOCAL_APIC 406 /* 407 * Ok, so this is none of the documented NMI sources, 408 * so it must be the NMI watchdog. 409 */ 410 if (nmi_watchdog_tick(regs, reason)) 411 return; 412 if (!do_nmi_callback(regs, cpu)) 413 unknown_nmi_error(reason, regs); 414 #else 415 unknown_nmi_error(reason, regs); 416 #endif 417 418 return; 419 } 420 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) 421 return; 422 423 /* AK: following checks seem to be broken on modern chipsets. FIXME */ 424 if (reason & 0x80) 425 mem_parity_error(reason, regs); 426 if (reason & 0x40) 427 io_check_error(reason, regs); 428 #ifdef CONFIG_X86_32 429 /* 430 * Reassert NMI in case it became active meanwhile 431 * as it's edge-triggered: 432 */ 433 reassert_nmi(); 434 #endif 435 } 436 437 dotraplinkage notrace __kprobes void 438 do_nmi(struct pt_regs *regs, long error_code) 439 { 440 nmi_enter(); 441 442 inc_irq_stat(__nmi_count); 443 444 if (!ignore_nmis) 445 default_do_nmi(regs); 446 447 nmi_exit(); 448 } 449 450 void stop_nmi(void) 451 { 452 acpi_nmi_disable(); 453 ignore_nmis++; 454 } 455 456 void restart_nmi(void) 457 { 458 ignore_nmis--; 459 acpi_nmi_enable(); 460 } 461 462 /* May run on IST stack. */ 463 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) 464 { 465 #ifdef CONFIG_KPROBES 466 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 467 == NOTIFY_STOP) 468 return; 469 #else 470 if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP) 471 == NOTIFY_STOP) 472 return; 473 #endif 474 475 preempt_conditional_sti(regs); 476 do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); 477 preempt_conditional_cli(regs); 478 } 479 480 #ifdef CONFIG_X86_64 481 /* 482 * Help handler running on IST stack to switch back to user stack 483 * for scheduling or signal handling. The actual stack switch is done in 484 * entry.S 485 */ 486 asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) 487 { 488 struct pt_regs *regs = eregs; 489 /* Did already sync */ 490 if (eregs == (struct pt_regs *)eregs->sp) 491 ; 492 /* Exception from user space */ 493 else if (user_mode(eregs)) 494 regs = task_pt_regs(current); 495 /* 496 * Exception from kernel and interrupts are enabled. Move to 497 * kernel process stack. 498 */ 499 else if (eregs->flags & X86_EFLAGS_IF) 500 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); 501 if (eregs != regs) 502 *regs = *eregs; 503 return regs; 504 } 505 #endif 506 507 /* 508 * Our handling of the processor debug registers is non-trivial. 509 * We do not clear them on entry and exit from the kernel. Therefore 510 * it is possible to get a watchpoint trap here from inside the kernel. 511 * However, the code in ./ptrace.c has ensured that the user can 512 * only set watchpoints on userspace addresses. Therefore the in-kernel 513 * watchpoint trap can only occur in code which is reading/writing 514 * from user space. Such code must not hold kernel locks (since it 515 * can equally take a page fault), therefore it is safe to call 516 * force_sig_info even though that claims and releases locks. 517 * 518 * Code in ./signal.c ensures that the debug control register 519 * is restored before we deliver any signal, and therefore that 520 * user code runs with the correct debug control register even though 521 * we clear it here. 522 * 523 * Being careful here means that we don't have to be as careful in a 524 * lot of more complicated places (task switching can be a bit lazy 525 * about restoring all the debug state, and ptrace doesn't have to 526 * find every occurrence of the TF bit that could be saved away even 527 * by user code) 528 * 529 * May run on IST stack. 530 */ 531 dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) 532 { 533 struct task_struct *tsk = current; 534 unsigned long condition; 535 int si_code; 536 537 get_debugreg(condition, 6); 538 539 /* Catch kmemcheck conditions first of all! */ 540 if (condition & DR_STEP && kmemcheck_trap(regs)) 541 return; 542 543 /* 544 * The processor cleared BTF, so don't mark that we need it set. 545 */ 546 clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR); 547 tsk->thread.debugctlmsr = 0; 548 549 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, 550 SIGTRAP) == NOTIFY_STOP) 551 return; 552 553 /* It's safe to allow irq's after DR6 has been saved */ 554 preempt_conditional_sti(regs); 555 556 /* Mask out spurious debug traps due to lazy DR7 setting */ 557 if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { 558 if (!tsk->thread.debugreg7) 559 goto clear_dr7; 560 } 561 562 #ifdef CONFIG_X86_32 563 if (regs->flags & X86_VM_MASK) 564 goto debug_vm86; 565 #endif 566 567 /* Save debug status register where ptrace can see it */ 568 tsk->thread.debugreg6 = condition; 569 570 /* 571 * Single-stepping through TF: make sure we ignore any events in 572 * kernel space (but re-enable TF when returning to user mode). 573 */ 574 if (condition & DR_STEP) { 575 if (!user_mode(regs)) 576 goto clear_TF_reenable; 577 } 578 579 si_code = get_si_code(condition); 580 /* Ok, finally something we can handle */ 581 send_sigtrap(tsk, regs, error_code, si_code); 582 583 /* 584 * Disable additional traps. They'll be re-enabled when 585 * the signal is delivered. 586 */ 587 clear_dr7: 588 set_debugreg(0, 7); 589 preempt_conditional_cli(regs); 590 return; 591 592 #ifdef CONFIG_X86_32 593 debug_vm86: 594 /* reenable preemption: handle_vm86_trap() might sleep */ 595 dec_preempt_count(); 596 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); 597 conditional_cli(regs); 598 return; 599 #endif 600 601 clear_TF_reenable: 602 set_tsk_thread_flag(tsk, TIF_SINGLESTEP); 603 regs->flags &= ~X86_EFLAGS_TF; 604 preempt_conditional_cli(regs); 605 return; 606 } 607 608 #ifdef CONFIG_X86_64 609 static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr) 610 { 611 if (fixup_exception(regs)) 612 return 1; 613 614 notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE); 615 /* Illegal floating point operation in the kernel */ 616 current->thread.trap_no = trapnr; 617 die(str, regs, 0); 618 return 0; 619 } 620 #endif 621 622 /* 623 * Note that we play around with the 'TS' bit in an attempt to get 624 * the correct behaviour even in the presence of the asynchronous 625 * IRQ13 behaviour 626 */ 627 void math_error(void __user *ip) 628 { 629 struct task_struct *task; 630 siginfo_t info; 631 unsigned short cwd, swd, err; 632 633 /* 634 * Save the info for the exception handler and clear the error. 635 */ 636 task = current; 637 save_init_fpu(task); 638 task->thread.trap_no = 16; 639 task->thread.error_code = 0; 640 info.si_signo = SIGFPE; 641 info.si_errno = 0; 642 info.si_addr = ip; 643 /* 644 * (~cwd & swd) will mask out exceptions that are not set to unmasked 645 * status. 0x3f is the exception bits in these regs, 0x200 is the 646 * C1 reg you need in case of a stack fault, 0x040 is the stack 647 * fault bit. We should only be taking one exception at a time, 648 * so if this combination doesn't produce any single exception, 649 * then we have a bad program that isn't synchronizing its FPU usage 650 * and it will suffer the consequences since we won't be able to 651 * fully reproduce the context of the exception 652 */ 653 cwd = get_fpu_cwd(task); 654 swd = get_fpu_swd(task); 655 656 err = swd & ~cwd; 657 658 if (err & 0x001) { /* Invalid op */ 659 /* 660 * swd & 0x240 == 0x040: Stack Underflow 661 * swd & 0x240 == 0x240: Stack Overflow 662 * User must clear the SF bit (0x40) if set 663 */ 664 info.si_code = FPE_FLTINV; 665 } else if (err & 0x004) { /* Divide by Zero */ 666 info.si_code = FPE_FLTDIV; 667 } else if (err & 0x008) { /* Overflow */ 668 info.si_code = FPE_FLTOVF; 669 } else if (err & 0x012) { /* Denormal, Underflow */ 670 info.si_code = FPE_FLTUND; 671 } else if (err & 0x020) { /* Precision */ 672 info.si_code = FPE_FLTRES; 673 } else { 674 /* 675 * If we're using IRQ 13, or supposedly even some trap 16 676 * implementations, it's possible we get a spurious trap... 677 */ 678 return; /* Spurious trap, no error */ 679 } 680 force_sig_info(SIGFPE, &info, task); 681 } 682 683 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) 684 { 685 conditional_sti(regs); 686 687 #ifdef CONFIG_X86_32 688 ignore_fpu_irq = 1; 689 #else 690 if (!user_mode(regs) && 691 kernel_math_error(regs, "kernel x87 math error", 16)) 692 return; 693 #endif 694 695 math_error((void __user *)regs->ip); 696 } 697 698 static void simd_math_error(void __user *ip) 699 { 700 struct task_struct *task; 701 siginfo_t info; 702 unsigned short mxcsr; 703 704 /* 705 * Save the info for the exception handler and clear the error. 706 */ 707 task = current; 708 save_init_fpu(task); 709 task->thread.trap_no = 19; 710 task->thread.error_code = 0; 711 info.si_signo = SIGFPE; 712 info.si_errno = 0; 713 info.si_code = __SI_FAULT; 714 info.si_addr = ip; 715 /* 716 * The SIMD FPU exceptions are handled a little differently, as there 717 * is only a single status/control register. Thus, to determine which 718 * unmasked exception was caught we must mask the exception mask bits 719 * at 0x1f80, and then use these to mask the exception bits at 0x3f. 720 */ 721 mxcsr = get_fpu_mxcsr(task); 722 switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { 723 case 0x000: 724 default: 725 break; 726 case 0x001: /* Invalid Op */ 727 info.si_code = FPE_FLTINV; 728 break; 729 case 0x002: /* Denormalize */ 730 case 0x010: /* Underflow */ 731 info.si_code = FPE_FLTUND; 732 break; 733 case 0x004: /* Zero Divide */ 734 info.si_code = FPE_FLTDIV; 735 break; 736 case 0x008: /* Overflow */ 737 info.si_code = FPE_FLTOVF; 738 break; 739 case 0x020: /* Precision */ 740 info.si_code = FPE_FLTRES; 741 break; 742 } 743 force_sig_info(SIGFPE, &info, task); 744 } 745 746 dotraplinkage void 747 do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 748 { 749 conditional_sti(regs); 750 751 #ifdef CONFIG_X86_32 752 if (cpu_has_xmm) { 753 /* Handle SIMD FPU exceptions on PIII+ processors. */ 754 ignore_fpu_irq = 1; 755 simd_math_error((void __user *)regs->ip); 756 return; 757 } 758 /* 759 * Handle strange cache flush from user space exception 760 * in all other cases. This is undocumented behaviour. 761 */ 762 if (regs->flags & X86_VM_MASK) { 763 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code); 764 return; 765 } 766 current->thread.trap_no = 19; 767 current->thread.error_code = error_code; 768 die_if_kernel("cache flush denied", regs, error_code); 769 force_sig(SIGSEGV, current); 770 #else 771 if (!user_mode(regs) && 772 kernel_math_error(regs, "kernel simd math error", 19)) 773 return; 774 simd_math_error((void __user *)regs->ip); 775 #endif 776 } 777 778 dotraplinkage void 779 do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) 780 { 781 conditional_sti(regs); 782 #if 0 783 /* No need to warn about this any longer. */ 784 printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); 785 #endif 786 } 787 788 asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) 789 { 790 } 791 792 asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) 793 { 794 } 795 796 /* 797 * __math_state_restore assumes that cr0.TS is already clear and the 798 * fpu state is all ready for use. Used during context switch. 799 */ 800 void __math_state_restore(void) 801 { 802 struct thread_info *thread = current_thread_info(); 803 struct task_struct *tsk = thread->task; 804 805 /* 806 * Paranoid restore. send a SIGSEGV if we fail to restore the state. 807 */ 808 if (unlikely(restore_fpu_checking(tsk))) { 809 stts(); 810 force_sig(SIGSEGV, tsk); 811 return; 812 } 813 814 thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ 815 tsk->fpu_counter++; 816 } 817 818 /* 819 * 'math_state_restore()' saves the current math information in the 820 * old math state array, and gets the new ones from the current task 821 * 822 * Careful.. There are problems with IBM-designed IRQ13 behaviour. 823 * Don't touch unless you *really* know how it works. 824 * 825 * Must be called with kernel preemption disabled (in this case, 826 * local interrupts are disabled at the call-site in entry.S). 827 */ 828 asmlinkage void math_state_restore(void) 829 { 830 struct thread_info *thread = current_thread_info(); 831 struct task_struct *tsk = thread->task; 832 833 if (!tsk_used_math(tsk)) { 834 local_irq_enable(); 835 /* 836 * does a slab alloc which can sleep 837 */ 838 if (init_fpu(tsk)) { 839 /* 840 * ran out of memory! 841 */ 842 do_group_exit(SIGKILL); 843 return; 844 } 845 local_irq_disable(); 846 } 847 848 clts(); /* Allow maths ops (or we recurse) */ 849 850 __math_state_restore(); 851 } 852 EXPORT_SYMBOL_GPL(math_state_restore); 853 854 #ifndef CONFIG_MATH_EMULATION 855 void math_emulate(struct math_emu_info *info) 856 { 857 printk(KERN_EMERG 858 "math-emulation not enabled and no coprocessor found.\n"); 859 printk(KERN_EMERG "killing %s.\n", current->comm); 860 force_sig(SIGFPE, current); 861 schedule(); 862 } 863 #endif /* CONFIG_MATH_EMULATION */ 864 865 dotraplinkage void __kprobes 866 do_device_not_available(struct pt_regs *regs, long error_code) 867 { 868 #ifdef CONFIG_X86_32 869 if (read_cr0() & X86_CR0_EM) { 870 struct math_emu_info info = { }; 871 872 conditional_sti(regs); 873 874 info.regs = regs; 875 math_emulate(&info); 876 } else { 877 math_state_restore(); /* interrupts still off */ 878 conditional_sti(regs); 879 } 880 #else 881 math_state_restore(); 882 #endif 883 } 884 885 #ifdef CONFIG_X86_32 886 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) 887 { 888 siginfo_t info; 889 local_irq_enable(); 890 891 info.si_signo = SIGILL; 892 info.si_errno = 0; 893 info.si_code = ILL_BADSTK; 894 info.si_addr = NULL; 895 if (notify_die(DIE_TRAP, "iret exception", 896 regs, error_code, 32, SIGILL) == NOTIFY_STOP) 897 return; 898 do_trap(32, SIGILL, "iret exception", regs, error_code, &info); 899 } 900 #endif 901 902 void __init trap_init(void) 903 { 904 int i; 905 906 #ifdef CONFIG_EISA 907 void __iomem *p = early_ioremap(0x0FFFD9, 4); 908 909 if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) 910 EISA_bus = 1; 911 early_iounmap(p, 4); 912 #endif 913 914 set_intr_gate(0, ÷_error); 915 set_intr_gate_ist(1, &debug, DEBUG_STACK); 916 set_intr_gate_ist(2, &nmi, NMI_STACK); 917 /* int3 can be called from all */ 918 set_system_intr_gate_ist(3, &int3, DEBUG_STACK); 919 /* int4 can be called from all */ 920 set_system_intr_gate(4, &overflow); 921 set_intr_gate(5, &bounds); 922 set_intr_gate(6, &invalid_op); 923 set_intr_gate(7, &device_not_available); 924 #ifdef CONFIG_X86_32 925 set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); 926 #else 927 set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); 928 #endif 929 set_intr_gate(9, &coprocessor_segment_overrun); 930 set_intr_gate(10, &invalid_TSS); 931 set_intr_gate(11, &segment_not_present); 932 set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); 933 set_intr_gate(13, &general_protection); 934 set_intr_gate(14, &page_fault); 935 set_intr_gate(15, &spurious_interrupt_bug); 936 set_intr_gate(16, &coprocessor_error); 937 set_intr_gate(17, &alignment_check); 938 #ifdef CONFIG_X86_MCE 939 set_intr_gate_ist(18, &machine_check, MCE_STACK); 940 #endif 941 set_intr_gate(19, &simd_coprocessor_error); 942 943 /* Reserve all the builtin and the syscall vector: */ 944 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 945 set_bit(i, used_vectors); 946 947 #ifdef CONFIG_IA32_EMULATION 948 set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); 949 set_bit(IA32_SYSCALL_VECTOR, used_vectors); 950 #endif 951 952 #ifdef CONFIG_X86_32 953 if (cpu_has_fxsr) { 954 printk(KERN_INFO "Enabling fast FPU save and restore... "); 955 set_in_cr4(X86_CR4_OSFXSR); 956 printk("done.\n"); 957 } 958 if (cpu_has_xmm) { 959 printk(KERN_INFO 960 "Enabling unmasked SIMD FPU exception support... "); 961 set_in_cr4(X86_CR4_OSXMMEXCPT); 962 printk("done.\n"); 963 } 964 965 set_system_trap_gate(SYSCALL_VECTOR, &system_call); 966 set_bit(SYSCALL_VECTOR, used_vectors); 967 #endif 968 969 /* 970 * Should be a barrier for any external CPU state: 971 */ 972 cpu_init(); 973 974 x86_init.irqs.trap_init(); 975 } 976