/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mce.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static int ignore_nmis;

static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        inc_preempt_count();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        dec_preempt_count();
}
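
/*
 * A note on the helpers above: a trap can interrupt a region that runs
 * with interrupts disabled, so interrupts are only re-enabled when the
 * interrupted context had X86_EFLAGS_IF set.  The preempt_* variants
 * additionally hold off preemption across the window; handlers that run
 * on an IST stack (debug, int3, stack fault) use them because being
 * preempted and rescheduled while on a per-CPU exception stack must not
 * happen.
 */
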
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK) {
                /*
                 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < 6)
                        goto vm86_trap;
                goto trap_signal;
        }
#endif

        if (!user_mode(regs))
                goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
        /*
         * We want error_code and trap_no set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults.  See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

#ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                       tsk->comm, tsk->pid, str,
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }
#endif

        if (info)
                force_sig_info(signr, info, tsk);
        else
                force_sig(signr, tsk);
        return;

kernel_trap:
        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                die(str, regs, error_code);
        }
        return;

#ifdef CONFIG_X86_32
vm86_trap:
        if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
                             error_code, trapnr))
                goto trap_signal;
        return;
#endif
}

#define DO_ERROR(trapnr, signr, str, name)                              \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, NULL);            \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)         \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        siginfo_t info;                                                 \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, &info);           \
}
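
/*
 * For illustration, DO_ERROR(4, SIGSEGV, "overflow", overflow) expands
 * (modulo whitespace) to:
 *
 *      dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
 *      {
 *              if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *                             4, SIGSEGV) == NOTIFY_STOP)
 *                      return;
 *              conditional_sti(regs);
 *              do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *      }
 */
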
DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                       12, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /*
         * This is always a kernel trap and never fixable (and thus must
         * never return).
         */
        for (;;)
                die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;

        conditional_sti(regs);

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK)
                goto gp_in_vm86;
#endif

        tsk = current;
        if (!user_mode(regs))
                goto gp_in_kernel;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
                       tsk->comm, task_pid_nr(tsk),
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }

        force_sig(SIGSEGV, tsk);
        return;

#ifdef CONFIG_X86_32
gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;
#endif

gp_in_kernel:
        if (fixup_exception(regs))
                return;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;
        if (notify_die(DIE_GPF, "general protection fault", regs,
                       error_code, 13, SIGSEGV) == NOTIFY_STOP)
                return;
        die("general protection fault", regs, error_code);
}
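
/*
 * The handlers below poke the legacy NMI status/control port at 0x61:
 * on a read, bit 7 reports a memory (SERR#) parity error and bit 6 an
 * I/O check (IOCHK) error; writing the byte back with bit 2 or bit 3
 * set clears and masks the corresponding line, which is why the
 * handlers rewrite the reason byte with 0x4 or 0x8 or'ed in.
 */
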
static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
        printk(KERN_EMERG
                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                        reason, smp_processor_id());

        printk(KERN_EMERG
                "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        if (panic_on_io_nmi)
                panic("NMI IOCK error: Not continuing");

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);

        i = 2000;
        while (--i)
                udelay(1000);

        reason &= ~8;
        outb(reason, 0x61);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
                        NOTIFY_STOP)
                return;
#ifdef CONFIG_MCA
        /*
         * Might actually be able to figure out what the guilty party
         * is:
         */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk(KERN_EMERG
                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                        reason, smp_processor_id());

        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                                == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, cpu))
                        unknown_nmi_error(reason, regs);
#else
                unknown_nmi_error(reason, regs);
#endif

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
#ifdef CONFIG_X86_32
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered:
         */
        reassert_nmi();
#endif
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
        nmi_enter();

        inc_irq_stat(__nmi_count);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

void stop_nmi(void)
{
        acpi_nmi_disable();
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
        acpi_nmi_enable();
}
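
/*
 * INT3 is the one-byte breakpoint instruction (opcode 0xcc) that
 * debuggers and kprobes patch into running code.  That is why vector 3
 * is installed as a system gate (callable from user space) in
 * trap_init() and why kprobes gets the first look at it through the
 * notify_die() chain in the handler below.
 */
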
/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KPROBES
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#else
        if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#endif

        preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Did already sync */
        if (eregs == (struct pt_regs *)eregs->sp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /*
         * Exception from kernel and interrupts are enabled. Move to
         * kernel process stack.
         */
        else if (eregs->flags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        unsigned long dr6;
        int si_code;

        get_debugreg(dr6, 6);

        /* Filter out all the reserved bits which are preset to 1 */
        dr6 &= ~DR6_RESERVED;

        /* Catch kmemcheck conditions first of all! */
        if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
                return;

        /* DR6 may or may not be cleared by the CPU */
        set_debugreg(0, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

        /* Store the virtualized DR6 value */
        tsk->thread.debugreg6 = dr6;

        if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
                                                SIGTRAP) == NOTIFY_STOP)
                return;

        /* It's safe to allow irq's after DR6 has been saved */
        preempt_conditional_sti(regs);

        if (regs->flags & X86_VM_MASK) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                error_code, 1);
                /* Balance the preempt_conditional_sti() above. */
                preempt_conditional_cli(regs);
                return;
        }

        /*
         * Single-stepping through system calls: ignore any exceptions in
         * kernel space, but re-enable TF when returning to user mode.
         *
         * We already checked v86 mode above, so we can check for kernel mode
         * by just checking the CPL of CS.
         */
        if ((dr6 & DR_STEP) && !user_mode(regs)) {
                tsk->thread.debugreg6 &= ~DR_STEP;
                set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
                regs->flags &= ~X86_EFLAGS_TF;
        }
        si_code = get_si_code(tsk->thread.debugreg6);
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS))
                send_sigtrap(tsk, regs, error_code, si_code);
        preempt_conditional_cli(regs);

        return;
}
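
/*
 * A worked example for the x87 decoding in math_error() below: with
 * divide-by-zero unmasked, bit 2 (ZM) of the control word is clear, and
 * an fdiv by zero sets bit 2 (ZE) of the status word.  err = swd & ~cwd
 * therefore has 0x004 set and we raise FPE_FLTDIV; a masked exception
 * never traps, so its status bit is filtered out by the set mask bit.
 */
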
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
        struct task_struct *task = current;
        siginfo_t info;
        unsigned short err;
        char *str = (trapnr == 16) ? "fpu exception" : "simd exception";

        if (notify_die(DIE_TRAP, str, regs, error_code,
                        trapnr, SIGFPE) == NOTIFY_STOP)
                return;
        conditional_sti(regs);

        if (!user_mode_vm(regs)) {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
                        task->thread.trap_no = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

        /*
         * Save the info for the exception handler and clear the error.
         */
        save_init_fpu(task);
        task->thread.trap_no = trapnr;
        task->thread.error_code = error_code;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *)regs->ip;
        if (trapnr == 16) {
                unsigned short cwd, swd;
                /*
                 * (~cwd & swd) will mask out exceptions that are not set to
                 * unmasked status.  0x3f is the exception bits in these regs,
                 * 0x200 is the C1 flag you need in case of a stack fault,
                 * 0x040 is the stack fault bit.  We should only be taking one
                 * exception at a time, so if this combination doesn't produce
                 * any single exception, then we have a bad program that isn't
                 * synchronizing its FPU usage and it will suffer the
                 * consequences, since we won't be able to fully reproduce the
                 * context of the exception.
                 */
                cwd = get_fpu_cwd(task);
                swd = get_fpu_swd(task);

                err = swd & ~cwd;
        } else {
                /*
                 * The SIMD FPU exceptions are handled a little differently, as
                 * there is only a single status/control register.  Thus, to
                 * determine which unmasked exception was caught we must mask
                 * the exception mask bits at 0x1f80, and then use these to
                 * mask the exception bits at 0x3f.
                 */
                unsigned short mxcsr = get_fpu_mxcsr(task);
                err = ~(mxcsr >> 7) & mxcsr;
        }

        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                info.si_code = FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                info.si_code = FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                info.si_code = FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                info.si_code = FPE_FLTRES;
        } else {
                /*
                 * If we're using IRQ 13, or supposedly even some trap 16
                 * implementations, it's possible we get a spurious trap...
                 */
                return;         /* Spurious trap, no error */
        }
        force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
        ignore_fpu_irq = 1;
#endif

        math_error(regs, error_code, 16);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        math_error(regs, error_code, 19);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
#if 0
        /* No need to warn about this any longer. */
        printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * __math_state_restore assumes that cr0.TS is already clear and the
 * fpu state is all ready for use.  Used during context switch.
 */
void __math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        /*
         * Paranoid restore. send a SIGSEGV if we fail to restore the state.
         */
        if (unlikely(restore_fpu_checking(tsk))) {
                stts();
                force_sig(SIGSEGV, tsk);
                return;
        }

        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
        tsk->fpu_counter++;
}
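
/*
 * Lazy FPU switching in a nutshell: the context-switch code sets CR0.TS
 * when scheduling away from a task that owns the FPU.  The next FPU
 * instruction then raises #NM (device_not_available), which ends up in
 * math_state_restore() below to reload that task's FPU state.  clts()
 * must clear TS before the state is touched, or the restore itself
 * would trap again.
 */
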
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        if (!tsk_used_math(tsk)) {
                local_irq_enable();
                /*
                 * does a slab alloc which can sleep
                 */
                if (init_fpu(tsk)) {
                        /*
                         * ran out of memory!
                         */
                        do_group_exit(SIGKILL);
                        return;
                }
                local_irq_disable();
        }

        clts();                         /* Allow maths ops (or we recurse) */

        __math_state_restore();
}
EXPORT_SYMBOL_GPL(math_state_restore);

#ifndef CONFIG_MATH_EMULATION
void math_emulate(struct math_emu_info *info)
{
        printk(KERN_EMERG
                "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}
#endif /* CONFIG_MATH_EMULATION */

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };

                conditional_sti(regs);

                info.regs = regs;
                math_emulate(&info);
        } else {
                math_state_restore(); /* interrupts still off */
                conditional_sti(regs);
        }
#else
        math_state_restore();
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
        siginfo_t info;

        local_irq_enable();

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
        info.si_addr = NULL;
        if (notify_die(DIE_TRAP, "iret exception",
                        regs, error_code, 32, SIGILL) == NOTIFY_STOP)
                return;
        do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}
#endif
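
/*
 * A note on the gate flavors used in trap_init() below: plain
 * set_intr_gate() installs a DPL-0 interrupt gate, so the vector can
 * only be raised by the CPU itself, while the set_system_*() variants
 * use DPL 3 so that "int $n" works from user space (int3, into, the
 * syscall vector).  Interrupt gates clear IF on entry where trap gates
 * leave it unchanged, and the *_ist variants make 64-bit CPUs switch
 * to a dedicated, known-good exception stack via the TSS.
 */
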
"); 858 set_in_cr4(X86_CR4_OSFXSR); 859 printk("done.\n"); 860 } 861 if (cpu_has_xmm) { 862 printk(KERN_INFO 863 "Enabling unmasked SIMD FPU exception support... "); 864 set_in_cr4(X86_CR4_OSXMMEXCPT); 865 printk("done.\n"); 866 } 867 868 set_system_trap_gate(SYSCALL_VECTOR, &system_call); 869 set_bit(SYSCALL_VECTOR, used_vectors); 870 #endif 871 872 /* 873 * Should be a barrier for any external CPU state: 874 */ 875 cpu_init(); 876 877 x86_init.irqs.trap_init(); 878 } 879