1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs 4 * 5 * Pentium III FXSR, SSE support 6 * Gareth Hughes <gareth@valinux.com>, May 2000 7 */ 8 9 /* 10 * Handle hardware traps and faults. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/interrupt.h> 16 #include <linux/kallsyms.h> 17 #include <linux/spinlock.h> 18 #include <linux/kprobes.h> 19 #include <linux/uaccess.h> 20 #include <linux/kdebug.h> 21 #include <linux/kgdb.h> 22 #include <linux/kernel.h> 23 #include <linux/module.h> 24 #include <linux/ptrace.h> 25 #include <linux/string.h> 26 #include <linux/delay.h> 27 #include <linux/errno.h> 28 #include <linux/kexec.h> 29 #include <linux/sched.h> 30 #include <linux/timer.h> 31 #include <linux/init.h> 32 #include <linux/bug.h> 33 #include <linux/nmi.h> 34 #include <linux/mm.h> 35 #include <linux/smp.h> 36 #include <linux/io.h> 37 38 #ifdef CONFIG_EISA 39 #include <linux/ioport.h> 40 #include <linux/eisa.h> 41 #endif 42 43 #if defined(CONFIG_EDAC) 44 #include <linux/edac.h> 45 #endif 46 47 #include <asm/kmemcheck.h> 48 #include <asm/stacktrace.h> 49 #include <asm/processor.h> 50 #include <asm/debugreg.h> 51 #include <linux/atomic.h> 52 #include <asm/ftrace.h> 53 #include <asm/traps.h> 54 #include <asm/desc.h> 55 #include <asm/i387.h> 56 #include <asm/fpu-internal.h> 57 #include <asm/mce.h> 58 #include <asm/rcu.h> 59 60 #include <asm/mach_traps.h> 61 62 #ifdef CONFIG_X86_64 63 #include <asm/x86_init.h> 64 #include <asm/pgalloc.h> 65 #include <asm/proto.h> 66 #else 67 #include <asm/processor-flags.h> 68 #include <asm/setup.h> 69 70 asmlinkage int system_call(void); 71 72 /* Do we ignore FPU interrupts ? */ 73 char ignore_fpu_irq; 74 75 /* 76 * The IDT has to be page-aligned to simplify the Pentium 77 * F0 0F bug workaround. 78 */ 79 gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; 80 #endif 81 82 DECLARE_BITMAP(used_vectors, NR_VECTORS); 83 EXPORT_SYMBOL_GPL(used_vectors); 84 85 static inline void conditional_sti(struct pt_regs *regs) 86 { 87 if (regs->flags & X86_EFLAGS_IF) 88 local_irq_enable(); 89 } 90 91 static inline void preempt_conditional_sti(struct pt_regs *regs) 92 { 93 inc_preempt_count(); 94 if (regs->flags & X86_EFLAGS_IF) 95 local_irq_enable(); 96 } 97 98 static inline void conditional_cli(struct pt_regs *regs) 99 { 100 if (regs->flags & X86_EFLAGS_IF) 101 local_irq_disable(); 102 } 103 104 static inline void preempt_conditional_cli(struct pt_regs *regs) 105 { 106 if (regs->flags & X86_EFLAGS_IF) 107 local_irq_disable(); 108 dec_preempt_count(); 109 } 110 111 static int __kprobes 112 do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, 113 struct pt_regs *regs, long error_code) 114 { 115 #ifdef CONFIG_X86_32 116 if (regs->flags & X86_VM_MASK) { 117 /* 118 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86. 119 * On nmi (interrupt 2), do_trap should not be called. 120 */ 121 if (trapnr < X86_TRAP_UD) { 122 if (!handle_vm86_trap((struct kernel_vm86_regs *) regs, 123 error_code, trapnr)) 124 return 0; 125 } 126 return -1; 127 } 128 #endif 129 if (!user_mode(regs)) { 130 if (!fixup_exception(regs)) { 131 tsk->thread.error_code = error_code; 132 tsk->thread.trap_nr = trapnr; 133 die(str, regs, error_code); 134 } 135 return 0; 136 } 137 138 return -1; 139 } 140 141 static void __kprobes 142 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, 143 long error_code, siginfo_t *info) 144 { 145 struct task_struct *tsk = current; 146 147 148 if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code)) 149 return; 150 /* 151 * We want error_code and trap_nr set for userspace faults and 152 * kernelspace faults which result in die(), but not 153 * kernelspace faults which are fixed up. die() gives the 154 * process no chance to handle the signal and notice the 155 * kernel fault information, so that won't result in polluting 156 * the information about previously queued, but not yet 157 * delivered, faults. See also do_general_protection below. 158 */ 159 tsk->thread.error_code = error_code; 160 tsk->thread.trap_nr = trapnr; 161 162 #ifdef CONFIG_X86_64 163 if (show_unhandled_signals && unhandled_signal(tsk, signr) && 164 printk_ratelimit()) { 165 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", 166 tsk->comm, tsk->pid, str, 167 regs->ip, regs->sp, error_code); 168 print_vma_addr(" in ", regs->ip); 169 pr_cont("\n"); 170 } 171 #endif 172 173 if (info) 174 force_sig_info(signr, info, tsk); 175 else 176 force_sig(signr, tsk); 177 } 178 179 #define DO_ERROR(trapnr, signr, str, name) \ 180 dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 181 { \ 182 exception_enter(regs); \ 183 if (notify_die(DIE_TRAP, str, regs, error_code, \ 184 trapnr, signr) == NOTIFY_STOP) { \ 185 exception_exit(regs); \ 186 return; \ 187 } \ 188 conditional_sti(regs); \ 189 do_trap(trapnr, signr, str, regs, error_code, NULL); \ 190 exception_exit(regs); \ 191 } 192 193 #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 194 dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 195 { \ 196 siginfo_t info; \ 197 info.si_signo = signr; \ 198 info.si_errno = 0; \ 199 info.si_code = sicode; \ 200 info.si_addr = (void __user *)siaddr; \ 201 exception_enter(regs); \ 202 if (notify_die(DIE_TRAP, str, regs, error_code, \ 203 trapnr, signr) == NOTIFY_STOP) { \ 204 exception_exit(regs); \ 205 return; \ 206 } \ 207 conditional_sti(regs); \ 208 do_trap(trapnr, signr, str, regs, error_code, &info); \ 209 exception_exit(regs); \ 210 } 211 212 DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, 213 regs->ip) 214 DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) 215 DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) 216 DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, 217 regs->ip) 218 DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", 219 coprocessor_segment_overrun) 220 DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) 221 DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) 222 #ifdef CONFIG_X86_32 223 DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) 224 #endif 225 DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, 226 BUS_ADRALN, 0) 227 228 #ifdef CONFIG_X86_64 229 /* Runs on IST stack */ 230 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 231 { 232 exception_enter(regs); 233 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 234 X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { 235 preempt_conditional_sti(regs); 236 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); 237 preempt_conditional_cli(regs); 238 } 239 exception_exit(regs); 240 } 241 242 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) 243 { 244 static const char str[] = "double fault"; 245 struct task_struct *tsk = current; 246 247 exception_enter(regs); 248 /* Return not checked because double check cannot be ignored */ 249 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); 250 251 tsk->thread.error_code = error_code; 252 tsk->thread.trap_nr = X86_TRAP_DF; 253 254 /* 255 * This is always a kernel trap and never fixable (and thus must 256 * never return). 257 */ 258 for (;;) 259 die(str, regs, error_code); 260 } 261 #endif 262 263 dotraplinkage void __kprobes 264 do_general_protection(struct pt_regs *regs, long error_code) 265 { 266 struct task_struct *tsk; 267 268 exception_enter(regs); 269 conditional_sti(regs); 270 271 #ifdef CONFIG_X86_32 272 if (regs->flags & X86_VM_MASK) { 273 local_irq_enable(); 274 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); 275 goto exit; 276 } 277 #endif 278 279 tsk = current; 280 if (!user_mode(regs)) { 281 if (fixup_exception(regs)) 282 goto exit; 283 284 tsk->thread.error_code = error_code; 285 tsk->thread.trap_nr = X86_TRAP_GP; 286 if (notify_die(DIE_GPF, "general protection fault", regs, error_code, 287 X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) 288 die("general protection fault", regs, error_code); 289 goto exit; 290 } 291 292 tsk->thread.error_code = error_code; 293 tsk->thread.trap_nr = X86_TRAP_GP; 294 295 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 296 printk_ratelimit()) { 297 pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx", 298 tsk->comm, task_pid_nr(tsk), 299 regs->ip, regs->sp, error_code); 300 print_vma_addr(" in ", regs->ip); 301 pr_cont("\n"); 302 } 303 304 force_sig(SIGSEGV, tsk); 305 exit: 306 exception_exit(regs); 307 } 308 309 /* May run on IST stack. */ 310 dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) 311 { 312 #ifdef CONFIG_DYNAMIC_FTRACE 313 /* 314 * ftrace must be first, everything else may cause a recursive crash. 315 * See note by declaration of modifying_ftrace_code in ftrace.c 316 */ 317 if (unlikely(atomic_read(&modifying_ftrace_code)) && 318 ftrace_int3_handler(regs)) 319 return; 320 #endif 321 exception_enter(regs); 322 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 323 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 324 SIGTRAP) == NOTIFY_STOP) 325 goto exit; 326 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 327 328 if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 329 SIGTRAP) == NOTIFY_STOP) 330 goto exit; 331 332 /* 333 * Let others (NMI) know that the debug stack is in use 334 * as we may switch to the interrupt stack. 335 */ 336 debug_stack_usage_inc(); 337 preempt_conditional_sti(regs); 338 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); 339 preempt_conditional_cli(regs); 340 debug_stack_usage_dec(); 341 exit: 342 exception_exit(regs); 343 } 344 345 #ifdef CONFIG_X86_64 346 /* 347 * Help handler running on IST stack to switch back to user stack 348 * for scheduling or signal handling. The actual stack switch is done in 349 * entry.S 350 */ 351 asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) 352 { 353 struct pt_regs *regs = eregs; 354 /* Did already sync */ 355 if (eregs == (struct pt_regs *)eregs->sp) 356 ; 357 /* Exception from user space */ 358 else if (user_mode(eregs)) 359 regs = task_pt_regs(current); 360 /* 361 * Exception from kernel and interrupts are enabled. Move to 362 * kernel process stack. 363 */ 364 else if (eregs->flags & X86_EFLAGS_IF) 365 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); 366 if (eregs != regs) 367 *regs = *eregs; 368 return regs; 369 } 370 #endif 371 372 /* 373 * Our handling of the processor debug registers is non-trivial. 374 * We do not clear them on entry and exit from the kernel. Therefore 375 * it is possible to get a watchpoint trap here from inside the kernel. 376 * However, the code in ./ptrace.c has ensured that the user can 377 * only set watchpoints on userspace addresses. Therefore the in-kernel 378 * watchpoint trap can only occur in code which is reading/writing 379 * from user space. Such code must not hold kernel locks (since it 380 * can equally take a page fault), therefore it is safe to call 381 * force_sig_info even though that claims and releases locks. 382 * 383 * Code in ./signal.c ensures that the debug control register 384 * is restored before we deliver any signal, and therefore that 385 * user code runs with the correct debug control register even though 386 * we clear it here. 387 * 388 * Being careful here means that we don't have to be as careful in a 389 * lot of more complicated places (task switching can be a bit lazy 390 * about restoring all the debug state, and ptrace doesn't have to 391 * find every occurrence of the TF bit that could be saved away even 392 * by user code) 393 * 394 * May run on IST stack. 395 */ 396 dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) 397 { 398 struct task_struct *tsk = current; 399 int user_icebp = 0; 400 unsigned long dr6; 401 int si_code; 402 403 exception_enter(regs); 404 405 get_debugreg(dr6, 6); 406 407 /* Filter out all the reserved bits which are preset to 1 */ 408 dr6 &= ~DR6_RESERVED; 409 410 /* 411 * If dr6 has no reason to give us about the origin of this trap, 412 * then it's very likely the result of an icebp/int01 trap. 413 * User wants a sigtrap for that. 414 */ 415 if (!dr6 && user_mode(regs)) 416 user_icebp = 1; 417 418 /* Catch kmemcheck conditions first of all! */ 419 if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) 420 goto exit; 421 422 /* DR6 may or may not be cleared by the CPU */ 423 set_debugreg(0, 6); 424 425 /* 426 * The processor cleared BTF, so don't mark that we need it set. 427 */ 428 clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP); 429 430 /* Store the virtualized DR6 value */ 431 tsk->thread.debugreg6 = dr6; 432 433 if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, 434 SIGTRAP) == NOTIFY_STOP) 435 goto exit; 436 437 /* 438 * Let others (NMI) know that the debug stack is in use 439 * as we may switch to the interrupt stack. 440 */ 441 debug_stack_usage_inc(); 442 443 /* It's safe to allow irq's after DR6 has been saved */ 444 preempt_conditional_sti(regs); 445 446 if (regs->flags & X86_VM_MASK) { 447 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 448 X86_TRAP_DB); 449 preempt_conditional_cli(regs); 450 debug_stack_usage_dec(); 451 goto exit; 452 } 453 454 /* 455 * Single-stepping through system calls: ignore any exceptions in 456 * kernel space, but re-enable TF when returning to user mode. 457 * 458 * We already checked v86 mode above, so we can check for kernel mode 459 * by just checking the CPL of CS. 460 */ 461 if ((dr6 & DR_STEP) && !user_mode(regs)) { 462 tsk->thread.debugreg6 &= ~DR_STEP; 463 set_tsk_thread_flag(tsk, TIF_SINGLESTEP); 464 regs->flags &= ~X86_EFLAGS_TF; 465 } 466 si_code = get_si_code(tsk->thread.debugreg6); 467 if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) 468 send_sigtrap(tsk, regs, error_code, si_code); 469 preempt_conditional_cli(regs); 470 debug_stack_usage_dec(); 471 472 exit: 473 exception_exit(regs); 474 } 475 476 /* 477 * Note that we play around with the 'TS' bit in an attempt to get 478 * the correct behaviour even in the presence of the asynchronous 479 * IRQ13 behaviour 480 */ 481 void math_error(struct pt_regs *regs, int error_code, int trapnr) 482 { 483 struct task_struct *task = current; 484 siginfo_t info; 485 unsigned short err; 486 char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : 487 "simd exception"; 488 489 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) 490 return; 491 conditional_sti(regs); 492 493 if (!user_mode_vm(regs)) 494 { 495 if (!fixup_exception(regs)) { 496 task->thread.error_code = error_code; 497 task->thread.trap_nr = trapnr; 498 die(str, regs, error_code); 499 } 500 return; 501 } 502 503 /* 504 * Save the info for the exception handler and clear the error. 505 */ 506 save_init_fpu(task); 507 task->thread.trap_nr = trapnr; 508 task->thread.error_code = error_code; 509 info.si_signo = SIGFPE; 510 info.si_errno = 0; 511 info.si_addr = (void __user *)regs->ip; 512 if (trapnr == X86_TRAP_MF) { 513 unsigned short cwd, swd; 514 /* 515 * (~cwd & swd) will mask out exceptions that are not set to unmasked 516 * status. 0x3f is the exception bits in these regs, 0x200 is the 517 * C1 reg you need in case of a stack fault, 0x040 is the stack 518 * fault bit. We should only be taking one exception at a time, 519 * so if this combination doesn't produce any single exception, 520 * then we have a bad program that isn't synchronizing its FPU usage 521 * and it will suffer the consequences since we won't be able to 522 * fully reproduce the context of the exception 523 */ 524 cwd = get_fpu_cwd(task); 525 swd = get_fpu_swd(task); 526 527 err = swd & ~cwd; 528 } else { 529 /* 530 * The SIMD FPU exceptions are handled a little differently, as there 531 * is only a single status/control register. Thus, to determine which 532 * unmasked exception was caught we must mask the exception mask bits 533 * at 0x1f80, and then use these to mask the exception bits at 0x3f. 534 */ 535 unsigned short mxcsr = get_fpu_mxcsr(task); 536 err = ~(mxcsr >> 7) & mxcsr; 537 } 538 539 if (err & 0x001) { /* Invalid op */ 540 /* 541 * swd & 0x240 == 0x040: Stack Underflow 542 * swd & 0x240 == 0x240: Stack Overflow 543 * User must clear the SF bit (0x40) if set 544 */ 545 info.si_code = FPE_FLTINV; 546 } else if (err & 0x004) { /* Divide by Zero */ 547 info.si_code = FPE_FLTDIV; 548 } else if (err & 0x008) { /* Overflow */ 549 info.si_code = FPE_FLTOVF; 550 } else if (err & 0x012) { /* Denormal, Underflow */ 551 info.si_code = FPE_FLTUND; 552 } else if (err & 0x020) { /* Precision */ 553 info.si_code = FPE_FLTRES; 554 } else { 555 /* 556 * If we're using IRQ 13, or supposedly even some trap 557 * X86_TRAP_MF implementations, it's possible 558 * we get a spurious trap, which is not an error. 559 */ 560 return; 561 } 562 force_sig_info(SIGFPE, &info, task); 563 } 564 565 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) 566 { 567 #ifdef CONFIG_X86_32 568 ignore_fpu_irq = 1; 569 #endif 570 exception_enter(regs); 571 math_error(regs, error_code, X86_TRAP_MF); 572 exception_exit(regs); 573 } 574 575 dotraplinkage void 576 do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 577 { 578 exception_enter(regs); 579 math_error(regs, error_code, X86_TRAP_XF); 580 exception_exit(regs); 581 } 582 583 dotraplinkage void 584 do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) 585 { 586 conditional_sti(regs); 587 #if 0 588 /* No need to warn about this any longer. */ 589 pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); 590 #endif 591 } 592 593 asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) 594 { 595 } 596 597 asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) 598 { 599 } 600 601 /* 602 * 'math_state_restore()' saves the current math information in the 603 * old math state array, and gets the new ones from the current task 604 * 605 * Careful.. There are problems with IBM-designed IRQ13 behaviour. 606 * Don't touch unless you *really* know how it works. 607 * 608 * Must be called with kernel preemption disabled (eg with local 609 * local interrupts as in the case of do_device_not_available). 610 */ 611 void math_state_restore(void) 612 { 613 struct task_struct *tsk = current; 614 615 if (!tsk_used_math(tsk)) { 616 local_irq_enable(); 617 /* 618 * does a slab alloc which can sleep 619 */ 620 if (init_fpu(tsk)) { 621 /* 622 * ran out of memory! 623 */ 624 do_group_exit(SIGKILL); 625 return; 626 } 627 local_irq_disable(); 628 } 629 630 __thread_fpu_begin(tsk); 631 632 /* 633 * Paranoid restore. send a SIGSEGV if we fail to restore the state. 634 */ 635 if (unlikely(restore_fpu_checking(tsk))) { 636 drop_init_fpu(tsk); 637 force_sig(SIGSEGV, tsk); 638 return; 639 } 640 641 tsk->fpu_counter++; 642 } 643 EXPORT_SYMBOL_GPL(math_state_restore); 644 645 dotraplinkage void __kprobes 646 do_device_not_available(struct pt_regs *regs, long error_code) 647 { 648 exception_enter(regs); 649 BUG_ON(use_eager_fpu()); 650 651 #ifdef CONFIG_MATH_EMULATION 652 if (read_cr0() & X86_CR0_EM) { 653 struct math_emu_info info = { }; 654 655 conditional_sti(regs); 656 657 info.regs = regs; 658 math_emulate(&info); 659 exception_exit(regs); 660 return; 661 } 662 #endif 663 math_state_restore(); /* interrupts still off */ 664 #ifdef CONFIG_X86_32 665 conditional_sti(regs); 666 #endif 667 exception_exit(regs); 668 } 669 670 #ifdef CONFIG_X86_32 671 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) 672 { 673 siginfo_t info; 674 675 exception_enter(regs); 676 local_irq_enable(); 677 678 info.si_signo = SIGILL; 679 info.si_errno = 0; 680 info.si_code = ILL_BADSTK; 681 info.si_addr = NULL; 682 if (notify_die(DIE_TRAP, "iret exception", regs, error_code, 683 X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) { 684 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, 685 &info); 686 } 687 exception_exit(regs); 688 } 689 #endif 690 691 /* Set of traps needed for early debugging. */ 692 void __init early_trap_init(void) 693 { 694 set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); 695 /* int3 can be called from all */ 696 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); 697 set_intr_gate(X86_TRAP_PF, &page_fault); 698 load_idt(&idt_descr); 699 } 700 701 void __init trap_init(void) 702 { 703 int i; 704 705 #ifdef CONFIG_EISA 706 void __iomem *p = early_ioremap(0x0FFFD9, 4); 707 708 if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) 709 EISA_bus = 1; 710 early_iounmap(p, 4); 711 #endif 712 713 set_intr_gate(X86_TRAP_DE, ÷_error); 714 set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); 715 /* int4 can be called from all */ 716 set_system_intr_gate(X86_TRAP_OF, &overflow); 717 set_intr_gate(X86_TRAP_BR, &bounds); 718 set_intr_gate(X86_TRAP_UD, &invalid_op); 719 set_intr_gate(X86_TRAP_NM, &device_not_available); 720 #ifdef CONFIG_X86_32 721 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); 722 #else 723 set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); 724 #endif 725 set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun); 726 set_intr_gate(X86_TRAP_TS, &invalid_TSS); 727 set_intr_gate(X86_TRAP_NP, &segment_not_present); 728 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); 729 set_intr_gate(X86_TRAP_GP, &general_protection); 730 set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug); 731 set_intr_gate(X86_TRAP_MF, &coprocessor_error); 732 set_intr_gate(X86_TRAP_AC, &alignment_check); 733 #ifdef CONFIG_X86_MCE 734 set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); 735 #endif 736 set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error); 737 738 /* Reserve all the builtin and the syscall vector: */ 739 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 740 set_bit(i, used_vectors); 741 742 #ifdef CONFIG_IA32_EMULATION 743 set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); 744 set_bit(IA32_SYSCALL_VECTOR, used_vectors); 745 #endif 746 747 #ifdef CONFIG_X86_32 748 set_system_trap_gate(SYSCALL_VECTOR, &system_call); 749 set_bit(SYSCALL_VECTOR, used_vectors); 750 #endif 751 752 /* 753 * Should be a barrier for any external CPU state: 754 */ 755 cpu_init(); 756 757 x86_init.irqs.trap_init(); 758 759 #ifdef CONFIG_X86_64 760 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); 761 set_nmi_gate(X86_TRAP_DB, &debug); 762 set_nmi_gate(X86_TRAP_BP, &int3); 763 #endif 764 } 765