1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs 4 * 5 * Pentium III FXSR, SSE support 6 * Gareth Hughes <gareth@valinux.com>, May 2000 7 */ 8 9 /* 10 * Handle hardware traps and faults. 11 */ 12 #include <linux/interrupt.h> 13 #include <linux/kallsyms.h> 14 #include <linux/spinlock.h> 15 #include <linux/kprobes.h> 16 #include <linux/uaccess.h> 17 #include <linux/kdebug.h> 18 #include <linux/kgdb.h> 19 #include <linux/kernel.h> 20 #include <linux/module.h> 21 #include <linux/ptrace.h> 22 #include <linux/string.h> 23 #include <linux/delay.h> 24 #include <linux/errno.h> 25 #include <linux/kexec.h> 26 #include <linux/sched.h> 27 #include <linux/timer.h> 28 #include <linux/init.h> 29 #include <linux/bug.h> 30 #include <linux/nmi.h> 31 #include <linux/mm.h> 32 #include <linux/smp.h> 33 #include <linux/io.h> 34 35 #ifdef CONFIG_EISA 36 #include <linux/ioport.h> 37 #include <linux/eisa.h> 38 #endif 39 40 #if defined(CONFIG_EDAC) 41 #include <linux/edac.h> 42 #endif 43 44 #include <asm/kmemcheck.h> 45 #include <asm/stacktrace.h> 46 #include <asm/processor.h> 47 #include <asm/debugreg.h> 48 #include <linux/atomic.h> 49 #include <asm/ftrace.h> 50 #include <asm/traps.h> 51 #include <asm/desc.h> 52 #include <asm/i387.h> 53 #include <asm/fpu-internal.h> 54 #include <asm/mce.h> 55 56 #include <asm/mach_traps.h> 57 58 #ifdef CONFIG_X86_64 59 #include <asm/x86_init.h> 60 #include <asm/pgalloc.h> 61 #include <asm/proto.h> 62 #else 63 #include <asm/processor-flags.h> 64 #include <asm/setup.h> 65 66 asmlinkage int system_call(void); 67 68 /* Do we ignore FPU interrupts ? */ 69 char ignore_fpu_irq; 70 71 /* 72 * The IDT has to be page-aligned to simplify the Pentium 73 * F0 0F bug workaround. 74 */ 75 gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; 76 #endif 77 78 DECLARE_BITMAP(used_vectors, NR_VECTORS); 79 EXPORT_SYMBOL_GPL(used_vectors); 80 81 static inline void conditional_sti(struct pt_regs *regs) 82 { 83 if (regs->flags & X86_EFLAGS_IF) 84 local_irq_enable(); 85 } 86 87 static inline void preempt_conditional_sti(struct pt_regs *regs) 88 { 89 inc_preempt_count(); 90 if (regs->flags & X86_EFLAGS_IF) 91 local_irq_enable(); 92 } 93 94 static inline void conditional_cli(struct pt_regs *regs) 95 { 96 if (regs->flags & X86_EFLAGS_IF) 97 local_irq_disable(); 98 } 99 100 static inline void preempt_conditional_cli(struct pt_regs *regs) 101 { 102 if (regs->flags & X86_EFLAGS_IF) 103 local_irq_disable(); 104 dec_preempt_count(); 105 } 106 107 static void __kprobes 108 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, 109 long error_code, siginfo_t *info) 110 { 111 struct task_struct *tsk = current; 112 113 #ifdef CONFIG_X86_32 114 if (regs->flags & X86_VM_MASK) { 115 /* 116 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. 117 * On nmi (interrupt 2), do_trap should not be called. 118 */ 119 if (trapnr < X86_TRAP_UD) 120 goto vm86_trap; 121 goto trap_signal; 122 } 123 #endif 124 125 if (!user_mode(regs)) 126 goto kernel_trap; 127 128 #ifdef CONFIG_X86_32 129 trap_signal: 130 #endif 131 /* 132 * We want error_code and trap_nr set for userspace faults and 133 * kernelspace faults which result in die(), but not 134 * kernelspace faults which are fixed up. die() gives the 135 * process no chance to handle the signal and notice the 136 * kernel fault information, so that won't result in polluting 137 * the information about previously queued, but not yet 138 * delivered, faults. See also do_general_protection below. 139 */ 140 tsk->thread.error_code = error_code; 141 tsk->thread.trap_nr = trapnr; 142 143 #ifdef CONFIG_X86_64 144 if (show_unhandled_signals && unhandled_signal(tsk, signr) && 145 printk_ratelimit()) { 146 printk(KERN_INFO 147 "%s[%d] trap %s ip:%lx sp:%lx error:%lx", 148 tsk->comm, tsk->pid, str, 149 regs->ip, regs->sp, error_code); 150 print_vma_addr(" in ", regs->ip); 151 printk("\n"); 152 } 153 #endif 154 155 if (info) 156 force_sig_info(signr, info, tsk); 157 else 158 force_sig(signr, tsk); 159 return; 160 161 kernel_trap: 162 if (!fixup_exception(regs)) { 163 tsk->thread.error_code = error_code; 164 tsk->thread.trap_nr = trapnr; 165 die(str, regs, error_code); 166 } 167 return; 168 169 #ifdef CONFIG_X86_32 170 vm86_trap: 171 if (handle_vm86_trap((struct kernel_vm86_regs *) regs, 172 error_code, trapnr)) 173 goto trap_signal; 174 return; 175 #endif 176 } 177 178 #define DO_ERROR(trapnr, signr, str, name) \ 179 dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 180 { \ 181 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 182 == NOTIFY_STOP) \ 183 return; \ 184 conditional_sti(regs); \ 185 do_trap(trapnr, signr, str, regs, error_code, NULL); \ 186 } 187 188 #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 189 dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 190 { \ 191 siginfo_t info; \ 192 info.si_signo = signr; \ 193 info.si_errno = 0; \ 194 info.si_code = sicode; \ 195 info.si_addr = (void __user *)siaddr; \ 196 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 197 == NOTIFY_STOP) \ 198 return; \ 199 conditional_sti(regs); \ 200 do_trap(trapnr, signr, str, regs, error_code, &info); \ 201 } 202 203 DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, 204 regs->ip) 205 DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) 206 DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) 207 DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, 208 regs->ip) 209 DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", 210 coprocessor_segment_overrun) 211 DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) 212 DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) 213 #ifdef CONFIG_X86_32 214 DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) 215 #endif 216 DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, 217 BUS_ADRALN, 0) 218 219 #ifdef CONFIG_X86_64 220 /* Runs on IST stack */ 221 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 222 { 223 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 224 X86_TRAP_SS, SIGBUS) == NOTIFY_STOP) 225 return; 226 preempt_conditional_sti(regs); 227 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); 228 preempt_conditional_cli(regs); 229 } 230 231 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) 232 { 233 static const char str[] = "double fault"; 234 struct task_struct *tsk = current; 235 236 /* Return not checked because double check cannot be ignored */ 237 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); 238 239 tsk->thread.error_code = error_code; 240 tsk->thread.trap_nr = X86_TRAP_DF; 241 242 /* 243 * This is always a kernel trap and never fixable (and thus must 244 * never return). 245 */ 246 for (;;) 247 die(str, regs, error_code); 248 } 249 #endif 250 251 dotraplinkage void __kprobes 252 do_general_protection(struct pt_regs *regs, long error_code) 253 { 254 struct task_struct *tsk; 255 256 conditional_sti(regs); 257 258 #ifdef CONFIG_X86_32 259 if (regs->flags & X86_VM_MASK) 260 goto gp_in_vm86; 261 #endif 262 263 tsk = current; 264 if (!user_mode(regs)) 265 goto gp_in_kernel; 266 267 tsk->thread.error_code = error_code; 268 tsk->thread.trap_nr = X86_TRAP_GP; 269 270 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 271 printk_ratelimit()) { 272 printk(KERN_INFO 273 "%s[%d] general protection ip:%lx sp:%lx error:%lx", 274 tsk->comm, task_pid_nr(tsk), 275 regs->ip, regs->sp, error_code); 276 print_vma_addr(" in ", regs->ip); 277 printk("\n"); 278 } 279 280 force_sig(SIGSEGV, tsk); 281 return; 282 283 #ifdef CONFIG_X86_32 284 gp_in_vm86: 285 local_irq_enable(); 286 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); 287 return; 288 #endif 289 290 gp_in_kernel: 291 if (fixup_exception(regs)) 292 return; 293 294 tsk->thread.error_code = error_code; 295 tsk->thread.trap_nr = X86_TRAP_GP; 296 if (notify_die(DIE_GPF, "general protection fault", regs, error_code, 297 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP) 298 return; 299 die("general protection fault", regs, error_code); 300 } 301 302 /* May run on IST stack. */ 303 dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) 304 { 305 #ifdef CONFIG_DYNAMIC_FTRACE 306 /* ftrace must be first, everything else may cause a recursive crash */ 307 if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs)) 308 return; 309 #endif 310 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 311 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 312 SIGTRAP) == NOTIFY_STOP) 313 return; 314 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 315 316 if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 317 SIGTRAP) == NOTIFY_STOP) 318 return; 319 320 /* 321 * Let others (NMI) know that the debug stack is in use 322 * as we may switch to the interrupt stack. 323 */ 324 debug_stack_usage_inc(); 325 preempt_conditional_sti(regs); 326 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); 327 preempt_conditional_cli(regs); 328 debug_stack_usage_dec(); 329 } 330 331 #ifdef CONFIG_X86_64 332 /* 333 * Help handler running on IST stack to switch back to user stack 334 * for scheduling or signal handling. The actual stack switch is done in 335 * entry.S 336 */ 337 asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) 338 { 339 struct pt_regs *regs = eregs; 340 /* Did already sync */ 341 if (eregs == (struct pt_regs *)eregs->sp) 342 ; 343 /* Exception from user space */ 344 else if (user_mode(eregs)) 345 regs = task_pt_regs(current); 346 /* 347 * Exception from kernel and interrupts are enabled. Move to 348 * kernel process stack. 349 */ 350 else if (eregs->flags & X86_EFLAGS_IF) 351 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); 352 if (eregs != regs) 353 *regs = *eregs; 354 return regs; 355 } 356 #endif 357 358 /* 359 * Our handling of the processor debug registers is non-trivial. 360 * We do not clear them on entry and exit from the kernel. Therefore 361 * it is possible to get a watchpoint trap here from inside the kernel. 362 * However, the code in ./ptrace.c has ensured that the user can 363 * only set watchpoints on userspace addresses. Therefore the in-kernel 364 * watchpoint trap can only occur in code which is reading/writing 365 * from user space. Such code must not hold kernel locks (since it 366 * can equally take a page fault), therefore it is safe to call 367 * force_sig_info even though that claims and releases locks. 368 * 369 * Code in ./signal.c ensures that the debug control register 370 * is restored before we deliver any signal, and therefore that 371 * user code runs with the correct debug control register even though 372 * we clear it here. 373 * 374 * Being careful here means that we don't have to be as careful in a 375 * lot of more complicated places (task switching can be a bit lazy 376 * about restoring all the debug state, and ptrace doesn't have to 377 * find every occurrence of the TF bit that could be saved away even 378 * by user code) 379 * 380 * May run on IST stack. 381 */ 382 dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) 383 { 384 struct task_struct *tsk = current; 385 int user_icebp = 0; 386 unsigned long dr6; 387 int si_code; 388 389 get_debugreg(dr6, 6); 390 391 /* Filter out all the reserved bits which are preset to 1 */ 392 dr6 &= ~DR6_RESERVED; 393 394 /* 395 * If dr6 has no reason to give us about the origin of this trap, 396 * then it's very likely the result of an icebp/int01 trap. 397 * User wants a sigtrap for that. 398 */ 399 if (!dr6 && user_mode(regs)) 400 user_icebp = 1; 401 402 /* Catch kmemcheck conditions first of all! */ 403 if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) 404 return; 405 406 /* DR6 may or may not be cleared by the CPU */ 407 set_debugreg(0, 6); 408 409 /* 410 * The processor cleared BTF, so don't mark that we need it set. 411 */ 412 clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP); 413 414 /* Store the virtualized DR6 value */ 415 tsk->thread.debugreg6 = dr6; 416 417 if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, 418 SIGTRAP) == NOTIFY_STOP) 419 return; 420 421 /* 422 * Let others (NMI) know that the debug stack is in use 423 * as we may switch to the interrupt stack. 424 */ 425 debug_stack_usage_inc(); 426 427 /* It's safe to allow irq's after DR6 has been saved */ 428 preempt_conditional_sti(regs); 429 430 if (regs->flags & X86_VM_MASK) { 431 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 432 X86_TRAP_DB); 433 preempt_conditional_cli(regs); 434 debug_stack_usage_dec(); 435 return; 436 } 437 438 /* 439 * Single-stepping through system calls: ignore any exceptions in 440 * kernel space, but re-enable TF when returning to user mode. 441 * 442 * We already checked v86 mode above, so we can check for kernel mode 443 * by just checking the CPL of CS. 444 */ 445 if ((dr6 & DR_STEP) && !user_mode(regs)) { 446 tsk->thread.debugreg6 &= ~DR_STEP; 447 set_tsk_thread_flag(tsk, TIF_SINGLESTEP); 448 regs->flags &= ~X86_EFLAGS_TF; 449 } 450 si_code = get_si_code(tsk->thread.debugreg6); 451 if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) 452 send_sigtrap(tsk, regs, error_code, si_code); 453 preempt_conditional_cli(regs); 454 debug_stack_usage_dec(); 455 456 return; 457 } 458 459 /* 460 * Note that we play around with the 'TS' bit in an attempt to get 461 * the correct behaviour even in the presence of the asynchronous 462 * IRQ13 behaviour 463 */ 464 void math_error(struct pt_regs *regs, int error_code, int trapnr) 465 { 466 struct task_struct *task = current; 467 siginfo_t info; 468 unsigned short err; 469 char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : 470 "simd exception"; 471 472 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) 473 return; 474 conditional_sti(regs); 475 476 if (!user_mode_vm(regs)) 477 { 478 if (!fixup_exception(regs)) { 479 task->thread.error_code = error_code; 480 task->thread.trap_nr = trapnr; 481 die(str, regs, error_code); 482 } 483 return; 484 } 485 486 /* 487 * Save the info for the exception handler and clear the error. 488 */ 489 save_init_fpu(task); 490 task->thread.trap_nr = trapnr; 491 task->thread.error_code = error_code; 492 info.si_signo = SIGFPE; 493 info.si_errno = 0; 494 info.si_addr = (void __user *)regs->ip; 495 if (trapnr == X86_TRAP_MF) { 496 unsigned short cwd, swd; 497 /* 498 * (~cwd & swd) will mask out exceptions that are not set to unmasked 499 * status. 0x3f is the exception bits in these regs, 0x200 is the 500 * C1 reg you need in case of a stack fault, 0x040 is the stack 501 * fault bit. We should only be taking one exception at a time, 502 * so if this combination doesn't produce any single exception, 503 * then we have a bad program that isn't synchronizing its FPU usage 504 * and it will suffer the consequences since we won't be able to 505 * fully reproduce the context of the exception 506 */ 507 cwd = get_fpu_cwd(task); 508 swd = get_fpu_swd(task); 509 510 err = swd & ~cwd; 511 } else { 512 /* 513 * The SIMD FPU exceptions are handled a little differently, as there 514 * is only a single status/control register. Thus, to determine which 515 * unmasked exception was caught we must mask the exception mask bits 516 * at 0x1f80, and then use these to mask the exception bits at 0x3f. 517 */ 518 unsigned short mxcsr = get_fpu_mxcsr(task); 519 err = ~(mxcsr >> 7) & mxcsr; 520 } 521 522 if (err & 0x001) { /* Invalid op */ 523 /* 524 * swd & 0x240 == 0x040: Stack Underflow 525 * swd & 0x240 == 0x240: Stack Overflow 526 * User must clear the SF bit (0x40) if set 527 */ 528 info.si_code = FPE_FLTINV; 529 } else if (err & 0x004) { /* Divide by Zero */ 530 info.si_code = FPE_FLTDIV; 531 } else if (err & 0x008) { /* Overflow */ 532 info.si_code = FPE_FLTOVF; 533 } else if (err & 0x012) { /* Denormal, Underflow */ 534 info.si_code = FPE_FLTUND; 535 } else if (err & 0x020) { /* Precision */ 536 info.si_code = FPE_FLTRES; 537 } else { 538 /* 539 * If we're using IRQ 13, or supposedly even some trap 540 * X86_TRAP_MF implementations, it's possible 541 * we get a spurious trap, which is not an error. 542 */ 543 return; 544 } 545 force_sig_info(SIGFPE, &info, task); 546 } 547 548 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) 549 { 550 #ifdef CONFIG_X86_32 551 ignore_fpu_irq = 1; 552 #endif 553 554 math_error(regs, error_code, X86_TRAP_MF); 555 } 556 557 dotraplinkage void 558 do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 559 { 560 math_error(regs, error_code, X86_TRAP_XF); 561 } 562 563 dotraplinkage void 564 do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) 565 { 566 conditional_sti(regs); 567 #if 0 568 /* No need to warn about this any longer. */ 569 printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); 570 #endif 571 } 572 573 asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) 574 { 575 } 576 577 asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) 578 { 579 } 580 581 /* 582 * 'math_state_restore()' saves the current math information in the 583 * old math state array, and gets the new ones from the current task 584 * 585 * Careful.. There are problems with IBM-designed IRQ13 behaviour. 586 * Don't touch unless you *really* know how it works. 587 * 588 * Must be called with kernel preemption disabled (eg with local 589 * local interrupts as in the case of do_device_not_available). 590 */ 591 void math_state_restore(void) 592 { 593 struct task_struct *tsk = current; 594 595 if (!tsk_used_math(tsk)) { 596 local_irq_enable(); 597 /* 598 * does a slab alloc which can sleep 599 */ 600 if (init_fpu(tsk)) { 601 /* 602 * ran out of memory! 603 */ 604 do_group_exit(SIGKILL); 605 return; 606 } 607 local_irq_disable(); 608 } 609 610 __thread_fpu_begin(tsk); 611 /* 612 * Paranoid restore. send a SIGSEGV if we fail to restore the state. 613 */ 614 if (unlikely(restore_fpu_checking(tsk))) { 615 __thread_fpu_end(tsk); 616 force_sig(SIGSEGV, tsk); 617 return; 618 } 619 620 tsk->fpu_counter++; 621 } 622 EXPORT_SYMBOL_GPL(math_state_restore); 623 624 dotraplinkage void __kprobes 625 do_device_not_available(struct pt_regs *regs, long error_code) 626 { 627 #ifdef CONFIG_MATH_EMULATION 628 if (read_cr0() & X86_CR0_EM) { 629 struct math_emu_info info = { }; 630 631 conditional_sti(regs); 632 633 info.regs = regs; 634 math_emulate(&info); 635 return; 636 } 637 #endif 638 math_state_restore(); /* interrupts still off */ 639 #ifdef CONFIG_X86_32 640 conditional_sti(regs); 641 #endif 642 } 643 644 #ifdef CONFIG_X86_32 645 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) 646 { 647 siginfo_t info; 648 local_irq_enable(); 649 650 info.si_signo = SIGILL; 651 info.si_errno = 0; 652 info.si_code = ILL_BADSTK; 653 info.si_addr = NULL; 654 if (notify_die(DIE_TRAP, "iret exception", regs, error_code, 655 X86_TRAP_IRET, SIGILL) == NOTIFY_STOP) 656 return; 657 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, 658 &info); 659 } 660 #endif 661 662 /* Set of traps needed for early debugging. */ 663 void __init early_trap_init(void) 664 { 665 set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); 666 /* int3 can be called from all */ 667 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); 668 set_intr_gate(X86_TRAP_PF, &page_fault); 669 load_idt(&idt_descr); 670 } 671 672 void __init trap_init(void) 673 { 674 int i; 675 676 #ifdef CONFIG_EISA 677 void __iomem *p = early_ioremap(0x0FFFD9, 4); 678 679 if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) 680 EISA_bus = 1; 681 early_iounmap(p, 4); 682 #endif 683 684 set_intr_gate(X86_TRAP_DE, ÷_error); 685 set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); 686 /* int4 can be called from all */ 687 set_system_intr_gate(X86_TRAP_OF, &overflow); 688 set_intr_gate(X86_TRAP_BR, &bounds); 689 set_intr_gate(X86_TRAP_UD, &invalid_op); 690 set_intr_gate(X86_TRAP_NM, &device_not_available); 691 #ifdef CONFIG_X86_32 692 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); 693 #else 694 set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); 695 #endif 696 set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun); 697 set_intr_gate(X86_TRAP_TS, &invalid_TSS); 698 set_intr_gate(X86_TRAP_NP, &segment_not_present); 699 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); 700 set_intr_gate(X86_TRAP_GP, &general_protection); 701 set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug); 702 set_intr_gate(X86_TRAP_MF, &coprocessor_error); 703 set_intr_gate(X86_TRAP_AC, &alignment_check); 704 #ifdef CONFIG_X86_MCE 705 set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); 706 #endif 707 set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error); 708 709 /* Reserve all the builtin and the syscall vector: */ 710 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 711 set_bit(i, used_vectors); 712 713 #ifdef CONFIG_IA32_EMULATION 714 set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); 715 set_bit(IA32_SYSCALL_VECTOR, used_vectors); 716 #endif 717 718 #ifdef CONFIG_X86_32 719 set_system_trap_gate(SYSCALL_VECTOR, &system_call); 720 set_bit(SYSCALL_VECTOR, used_vectors); 721 #endif 722 723 /* 724 * Should be a barrier for any external CPU state: 725 */ 726 cpu_init(); 727 728 x86_init.irqs.trap_init(); 729 730 #ifdef CONFIG_X86_64 731 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); 732 set_nmi_gate(X86_TRAP_DB, &debug); 733 set_nmi_gate(X86_TRAP_BP, &int3); 734 #endif 735 } 736