/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        preempt_count_inc();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        preempt_count_dec();
}

enum ctx_state ist_enter(struct pt_regs *regs)
{
        enum ctx_state prev_state;

        if (user_mode(regs)) {
                /* Other than that, we're just an exception. */
                prev_state = exception_enter();
        } else {
                /*
                 * We might have interrupted pretty much anything.  In
                 * fact, if we're a machine check, we can even interrupt
                 * NMI processing.  We don't want in_nmi() to return true,
                 * but we need to notify RCU.
                 */
                rcu_nmi_enter();
                prev_state = CONTEXT_KERNEL;  /* the value is irrelevant. */
        }

        /*
         * We are atomic because we're on the IST stack (or we're on x86_32,
         * in which case we still shouldn't schedule).
         *
         * This must be after exception_enter(), because exception_enter()
         * won't do anything if in_interrupt() returns true.
         */
        preempt_count_add(HARDIRQ_OFFSET);

        /* This code is a bit fragile.  Test it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");

        return prev_state;
}

void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
{
        /* Must be before exception_exit. */
        preempt_count_sub(HARDIRQ_OFFSET);

        if (user_mode(regs))
                return exception_exit(prev_state);
        else
                rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:       regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
        BUG_ON(!user_mode(regs));

        /*
         * Sanity check: we need to be on the normal thread stack.  This
         * will catch asm bugs and any attempt to use ist_preempt_enable
         * from double_fault.
         */
        BUG_ON((unsigned long)(current_top_of_stack() -
                               current_stack_pointer()) >= THREAD_SIZE);

        preempt_count_sub(HARDIRQ_OFFSET);
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
        preempt_count_add(HARDIRQ_OFFSET);
}

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
                  struct pt_regs *regs, long error_code)
{
        if (v8086_mode(regs)) {
                /*
                 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < X86_TRAP_UD) {
                        if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                              error_code, trapnr))
                                return 0;
                }
                return -1;
        }

        if (!user_mode(regs)) {
                if (!fixup_exception(regs)) {
                        tsk->thread.error_code = error_code;
                        tsk->thread.trap_nr = trapnr;
                        die(str, regs, error_code);
                }
                return 0;
        }

        return -1;
}

static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
                                 siginfo_t *info)
{
        unsigned long siaddr;
        int sicode;

        switch (trapnr) {
        default:
                return SEND_SIG_PRIV;

        case X86_TRAP_DE:
                sicode = FPE_INTDIV;
                siaddr = uprobe_get_trap_addr(regs);
                break;
        case X86_TRAP_UD:
                sicode = ILL_ILLOPN;
                siaddr = uprobe_get_trap_addr(regs);
                break;
        case X86_TRAP_AC:
                sicode = BUS_ADRALN;
                siaddr = 0;
                break;
        }

        info->si_signo = signr;
        info->si_errno = 0;
        info->si_code = sicode;
        info->si_addr = (void __user *)siaddr;
        return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
                return;
        /*
         * We want error_code and trap_nr set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults.  See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                        tsk->comm, tsk->pid, str,
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                pr_cont("\n");
        }
#endif

        force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
                          unsigned long trapnr, int signr)
{
        enum ctx_state prev_state = exception_enter();
        siginfo_t info;

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
                        NOTIFY_STOP) {
                conditional_sti(regs);
                do_trap(trapnr, signr, str, regs, error_code,
                        fill_trap_info(regs, signr, trapnr, &info));
        }

        exception_exit(prev_state);
}

#define DO_ERROR(trapnr, signr, str, name)                              \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        do_error_trap(regs, error_code, str, trapnr, signr);            \
}

DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void
do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
        extern unsigned char native_irq_return_iret[];

        /*
         * If IRET takes a non-IST fault on the espfix64 stack, then we
         * end up promoting it to a doublefault.  In that case, modify
         * the stack to make it look like we just entered the #GP
         * handler from user space, similar to bad_iret.
         *
         * No need for ist_enter here because we don't use RCU.
         */
        if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
                regs->cs == __KERNEL_CS &&
                regs->ip == (unsigned long)native_irq_return_iret)
        {
                struct pt_regs *normal_regs = task_pt_regs(current);

                /* Fake a #GP(0) from userspace. */
                memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
                normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
                regs->ip = (unsigned long)general_protection;
                regs->sp = (unsigned long)&normal_regs->orig_ax;

                return;
        }
#endif

        ist_enter(regs);  /* Discard prev_state because we won't return. */
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
        df_debug(regs, error_code);
#endif
        /*
         * This is always a kernel trap and never fixable (and thus must
         * never return).
         */
        for (;;)
                die(str, regs, error_code);
}
#endif

dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;
        const struct bndcsr *bndcsr;
        siginfo_t *info;

        prev_state = exception_enter();
        if (notify_die(DIE_TRAP, "bounds", regs, error_code,
                        X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
                goto exit;
        conditional_sti(regs);

        if (!user_mode(regs))
                die("bounds", regs, error_code);

        if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
                /* The exception is not from Intel MPX */
                goto exit_trap;
        }

        /*
         * We need to look at BNDSTATUS to resolve this exception.
         * A NULL here might mean that it is in its 'init state',
         * which is all zeros which indicates MPX was not
         * responsible for the exception.
         */
        bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
        if (!bndcsr)
                goto exit_trap;

        trace_bounds_exception_mpx(bndcsr);
        /*
         * The error code field of the BNDSTATUS register communicates status
         * information of a bound range exception #BR or operation involving
         * bound directory.
         */
        switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
        case 2: /* Bound directory has invalid entry. */
                if (mpx_handle_bd_fault())
                        goto exit_trap;
                break; /* Success, it was handled */
        case 1: /* Bound violation. */
                info = mpx_generate_siginfo(regs);
                if (IS_ERR(info)) {
                        /*
                         * We failed to decode the MPX instruction.  Act as if
                         * the exception was not caused by MPX.
                         */
                        goto exit_trap;
                }
                /*
                 * Success, we decoded the instruction and retrieved
                 * an 'info' containing the address being accessed
                 * which caused the exception.  This information
                 * allows an application to possibly handle the
                 * #BR exception itself.
                 */
                do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
                kfree(info);
                break;
        case 0: /* No exception caused by Intel MPX operations. */
                goto exit_trap;
        default:
                die("bounds", regs, error_code);
        }

exit:
        exception_exit(prev_state);
        return;
exit_trap:
        /*
         * This path out is for all the cases where we could not
         * handle the exception in some way (like allocating a
         * table or telling userspace about it).  We will also end
         * up here if the kernel has MPX turned off at compile
         * time.
         */
        do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
        exception_exit(prev_state);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;
        enum ctx_state prev_state;

        prev_state = exception_enter();
        conditional_sti(regs);

        if (v8086_mode(regs)) {
                local_irq_enable();
                handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
                goto exit;
        }

        tsk = current;
        if (!user_mode(regs)) {
                if (fixup_exception(regs))
                        goto exit;

                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = X86_TRAP_GP;
                if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
                               X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
                        die("general protection fault", regs, error_code);
                goto exit;
        }

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_GP;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
            printk_ratelimit()) {
                pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
                        tsk->comm, task_pid_nr(tsk),
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                pr_cont("\n");
        }

        force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
        /*
         * ftrace must be first, everything else may cause a recursive crash.
         * See note by declaration of modifying_ftrace_code in ftrace.c
         */
        if (unlikely(atomic_read(&modifying_ftrace_code)) &&
            ftrace_int3_handler(regs))
                return;
#endif
        if (poke_int3_handler(regs))
                return;

        prev_state = ist_enter(regs);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                         SIGTRAP) == NOTIFY_STOP)
                goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
        if (kprobe_int3_handler(regs))
                goto exit;
#endif

        if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                       SIGTRAP) == NOTIFY_STOP)
                goto exit;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();
        preempt_conditional_sti(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();
exit:
        ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode.
 * The actual stack switch is done in entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = task_pt_regs(current);
        *regs = *eregs;
        return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
        void *error_entry_ret;
        struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
        /*
         * This is called from entry_64.S early in handling a fault
         * caused by a bad iret to user mode.  To handle the fault
         * correctly, we want to move our stack frame to task_pt_regs
         * and we want to pretend that the exception came from the
         * iret target.
         */
        struct bad_iret_stack *new_stack =
                container_of(task_pt_regs(current),
                             struct bad_iret_stack, regs);

        /* Copy the IRET target to the new stack. */
        memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

        /* Copy the remainder of the stack from the current stack. */
        memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

        BUG_ON(!user_mode(&new_stack->regs));
        return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        enum ctx_state prev_state;
        int user_icebp = 0;
        unsigned long dr6;
        int si_code;

        prev_state = ist_enter(regs);

        get_debugreg(dr6, 6);

        /* Filter out all the reserved bits which are preset to 1 */
        dr6 &= ~DR6_RESERVED;

        /*
         * If dr6 has no reason to give us about the origin of this trap,
         * then it's very likely the result of an icebp/int01 trap.
         * User wants a sigtrap for that.
         */
        if (!dr6 && user_mode(regs))
                user_icebp = 1;

        /* Catch kmemcheck conditions first of all! */
        if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
                goto exit;

        /* DR6 may or may not be cleared by the CPU */
        set_debugreg(0, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

        /* Store the virtualized DR6 value */
        tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
        if (kprobe_debug_handler(regs))
                goto exit;
#endif

        if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
                       SIGTRAP) == NOTIFY_STOP)
                goto exit;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();

        /* It's safe to allow irq's after DR6 has been saved */
        preempt_conditional_sti(regs);

        if (v8086_mode(regs)) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
                                 X86_TRAP_DB);
                preempt_conditional_cli(regs);
                debug_stack_usage_dec();
                goto exit;
        }

        /*
         * Single-stepping through system calls: ignore any exceptions in
         * kernel space, but re-enable TF when returning to user mode.
         *
         * We already checked v86 mode above, so we can check for kernel mode
         * by just checking the CPL of CS.
         */
        if ((dr6 & DR_STEP) && !user_mode(regs)) {
                tsk->thread.debugreg6 &= ~DR_STEP;
                set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
                regs->flags &= ~X86_EFLAGS_TF;
        }
        si_code = get_si_code(tsk->thread.debugreg6);
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                send_sigtrap(tsk, regs, error_code, si_code);
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();

exit:
        ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
        struct task_struct *task = current;
        struct fpu *fpu = &task->thread.fpu;
        siginfo_t info;
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                              "simd exception";

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                return;
        conditional_sti(regs);

        if (!user_mode(regs)) {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
                        task->thread.trap_nr = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

        /*
         * Save the info for the exception handler and clear the error.
         */
        fpu__save(fpu);

        task->thread.trap_nr = trapnr;
        task->thread.error_code = error_code;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *)uprobe_get_trap_addr(regs);

        info.si_code = fpu__exception_code(fpu, trapnr);

        /* Retry when we get spurious exceptions: */
        if (!info.si_code)
                return;

        force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        math_error(regs, error_code, X86_TRAP_MF);
        exception_exit(prev_state);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        math_error(regs, error_code, X86_TRAP_XF);
        exception_exit(prev_state);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
}

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };

                conditional_sti(regs);

                info.regs = regs;
                math_emulate(&info);
                exception_exit(prev_state);
                return;
        }
#endif
        fpu__restore(&current->thread.fpu); /* interrupts still off */
#ifdef CONFIG_X86_32
        conditional_sti(regs);
#endif
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
        siginfo_t info;
        enum ctx_state prev_state;

        prev_state = exception_enter();
        local_irq_enable();

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
        info.si_addr = NULL;
        if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
                       X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
                do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
                        &info);
        }
        exception_exit(prev_state);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
        /*
         * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
         * is ready in cpu_init() <-- trap_init(). Before trap_init(),
         * CPU runs at ring 0 so it is impossible to hit an invalid
         * stack.  Using the original stack works well enough at this
         * early stage. DEBUG_STACK will be equipped after cpu_init() in
         * trap_init().
         *
         * We don't need to set trace_idt_table like set_intr_gate(),
         * since we don't have trace_debug and it will be reset to
         * 'debug' in trap_init() by set_intr_gate_ist().
         */
        set_intr_gate_notrace(X86_TRAP_DB, debug);
        /* int3 can be called from all */
        set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
        set_intr_gate(X86_TRAP_PF, page_fault);
#endif
        load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
        set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
        int i;

#ifdef CONFIG_EISA
        void __iomem *p = early_ioremap(0x0FFFD9, 4);

        if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
                EISA_bus = 1;
        early_iounmap(p, 4);
#endif

        set_intr_gate(X86_TRAP_DE, divide_error);
        set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
        /* int4 can be called from all */
        set_system_intr_gate(X86_TRAP_OF, &overflow);
        set_intr_gate(X86_TRAP_BR, bounds);
        set_intr_gate(X86_TRAP_UD, invalid_op);
        set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
        set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
        set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
        set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
        set_intr_gate(X86_TRAP_TS, invalid_TSS);
        set_intr_gate(X86_TRAP_NP, segment_not_present);
        set_intr_gate(X86_TRAP_SS, stack_segment);
        set_intr_gate(X86_TRAP_GP, general_protection);
        set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
        set_intr_gate(X86_TRAP_MF, coprocessor_error);
        set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
        set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

        /* Reserve all the builtin and the syscall vector: */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
        set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
        set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
        set_system_trap_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
        set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

        /*
         * Set the IDT descriptor to a fixed read-only location, so that the
         * "sidt" instruction will not leak the location of the kernel, and
         * to defend the IDT against arbitrary memory write vulnerabilities.
         * It will be reloaded in cpu_init().
         */
        __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
        idt_descr.address = fix_to_virt(FIX_RO_IDT);

        /*
         * Should be a barrier for any external CPU state:
         */
        cpu_init();

        /*
         * X86_TRAP_DB and X86_TRAP_BP have been set
         * in early_trap_init(). However, IST works only after
         * cpu_init() loads TSS. See comments in early_trap_init().
         */
        set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
        /* int3 can be called from all */
        set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);

        x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
        memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
        set_nmi_gate(X86_TRAP_DB, &debug);
        set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}