/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>
#include <linux/iommu.h>
#include <linux/ubsan.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fred.h>
#include <asm/fpu/api.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>
#include <asm/tdx.h>
#include <asm/cfi.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#endif

#include <asm/proto.h>

DECLARE_BITMAP(system_vectors, NR_VECTORS);

__always_inline int is_valid_bugaddr(unsigned long addr)
{
        if (addr < TASK_SIZE_MAX)
                return 0;

        /*
         * We got #UD; if the text hadn't been readable we'd have gotten
         * a different exception.
         */
        return *(unsigned short *)addr == INSN_UD2;
}

/*
 * Check for UD1 or UD2, accounting for Address Size Override Prefixes.
 * If it's a UD1, get the ModRM byte to pass along to UBSan.
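 *
 * As an illustrative example (not from the original source): the byte
 * sequence 0f b9 40 2a decodes as "ud1 0x2a(%rax),%eax" -- ModRM 0x40
 * has mod == 1, so the type value 0x2a is carried in an 8-bit
 * displacement, which is what decode_bug() below extracts.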
 */
__always_inline int decode_bug(unsigned long addr, u32 *imm)
{
        u8 v;

        if (addr < TASK_SIZE_MAX)
                return BUG_NONE;

        v = *(u8 *)(addr++);
        if (v == INSN_ASOP)
                v = *(u8 *)(addr++);
        if (v != OPCODE_ESCAPE)
                return BUG_NONE;

        v = *(u8 *)(addr++);
        if (v == SECOND_BYTE_OPCODE_UD2)
                return BUG_UD2;

        if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1)
                return BUG_NONE;

        /* Retrieve the immediate (type value) for the UBSAN UD1 */
        v = *(u8 *)(addr++);
        if (X86_MODRM_RM(v) == 4)
                addr++;

        *imm = 0;
        if (X86_MODRM_MOD(v) == 1)
                *imm = *(u8 *)addr;
        else if (X86_MODRM_MOD(v) == 2)
                *imm = *(u32 *)addr;
        else
                WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v));

        return BUG_UD1;
}


static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
                  struct pt_regs *regs, long error_code)
{
        if (v8086_mode(regs)) {
                /*
                 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < X86_TRAP_UD) {
                        if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                              error_code, trapnr))
                                return 0;
                }
        } else if (!user_mode(regs)) {
                if (fixup_exception(regs, trapnr, error_code, 0))
                        return 0;

                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = trapnr;
                die(str, regs, error_code);
        } else {
                if (fixup_vdso_exception(regs, trapnr, error_code, 0))
                        return 0;
        }

        /*
         * We want error_code and trap_nr set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults.  See also exc_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = trapnr;

        return -1;
}

static void show_signal(struct task_struct *tsk, int signr,
                        const char *type, const char *desc,
                        struct pt_regs *regs, long error_code)
{
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
                        tsk->comm, task_pid_nr(tsk), type, desc,
                        regs->ip, regs->sp, error_code);
                print_vma_addr(KERN_CONT " in ", regs->ip);
                pr_cont("\n");
        }
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, int sicode, void __user *addr)
{
        struct task_struct *tsk = current;

        if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
                return;

        show_signal(tsk, signr, "trap ", str, regs, error_code);

        if (!sicode)
                force_sig(signr);
        else
                force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
                          unsigned long trapnr, int signr, int sicode,
                          void __user *addr)
{
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
                        NOTIFY_STOP) {
                cond_local_irq_enable(regs);
                do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
                cond_local_irq_disable(regs);
        }
}

/*
 * POSIX requires that the address of the faulting instruction be provided
 * for SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe has moved the code
 * out of line, regs->ip points to the XOL code, which would confuse
 * anything that analyzes the fault address vs. the unmodified binary.  If
 * a trap happened in XOL code, uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
        return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
        do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
                      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
        do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
        do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
                      ILL_ILLOPN, error_get_trap_addr(regs));
}

static noinstr bool handle_bug(struct pt_regs *regs)
{
        bool handled = false;
        int ud_type;
        u32 imm;

        ud_type = decode_bug(regs->ip, &imm);
        if (ud_type == BUG_NONE)
                return handled;

        /*
         * All lies, just get the WARN/BUG out.
         */
        instrumentation_begin();
        /*
         * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
         * is a rare case that uses @regs without passing them to
         * irqentry_enter().
         */
        kmsan_unpoison_entry_regs(regs);
        /*
         * Since we're emulating a CALL with exceptions, restore the interrupt
         * state to what it was at the exception site.
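         *
         * (Background, for orientation: a WARN/BUG site is a UD2 plus an
         * entry in the __bug_table section; report_bug() below looks up
         * regs->ip in that table to find the file/line/flags.)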
         */
        if (regs->flags & X86_EFLAGS_IF)
                raw_local_irq_enable();
        if (ud_type == BUG_UD2) {
                if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
                    handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
                        regs->ip += LEN_UD2;
                        handled = true;
                }
        } else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
                pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip);
        }
        if (regs->flags & X86_EFLAGS_IF)
                raw_local_irq_disable();
        instrumentation_end();

        return handled;
}

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
        irqentry_state_t state;

        /*
         * We use UD2 as a short encoding for 'CALL __WARN', as such
         * handle it before exception entry to avoid recursive WARN
         * in case exception entry is the one triggering WARNs.
         */
        if (!user_mode(regs) && handle_bug(regs))
                return;

        state = irqentry_enter(regs);
        instrumentation_begin();
        handle_invalid_op(regs);
        instrumentation_end();
        irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
        do_error_trap(regs, 0, "coprocessor segment overrun",
                      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
        do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
                      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
        do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
                      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
        do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
                      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
        char *str = "alignment check";

        if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
                return;

        if (!user_mode(regs))
                die("Split lock detected\n", regs, error_code);

        local_irq_enable();

        if (handle_user_split_lock(regs, error_code))
                goto out;

        do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
                error_code, BUS_ADRALN, NULL);

out:
        local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
                                                unsigned long fault_address,
                                                struct stack_info *info)
{
        const char *name = stack_type_name(info->type);

        printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
               name, (void *)fault_address, info->begin, info->end);

        die("stack guard page", regs, 0);

        /* Be absolutely certain we don't return. */
        panic("%s stack guard hit", name);
}
#endif

/*
 * Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64
 * version of exc_double_fault() as noreturn.  Otherwise the noreturn
 * mismatch between configs triggers objtool warnings.
 *
 * This is a temporary hack until we have compiler or plugin support for
 * annotating noreturns.
 */
#ifdef CONFIG_X86_ESPFIX64
#define always_true() true
#else
bool always_true(void);
bool __weak always_true(void) { return true; }
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected.
 * Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 * be lost.  If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32-bit #DF shim already provides CR2 as an argument.  On 64-bit it
 * needs to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
        unsigned long address = read_cr2();
        struct stack_info info;
#endif

#ifdef CONFIG_X86_ESPFIX64
        extern unsigned char native_irq_return_iret[];

        /*
         * If IRET takes a non-IST fault on the espfix64 stack, then we
         * end up promoting it to a double fault.  In that case, take
         * advantage of the fact that we're not using the normal (TSS.sp0)
         * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
         * and then modify our own IRET frame so that, when we return,
         * we land directly at the #GP(0) vector with the stack already
         * set up according to its expectations.
         *
         * The net result is that our #GP handler will think that we
         * entered from usermode with the bad user context.
         *
         * No need for nmi_enter() here because we don't use RCU.
         */
        if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
            regs->cs == __KERNEL_CS &&
            regs->ip == (unsigned long)native_irq_return_iret)
        {
                struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
                unsigned long *p = (unsigned long *)regs->sp;

                /*
                 * regs->sp points to the failing IRET frame on the
                 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
                 * in gpregs->ss through gpregs->ip.
                 */
                gpregs->ip = p[0];
                gpregs->cs = p[1];
                gpregs->flags = p[2];
                gpregs->sp = p[3];
                gpregs->ss = p[4];
                gpregs->orig_ax = 0; /* Missing (lost) #GP error code */

                /*
                 * Adjust our frame so that we return straight to the #GP
                 * vector with the expected RSP value.  This is safe because
                 * we won't enable interrupts or schedule before we invoke
                 * general_protection, so nothing will clobber the stack
                 * frame we just set up.
                 *
                 * We will enter general_protection with kernel GSBASE,
                 * which is what the stub expects, given that the faulting
                 * RIP will be the IRET instruction.
                 */
                regs->ip = (unsigned long)asm_exc_general_protection;
                regs->sp = (unsigned long)&gpregs->orig_ax;

                return;
        }
#endif

        irqentry_nmi_enter(regs);
        instrumentation_begin();
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
        /*
         * If we overflow the stack into a guard page, the CPU will fail
         * to deliver #PF and will send #DF instead.
         * Similarly, if we take any non-IST exception while too close to
         * the bottom of the stack, the processor will get a page fault
         * while delivering the exception and will generate a double fault.
         *
         * According to the SDM (footnote in 6.15 under "Interrupt 14 -
         * Page-Fault Exception (#PF)"):
         *
         *   Processors update CR2 whenever a page fault is detected. If a
         *   second page fault occurs while an earlier page fault is being
         *   delivered, the faulting linear address of the second fault will
         *   overwrite the contents of CR2 (replacing the previous
         *   address). These updates to CR2 occur even if the page fault
         *   results in a double fault or occurs during the delivery of a
         *   double fault.
         *
         * The logic below has a small possibility of incorrectly diagnosing
         * some errors as stack overflows.  For example, if the IDT or GDT
         * gets corrupted such that #GP delivery fails due to a bad descriptor
         * causing #GP and we hit this condition while CR2 coincidentally
         * points to the stack guard page, we'll think we overflowed the
         * stack.  Given that we're going to panic one way or another
         * if this happens, this isn't necessarily worth fixing.
         *
         * If necessary, we could improve the test by only diagnosing
         * a stack overflow if the saved RSP points within 47 bytes of
         * the bottom of the stack: if RSP == tsk_stack + 48 and we
         * take an exception, the stack is already aligned and there
         * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
         * possible error code, so a stack overflow would *not* double
         * fault.  With any less space left, exception delivery could
         * fail, and, as a practical matter, we've overflowed the
         * stack even if the actual trigger for the double fault was
         * something else.
         */
        if (get_stack_guard_info((void *)address, &info))
                handle_stack_overflow(regs, address, &info);
#endif

        pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
        die("double fault", regs, error_code);
        if (always_true())
                panic("Machine halted.");
        instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
        if (notify_die(DIE_TRAP, "bounds", regs, 0,
                       X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
                return;
        cond_local_irq_enable(regs);

        if (!user_mode(regs))
                die("bounds", regs, 0);

        do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

        cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
        GP_NO_HINT,
        GP_NON_CANONICAL,
        GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller.  Also, try to figure
 * out whether any part of the access to that address was non-canonical.
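 *
 * A worked example, assuming 4-level paging (__VIRTUAL_MASK covering bits
 * 0-46): an 8-byte access at 0x00007fffffffffff starts below the kernel
 * half, but its last byte, 0x0000800000000006, is above __VIRTUAL_MASK,
 * so the access is reported as non-canonical.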
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
                                                 unsigned long *addr)
{
        u8 insn_buf[MAX_INSN_SIZE];
        struct insn insn;
        int ret;

        if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
                                     MAX_INSN_SIZE))
                return GP_NO_HINT;

        ret = insn_decode_kernel(&insn, insn_buf);
        if (ret < 0)
                return GP_NO_HINT;

        *addr = (unsigned long)insn_get_addr_ref(&insn, regs);
        if (*addr == -1UL)
                return GP_NO_HINT;

#ifdef CONFIG_X86_64
        /*
         * Check that:
         *  - the operand is not in the kernel half
         *  - the last byte of the operand is not in the user canonical half
         */
        if (*addr < ~__VIRTUAL_MASK &&
            *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
                return GP_NON_CANONICAL;
#endif

        return GP_CANONICAL;
}

#define GPFSTR "general protection fault"

static bool fixup_iopl_exception(struct pt_regs *regs)
{
        struct thread_struct *t = &current->thread;
        unsigned char byte;
        unsigned long ip;

        if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
                return false;

        if (insn_get_effective_ip(regs, &ip))
                return false;

        if (get_user(byte, (const char __user *)ip))
                return false;

        if (byte != 0xfa && byte != 0xfb)
                return false;

        if (!t->iopl_warn && printk_ratelimit()) {
                pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
                       current->comm, task_pid_nr(current), ip);
                print_vma_addr(KERN_CONT " in ", ip);
                pr_cont("\n");
                t->iopl_warn = 1;
        }

        regs->ip += 1;
        return true;
}

/*
 * The unprivileged ENQCMD instruction generates #GPs if the
 * IA32_PASID MSR has not been populated.  If possible, populate
 * the MSR from a PASID previously allocated to the mm.
 */
static bool try_fixup_enqcmd_gp(void)
{
#ifdef CONFIG_ARCH_HAS_CPU_PASID
        u32 pasid;

        /*
         * MSR_IA32_PASID is managed using XSAVE.  Directly
         * writing to the MSR is only possible when fpregs
         * are valid and the fpstate is not.  This is
         * guaranteed when handling a userspace exception
         * *before* interrupts are re-enabled.
         */
        lockdep_assert_irqs_disabled();

        /*
         * Hardware without ENQCMD will not generate
         * #GPs that can be fixed up here.
         */
        if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
                return false;

        /*
         * If the mm has not been allocated a
         * PASID, the #GP can not be fixed up.
         */
        if (!mm_valid_pasid(current->mm))
                return false;

        pasid = mm_get_enqcmd_pasid(current->mm);

        /*
         * Did this thread already have its PASID activated?
         * If so, the #GP must be from something else.
         */
        if (current->pasid_activated)
                return false;

        wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
        current->pasid_activated = 1;

        return true;
#else
        return false;
#endif
}

static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
                                    unsigned long error_code, const char *str,
                                    unsigned long address)
{
        if (fixup_exception(regs, trapnr, error_code, address))
                return true;

        current->thread.error_code = error_code;
        current->thread.trap_nr = trapnr;

        /*
         * To be potentially processing a kprobe fault and to trust the result
         * from kprobe_running(), we have to be non-preemptible.
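         *
         * (kprobe_running() reads a per-CPU variable; with preemption
         * enabled its result could be stale by the time it is used.)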
         */
        if (!preemptible() && kprobe_running() &&
            kprobe_fault_handler(regs, trapnr))
                return true;

        return notify_die(DIE_GPF, str, regs, error_code, trapnr, SIGSEGV) == NOTIFY_STOP;
}

static void gp_user_force_sig_segv(struct pt_regs *regs, int trapnr,
                                   unsigned long error_code, const char *str)
{
        current->thread.error_code = error_code;
        current->thread.trap_nr = trapnr;
        show_signal(current, SIGSEGV, "", str, regs, error_code);
        force_sig(SIGSEGV);
}

DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
        char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
        enum kernel_gp_hint hint = GP_NO_HINT;
        unsigned long gp_addr;

        if (user_mode(regs) && try_fixup_enqcmd_gp())
                return;

        cond_local_irq_enable(regs);

        if (static_cpu_has(X86_FEATURE_UMIP)) {
                if (user_mode(regs) && fixup_umip_exception(regs))
                        goto exit;
        }

        if (v8086_mode(regs)) {
                local_irq_enable();
                handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
                local_irq_disable();
                return;
        }

        if (user_mode(regs)) {
                if (fixup_iopl_exception(regs))
                        goto exit;

                if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
                        goto exit;

                gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
                goto exit;
        }

        if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
                goto exit;

        if (error_code)
                snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
        else
                hint = get_kernel_gp_address(regs, &gp_addr);

        if (hint != GP_NO_HINT)
                snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
                         (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
                                                    : "maybe for address",
                         gp_addr);

        /*
         * KASAN is interested only in the non-canonical case; clear the
         * address otherwise.
         */
        if (hint != GP_NON_CANONICAL)
                gp_addr = 0;

        die_addr(desc, regs, error_code, gp_addr);

exit:
        cond_local_irq_disable(regs);
}

static bool do_int3(struct pt_regs *regs)
{
        int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
                         SIGTRAP) == NOTIFY_STOP)
                return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
        if (kprobe_int3_handler(regs))
                return true;
#endif
        res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

        return res == NOTIFY_STOP;
}
NOKPROBE_SYMBOL(do_int3);

static void do_int3_user(struct pt_regs *regs)
{
        if (do_int3(regs))
                return;

        cond_local_irq_enable(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
        cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
        /*
         * poke_int3_handler() is completely self-contained code; it does (and
         * must) *NOT* call out to anything, lest it hits upon yet another
         * INT3.
         */
        if (poke_int3_handler(regs))
                return;

        /*
         * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
         * and therefore can trigger INT3, hence poke_int3_handler() must
         * be done before.  If the entry came from kernel mode, then use
         * nmi_enter() because the INT3 could have been hit in any context
         * including NMI.
         */
        if (user_mode(regs)) {
                irqentry_enter_from_user_mode(regs);
                instrumentation_begin();
                do_int3_user(regs);
                instrumentation_end();
                irqentry_exit_to_user_mode(regs);
        } else {
                irqentry_state_t irq_state = irqentry_nmi_enter(regs);

                instrumentation_begin();
                if (!do_int3(regs))
                        die("int3", regs, 0);
                instrumentation_end();
                irqentry_nmi_exit(regs, irq_state);
        }
}

#ifdef CONFIG_X86_64
/*
 * Help a handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode.  The actual stack switch is done in entry_64.S.
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = (struct pt_regs *)current_top_of_stack() - 1;
        if (regs != eregs)
                *regs = *eregs;
        return regs;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
        unsigned long sp, *stack;
        struct stack_info info;
        struct pt_regs *regs_ret;

        /*
         * In the SYSCALL entry path the RSP value comes from user space --
         * don't trust it and switch to the current kernel stack.
         */
        if (ip_within_syscall_gap(regs)) {
                sp = current_top_of_stack();
                goto sync;
        }

        /*
         * From here on the RSP value is trusted.  Now check whether entry
         * happened from a safe stack.  The entry and unknown stacks are not
         * safe; use the fall-back stack in that case.
         */
        sp    = regs->sp;
        stack = (unsigned long *)sp;

        if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
            info.type > STACK_TYPE_EXCEPTION_LAST)
                sp = __this_cpu_ist_top_va(VC2);

sync:
        /*
         * Found a safe stack -- switch to it as if the entry didn't happen via
         * the IST stack.  The code below only copies pt_regs; the real switch
         * happens in assembly code.
         */
        sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

        regs_ret = (struct pt_regs *)sp;
        *regs_ret = *regs;

        return regs_ret;
}
#endif

asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
{
        struct pt_regs tmp, *new_stack;

        /*
         * This is called from entry_64.S early in handling a fault
         * caused by a bad iret to user mode.  To handle the fault
         * correctly, we want to move our stack frame to where it would
         * be had we entered directly on the entry stack (rather than
         * just below the IRET frame) and we want to pretend that the
         * exception came from the IRET target.
         */
        new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

        /* Copy the IRET target to the temporary storage. */
        __memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);

        /* Copy the remainder of the stack from the current stack. */
        __memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));

        /* Update the entry stack */
        __memcpy(new_stack, &tmp, sizeof(tmp));

        BUG_ON(!user_mode(new_stack));
        return new_stack;
}
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
        /*
         * We don't try for precision here.  If we're anywhere in the region of
         * code that can be single-stepped in the SYSENTER entry path, then
         * assume that this is a useless single-step trap due to SYSENTER
         * being invoked with TF set.
         * (We don't know in advance exactly which instructions will be
         * hit because BTF could plausibly be set.)
         */
#ifdef CONFIG_X86_32
        return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
                (unsigned long)__end_SYSENTER_singlestep_region -
                (unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
        return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
                (unsigned long)__end_entry_SYSENTER_compat -
                (unsigned long)entry_SYSENTER_compat;
#else
        return false;
#endif
}

static __always_inline unsigned long debug_read_clear_dr6(void)
{
        unsigned long dr6;

        /*
         * The Intel SDM says:
         *
         *   Certain debug exceptions may clear bits 0-3. The remaining
         *   contents of the DR6 register are never cleared by the
         *   processor. To avoid confusion in identifying debug
         *   exceptions, debug handlers should clear the register before
         *   returning to the interrupted task.
         *
         * Keep it simple: clear DR6 immediately.
         */
        get_debugreg(dr6, 6);
        set_debugreg(DR6_RESERVED, 6);
        dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

        return dr6;
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel.  Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses.  Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space.  Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code).
 *
 * May run on IST stack.
 */

static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
        /*
         * Notifiers will clear bits in @dr6 to indicate the event has been
         * consumed - hw_breakpoint_handler(), single_stop_cont().
         *
         * Notifiers will set bits in @virtual_dr6 to indicate the desire
         * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
         */
        if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
                return true;

        return false;
}

static noinstr void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6)
{
        /*
         * Disable breakpoints during exception handling; recursive exceptions
         * are exceedingly 'fun'.
         *
         * Since this function is NOKPROBE, and that also applies to
         * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
         * HW_BREAKPOINT_W on our stack).
         *
         * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
         * includes the entry stack, is excluded for everything.
         *
         * For FRED, nested #DB should just work fine.
         * But when a watchpoint or breakpoint is set in the code path
         * which is executed by the #DB handler, it results in an endless
         * recursion and stack overflow.  Thus we stay with the IDT
         * approach, i.e., save DR7 and disable #DB.
         */
        unsigned long dr7 = local_db_save();
        irqentry_state_t irq_state = irqentry_nmi_enter(regs);
        instrumentation_begin();

        /*
         * If something gets miswired and we end up here for a user mode
         * #DB, we will malfunction.
         */
        WARN_ON_ONCE(user_mode(regs));

        if (test_thread_flag(TIF_BLOCKSTEP)) {
                /*
                 * The SDM says "The processor clears the BTF flag when it
                 * generates a debug exception."  However, PTRACE_BLOCKSTEP
                 * requested it for userspace and we just took a kernel #DB,
                 * so re-set BTF.
                 */
                unsigned long debugctl;

                rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
                debugctl |= DEBUGCTLMSR_BTF;
                wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        }

        /*
         * Catch SYSENTER with TF set and clear DR_STEP.  If this hit a
         * watchpoint at the same time then that will still be handled.
         */
        if (!cpu_feature_enabled(X86_FEATURE_FRED) &&
            (dr6 & DR_STEP) && is_sysenter_singlestep(regs))
                dr6 &= ~DR_STEP;

        /*
         * The kernel doesn't use INT1.
         */
        if (!dr6)
                goto out;

        if (notify_debug(regs, &dr6))
                goto out;

        /*
         * The kernel doesn't use TF single-step outside of:
         *
         *  - Kprobes, consumed through kprobe_debug_handler()
         *  - KGDB, consumed through notify_debug()
         *
         * So if we get here with DR_STEP set, something is wonky.
         *
         * A known way to trigger this is through QEMU's GDB stub,
         * which leaks #DB into the guest and causes IST recursion.
         */
        if (WARN_ON_ONCE(dr6 & DR_STEP))
                regs->flags &= ~X86_EFLAGS_TF;
out:
        instrumentation_end();
        irqentry_nmi_exit(regs, irq_state);

        local_db_restore(dr7);
}

static noinstr void exc_debug_user(struct pt_regs *regs, unsigned long dr6)
{
        bool icebp;

        /*
         * If something gets miswired and we end up here for a kernel mode
         * #DB, we will malfunction.
         */
        WARN_ON_ONCE(!user_mode(regs));

        /*
         * NB: We can't easily clear DR7 here because
         * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
         * user memory, etc.  This means that a recursive #DB is possible.  If
         * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
         * Since we're not on the IST stack right now, everything will be
         * fine.
         */

        irqentry_enter_from_user_mode(regs);
        instrumentation_begin();

        /*
         * Start the virtual/ptrace DR6 value with just the DR_STEP mask
         * of the real DR6.  ptrace_triggered() will set the DR_TRAPn bits.
         *
         * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
         * even if it is not the result of PTRACE_SINGLESTEP.
         */
        current->thread.virtual_dr6 = (dr6 & DR_STEP);

        /*
         * The SDM says "The processor clears the BTF flag when it
         * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
         * TIF_BLOCKSTEP in sync with the hardware BTF flag.
         */
        clear_thread_flag(TIF_BLOCKSTEP);

        /*
         * If dr6 gives us no reason for the origin of this trap, then it's
         * very likely the result of an icebp/int01 trap.  The user wants a
         * sigtrap for that.
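         *
         * (ICEBP, opcode 0xf1, raises #DB without setting any DR6 status
         * bits, which is why an all-clear DR6 is taken as the hint here.)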
         */
        icebp = !dr6;

        if (notify_debug(regs, &dr6))
                goto out;

        /* It's safe to allow irq's after DR6 has been saved */
        local_irq_enable();

        if (v8086_mode(regs)) {
                handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
                goto out_irq;
        }

        /* #DB for bus lock can only be triggered from userspace. */
        if (dr6 & DR_BUS_LOCK)
                handle_bus_lock(regs);

        /* Add the virtual_dr6 bits for signals. */
        dr6 |= current->thread.virtual_dr6;
        if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
                send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
        local_irq_disable();
out:
        instrumentation_end();
        irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
        exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
        exc_debug_user(regs, debug_read_clear_dr6());
}

#ifdef CONFIG_X86_FRED
/*
 * Depending on the ring level it occurred at, i.e., user or kernel
 * context, #DB needs to be handled on a different stack: a user #DB on
 * the current task stack, a kernel #DB on a dedicated stack.
 *
 * This is exactly how FRED event delivery invokes an exception handler:
 * a ring 3 event on the level 0 stack, i.e., the current task stack; a
 * ring 0 event on the #DB dedicated stack specified in the
 * IA32_FRED_STKLVLS MSR.  So unlike IDT, the FRED debug exception entry
 * stub doesn't do a stack switch.
 */
DEFINE_FREDENTRY_DEBUG(exc_debug)
{
        /*
         * FRED #DB stores DR6 on the stack in the format which
         * debug_read_clear_dr6() returns for the IDT entry points.
         */
        unsigned long dr6 = fred_event_data(regs);

        if (user_mode(regs))
                exc_debug_user(regs, dr6);
        else
                exc_debug_kernel(regs, dr6);
}
#endif /* CONFIG_X86_FRED */

#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
        unsigned long dr6 = debug_read_clear_dr6();

        if (user_mode(regs))
                exc_debug_user(regs, dr6);
        else
                exc_debug_kernel(regs, dr6);
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
        struct task_struct *task = current;
        struct fpu *fpu = &task->thread.fpu;
        int si_code;
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                              "simd exception";

        cond_local_irq_enable(regs);

        if (!user_mode(regs)) {
                if (fixup_exception(regs, trapnr, 0, 0))
                        goto exit;

                task->thread.error_code = 0;
                task->thread.trap_nr = trapnr;

                if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
                               SIGFPE) != NOTIFY_STOP)
                        die(str, regs, 0);
                goto exit;
        }

        /*
         * Synchronize the FPU register state to the in-memory state if
         * necessary.  This allows the exception handler to inspect it.
         */
        fpu_sync_fpstate(fpu);

        task->thread.trap_nr = trapnr;
        task->thread.error_code = 0;

        si_code = fpu__exception_code(fpu, trapnr);
        /* Retry when we get spurious exceptions: */
        if (!si_code)
                goto exit;

        if (fixup_vdso_exception(regs, trapnr, 0, 0))
                goto exit;

        force_sig_fault(SIGFPE, si_code,
                        (void __user *)uprobe_get_trap_addr(regs));
exit:
        cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY(exc_coprocessor_error)
{
        math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
        if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
                /* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
                if (!static_cpu_has(X86_FEATURE_XMM)) {
                        __exc_general_protection(regs, 0);
                        return;
                }
        }
        math_error(regs, X86_TRAP_XF);
}

DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
        /*
         * This addresses a Pentium Pro Erratum:
         *
         * PROBLEM: If the APIC subsystem is configured in mixed mode with
         * Virtual Wire mode implemented through the local APIC, an
         * interrupt vector of 0Fh (Intel reserved encoding) may be
         * generated by the local APIC (Int 15).  This vector may be
         * generated upon receipt of a spurious interrupt (an interrupt
         * which is removed before the system receives the INTA sequence)
         * instead of the programmed 8259 spurious interrupt vector.
         *
         * IMPLICATION: The spurious interrupt vector programmed in the
         * 8259 is normally handled by an operating system's spurious
         * interrupt handler.  However, a vector of 0Fh is unknown to some
         * operating systems, which would crash if this erratum occurred.
         *
         * In theory this could be limited to 32-bit, but the handler does
         * no harm and who knows which other CPUs suffer from this.
         */
}

static bool handle_xfd_event(struct pt_regs *regs)
{
        u64 xfd_err;
        int err;

        if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
                return false;

        rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
        if (!xfd_err)
                return false;

        wrmsrl(MSR_IA32_XFD_ERR, 0);

        /* Die if that happens in kernel space */
        if (WARN_ON(!user_mode(regs)))
                return false;

        local_irq_enable();

        err = xfd_enable_feature(xfd_err);

        switch (err) {
        case -EPERM:
                force_sig_fault(SIGILL, ILL_ILLOPC, error_get_trap_addr(regs));
                break;
        case -EFAULT:
                force_sig(SIGSEGV);
                break;
        }

        local_irq_disable();
        return true;
}

DEFINE_IDTENTRY(exc_device_not_available)
{
        unsigned long cr0 = read_cr0();

        if (handle_xfd_event(regs))
                return;

#ifdef CONFIG_MATH_EMULATION
        if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
                struct math_emu_info info = { };

                cond_local_irq_enable(regs);

                info.regs = regs;
                math_emulate(&info);

                cond_local_irq_disable(regs);
                return;
        }
#endif

        /* This should not happen. */
        if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
                /* Try to fix it up and carry on. */
                write_cr0(cr0 & ~X86_CR0_TS);
        } else {
                /*
                 * Something terrible happened, and we're better off trying
                 * to kill the task than getting stuck in a never-ending
                 * loop of #NM faults.
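                 *
                 * (With eager FPU switching the kernel never sets CR0.TS
                 * itself, so a #NM with TS clear, no pending XFD event and
                 * no math emulation has no remaining legitimate cause.)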
                 */
                die("unexpected #NM exception", regs, 0);
        }
}

#ifdef CONFIG_INTEL_TDX_GUEST

#define VE_FAULT_STR "VE fault"

static void ve_raise_fault(struct pt_regs *regs, long error_code,
                           unsigned long address)
{
        if (user_mode(regs)) {
                gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
                return;
        }

        if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
                                    VE_FAULT_STR, address)) {
                return;
        }

        die_addr(VE_FAULT_STR, regs, error_code, address);
}

/*
 * Virtualization Exceptions (#VE) are delivered to TDX guests due to
 * specific guest actions, which may happen in either user space or the
 * kernel:
 *
 *  * Specific instructions (WBINVD, for example)
 *  * Specific MSR accesses
 *  * Specific CPUID leaf accesses
 *  * Access to specific guest physical addresses
 *
 * In the settings that Linux will run in, virtualization exceptions are
 * never generated on accesses to normal, TD-private memory that has been
 * accepted (by BIOS or with tdx_enc_status_changed()).
 *
 * Syscall entry code has a critical window where the kernel stack is not
 * yet set up.  Any exception in this window leads to hard-to-debug issues
 * and can be exploited for privilege escalation.  Exceptions in the NMI
 * entry code also cause issues.  Returning from the exception handler with
 * IRET will re-enable NMIs, and a nested NMI will corrupt the NMI stack.
 *
 * For these reasons, the kernel avoids #VEs during the syscall gap and
 * the NMI entry code.  Entry code paths do not access TD-shared memory or
 * MMIO regions, and do not use #VE-triggering MSRs, instructions, or
 * CPUID leaves that might generate #VE.  The VMM can remove memory from
 * the TD at any point, but access to unaccepted (or missing) private
 * memory leads to VM termination, not to #VE.
 *
 * Similarly to page faults and breakpoints, #VEs are allowed in NMI
 * handlers once the kernel is ready to deal with nested NMIs.
 *
 * During #VE delivery, all interrupts, including NMIs, are blocked until
 * TDGETVEINFO is called.  This prevents #VE nesting until the kernel
 * reads the VE info.
 *
 * If a guest kernel action which would normally cause a #VE occurs in
 * the interrupt-disabled region before TDGETVEINFO, a #DF (fault
 * exception) is delivered to the guest, which will result in an oops.
 *
 * The entry code has been audited carefully for following these
 * expectations.  Changes in the entry code have to be audited for
 * correctness vs. this aspect.  Similarly to #PF, #VE in these places
 * will expose the kernel to privilege escalation or may lead to random
 * crashes.
 */
DEFINE_IDTENTRY(exc_virtualization_exception)
{
        struct ve_info ve;

        /*
         * NMIs/Machine-checks/Interrupts will be in a disabled state
         * till the TDGETVEINFO TDCALL is executed.  This ensures that the
         * VE info cannot be overwritten by a nested #VE.
         */
        tdx_get_ve_info(&ve);

        cond_local_irq_enable(regs);

        /*
         * If tdx_handle_virt_exception() could not process
         * it successfully, treat it as #GP(0) and handle it.
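         *
         * (ve.gla is the guest linear address reported by TDGETVEINFO,
         * used as the fault address hint below.)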
         */
        if (!tdx_handle_virt_exception(regs, &ve))
                ve_raise_fault(regs, 0, ve.gla);

        cond_local_irq_disable(regs);
}

#endif

#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
        local_irq_enable();
        if (notify_die(DIE_TRAP, "iret exception", regs, 0,
                       X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
                do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
                        ILL_BADSTK, (void __user *)NULL);
        }
        local_irq_disable();
}
#endif

void __init trap_init(void)
{
        /* Init cpu_entry_area before IST entries are set up */
        setup_cpu_entry_areas();

        /* Init GHCB memory pages when running as an SEV-ES guest */
        sev_es_init_vc_handling();

        /* Initialize TSS before setting up traps so ISTs work */
        cpu_init_exception_handling(true);

        /* Setup traps as cpu_init() might #GP */
        if (!cpu_feature_enabled(X86_FEATURE_FRED))
                idt_setup_traps();

        cpu_init();
}