/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>
#include <linux/iommu.h>
#include <linux/ubsan.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fred.h>
#include <asm/fpu/api.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>
#include <asm/tdx.h>
#include <asm/cfi.h>
#include <asm/msr.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#endif

#include <asm/proto.h>

DECLARE_BITMAP(system_vectors, NR_VECTORS);

__always_inline int is_valid_bugaddr(unsigned long addr)
{
        if (addr < TASK_SIZE_MAX)
                return 0;

        /*
         * We got #UD, if the text isn't readable we'd have gotten
         * a different exception.
         */
        return *(unsigned short *)addr == INSN_UD2;
}
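
/*
 * Illustration (not from the original source): UD2 encodes as the two
 * bytes 0f 0b.  Loaded as a little-endian u16 at the fault IP it reads
 * back as 0x0b0f, which is what the INSN_UD2 constant from <asm/bug.h>
 * is assumed to be here, so the check above is a plain two-byte compare.
 */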

/*
 * Check for UD1 or UD2, accounting for Address Size Override Prefixes.
 * If it's a UD1, further decode to determine its use:
 *
 * FineIBT:      ea                      (bad)
 * FineIBT:      f0 75 f9                lock jne . - 6
 * UBSan{0}:     67 0f b9 00             ud1    (%eax),%eax
 * UBSan{10}:    67 0f b9 40 10          ud1    0x10(%eax),%eax
 * static_call:  0f b9 cc                ud1    %esp,%ecx
 *
 * Notably UBSAN uses EAX, static_call uses ECX.
 */
__always_inline int decode_bug(unsigned long addr, s32 *imm, int *len)
{
        unsigned long start = addr;
        bool lock = false;
        u8 v;

        if (addr < TASK_SIZE_MAX)
                return BUG_NONE;

        v = *(u8 *)(addr++);
        if (v == INSN_ASOP)
                v = *(u8 *)(addr++);

        if (v == INSN_LOCK) {
                lock = true;
                v = *(u8 *)(addr++);
        }

        switch (v) {
        case 0x70 ... 0x7f: /* Jcc.d8 */
                addr += 1; /* d8 */
                *len = addr - start;
                WARN_ON_ONCE(!lock);
                return BUG_LOCK;

        case 0xea:
                *len = addr - start;
                return BUG_EA;

        case OPCODE_ESCAPE:
                break;

        default:
                return BUG_NONE;
        }

        v = *(u8 *)(addr++);
        if (v == SECOND_BYTE_OPCODE_UD2) {
                *len = addr - start;
                return BUG_UD2;
        }

        if (v != SECOND_BYTE_OPCODE_UD1)
                return BUG_NONE;

        *imm = 0;
        v = *(u8 *)(addr++);            /* ModRM */

        if (X86_MODRM_MOD(v) != 3 && X86_MODRM_RM(v) == 4)
                addr++;                 /* SIB */

        /* Decode immediate, if present */
        switch (X86_MODRM_MOD(v)) {
        case 0: if (X86_MODRM_RM(v) == 5)
                        addr += 4; /* RIP + disp32 */
                break;

        case 1: *imm = *(s8 *)addr;
                addr += 1;
                break;

        case 2: *imm = *(s32 *)addr;
                addr += 4;
                break;

        case 3: break;
        }

        /* record instruction length */
        *len = addr - start;

        if (X86_MODRM_REG(v) == 0)      /* EAX */
                return BUG_UD1_UBSAN;

        return BUG_UD1;
}
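
/*
 * Worked example (a sketch, not part of the original source), using the
 * encodings from the table above:
 *
 *   67 0f b9 40 10     ASOP prefix, 0f escape, UD1, ModRM 0x40
 *                      (mod=1, reg=0/EAX, rm=0), imm8 0x10
 *                      -> *imm = 0x10, *len = 5, returns BUG_UD1_UBSAN
 *
 *   f0 75 f9           LOCK prefix followed by Jcc.d8
 *                      -> *len = 3, returns BUG_LOCK
 *
 *   0f 0b              escape + UD2 -> *len = 2, returns BUG_UD2
 */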

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
                  struct pt_regs *regs, long error_code)
{
        if (v8086_mode(regs)) {
                /*
                 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < X86_TRAP_UD) {
                        if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                              error_code, trapnr))
                                return 0;
                }
        } else if (!user_mode(regs)) {
                if (fixup_exception(regs, trapnr, error_code, 0))
                        return 0;

                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = trapnr;
                die(str, regs, error_code);
        } else {
                if (fixup_vdso_exception(regs, trapnr, error_code, 0))
                        return 0;
        }

        /*
         * We want error_code and trap_nr set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults.  See also exc_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = trapnr;

        return -1;
}

static void show_signal(struct task_struct *tsk, int signr,
                        const char *type, const char *desc,
                        struct pt_regs *regs, long error_code)
{
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
                        tsk->comm, task_pid_nr(tsk), type, desc,
                        regs->ip, regs->sp, error_code);
                print_vma_addr(KERN_CONT " in ", regs->ip);
                pr_cont("\n");
        }
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, int sicode, void __user *addr)
{
        struct task_struct *tsk = current;

        if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
                return;

        show_signal(tsk, signr, "trap ", str, regs, error_code);

        if (!sicode)
                force_sig(signr);
        else
                force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
                          unsigned long trapnr, int signr, int sicode,
                          void __user *addr)
{
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
                        NOTIFY_STOP) {
                cond_local_irq_enable(regs);
                do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
                cond_local_irq_disable(regs);
        }
}

/*
 * POSIX requires the address of the faulting instruction to be provided
 * for SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
        return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
        do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
                      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
        do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
        do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
                      ILL_ILLOPN, error_get_trap_addr(regs));
}

static noinstr bool handle_bug(struct pt_regs *regs)
{
        unsigned long addr = regs->ip;
        bool handled = false;
        int ud_type, ud_len;
        s32 ud_imm;

        ud_type = decode_bug(addr, &ud_imm, &ud_len);
        if (ud_type == BUG_NONE)
                return handled;

        /*
         * All lies, just get the WARN/BUG out.
         */
        instrumentation_begin();
        /*
         * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
         * is a rare case that uses @regs without passing them to
         * irqentry_enter().
         */
        kmsan_unpoison_entry_regs(regs);
        /*
         * Since we're emulating a CALL with exceptions, restore the interrupt
         * state to what it was at the exception site.
         */
        if (regs->flags & X86_EFLAGS_IF)
                raw_local_irq_enable();

        switch (ud_type) {
        case BUG_UD2:
                if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
                        handled = true;
                        break;
                }
                fallthrough;

        case BUG_EA:
        case BUG_LOCK:
                if (handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
                        handled = true;
                        break;
                }
                break;

        case BUG_UD1_UBSAN:
                if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
                        pr_crit("%s at %pS\n",
                                report_ubsan_failure(ud_imm),
                                (void *)regs->ip);
                }
                break;

        default:
                break;
        }

        /*
         * When continuing, and regs->ip hasn't changed, move it to the next
         * instruction. When not continuing execution, restore the instruction
         * pointer.
         */
        if (handled) {
                if (regs->ip == addr)
                        regs->ip += ud_len;
        } else {
                regs->ip = addr;
        }

        if (regs->flags & X86_EFLAGS_IF)
                raw_local_irq_disable();
        instrumentation_end();

        return handled;
}
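
/*
 * Sketch of the WARN round trip handled above (illustrative, with details
 * assumed from the generic WARN/BUG machinery rather than spelled out in
 * this file): WARN_ON() compiles to a ud2 plus a __bug_table entry keyed
 * by the trapping IP.  report_bug() finds that entry, prints the warning
 * and returns BUG_TRAP_TYPE_WARN, after which the code above advances
 * regs->ip by ud_len so execution resumes after the 2-byte ud2.
 */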

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
        irqentry_state_t state;

        /*
         * We use UD2 as a short encoding for 'CALL __WARN', as such
         * handle it before exception entry to avoid recursive WARN
         * in case exception entry is the one triggering WARNs.
         */
        if (!user_mode(regs) && handle_bug(regs))
                return;

        state = irqentry_enter(regs);
        instrumentation_begin();
        handle_invalid_op(regs);
        instrumentation_end();
        irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
        do_error_trap(regs, 0, "coprocessor segment overrun",
                      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
        do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
                      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
        do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
                      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
        do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
                      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
        char *str = "alignment check";

        if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
                return;

        if (!user_mode(regs))
                die("Split lock detected\n", regs, error_code);

        local_irq_enable();

        if (handle_user_split_lock(regs, error_code))
                goto out;

        do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
                error_code, BUS_ADRALN, NULL);

out:
        local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
                                                unsigned long fault_address,
                                                struct stack_info *info)
{
        const char *name = stack_type_name(info->type);

        printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
               name, (void *)fault_address, info->begin, info->end);

        die("stack guard page", regs, 0);

        /* Be absolutely certain we don't return. */
        panic("%s stack guard hit", name);
}
#endif

/*
 * Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64
 * version of exc_double_fault() as noreturn.  Otherwise the noreturn mismatch
 * between configs triggers objtool warnings.
 *
 * This is a temporary hack until we have compiler or plugin support for
 * annotating noreturns.
 */
#ifdef CONFIG_X86_ESPFIX64
#define always_true() true
#else
bool always_true(void);
bool __weak always_true(void) { return true; }
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected.  Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 * be lost.  If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
        unsigned long address = read_cr2();
        struct stack_info info;
#endif

#ifdef CONFIG_X86_ESPFIX64
        extern unsigned char native_irq_return_iret[];

        /*
         * If IRET takes a non-IST fault on the espfix64 stack, then we
         * end up promoting it to a doublefault.  In that case, take
         * advantage of the fact that we're not using the normal (TSS.sp0)
         * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
         * and then modify our own IRET frame so that, when we return,
         * we land directly at the #GP(0) vector with the stack already
         * set up according to its expectations.
         *
         * The net result is that our #GP handler will think that we
         * entered from usermode with the bad user context.
         *
         * No need for nmi_enter() here because we don't use RCU.
         */
        if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
            regs->cs == __KERNEL_CS &&
            regs->ip == (unsigned long)native_irq_return_iret)
        {
                struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
                unsigned long *p = (unsigned long *)regs->sp;

                /*
                 * regs->sp points to the failing IRET frame on the
                 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
                 * in gpregs->ss through gpregs->ip.
                 */
                gpregs->ip      = p[0];
                gpregs->cs      = p[1];
                gpregs->flags   = p[2];
                gpregs->sp      = p[3];
                gpregs->ss      = p[4];
                gpregs->orig_ax = 0;    /* Missing (lost) #GP error code */

                /*
                 * Adjust our frame so that we return straight to the #GP
                 * vector with the expected RSP value.  This is safe because
                 * we won't enable interrupts or schedule before we invoke
                 * general_protection, so nothing will clobber the stack
                 * frame we just set up.
                 *
                 * We will enter general_protection with kernel GSBASE,
                 * which is what the stub expects, given that the faulting
                 * RIP will be the IRET instruction.
                 */
                regs->ip = (unsigned long)asm_exc_general_protection;
                regs->sp = (unsigned long)&gpregs->orig_ax;

                return;
        }
#endif

        irqentry_nmi_enter(regs);
        instrumentation_begin();
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
        /*
         * If we overflow the stack into a guard page, the CPU will fail
         * to deliver #PF and will send #DF instead.  Similarly, if we
         * take any non-IST exception while too close to the bottom of
         * the stack, the processor will get a page fault while
         * delivering the exception and will generate a double fault.
         *
         * According to the SDM (footnote in 6.15 under "Interrupt 14 -
         * Page-Fault Exception (#PF)"):
         *
         *   Processors update CR2 whenever a page fault is detected. If a
         *   second page fault occurs while an earlier page fault is being
         *   delivered, the faulting linear address of the second fault will
         *   overwrite the contents of CR2 (replacing the previous
         *   address). These updates to CR2 occur even if the page fault
         *   results in a double fault or occurs during the delivery of a
         *   double fault.
         *
         * The logic below has a small possibility of incorrectly diagnosing
         * some errors as stack overflows.  For example, if the IDT or GDT
         * gets corrupted such that #GP delivery fails due to a bad descriptor
         * causing #GP and we hit this condition while CR2 coincidentally
         * points to the stack guard page, we'll think we overflowed the
         * stack.  Given that we're going to panic one way or another
         * if this happens, this isn't necessarily worth fixing.
         *
         * If necessary, we could improve the test by only diagnosing
         * a stack overflow if the saved RSP points within 47 bytes of
         * the bottom of the stack: if RSP == tsk_stack + 48 and we
         * take an exception, the stack is already aligned and there
         * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
         * possible error code, so a stack overflow would *not* double
         * fault.  With any less space left, exception delivery could
         * fail, and, as a practical matter, we've overflowed the
         * stack even if the actual trigger for the double fault was
         * something else.
         */
        if (get_stack_guard_info((void *)address, &info))
                handle_stack_overflow(regs, address, &info);
#endif

        pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
        die("double fault", regs, error_code);
        if (always_true())
                panic("Machine halted.");
        instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
        if (notify_die(DIE_TRAP, "bounds", regs, 0,
                       X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
                return;
        cond_local_irq_enable(regs);

        if (!user_mode(regs))
                die("bounds", regs, 0);

        do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

        cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
        GP_NO_HINT,
        GP_NON_CANONICAL,
        GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller. Also, try to figure
 * out whether any part of the access to that address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
                                                 unsigned long *addr)
{
        u8 insn_buf[MAX_INSN_SIZE];
        struct insn insn;
        int ret;

        if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
                                     MAX_INSN_SIZE))
                return GP_NO_HINT;

        ret = insn_decode_kernel(&insn, insn_buf);
        if (ret < 0)
                return GP_NO_HINT;

        *addr = (unsigned long)insn_get_addr_ref(&insn, regs);
        if (*addr == -1UL)
                return GP_NO_HINT;

#ifdef CONFIG_X86_64
        /*
         * Check that:
         *  - the operand is not in the kernel half
         *  - the last byte of the operand is not in the user canonical half
         */
        if (*addr < ~__VIRTUAL_MASK &&
            *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
                return GP_NON_CANONICAL;
#endif

        return GP_CANONICAL;
}
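
/*
 * Worked example of the canonical check (illustrative; assumes 4-level
 * paging, where __VIRTUAL_MASK is (1UL << 47) - 1):
 *
 *   *addr = 0x0000800000000000, opnd_bytes = 8
 *     -> below ~__VIRTUAL_MASK (0xffff800000000000) and above
 *        __VIRTUAL_MASK (0x00007fffffffffff), i.e. the access falls in
 *        the non-canonical hole: GP_NON_CANONICAL.
 *
 *   *addr = 0x00007ffffffffff8, opnd_bytes = 8
 *     -> last byte is 0x00007fffffffffff, still user-canonical:
 *        GP_CANONICAL.
 */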

#define GPFSTR "general protection fault"

static bool fixup_iopl_exception(struct pt_regs *regs)
{
        struct thread_struct *t = &current->thread;
        unsigned char byte;
        unsigned long ip;

        if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
                return false;

        if (insn_get_effective_ip(regs, &ip))
                return false;

        if (get_user(byte, (const char __user *)ip))
                return false;

        /* CLI (0xfa) and STI (0xfb) are the only opcodes emulated here. */
        if (byte != 0xfa && byte != 0xfb)
                return false;

        if (!t->iopl_warn && printk_ratelimit()) {
                pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
                       current->comm, task_pid_nr(current), ip);
                print_vma_addr(KERN_CONT " in ", ip);
                pr_cont("\n");
                t->iopl_warn = 1;
        }

        regs->ip += 1;
        return true;
}

/*
 * The unprivileged ENQCMD instruction generates #GPs if the
 * IA32_PASID MSR has not been populated.  If possible, populate
 * the MSR from a PASID previously allocated to the mm.
 */
static bool try_fixup_enqcmd_gp(void)
{
#ifdef CONFIG_ARCH_HAS_CPU_PASID
        u32 pasid;

        /*
         * MSR_IA32_PASID is managed using XSAVE.  Directly
         * writing to the MSR is only possible when fpregs
         * are valid and the fpstate is not.  This is
         * guaranteed when handling a userspace exception
         * *before* interrupts are re-enabled.
         */
        lockdep_assert_irqs_disabled();

        /*
         * Hardware without ENQCMD will not generate
         * #GPs that can be fixed up here.
         */
        if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
                return false;

        /*
         * If the mm has not been allocated a
         * PASID, the #GP can not be fixed up.
         */
        if (!mm_valid_pasid(current->mm))
                return false;

        pasid = mm_get_enqcmd_pasid(current->mm);

        /*
         * Did this thread already have its PASID activated?
         * If so, the #GP must be from something else.
         */
        if (current->pasid_activated)
                return false;

        wrmsrq(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
        current->pasid_activated = 1;

        return true;
#else
        return false;
#endif
}

static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
                                    unsigned long error_code, const char *str,
                                    unsigned long address)
{
        if (fixup_exception(regs, trapnr, error_code, address))
                return true;

        current->thread.error_code = error_code;
        current->thread.trap_nr = trapnr;

        /*
         * To be potentially processing a kprobe fault and to trust the result
         * from kprobe_running(), we have to be non-preemptible.
         */
        if (!preemptible() && kprobe_running() &&
            kprobe_fault_handler(regs, trapnr))
                return true;

        return notify_die(DIE_GPF, str, regs, error_code, trapnr, SIGSEGV) == NOTIFY_STOP;
}

static void gp_user_force_sig_segv(struct pt_regs *regs, int trapnr,
                                   unsigned long error_code, const char *str)
{
        current->thread.error_code = error_code;
        current->thread.trap_nr = trapnr;
        show_signal(current, SIGSEGV, "", str, regs, error_code);
        force_sig(SIGSEGV);
}

DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
        char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
        enum kernel_gp_hint hint = GP_NO_HINT;
        unsigned long gp_addr;

        if (user_mode(regs) && try_fixup_enqcmd_gp())
                return;

        cond_local_irq_enable(regs);

        if (static_cpu_has(X86_FEATURE_UMIP)) {
                if (user_mode(regs) && fixup_umip_exception(regs))
                        goto exit;
        }

        if (v8086_mode(regs)) {
                local_irq_enable();
                handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
                local_irq_disable();
                return;
        }

        if (user_mode(regs)) {
                if (fixup_iopl_exception(regs))
                        goto exit;

                if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
                        goto exit;

                gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
                goto exit;
        }

        if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
                goto exit;

        if (error_code)
                snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
        else
                hint = get_kernel_gp_address(regs, &gp_addr);

        if (hint != GP_NO_HINT)
                snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
                         (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
                                                    : "maybe for address",
                         gp_addr);

        /*
         * KASAN is interested only in the non-canonical case, clear it
         * otherwise.
         */
        if (hint != GP_NON_CANONICAL)
                gp_addr = 0;

        die_addr(desc, regs, error_code, gp_addr);

exit:
        cond_local_irq_disable(regs);
}

static bool do_int3(struct pt_regs *regs)
{
        int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
                         SIGTRAP) == NOTIFY_STOP)
                return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
        if (kprobe_int3_handler(regs))
                return true;
#endif
        res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

        return res == NOTIFY_STOP;
}
NOKPROBE_SYMBOL(do_int3);

static void do_int3_user(struct pt_regs *regs)
{
        if (do_int3(regs))
                return;

        cond_local_irq_enable(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
        cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
        /*
         * smp_text_poke_int3_handler() is completely self-contained code;
         * it does (and must) *NOT* call out to anything, lest it hits upon
         * yet another INT3.
         */
        if (smp_text_poke_int3_handler(regs))
                return;

        /*
         * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
         * and therefore can trigger INT3, hence smp_text_poke_int3_handler()
         * must be done before.  If the entry came from kernel mode, then use
         * nmi_enter() because the INT3 could have been hit in any context
         * including NMI.
         */
        if (user_mode(regs)) {
                irqentry_enter_from_user_mode(regs);
                instrumentation_begin();
                do_int3_user(regs);
                instrumentation_end();
                irqentry_exit_to_user_mode(regs);
        } else {
                irqentry_state_t irq_state = irqentry_nmi_enter(regs);

                instrumentation_begin();
                if (!do_int3(regs))
                        die("int3", regs, 0);
                instrumentation_end();
                irqentry_nmi_exit(regs, irq_state);
        }
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode.  The actual stack switch is done in entry_64.S
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = (struct pt_regs *)current_top_of_stack() - 1;
        if (regs != eregs)
                *regs = *eregs;
        return regs;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
        unsigned long sp, *stack;
        struct stack_info info;
        struct pt_regs *regs_ret;

        /*
         * In the SYSCALL entry path the RSP value comes from user-space - don't
         * trust it and switch to the current kernel stack
         */
        if (ip_within_syscall_gap(regs)) {
                sp = current_top_of_stack();
                goto sync;
        }

        /*
         * From here on the RSP value is trusted. Now check whether entry
         * happened from a safe stack. Not safe are the entry or unknown stacks,
         * use the fall-back stack instead in this case.
         */
        sp    = regs->sp;
        stack = (unsigned long *)sp;

        if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
            info.type > STACK_TYPE_EXCEPTION_LAST)
                sp = __this_cpu_ist_top_va(VC2);

sync:
        /*
         * Found a safe stack - switch to it as if the entry didn't happen via
         * IST stack. The code below only copies pt_regs, the real switch happens
         * in assembly code.
         */
        sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

        regs_ret = (struct pt_regs *)sp;
        *regs_ret = *regs;

        return regs_ret;
}
#endif

asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
{
        struct pt_regs tmp, *new_stack;

        /*
         * This is called from entry_64.S early in handling a fault
         * caused by a bad iret to user mode.  To handle the fault
         * correctly, we want to move our stack frame to where it would
         * be had we entered directly on the entry stack (rather than
         * just below the IRET frame) and we want to pretend that the
         * exception came from the IRET target.
         */
        new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

        /* Copy the IRET target (ip, cs, flags, sp, ss) to the temporary storage. */
        __memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);

        /* Copy the remainder of the stack from the current stack. */
        __memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));

        /* Update the entry stack */
        __memcpy(new_stack, &tmp, sizeof(tmp));

        BUG_ON(!user_mode(new_stack));
        return new_stack;
}
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
        /*
         * We don't try for precision here.  If we're anywhere in the region of
         * code that can be single-stepped in the SYSENTER entry path, then
         * assume that this is a useless single-step trap due to SYSENTER
         * being invoked with TF set.  (We don't know in advance exactly
         * which instructions will be hit because BTF could plausibly
         * be set.)
         */
#ifdef CONFIG_X86_32
        return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
                (unsigned long)__end_SYSENTER_singlestep_region -
                (unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
        return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
                (unsigned long)__end_entry_SYSENTER_compat -
                (unsigned long)entry_SYSENTER_compat;
#else
        return false;
#endif
}

static __always_inline unsigned long debug_read_clear_dr6(void)
{
        unsigned long dr6;

        /*
         * The Intel SDM says:
         *
         *   Certain debug exceptions may clear bits 0-3. The remaining
         *   contents of the DR6 register are never cleared by the
         *   processor. To avoid confusion in identifying debug
         *   exceptions, debug handlers should clear the register before
         *   returning to the interrupted task.
         *
         * Keep it simple: clear DR6 immediately.
         */
        get_debugreg(dr6, 6);
        set_debugreg(DR6_RESERVED, 6);
        dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

        return dr6;
}
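
/*
 * Example of the polarity flip (illustrative; assumes DR6_RESERVED is
 * 0xffff0ff0 as in <asm/debugreg.h>): after a single-step trap the CPU
 * reports DR6 = 0xffff4ff0, i.e. BS (bit 14) set and the active-low bits
 * (e.g. RTM, bit 16) still reading as 1.  XORing with DR6_RESERVED
 * cancels the always-set reserved bits and inverts the active-low ones:
 *
 *   0xffff4ff0 ^ 0xffff0ff0 = 0x4000 = DR_STEP
 *
 * so callers can treat every remaining bit as "event happened".
 */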

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */

static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
        /*
         * Notifiers will clear bits in @dr6 to indicate the event has been
         * consumed - hw_breakpoint_handler(), single_stop_cont().
         *
         * Notifiers will set bits in @virtual_dr6 to indicate the desire
         * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
         */
        if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
                return true;

        return false;
}

static noinstr void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6)
{
        /*
         * Disable breakpoints during exception handling; recursive exceptions
         * are exceedingly 'fun'.
         *
         * Since this function is NOKPROBE, and that also applies to
         * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
         * HW_BREAKPOINT_W on our stack)
         *
         * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
         * includes the entry stack, is excluded for everything.
         *
         * For FRED, nested #DB should just work fine. But when a watchpoint or
         * breakpoint is set in the code path which is executed by #DB handler,
         * it results in an endless recursion and stack overflow. Thus we stay
         * with the IDT approach, i.e., save DR7 and disable #DB.
         */
        unsigned long dr7 = local_db_save();
        irqentry_state_t irq_state = irqentry_nmi_enter(regs);
        instrumentation_begin();

        /*
         * If something gets miswired and we end up here for a user mode
         * #DB, we will malfunction.
         */
        WARN_ON_ONCE(user_mode(regs));

        if (test_thread_flag(TIF_BLOCKSTEP)) {
                /*
                 * The SDM says "The processor clears the BTF flag when it
                 * generates a debug exception."  PTRACE_BLOCKSTEP requested
                 * it for userspace, and we just took a kernel #DB, so re-set
                 * BTF.
                 */
                unsigned long debugctl;

                rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
                debugctl |= DEBUGCTLMSR_BTF;
                wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
        }

        /*
         * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
         * watchpoint at the same time then that will still be handled.
         */
        if (!cpu_feature_enabled(X86_FEATURE_FRED) &&
            (dr6 & DR_STEP) && is_sysenter_singlestep(regs))
                dr6 &= ~DR_STEP;

        /*
         * The kernel doesn't use INT1
         */
        if (!dr6)
                goto out;

        if (notify_debug(regs, &dr6))
                goto out;

        /*
         * The kernel doesn't use TF single-step outside of:
         *
         *  - Kprobes, consumed through kprobe_debug_handler()
         *  - KGDB, consumed through notify_debug()
         *
         * So if we get here with DR_STEP set, something is wonky.
         *
         * A known way to trigger this is through QEMU's GDB stub,
         * which leaks #DB into the guest and causes IST recursion.
         */
        if (WARN_ON_ONCE(dr6 & DR_STEP))
                regs->flags &= ~X86_EFLAGS_TF;
out:
        instrumentation_end();
        irqentry_nmi_exit(regs, irq_state);

        local_db_restore(dr7);
}

static noinstr void exc_debug_user(struct pt_regs *regs, unsigned long dr6)
{
        bool icebp;

        /*
         * If something gets miswired and we end up here for a kernel mode
         * #DB, we will malfunction.
         */
        WARN_ON_ONCE(!user_mode(regs));

        /*
         * NB: We can't easily clear DR7 here because
         * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
         * user memory, etc.  This means that a recursive #DB is possible.  If
         * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
         * Since we're not on the IST stack right now, everything will be
         * fine.
         */

        irqentry_enter_from_user_mode(regs);
        instrumentation_begin();

        /*
         * Start the virtual/ptrace DR6 value with just the DR_STEP mask
         * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
         *
         * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
         * even if it is not the result of PTRACE_SINGLESTEP.
         */
        current->thread.virtual_dr6 = (dr6 & DR_STEP);

        /*
         * The SDM says "The processor clears the BTF flag when it
         * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
         * TIF_BLOCKSTEP in sync with the hardware BTF flag.
         */
        clear_thread_flag(TIF_BLOCKSTEP);

        /*
         * If dr6 has no reason to give us about the origin of this trap,
         * then it's very likely the result of an icebp/int01 trap.
         * User wants a sigtrap for that.
         */
        icebp = !dr6;

        if (notify_debug(regs, &dr6))
                goto out;

        /* It's safe to allow irq's after DR6 has been saved */
        local_irq_enable();

        if (v8086_mode(regs)) {
                handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
                goto out_irq;
        }

        /* #DB for bus lock can only be triggered from userspace. */
        if (dr6 & DR_BUS_LOCK)
                handle_bus_lock(regs);

        /* Add the virtual_dr6 bits for signals. */
        dr6 |= current->thread.virtual_dr6;
        if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
                send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
        local_irq_disable();
out:
        instrumentation_end();
        irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
        exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
        exc_debug_user(regs, debug_read_clear_dr6());
}

#ifdef CONFIG_X86_FRED
/*
 * Depending on the ring level it occurred at, i.e., user or kernel
 * context, #DB needs to be handled on a different stack: user #DB on
 * the current task stack, kernel #DB on a dedicated stack.
 *
 * This is exactly how FRED event delivery invokes an exception
 * handler: ring 3 event on level 0 stack, i.e., current task stack;
 * ring 0 event on the #DB dedicated stack specified in the
 * IA32_FRED_STKLVLS MSR. So unlike IDT, the FRED debug exception
 * entry stub doesn't do stack switch.
 */
DEFINE_FREDENTRY_DEBUG(exc_debug)
{
        /*
         * FRED #DB stores DR6 on the stack in the format which
         * debug_read_clear_dr6() returns for the IDT entry points.
         */
        unsigned long dr6 = fred_event_data(regs);

        if (user_mode(regs))
                exc_debug_user(regs, dr6);
        else
                exc_debug_kernel(regs, dr6);
}
#endif /* CONFIG_X86_FRED */

#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
        unsigned long dr6 = debug_read_clear_dr6();

        if (user_mode(regs))
                exc_debug_user(regs, dr6);
        else
                exc_debug_kernel(regs, dr6);
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
        struct task_struct *task = current;
        struct fpu *fpu = x86_task_fpu(task);
        int si_code;
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                              "simd exception";

        cond_local_irq_enable(regs);

        if (!user_mode(regs)) {
                if (fixup_exception(regs, trapnr, 0, 0))
                        goto exit;

                task->thread.error_code = 0;
                task->thread.trap_nr = trapnr;

                if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
                               SIGFPE) != NOTIFY_STOP)
                        die(str, regs, 0);
                goto exit;
        }

        /*
         * Synchronize the FPU register state to the memory register state
         * if necessary. This allows the exception handler to inspect it.
         */
        fpu_sync_fpstate(fpu);

        task->thread.trap_nr = trapnr;
        task->thread.error_code = 0;

        si_code = fpu__exception_code(fpu, trapnr);
        /* Retry when we get spurious exceptions: */
        if (!si_code)
                goto exit;

        if (fixup_vdso_exception(regs, trapnr, 0, 0))
                goto exit;

        force_sig_fault(SIGFPE, si_code,
                        (void __user *)uprobe_get_trap_addr(regs));
exit:
        cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY(exc_coprocessor_error)
{
        math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
        if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
                /* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
                if (!static_cpu_has(X86_FEATURE_XMM)) {
                        __exc_general_protection(regs, 0);
                        return;
                }
        }
        math_error(regs, X86_TRAP_XF);
}

DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
        /*
         * This addresses a Pentium Pro Erratum:
         *
         *   PROBLEM: If the APIC subsystem is configured in mixed mode with
         *   Virtual Wire mode implemented through the local APIC, an
         *   interrupt vector of 0Fh (Intel reserved encoding) may be
         *   generated by the local APIC (Int 15).  This vector may be
         *   generated upon receipt of a spurious interrupt (an interrupt
         *   which is removed before the system receives the INTA sequence)
         *   instead of the programmed 8259 spurious interrupt vector.
         *
         *   IMPLICATION: The spurious interrupt vector programmed in the
         *   8259 is normally handled by an operating system's spurious
         *   interrupt handler. However, a vector of 0Fh is unknown to some
         *   operating systems, which would crash if this erratum occurred.
         *
         * In theory this could be limited to 32bit, but the handler is not
         * hurting and who knows which other CPUs suffer from this.
         */
}

static bool handle_xfd_event(struct pt_regs *regs)
{
        u64 xfd_err;
        int err;

        if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
                return false;

        rdmsrq(MSR_IA32_XFD_ERR, xfd_err);
        if (!xfd_err)
                return false;

        wrmsrq(MSR_IA32_XFD_ERR, 0);

        /* Die if that happens in kernel space */
        if (WARN_ON(!user_mode(regs)))
                return false;

        local_irq_enable();

        err = xfd_enable_feature(xfd_err);

        switch (err) {
        case -EPERM:
                force_sig_fault(SIGILL, ILL_ILLOPC, error_get_trap_addr(regs));
                break;
        case -EFAULT:
                force_sig(SIGSEGV);
                break;
        }

        local_irq_disable();
        return true;
}
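
/*
 * Sketch of the XFD flow handled above (illustrative; the XTILEDATA bit
 * number is an assumption based on the XSTATE feature layout): a task
 * touches AMX state for the first time, e.g. with TILEZERO, while the
 * feature is still disarmed via XFD.  The CPU raises #NM and latches the
 * offending feature in MSR_IA32_XFD_ERR (bit 18 for XTILEDATA).  If the
 * task did the required arch_prctl(ARCH_REQ_XCOMP_PERM, ...) beforehand,
 * xfd_enable_feature() allocates the larger fpstate and the faulting
 * instruction is simply restarted; otherwise the task gets SIGILL.
 */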

DEFINE_IDTENTRY(exc_device_not_available)
{
        unsigned long cr0 = read_cr0();

        if (handle_xfd_event(regs))
                return;

#ifdef CONFIG_MATH_EMULATION
        if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
                struct math_emu_info info = { };

                cond_local_irq_enable(regs);

                info.regs = regs;
                math_emulate(&info);

                cond_local_irq_disable(regs);
                return;
        }
#endif

        /* This should not happen. */
        if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
                /* Try to fix it up and carry on. */
                write_cr0(cr0 & ~X86_CR0_TS);
        } else {
                /*
                 * Something terrible happened, and we're better off trying
                 * to kill the task than getting stuck in a never-ending
                 * loop of #NM faults.
                 */
                die("unexpected #NM exception", regs, 0);
        }
}

#ifdef CONFIG_INTEL_TDX_GUEST

#define VE_FAULT_STR "VE fault"

static void ve_raise_fault(struct pt_regs *regs, long error_code,
                           unsigned long address)
{
        if (user_mode(regs)) {
                gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
                return;
        }

        if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
                                    VE_FAULT_STR, address)) {
                return;
        }

        die_addr(VE_FAULT_STR, regs, error_code, address);
}

/*
 * Virtualization Exceptions (#VE) are delivered to TDX guests due to
 * specific guest actions which may happen in either user space or the
 * kernel:
 *
 *  * Specific instructions (WBINVD, for example)
 *  * Specific MSR accesses
 *  * Specific CPUID leaf accesses
 *  * Access to specific guest physical addresses
 *
 * In the settings that Linux will run in, virtualization exceptions are
 * never generated on accesses to normal, TD-private memory that has been
 * accepted (by BIOS or with tdx_enc_status_changed()).
 *
 * Syscall entry code has a critical window where the kernel stack is not
 * yet set up. Any exception in this window leads to hard to debug issues
 * and can be exploited for privilege escalation. Exceptions in the NMI
 * entry code also cause issues. Returning from the exception handler with
 * IRET will re-enable NMIs and nested NMI will corrupt the NMI stack.
 *
 * For these reasons, the kernel avoids #VEs during the syscall gap and
 * the NMI entry code. Entry code paths do not access TD-shared memory,
 * MMIO regions, use #VE triggering MSRs, instructions, or CPUID leaves
 * that might generate #VE. VMM can remove memory from TD at any point,
 * but access to unaccepted (or missing) private memory leads to VM
 * termination, not to #VE.
 *
 * Similarly to page faults and breakpoints, #VEs are allowed in NMI
 * handlers once the kernel is ready to deal with nested NMIs.
 *
 * During #VE delivery, all interrupts, including NMIs, are blocked until
 * TDGETVEINFO is called. It prevents #VE nesting until the kernel reads
 * the VE info.
 *
 * If a guest kernel action which would normally cause a #VE occurs in
 * the interrupt-disabled region before TDGETVEINFO, a #DF (fault
 * exception) is delivered to the guest which will result in an oops.
 *
 * The entry code has been carefully audited to follow these expectations.
 * Changes in the entry code have to be audited for correctness vs. this
 * aspect. Similarly to #PF, #VE in these places will expose the kernel to
 * privilege escalation or may lead to random crashes.
 */
DEFINE_IDTENTRY(exc_virtualization_exception)
{
        struct ve_info ve;

        /*
         * NMIs/Machine-checks/Interrupts will be in a disabled state
         * till TDGETVEINFO TDCALL is executed. This ensures that VE
         * info cannot be overwritten by a nested #VE.
         */
        tdx_get_ve_info(&ve);

        cond_local_irq_enable(regs);

        /*
         * If tdx_handle_virt_exception() could not process
         * it successfully, treat it as #GP(0) and handle it.
         */
        if (!tdx_handle_virt_exception(regs, &ve))
                ve_raise_fault(regs, 0, ve.gla);

        cond_local_irq_disable(regs);
}

#endif

#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
        local_irq_enable();
        if (notify_die(DIE_TRAP, "iret exception", regs, 0,
                       X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
                do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
                        ILL_BADSTK, (void __user *)NULL);
        }
        local_irq_disable();
}
#endif

void __init trap_init(void)
{
        /* Init cpu_entry_area before IST entries are set up */
        setup_cpu_entry_areas();

        /* Init GHCB memory pages when running as an SEV-ES guest */
        sev_es_init_vc_handling();

        /* Initialize TSS before setting up traps so ISTs work */
        cpu_init_exception_handling(true);

        /* Setup traps as cpu_init() might #GP */
        if (!cpu_feature_enabled(X86_FEATURE_FRED))
                idt_setup_traps();

        cpu_init();
}