/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/static_call.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>
#include <linux/iommu.h>
#include <linux/ubsan.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fred.h>
#include <asm/fpu/api.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>
#include <asm/tdx.h>
#include <asm/cfi.h>
#include <asm/msr.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#endif

#include <asm/proto.h>

DECLARE_BITMAP(system_vectors, NR_VECTORS);
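
/*
 * Bitmap of IDT vectors that have a handler installed; set during IDT
 * setup and consulted e.g. when reserving system vectors for interrupt
 * allocation.
 */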

__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD, if the text isn't readable we'd have gotten
	 * a different exception.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}

/*
 * Check for UD1 or UD2, accounting for Address Size Override Prefixes.
 * If it's a UD1, further decode to determine its use:
 *
 * FineIBT:      d6                      udb
 * FineIBT:      f0 75 f9                lock jne . - 6
 * UBSan{0}:     67 0f b9 00             ud1    (%eax),%eax
 * UBSan{10}:    67 0f b9 40 10          ud1    0x10(%eax),%eax
 * static_call:  0f b9 cc                ud1    %esp,%ecx
 * __WARN_trap:  67 48 0f b9 3a          ud1    (%edx),%reg
 *
 * Notably, since __WARN_trap can use all registers, the distinction between
 * UD1 users is made through the ModRM R/M field.
 */
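/*
 * A worked example (illustrative), matching the UBSan{10} row above: the
 * sequence "67 0f b9 40 10" decodes as ASOP prefix (skipped), 0x0f escape,
 * 0xb9 (UD1), then ModRM 0x40 with MOD=1/RM=0, so an s8 immediate follows
 * and decode_bug() returns BUG_UD1_UBSAN with *imm = 0x10 and *len = 5.
 */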
__always_inline int decode_bug(unsigned long addr, s32 *imm, int *len)
{
	unsigned long start = addr;
	u8 v, reg, rm, rex = 0;
	int type = BUG_UD1;
	bool lock = false;

	if (addr < TASK_SIZE_MAX)
		return BUG_NONE;

	for (;;) {
		v = *(u8 *)(addr++);
		if (v == INSN_ASOP)
			continue;

		if (v == INSN_LOCK) {
			lock = true;
			continue;
		}

		if ((v & 0xf0) == 0x40) {
			rex = v;
			continue;
		}

		break;
	}

	switch (v) {
	case 0x70 ... 0x7f: /* Jcc.d8 */
		addr += 1; /* d8 */
		*len = addr - start;
		WARN_ON_ONCE(!lock);
		return BUG_LOCK;

	case 0xd6:
		*len = addr - start;
		return BUG_UDB;

	case OPCODE_ESCAPE:
		break;

	default:
		return BUG_NONE;
	}

	v = *(u8 *)(addr++);
	if (v == SECOND_BYTE_OPCODE_UD2) {
		*len = addr - start;
		return BUG_UD2;
	}

	if (v != SECOND_BYTE_OPCODE_UD1)
		return BUG_NONE;

	*imm = 0;
	v = *(u8 *)(addr++);	/* ModRM */

	if (X86_MODRM_MOD(v) != 3 && X86_MODRM_RM(v) == 4)
		addr++;		/* SIB */

	reg = X86_MODRM_REG(v) + 8*!!X86_REX_R(rex);
	rm  = X86_MODRM_RM(v)  + 8*!!X86_REX_B(rex);

	/* Decode immediate, if present */
	switch (X86_MODRM_MOD(v)) {
	case 0: if (X86_MODRM_RM(v) == 5)
			addr += 4; /* RIP + disp32 */

		if (rm == 0) /* (%eax) */
			type = BUG_UD1_UBSAN;

		if (rm == 2) { /* (%edx) */
			*imm = reg;
			type = BUG_UD1_WARN;
		}
		break;

	case 1: *imm = *(s8 *)addr;
		addr += 1;
		if (rm == 0) /* (%eax) */
			type = BUG_UD1_UBSAN;
		break;

	case 2: *imm = *(s32 *)addr;
		addr += 4;
		if (rm == 0) /* (%eax) */
			type = BUG_UD1_UBSAN;
		break;

	case 3: break;
	}

	/* record instruction length */
	*len = addr - start;

	return type;
}

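/*
 * Fetch the value of the register identified by @nr from @regs; @nr uses
 * the pt_regs_offset() numbering, as produced by decode_bug() from a UD1
 * ModRM byte. A bogus register number returns 0 after warning once.
 */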
static inline unsigned long pt_regs_val(struct pt_regs *regs, int nr)
{
	int offset = pt_regs_offset(regs, nr);
	if (WARN_ON_ONCE(offset < 0))
		return 0;
	return *((unsigned long *)((void *)regs + offset));
}

#ifdef HAVE_ARCH_BUG_FORMAT_ARGS
DEFINE_STATIC_CALL(WARN_trap, __WARN_trap);
EXPORT_STATIC_CALL_TRAMP(WARN_trap);

/*
 * Create a va_list from an exception context.
 */
void *__warn_args(struct arch_va_list *args, struct pt_regs *regs)
{
	/*
	 * Register save area; populate with function call argument registers
	 */
	args->regs[0] = regs->di;
	args->regs[1] = regs->si;
	args->regs[2] = regs->dx;
	args->regs[3] = regs->cx;
	args->regs[4] = regs->r8;
	args->regs[5] = regs->r9;

	/*
	 * From the ABI document:
	 *
	 * @gp_offset - the element holds the offset in bytes from
	 * reg_save_area to the place where the next available general purpose
	 * argument register is saved. In case all argument registers have
	 * been exhausted, it is set to the value 48 (6*8).
	 *
	 * @fp_offset - the element holds the offset in bytes from
	 * reg_save_area to the place where the next available floating point
	 * argument is saved. In case all argument registers have been
	 * exhausted, it is set to the value 176 (6*8 + 8*16)
	 *
	 * @overflow_arg_area - this pointer is used to fetch arguments passed
	 * on the stack. It is initialized with the address of the first
	 * argument passed on the stack, if any, and then always updated to
	 * point to the start of the next argument on the stack.
	 *
	 * @reg_save_area - the element points to the start of the register
	 * save area.
	 *
	 * Notably the vararg starts with the second argument and there are no
	 * floating point arguments in the kernel.
	 */
	args->args.gp_offset = 1*8;
	args->args.fp_offset = 6*8 + 8*16;
	args->args.reg_save_area = &args->regs;
	args->args.overflow_arg_area = (void *)regs->sp;

	/*
	 * If the exception came from __WARN_trap, there is a return
	 * address on the stack, skip that. This is why any __WARN_trap()
	 * caller must inhibit tail-call optimization.
	 */
	if ((void *)regs->ip == &__WARN_trap)
		args->args.overflow_arg_area += 8;

	return &args->args;
}
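
/*
 * A minimal sketch of the intended consumption (illustrative, assuming a
 * printf-style sink; not an excerpt from the generic WARN code):
 *
 *	struct arch_va_list args;
 *	va_list *ap = __warn_args(&args, regs);
 *
 *	vprintk(fmt, *ap);
 *
 * va_arg() then pulls arguments 2..6 from args.regs[1..5] via gp_offset
 * and any further arguments from overflow_arg_area, i.e. the stack at
 * the exception site.
 */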
#endif /* HAVE_ARCH_BUG_FORMAT_ARGS */

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	} else {
		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
			return 0;
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}

static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
	unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}

/*
 * POSIX requires the address of the faulting instruction to be provided for
 * SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}

noinstr bool handle_bug(struct pt_regs *regs)
{
	unsigned long addr = regs->ip;
	bool handled = false;
	int ud_type, ud_len;
	s32 ud_imm;

	ud_type = decode_bug(addr, &ud_imm, &ud_len);
	if (ud_type == BUG_NONE)
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
	 * is a rare case that uses @regs without passing them to
	 * irqentry_enter().
	 */
	kmsan_unpoison_entry_regs(regs);
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();

	switch (ud_type) {
	case BUG_UD1_WARN:
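		/*
		 * For UD1_WARN, ud_imm is the ModRM reg number stored by
		 * decode_bug(); the register it names holds the argument
		 * for report_bug_entry(), i.e. the address of this WARN
		 * site's bug entry.
		 */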
		if (report_bug_entry((void *)pt_regs_val(regs, ud_imm), regs) == BUG_TRAP_TYPE_WARN)
			handled = true;
		break;

	case BUG_UD2:
		if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
			handled = true;
			break;
		}
		fallthrough;

	case BUG_UDB:
	case BUG_LOCK:
		if (handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
			handled = true;
			break;
		}
		break;

	case BUG_UD1_UBSAN:
		if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
			pr_crit("%s at %pS\n",
				report_ubsan_failure(ud_imm),
				(void *)regs->ip);
		}
		break;

	default:
		break;
	}

	/*
	 * When continuing, and regs->ip hasn't changed, move it to the next
	 * instruction. When not continuing execution, restore the instruction
	 * pointer.
	 */
	if (handled) {
		if (regs->ip == addr)
			regs->ip += ud_len;
	} else {
		regs->ip = addr;
	}

	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a short encoding for 'CALL __WARN', as such
	 * handle it before exception entry to avoid recursive WARN
	 * in case exception entry is the one triggering WARNs.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		goto out;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

out:
	local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
						unsigned long fault_address,
						struct stack_info *info)
{
	const char *name = stack_type_name(info->type);

	printk(KERN_EMERG "BUG: %s stack guard page was hit at %px (stack is %px..%px)\n",
	       name, (void *)fault_address, info->begin, info->end);

	die("stack guard page", regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s stack guard hit", name);
}
#endif

/*
 * Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64
 * version of exc_double_fault() as noreturn.  Otherwise the noreturn mismatch
 * between configs triggers objtool warnings.
 *
 * This is a temporary hack until we have compiler or plugin support for
 * annotating noreturns.
 */
#ifdef CONFIG_X86_ESPFIX64
#define always_true() true
#else
bool always_true(void);
bool __weak always_true(void) { return true; }
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected.  Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 * be lost.  If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
	struct stack_info info;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		gpregs->ip	= p[0];
		gpregs->cs	= p[1];
		gpregs->flags	= p[2];
		gpregs->sp	= p[3];
		gpregs->ss	= p[4];
		gpregs->orig_ax = 0;	/* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	irqentry_nmi_enter(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	if (get_stack_guard_info((void *)address, &info))
		handle_stack_overflow(regs, address, &info);
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	if (always_true())
		panic("Machine halted.");
	instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL,
	GP_LASS_VIOLATION,
	GP_NULL_POINTER,
};

static const char * const kernel_gp_hint_help[] = {
	[GP_NON_CANONICAL]	= "probably for non-canonical address",
	[GP_CANONICAL]		= "maybe for address",
	[GP_LASS_VIOLATION]	= "probably LASS violation for address",
	[GP_NULL_POINTER]	= "kernel NULL pointer dereference",
};
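
/*
 * For example (illustrative), a wild access to an address in the
 * non-canonical hole shows up in the resulting die() message as:
 *
 *	general protection fault, probably for non-canonical address 0xdead000000000000
 */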

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller. Also, try to figure
 * out whether any part of the access to that address was non-canonical or
 * across privilege levels.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;
	int ret;

	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
				     MAX_INSN_SIZE))
		return GP_NO_HINT;

	ret = insn_decode_kernel(&insn, insn_buf);
	if (ret < 0)
		return GP_NO_HINT;

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/* Operand is in the kernel half */
	if (*addr >= ~__VIRTUAL_MASK)
		return GP_CANONICAL;

	/* The last byte of the operand is not in the user canonical half */
	if (*addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;

	/*
	 * A NULL pointer dereference usually causes a #PF. However, it
	 * can result in a #GP when LASS is active. Provide the same
	 * hint in the rare case that the condition is hit without LASS.
	 */
	if (*addr < PAGE_SIZE)
		return GP_NULL_POINTER;

	/*
	 * Assume that LASS caused the exception, because the address is
	 * canonical and in the user half.
	 */
	if (cpu_feature_enabled(X86_FEATURE_LASS))
		return GP_LASS_VIOLATION;
#endif

	return GP_CANONICAL;
}

#define GPFSTR "general protection fault"

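/*
 * With IOPL emulation active (iopl_emul == 3), a userspace CLI (0xfa) or
 * STI (0xfb) raises #GP; pretend the instruction is a NOP by skipping the
 * single opcode byte, warning once per task.
 */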
static bool fixup_iopl_exception(struct pt_regs *regs)
{
	struct thread_struct *t = &current->thread;
	unsigned char byte;
	unsigned long ip;

	if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
		return false;

	if (insn_get_effective_ip(regs, &ip))
		return false;

	if (get_user(byte, (const char __user *)ip))
		return false;

	if (byte != 0xfa && byte != 0xfb)
		return false;

	if (!t->iopl_warn && printk_ratelimit()) {
		pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
		       current->comm, task_pid_nr(current), ip);
		print_vma_addr(KERN_CONT " in ", ip);
		pr_cont("\n");
		t->iopl_warn = 1;
	}

	regs->ip += 1;
	return true;
}

/*
 * The unprivileged ENQCMD instruction generates #GPs if the
 * IA32_PASID MSR has not been populated.  If possible, populate
 * the MSR from a PASID previously allocated to the mm.
 */
static bool try_fixup_enqcmd_gp(void)
{
#ifdef CONFIG_ARCH_HAS_CPU_PASID
	u32 pasid;

	/*
	 * MSR_IA32_PASID is managed using XSAVE.  Directly
	 * writing to the MSR is only possible when fpregs
	 * are valid and the fpstate is not.  This is
	 * guaranteed when handling a userspace exception,
	 * before interrupts are re-enabled.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * Hardware without ENQCMD will not generate
	 * #GPs that can be fixed up here.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		return false;

	/*
	 * If the mm has not been allocated a
	 * PASID, the #GP can not be fixed up.
	 */
	if (!mm_valid_pasid(current->mm))
		return false;

	pasid = mm_get_enqcmd_pasid(current->mm);

	/*
	 * Did this thread already have its PASID activated?
	 * If so, the #GP must be from something else.
	 */
	if (current->pasid_activated)
		return false;

	wrmsrq(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
	current->pasid_activated = 1;

	return true;
#else
	return false;
#endif
}

static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
				    unsigned long error_code, const char *str,
				    unsigned long address)
{
	if (fixup_exception(regs, trapnr, error_code, address))
		return true;

	current->thread.error_code = error_code;
	current->thread.trap_nr = trapnr;

	/*
	 * To be potentially processing a kprobe fault and to trust the result
	 * from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() && kprobe_running() &&
	    kprobe_fault_handler(regs, trapnr))
		return true;

	return notify_die(DIE_GPF, str, regs, error_code, trapnr, SIGSEGV) == NOTIFY_STOP;
}

static void gp_user_force_sig_segv(struct pt_regs *regs, int trapnr,
				   unsigned long error_code, const char *str)
{
	current->thread.error_code = error_code;
	current->thread.trap_nr = trapnr;
	show_signal(current, SIGSEGV, "", str, regs, error_code);
	force_sig(SIGSEGV);
}

DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	unsigned long gp_addr;

	if (user_mode(regs) && try_fixup_enqcmd_gp())
		return;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	if (user_mode(regs)) {
		if (fixup_iopl_exception(regs))
			goto exit;

		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
			goto exit;

		gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
		goto exit;
	}

	if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 kernel_gp_hint_help[hint], gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case, clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}

static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
NOKPROBE_SYMBOL(do_int3);

static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * smp_text_poke_int3_handler() is completely self-contained code; it
	 * does (and must) *NOT* call out to anything, lest it hits upon yet
	 * another INT3.
	 */
	if (smp_text_poke_int3_handler(regs))
		return;

	/*
	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
	 * and therefore can trigger INT3, hence smp_text_poke_int3_handler()
	 * must be done before.  If the entry came from kernel mode, then use
	 * nmi_enter() because the INT3 could have been hit in any context
	 * including NMI.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t irq_state = irqentry_nmi_enter(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		irqentry_nmi_exit(regs, irq_state);
	}
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)current_top_of_stack() - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * In the SYSCALL entry path the RSP value comes from user-space - don't
	 * trust it and switch to the current kernel stack
	 */
	if (ip_within_syscall_gap(regs)) {
		sp = current_top_of_stack();
		goto sync;
	}

	/*
	 * From here on the RSP value is trusted. Now check whether entry
	 * happened from a safe stack. Not safe are the entry or unknown stacks,
	 * use the fall-back stack instead in this case.
	 */
	sp    = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type > STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Found a safe stack - switch to it as if the entry didn't happen via
	 * IST stack. The code below only copies pt_regs, the real switch happens
	 * in assembly code.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
#endif

asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
{
	struct pt_regs tmp, *new_stack;

	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target to the temporary storage. */
	__memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	__memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));

	/* Update the entry stack */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	BUG_ON(!user_mode(new_stack));
	return new_stack;
}
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

static __always_inline unsigned long debug_read_reset_dr6(void)
{
	unsigned long dr6;

	get_debugreg(dr6, 6);
	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
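	/*
	 * For example, on typical hardware a plain single-step #DB reads
	 * back as 0xFFFF4FF0; XORing with DR6_RESERVED (0xFFFF0FF0)
	 * leaves 0x4000 == DR_STEP.
	 */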

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3 of DR6.
	 *
	 *   BLD induced #DB clears DR6.BLD and any other debug
	 *   exception doesn't modify DR6.BLD.
	 *
	 *   RTM induced #DB clears DR6.RTM and any other debug
	 *   exception sets DR6.RTM.
	 *
	 *   To avoid confusion in identifying debug exceptions,
	 *   debug handlers should set DR6.BLD and DR6.RTM, and
	 *   clear other DR6 bits before returning.
	 *
	 * Keep it simple: write DR6 with its architectural reset
	 * value 0xFFFF0FF0, defined as DR6_RESERVED, immediately.
	 */
	set_debugreg(DR6_RESERVED, 6);

	return dr6;
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */

static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	/*
	 * Notifiers will clear bits in @dr6 to indicate the event has been
	 * consumed - hw_breakpoint_handler(), single_stop_cont().
	 *
	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
	 */
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}

static noinstr void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6)
{
	/*
	 * Disable breakpoints during exception handling; recursive exceptions
	 * are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
	 * HW_BREAKPOINT_W on our stack)
	 *
	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
	 * includes the entry stack, is excluded for everything.
	 *
	 * For FRED, nested #DB should just work fine. But when a watchpoint or
	 * breakpoint is set in the code path which is executed by #DB handler,
	 * it results in an endless recursion and stack overflow. Thus we stay
	 * with the IDT approach, i.e., save DR7 and disable #DB.
	 */
	unsigned long dr7 = local_db_save();
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
	instrumentation_begin();

	/*
	 * If something gets miswired and we end up here for a user mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * The SDM says "The processor clears the BTF flag when it
		 * generates a debug exception."  PTRACE_BLOCKSTEP requested
		 * it for userspace, however, and we just took a kernel #DB,
		 * so re-set BTF.
		 */
		unsigned long debugctl;

		rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_FRED) &&
	    (dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	/*
	 * The kernel doesn't use INT1
	 */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * The kernel doesn't use TF single-step outside of:
	 *
	 *  - Kprobes, consumed through kprobe_debug_handler()
	 *  - KGDB, consumed through notify_debug()
	 *
	 * So if we get here with DR_STEP set, something is wonky.
	 *
	 * A known way to trigger this is through QEMU's GDB stub,
	 * which leaks #DB into the guest and causes IST recursion.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(dr7);
}

static noinstr void exc_debug_user(struct pt_regs *regs, unsigned long dr6)
{
	bool icebp;

	/*
	 * If something gets miswired and we end up here for a kernel mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(!user_mode(regs));

	/*
	 * NB: We can't easily clear DR7 here because
	 * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
	 * user memory, etc.  This means that a recursive #DB is possible.  If
	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
	 * Since we're not on the IST stack right now, everything will be
	 * fine.
	 */

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
	 *
	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
	 * even if it is not the result of PTRACE_SINGLESTEP.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	/* It's safe to allow irq's after DR6 has been saved */
	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* #DB for bus lock can only be triggered from userspace. */
	if (dr6 & DR_BUS_LOCK)
		handle_bus_lock(regs);

	/* Add the virtual_dr6 bits for signals. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_reset_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_reset_dr6());
}

#ifdef CONFIG_X86_FRED
/*
 * Depending on the ring level it occurred at, i.e., user or kernel
 * context, #DB needs to be handled on a different stack: user #DB on
 * the current task stack, kernel #DB on a dedicated stack.
 *
 * This is exactly how FRED event delivery invokes an exception
 * handler: ring 3 event on level 0 stack, i.e., current task stack;
 * ring 0 event on the #DB dedicated stack specified in the
 * IA32_FRED_STKLVLS MSR. So unlike IDT, the FRED debug exception
 * entry stub doesn't do stack switch.
 */
DEFINE_FREDENTRY_DEBUG(exc_debug)
{
	/*
	 * FRED #DB stores DR6 on the stack in the format which
	 * debug_read_reset_dr6() returns for the IDT entry points.
	 */
	unsigned long dr6 = fred_event_data(regs);

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif /* CONFIG_X86_FRED */

#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_reset_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = x86_task_fpu(task);
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
					      "simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Synchronize the FPU register state to the memory register state
	 * if necessary. This allows the exception handler to inspect it.
	 */
	fpu_sync_fpstate(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Retry when we get spurious exceptions: */
	if (!si_code)
		goto exit;

	if (fixup_vdso_exception(regs, trapnr, 0, 0))
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}

DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15).  This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler. However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32bit, but the handler is not
	 * hurting and who knows which other CPUs suffer from this.
	 */
}

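/*
 * #NM raised via XFD (eXtended Feature Disable): MSR_IA32_XFD_ERR records
 * which dynamically enabled xfeatures (e.g. AMX tile state) the task
 * touched; try to allocate the larger fpstate and let the task retry the
 * faulting instruction.
 */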
static bool handle_xfd_event(struct pt_regs *regs)
{
	u64 xfd_err;
	int err;

	if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
		return false;

	rdmsrq(MSR_IA32_XFD_ERR, xfd_err);
	if (!xfd_err)
		return false;

	wrmsrq(MSR_IA32_XFD_ERR, 0);

	/* Die if that happens in kernel space */
	if (WARN_ON(!user_mode(regs)))
		return false;

	local_irq_enable();

	err = xfd_enable_feature(xfd_err);

	switch (err) {
	case -EPERM:
		force_sig_fault(SIGILL, ILL_ILLOPC, error_get_trap_addr(regs));
		break;
	case -EFAULT:
		force_sig(SIGSEGV);
		break;
	}

	local_irq_disable();
	return true;
}

DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

	if (handle_xfd_event(regs))
		return;

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}

#ifdef CONFIG_INTEL_TDX_GUEST

#define VE_FAULT_STR "VE fault"

static void ve_raise_fault(struct pt_regs *regs, long error_code,
			   unsigned long address)
{
	if (user_mode(regs)) {
		gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
		return;
	}

	if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
				    VE_FAULT_STR, address)) {
		return;
	}

	die_addr(VE_FAULT_STR, regs, error_code, address);
}

/*
 * Virtualization Exceptions (#VE) are delivered to TDX guests due to
 * specific guest actions which may happen in either user space or the
 * kernel:
 *
 *  * Specific instructions (WBINVD, for example)
 *  * Specific MSR accesses
 *  * Specific CPUID leaf accesses
 *  * Access to specific guest physical addresses
 *
 * In the settings that Linux will run in, virtualization exceptions are
 * never generated on accesses to normal, TD-private memory that has been
 * accepted (by BIOS or with tdx_enc_status_changed()).
 *
 * Syscall entry code has a critical window where the kernel stack is not
 * yet set up. Any exception in this window leads to hard to debug issues
 * and can be exploited for privilege escalation. Exceptions in the NMI
 * entry code also cause issues. Returning from the exception handler with
 * IRET will re-enable NMIs and nested NMI will corrupt the NMI stack.
 *
 * For these reasons, the kernel avoids #VEs during the syscall gap and
 * the NMI entry code. Entry code paths do not access TD-shared memory,
 * MMIO regions, use #VE triggering MSRs, instructions, or CPUID leaves
 * that might generate #VE. VMM can remove memory from TD at any point,
 * but access to unaccepted (or missing) private memory leads to VM
 * termination, not to #VE.
 *
 * Similarly to page faults and breakpoints, #VEs are allowed in NMI
 * handlers once the kernel is ready to deal with nested NMIs.
 *
 * During #VE delivery, all interrupts, including NMIs, are blocked until
 * TDGETVEINFO is called. It prevents #VE nesting until the kernel reads
 * the VE info.
 *
 * If a guest kernel action which would normally cause a #VE occurs in
 * the interrupt-disabled region before TDGETVEINFO, a #DF (double fault
 * exception) is delivered to the guest which will result in an oops.
 *
 * The entry code has been audited carefully for following these expectations.
 * Changes in the entry code have to be audited for correctness vs. this
 * aspect. Similarly to #PF, #VE in these places will expose kernel to
 * privilege escalation or may lead to random crashes.
 */
DEFINE_IDTENTRY(exc_virtualization_exception)
{
	struct ve_info ve;

	/*
	 * NMIs/Machine-checks/Interrupts will be in a disabled state
	 * till TDGETVEINFO TDCALL is executed. This ensures that VE
	 * info cannot be overwritten by a nested #VE.
	 */
	tdx_get_ve_info(&ve);

	cond_local_irq_enable(regs);

	/*
	 * If tdx_handle_virt_exception() could not process
	 * it successfully, treat it as #GP(0) and handle it.
	 */
	if (!tdx_handle_virt_exception(regs, &ve))
		ve_raise_fault(regs, 0, ve.gla);

	cond_local_irq_disable(regs);
}

#endif

#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages when running as an SEV-ES guest */
	sev_es_init_vc_handling();

	/* Initialize TSS before setting up traps so ISTs work */
	cpu_init_exception_handling(true);

	/* Setup traps as cpu_init() might #GP */
	if (!cpu_feature_enabled(X86_FEATURE_FRED))
		idt_setup_traps();

	cpu_init();
}