Lines Matching +full:mixed +full:- +full:signals
(matches from arch/x86/kernel/traps.c; each line carries its source line number and, where applicable, the enclosing function)

51 #include <asm/text-patching.h>
67 #include <asm/insn-eval.h>
75 #include <asm/processor-flags.h>
154 tsk->thread.error_code = error_code; in do_trap_no_signal()
155 tsk->thread.trap_nr = trapnr; in do_trap_no_signal()
171 tsk->thread.error_code = error_code; in do_trap_no_signal()
172 tsk->thread.trap_nr = trapnr; in do_trap_no_signal()
174 return -1; in do_trap_no_signal()
184 tsk->comm, task_pid_nr(tsk), type, desc, in show_signal()
185 regs->ip, regs->sp, error_code); in show_signal()
186 print_vma_addr(KERN_CONT " in ", regs->ip); in show_signal()
226 * This address is usually regs->ip, but when an uprobe moved the code out
227 * of line then regs->ip points to the XOL code which would confuse
228 * anything which analyzes the fault address vs. the unmodified binary. If
229 * a trap happened in XOL code then uprobe maps regs->ip back to the
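
In mainline traps.c this comment sits directly above a one-line helper that performs the mapping. A sketch of it, assuming the mainline helper name error_get_trap_addr() and the uprobe_get_trap_addr() API (neither appears in the matches above):

	/* Map a possibly out-of-line (XOL) regs->ip back to the original
	 * instruction address before it is reported in si_addr. */
	static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
	{
		return (void __user *)uprobe_get_trap_addr(regs);
	}
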
264 ud_type = decode_bug(regs->ip, &imm); in handle_bug()
282 if (regs->flags & X86_EFLAGS_IF) in handle_bug()
285 if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN || in handle_bug()
287 regs->ip += LEN_UD2; in handle_bug()
291 pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip); in handle_bug()
293 if (regs->flags & X86_EFLAGS_IF) in handle_bug()
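
The handle_bug() fragments above (lines 264-293) belong to one function. A stitched sketch of its core, following mainline traps.c (the local declarations, decode_bug() internals, and handle_cfi_failure() are assumptions not shown in the matches): the interrupt state of the exception site is temporarily restored so the report runs under the same IF flag, and a handled WARN resumes by stepping regs->ip past the 2-byte UD2.

	ud_type = decode_bug(regs->ip, &imm);
	if (ud_type == BUG_NONE)
		return handled;

	/* Restore the interrupt state of the exception site. */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();

	if (ud_type == BUG_UD2) {
		if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
		    handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
			regs->ip += LEN_UD2;	/* resume after the UD2 */
			handled = true;
		}
	} else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
		pr_crit("%s at %pS\n",
			report_ubsan_failure(regs, imm), (void *)regs->ip);
	}

	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
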
370 const char *name = stack_type_name(info->type); in handle_stack_overflow()
373 name, (void *)fault_address, info->begin, info->end); in handle_stack_overflow()
391 * while the stack is read-only are, in fact, recoverable.
415 * If IRET takes a non-IST fault on the espfix64 stack, then we in DEFINE_IDTENTRY_DF()
428 if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY && in DEFINE_IDTENTRY_DF()
429 regs->cs == __KERNEL_CS && in DEFINE_IDTENTRY_DF()
430 regs->ip == (unsigned long)native_irq_return_iret) in DEFINE_IDTENTRY_DF()
432 struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; in DEFINE_IDTENTRY_DF()
433 unsigned long *p = (unsigned long *)regs->sp; in DEFINE_IDTENTRY_DF()
436 * regs->sp points to the failing IRET frame on the in DEFINE_IDTENTRY_DF()
438 * in gpregs->ss through gpregs->ip. in DEFINE_IDTENTRY_DF()
441 gpregs->ip = p[0]; in DEFINE_IDTENTRY_DF()
442 gpregs->cs = p[1]; in DEFINE_IDTENTRY_DF()
443 gpregs->flags = p[2]; in DEFINE_IDTENTRY_DF()
444 gpregs->sp = p[3]; in DEFINE_IDTENTRY_DF()
445 gpregs->ss = p[4]; in DEFINE_IDTENTRY_DF()
446 gpregs->orig_ax = 0; /* Missing (lost) #GP error code */ in DEFINE_IDTENTRY_DF()
459 regs->ip = (unsigned long)asm_exc_general_protection; in DEFINE_IDTENTRY_DF()
460 regs->sp = (unsigned long)&gpregs->orig_ax; in DEFINE_IDTENTRY_DF()
470 tsk->thread.error_code = error_code; in DEFINE_IDTENTRY_DF()
471 tsk->thread.trap_nr = X86_TRAP_DF; in DEFINE_IDTENTRY_DF()
477 * take any non-IST exception while too close to the bottom of in DEFINE_IDTENTRY_DF()
481 * According to the SDM (footnote in 6.15 under "Interrupt 14 - in DEFINE_IDTENTRY_DF()
482 * Page-Fault Exception (#PF)": in DEFINE_IDTENTRY_DF()
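
The DEFINE_IDTENTRY_DF() fragments at lines 415-460 are one contiguous recovery block. Stitched together (the closing return and a couple of comments are filled in from mainline and are assumptions): if the #DF was really a non-IST #GP taken by IRET on the espfix64 stack, the handler rebuilds the lost #GP frame on the entry stack and redirects execution to the #GP handler.

	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
	    regs->cs == __KERNEL_CS &&
	    regs->ip == (unsigned long)native_irq_return_iret) {
		struct pt_regs *gpregs =
			(struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/* regs->sp points to the failing IRET frame on the espfix64
		 * stack; copy it to the entry stack, filling gpregs->ss
		 * through gpregs->ip. */
		gpregs->ip	= p[0];
		gpregs->cs	= p[1];
		gpregs->flags	= p[2];
		gpregs->sp	= p[3];
		gpregs->ss	= p[4];
		gpregs->orig_ax = 0;	/* Missing (lost) #GP error code */

		/* Resume in the #GP handler as if IRET itself had faulted. */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;
		return;
	}
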
545 * out whether any part of the access to that address was non-canonical.
554 if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, in get_kernel_gp_address()
563 if (*addr == -1UL) in get_kernel_gp_address()
569 * - the operand is not in the kernel half in get_kernel_gp_address()
570 * - the last byte of the operand is not in the user canonical half in get_kernel_gp_address()
573 *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK) in get_kernel_gp_address()
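
Putting the two bullet conditions of get_kernel_gp_address() together: the access is flagged as probably non-canonical when the decoded operand starts below the kernel half yet its last byte ends past the user canonical half. A restatement assuming mainline's GP_NON_CANONICAL hint (seen at line 737; only the second comparison appears verbatim in the matches):

	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
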
584 struct thread_struct *t = &current->thread; in fixup_iopl_exception()
588 if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3) in fixup_iopl_exception()
600 if (!t->iopl_warn && printk_ratelimit()) { in fixup_iopl_exception()
602 current->comm, task_pid_nr(current), ip); in fixup_iopl_exception()
605 t->iopl_warn = 1; in fixup_iopl_exception()
608 regs->ip += 1; in fixup_iopl_exception()
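
fixup_iopl_exception() (lines 584-608) emulates IOPL 3 for the only two instructions that need it: a one-byte CLI (0xfa) or STI (0xfb) executed by userspace is turned into a NOP by stepping regs->ip past it. A stitched sketch following mainline; the insn_get_effective_ip() call, the get_user() fetch, and the opcode values are assumptions not shown in the matches:

	unsigned char byte;
	unsigned long ip;

	if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
		return false;

	if (insn_get_effective_ip(regs, &ip))
		return false;

	if (get_user(byte, (const char __user *)ip))
		return false;

	/* Only CLI (0xfa) and STI (0xfb) are emulated. */
	if (byte != 0xfa && byte != 0xfb)
		return false;

	if (!t->iopl_warn && printk_ratelimit()) {
		pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
		       current->comm, task_pid_nr(current), ip);
		print_vma_addr(KERN_CONT " in ", ip);
		pr_cont("\n");
		t->iopl_warn = 1;
	}

	regs->ip += 1;	/* skip the one-byte CLI/STI */
	return true;
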
627 * in *before* interrupts are re-enabled. in try_fixup_enqcmd_gp()
642 if (!mm_valid_pasid(current->mm)) in try_fixup_enqcmd_gp()
645 pasid = mm_get_enqcmd_pasid(current->mm); in try_fixup_enqcmd_gp()
651 if (current->pasid_activated) in try_fixup_enqcmd_gp()
655 current->pasid_activated = 1; in try_fixup_enqcmd_gp()
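
try_fixup_enqcmd_gp() (lines 627-655) repairs a #GP raised by an ENQCMD executed before this thread's IA32_PASID MSR was programmed. A stitched sketch; the feature check, the lockdep assertion, and the MSR write are filled in from mainline and are assumptions:

	u32 pasid;

	/* MSR_IA32_PASID is XSAVE-managed; writing it directly is only safe
	 * while interrupts are still off, i.e. before they are re-enabled
	 * in the exception handler. */
	lockdep_assert_irqs_disabled();

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		return false;

	if (!mm_valid_pasid(current->mm))
		return false;

	pasid = mm_get_enqcmd_pasid(current->mm);

	/* Already activated? Then the #GP came from something else. */
	if (current->pasid_activated)
		return false;

	wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
	current->pasid_activated = 1;
	return true;
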
670 current->thread.error_code = error_code; in gp_try_fixup_and_notify()
671 current->thread.trap_nr = trapnr; in gp_try_fixup_and_notify()
675 * from kprobe_running(), we have to be non-preemptible. in gp_try_fixup_and_notify()
687 current->thread.error_code = error_code; in gp_user_force_sig_segv()
688 current->thread.trap_nr = trapnr; in gp_user_force_sig_segv()
731 snprintf(desc, sizeof(desc), "segment-related " GPFSTR); in DEFINE_IDTENTRY_ERRORCODE()
737 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address" in DEFINE_IDTENTRY_ERRORCODE()
742 * KASAN is interested only in the non-canonical case, clear it in DEFINE_IDTENTRY_ERRORCODE()
820 * Help handler running on a per-cpu (IST or entry trampoline) stack
826 struct pt_regs *regs = (struct pt_regs *)current_top_of_stack() - 1; in sync_regs()
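
sync_regs() (lines 820-826) is short enough to reconstruct whole. A sketch following mainline (the copy guard and return are assumptions; only the pointer computation appears in the matches): the pt_regs frame is copied from the per-cpu IST or trampoline stack onto the task stack so the handler can run on a stack that allows scheduling.

	asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
	{
		struct pt_regs *regs = (struct pt_regs *)current_top_of_stack() - 1;

		if (regs != eregs)
			*regs = *eregs;
		return regs;
	}
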
840 * In the SYSCALL entry path the RSP value comes from user-space - don't in vc_switch_off_ist()
851 * use the fall-back stack instead in this case. in vc_switch_off_ist()
853 sp = regs->sp; in vc_switch_off_ist()
862 * Found a safe stack - switch to it as if the entry didn't happen via in vc_switch_off_ist()
866 sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret); in vc_switch_off_ist()
887 new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; in fixup_bad_iret()
890 __memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8); in fixup_bad_iret()
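
fixup_bad_iret() (lines 887-890) assembles a coherent frame on the entry stack from two sources: the 5*8 bytes at bad_regs->sp are the hardware IRET frame (ip, cs, flags, sp, ss), and everything below pt_regs.ip comes from the in-progress frame. A stitched sketch; the temporary, the remaining copies, and the final check are assumptions from mainline:

	struct pt_regs tmp;
	struct pt_regs *new_stack =
		(struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target (ip, cs, flags, sp, ss) to temporary storage. */
	__memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);

	/* Copy the remainder of the frame from the current stack. */
	__memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));

	/* Update the entry stack. */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	BUG_ON(!user_mode(new_stack));
	return new_stack;
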
907 * code that can be single-stepped in the SYSENTER entry path, then in is_sysenter_singlestep()
908 * assume that this is a useless single-step trap due to SYSENTER in is_sysenter_singlestep()
914 return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) < in is_sysenter_singlestep()
915 (unsigned long)__end_SYSENTER_singlestep_region - in is_sysenter_singlestep()
918 return (regs->ip - (unsigned long)entry_SYSENTER_compat) < in is_sysenter_singlestep()
919 (unsigned long)__end_entry_SYSENTER_compat - in is_sysenter_singlestep()
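
The comparison in is_sysenter_singlestep() (lines 914-919) is the classic single-compare range check: subtracting the region start first makes any ip below it wrap around to a huge unsigned value, so one unsigned comparison rejects both out-of-range ends. A self-contained, hypothetical illustration (plain C, not kernel code):

	#include <stdbool.h>

	/* Returns true iff begin <= ip < end, using one unsigned compare. */
	static bool in_region(unsigned long ip, unsigned long begin,
			      unsigned long end)
	{
		return (ip - begin) < (end - begin);
	}
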
933 * Certain debug exceptions may clear bits 0-3. The remaining in debug_read_clear_dr6()
949 * Our handling of the processor debug registers is non-trivial.
953 * only set watchpoints on userspace addresses. Therefore the in-kernel
977 * consumed - hw_breakpoint_handler(), single_stop_cont(). in notify_debug()
980 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler(). in notify_debug()
1020 * it for userspace, but we just took a kernel #DB, so re-set in exc_debug_kernel()
1048 * The kernel doesn't use TF single-step outside of: in exc_debug_kernel()
1050 * - Kprobes, consumed through kprobe_debug_handler() in exc_debug_kernel()
1051 * - KGDB, consumed through notify_debug() in exc_debug_kernel()
1059 regs->flags &= ~X86_EFLAGS_TF; in exc_debug_kernel()
1096 current->thread.virtual_dr6 = (dr6 & DR_STEP); in exc_debug_user()
1127 /* Add the virtual_dr6 bits for signals. */ in exc_debug_user()
1128 dr6 |= current->thread.virtual_dr6; in exc_debug_user()
1200 struct fpu *fpu = &task->thread.fpu; in math_error()
1211 task->thread.error_code = 0; in math_error()
1212 task->thread.trap_nr = trapnr; in math_error()
1226 task->thread.trap_nr = trapnr; in math_error()
1227 task->thread.error_code = 0; in math_error()
1265 * PROBLEM: If the APIC subsystem is configured in mixed mode with in DEFINE_IDTENTRY()
1306 case -EPERM: in handle_xfd_event()
1309 case -EFAULT: in handle_xfd_event()
1346 * to kill the task than getting stuck in a never-ending in DEFINE_IDTENTRY()
1384 * never generated on accesses to normal, TD-private memory that has been
1391 * IRET will re-enable NMIs and nested NMI will corrupt the NMI stack.
1394 * the NMI entry code. Entry code paths do not access TD-shared memory,
1408 * the interrupt-disabled region before TDGETVEINFO, a #DF (fault
1421 * NMIs/Machine-checks/Interrupts will be in a disabled state in DEFINE_IDTENTRY()
1459 /* Init GHCB memory pages when running as an SEV-ES guest */ in trap_init()