// SPDX-License-Identifier: GPL-2.0

#include <linux/ptrace.h>
#include <asm/bugs.h>
#include <asm/traps.h>

/*
 * Control Protection exception (#CP) error code layout: bits 14:0 encode the
 * type of violation, bit 15 (CP_ENCL) is set when the fault was raised from
 * inside an SGX enclave.
 */
enum cp_error_code {
	CP_EC        = (1 << 15) - 1,

	CP_RET       = 1,
	CP_IRET      = 2,
	CP_ENDBR     = 3,
	CP_RSTRORSSP = 4,
	CP_SETSSBSY  = 5,

	CP_ENCL      = 1 << 15,
};

static const char cp_err[][10] = {
	[0] = "unknown",
	[1] = "near ret",
	[2] = "far/iret",
	[3] = "endbranch",
	[4] = "rstorssp",
	[5] = "setssbsy",
};

/* Translate the low bits of the #CP error code into a human readable string. */
static const char *cp_err_string(unsigned long error_code)
{
	unsigned int cpec = error_code & CP_EC;

	if (cpec >= ARRAY_SIZE(cp_err))
		cpec = 0;
	return cp_err[cpec];
}

static void do_unexpected_cp(struct pt_regs *regs, unsigned long error_code)
{
	WARN_ONCE(1, "Unexpected %s #CP, error_code: %s\n",
		  user_mode(regs) ? "user mode" : "kernel mode",
		  cp_err_string(error_code));
}

static DEFINE_RATELIMIT_STATE(cpf_rate, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	unsigned long ssp;

	/*
	 * An exception was just taken from userspace. Since interrupts are
	 * disabled here, no scheduling has messed with the registers yet, so
	 * they are still whatever was live in userspace. Read the SSP before
	 * enabling interrupts so that locking the fpregs to do it later is
	 * not required.
	 */
	rdmsrl(MSR_IA32_PL3_SSP, ssp);

	cond_local_irq_enable(regs);

	tsk = current;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_CP;

	/* Ratelimit to prevent log spamming. */
	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    __ratelimit(&cpf_rate)) {
		pr_emerg("%s[%d] control protection ip:%lx sp:%lx ssp:%lx error:%lx(%s)%s",
			 tsk->comm, task_pid_nr(tsk),
			 regs->ip, regs->sp, ssp, error_code,
			 cp_err_string(error_code),
			 error_code & CP_ENCL ? " in enclave" : "");
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_fault(SIGSEGV, SEGV_CPERR, (void __user *)0);
	cond_local_irq_disable(regs);
}

/* Cleared by "ibt=warn" so that a missing ENDBR warns instead of BUG()ing. */
static __ro_after_init bool ibt_fatal = true;

/* In the kernel, only missing-ENDBR (IBT) #CPs are expected. */
static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	if ((error_code & CP_EC) != CP_ENDBR) {
		do_unexpected_cp(regs, error_code);
		return;
	}

	if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
		regs->ax = 0;
		return;
	}

	pr_err("Missing ENDBR: %pS\n", (void *)instruction_pointer(regs));
	if (!ibt_fatal) {
		printk(KERN_DEFAULT CUT_HERE);
		__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
		return;
	}
	BUG();
}

/*
 * Kernel command line: "ibt=off" disables IBT entirely, "ibt=warn" keeps it
 * enabled but makes missing-ENDBR faults non-fatal.
 */
static int __init ibt_setup(char *str)
{
	if (!strcmp(str, "off"))
		setup_clear_cpu_cap(X86_FEATURE_IBT);

	if (!strcmp(str, "warn"))
		ibt_fatal = false;

	return 1;
}

__setup("ibt=", ibt_setup);

/*
 * #CP entry point: user mode faults are handled as shadow stack violations,
 * kernel mode faults as IBT (missing ENDBR) violations.  A #CP taken while
 * the corresponding feature is not enabled is unexpected.
 */
DEFINE_IDTENTRY_ERRORCODE(exc_control_protection)
{
	if (user_mode(regs)) {
		if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
			do_user_cp_fault(regs, error_code);
		else
			do_unexpected_cp(regs, error_code);
	} else {
		if (cpu_feature_enabled(X86_FEATURE_IBT))
			do_kernel_cp_fault(regs, error_code);
		else
			do_unexpected_cp(regs, error_code);
	}
}