xref: /linux/arch/x86/kernel/cet.c (revision 6cbc4b29eb0d115e9cf7dcc513a5324dc4c9fcc8)
// SPDX-License-Identifier: GPL-2.0

#include <linux/ptrace.h>
#include <asm/bugs.h>
#include <asm/traps.h>

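/*
 * #CP error code layout (as reflected by the enum below): the low 15 bits
 * (CP_EC) encode the cause of the control-protection fault, and bit 15
 * (CP_ENCL) is set when the fault was raised from inside an SGX enclave.
 */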
enum cp_error_code {
	CP_EC        = (1 << 15) - 1,

	CP_RET       = 1,
	CP_IRET      = 2,
	CP_ENDBR     = 3,
	CP_RSTRORSSP = 4,
	CP_SETSSBSY  = 5,

	CP_ENCL	     = 1 << 15,
};

static const char cp_err[][10] = {
	[0] = "unknown",
	[1] = "near ret",
	[2] = "far/iret",
	[3] = "endbranch",
	[4] = "rstorssp",
	[5] = "setssbsy",
};

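/* Map the cause bits of a #CP error code to a human-readable string. */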
static const char *cp_err_string(unsigned long error_code)
{
	unsigned int cpec = error_code & CP_EC;

	if (cpec >= ARRAY_SIZE(cp_err))
		cpec = 0;
	return cp_err[cpec];
}

static void do_unexpected_cp(struct pt_regs *regs, unsigned long error_code)
{
	WARN_ONCE(1, "Unexpected %s #CP, error_code: %s\n",
		  user_mode(regs) ? "user mode" : "kernel mode",
		  cp_err_string(error_code));
}

static DEFINE_RATELIMIT_STATE(cpf_rate, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

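/*
 * Handle a #CP taken from user space: record the fault in the task, log it
 * (rate limited) when unhandled-signal reporting is enabled, and deliver
 * SIGSEGV with si_code SEGV_CPERR.
 */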
static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	unsigned long ssp;

	/*
	 * An exception was just taken from userspace. Since interrupts are disabled
	 * here, no scheduling should have messed with the registers yet and they
	 * will be whatever is live in userspace. So read the SSP before enabling
	 * interrupts so locking the fpregs to do it later is not required.
	 */
	rdmsrl(MSR_IA32_PL3_SSP, ssp);

	cond_local_irq_enable(regs);

	tsk = current;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_CP;

	/* Ratelimit to prevent log spamming. */
	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    __ratelimit(&cpf_rate)) {
		pr_emerg("%s[%d] control protection ip:%lx sp:%lx ssp:%lx error:%lx(%s)%s",
			 tsk->comm, task_pid_nr(tsk),
			 regs->ip, regs->sp, ssp, error_code,
			 cp_err_string(error_code),
			 error_code & CP_ENCL ? " in enclave" : "");
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_fault(SIGSEGV, SEGV_CPERR, (void __user *)0);
	cond_local_irq_disable(regs);
}

static __ro_after_init bool ibt_fatal = true;

/*
 * By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
 *
 * For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
 * the WFE state of the interrupted context needs to be cleared to let execution
 * continue.  Otherwise when the CPU resumes from the instruction that just
 * caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
 * enters a dead loop.
 *
 * This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
 * set WFE.  But FRED provides space on the entry stack (in an expanded CS area)
 * to save and restore the WFE state, thus the WFE state is no longer clobbered,
 * so software must clear it.
 */
static void ibt_clear_fred_wfe(struct pt_regs *regs)
{
	/*
	 * No need to do any FRED checks.
	 *
	 * For IDT event delivery, the high-order 48 bits of CS are pushed
	 * as 0s into the stack, and later IRET ignores these bits.
	 *
	 * For FRED, a test to check if fred_cs.wfe is set would be dropped
	 * by compilers.
	 */
	regs->fred_cs.wfe = 0;
}

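/*
 * Handle a #CP taken in kernel mode.  With kernel IBT the only cause expected
 * here is a missing ENDBR at an indirect branch target; anything else is
 * treated as unexpected.
 */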
static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	if ((error_code & CP_EC) != CP_ENDBR) {
		do_unexpected_cp(regs, error_code);
		return;
	}

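	/*
	 * Kernel IBT selftest: the #CP was triggered deliberately, so report
	 * success by clearing regs->ax for the selftest caller and clear the
	 * saved FRED WFE state so execution can continue past the
	 * intentionally non-ENDBR target.
	 */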
	if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
		regs->ax = 0;
		ibt_clear_fred_wfe(regs);
		return;
	}

	pr_err("Missing ENDBR: %pS\n", (void *)instruction_pointer(regs));
	if (!ibt_fatal) {
		printk(KERN_DEFAULT CUT_HERE);
		__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
		ibt_clear_fred_wfe(regs);
		return;
	}
	BUG();
}

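/*
 * Handle the "ibt=" kernel command-line option: "off" disables kernel IBT
 * entirely, "warn" makes a missing-ENDBR #CP emit a warning instead of a
 * fatal BUG().
 */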
static int __init ibt_setup(char *str)
{
	if (!strcmp(str, "off"))
		setup_clear_cpu_cap(X86_FEATURE_IBT);

	if (!strcmp(str, "warn"))
		ibt_fatal = false;

	return 1;
}

__setup("ibt=", ibt_setup);

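/*
 * #CP entry point: dispatch to the user shadow-stack handler or the kernel
 * IBT handler depending on where the fault was taken; a #CP with neither
 * feature enabled is unexpected.
 */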
DEFINE_IDTENTRY_ERRORCODE(exc_control_protection)
{
	if (user_mode(regs)) {
		if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
			do_user_cp_fault(regs, error_code);
		else
			do_unexpected_cp(regs, error_code);
	} else {
		if (cpu_feature_enabled(X86_FEATURE_IBT))
			do_kernel_cp_fault(regs, error_code);
		else
			do_unexpected_cp(regs, error_code);
	}
}