/*-
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This manages pointer authentication. As it needs to enable the use of
 * pointer authentication and change the keys, we must build this file
 * with pointer authentication disabled.
 */
#ifdef __ARM_FEATURE_PAC_DEFAULT
#error Must be built with pointer authentication disabled
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/proc.h>
#include <sys/reboot.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/cpu_feat.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

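/*
 * SCTLR_EL1 bits that enable the four pointer authentication key types:
 * the instruction keys A/B (EnIA/EnIB) and the data keys A/B (EnDA/EnDB).
 */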
#define	SCTLR_PTRAUTH	(SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)

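/* True once the pointer authentication CPU feature has been enabled. */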
static bool __read_mostly enable_ptrauth = false;

/* Functions called from assembly. */
void ptrauth_start(void);
struct thread *ptrauth_switch(struct thread *);
void ptrauth_exit_el0(struct thread *);
void ptrauth_enter_el0(struct thread *);

static bool
ptrauth_disable(void)
{
	const char *family, *maker, *product;

	family = kern_getenv("smbios.system.family");
	maker = kern_getenv("smbios.system.maker");
	product = kern_getenv("smbios.system.product");
	if (family == NULL || maker == NULL || product == NULL)
		return (false);

	/*
	 * The Dev Kit appears to be configured to trap upon access to PAC
	 * registers, but the kernel boots at EL1 and so we have no way to
	 * inspect or change this configuration.  As a workaround, simply
	 * disable PAC on this platform.
	 */
	if (strcmp(maker, "Microsoft Corporation") == 0 &&
	    strcmp(family, "Surface") == 0 &&
	    strcmp(product, "Windows Dev Kit 2023") == 0)
		return (true);

	return (false);
}

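/*
 * Decide whether pointer authentication may be used, checking the
 * hw.pac.enable tunable, platform quirks, and the CPU ID registers.
 */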
static cpu_feat_en
ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	uint64_t isar;
	int pac_enable;

	/*
	 * Allow the sysadmin to disable pointer authentication globally,
	 * e.g. on broken hardware.
	 */
	pac_enable = 1;
	TUNABLE_INT_FETCH("hw.pac.enable", &pac_enable);
	if (!pac_enable) {
		if (boothowto & RB_VERBOSE)
			printf("Pointer authentication is disabled\n");
		return (FEAT_ALWAYS_DISABLE);
	}

	if (ptrauth_disable())
		return (FEAT_ALWAYS_DISABLE);

	/*
	 * This assumes that if there is pointer authentication on the boot
	 * CPU it will also be available on any non-boot CPUs. If this is
	 * ever not the case we will have to add a quirk.
	 */

	/*
	 * The QARMA5 or implementation defined algorithms are reported in
	 * ID_AA64ISAR1_EL1.
	 */
	if (get_kernel_reg(ID_AA64ISAR1_EL1, &isar)) {
		if (ID_AA64ISAR1_APA_VAL(isar) > 0 ||
		    ID_AA64ISAR1_API_VAL(isar) > 0) {
			return (FEAT_DEFAULT_ENABLE);
		}
	}

	/* The QARMA3 algorithm is reported in ID_AA64ISAR2_EL1. */
	if (get_kernel_reg(ID_AA64ISAR2_EL1, &isar)) {
		if (ID_AA64ISAR2_APA3_VAL(isar) > 0) {
			return (FEAT_DEFAULT_ENABLE);
		}
	}

	return (FEAT_ALWAYS_DISABLE);
}

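/*
 * Record that pointer authentication is in use and include the PAC bits
 * in the ELF address masks.
 */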
static bool
ptrauth_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	enable_ptrauth = true;
	elf64_addr_mask.code |= PAC_ADDR_MASK;
	elf64_addr_mask.data |= PAC_ADDR_MASK;
#ifdef COMPAT_FREEBSD14
	elf64_addr_mask_14.code |= PAC_ADDR_MASK_14;
	elf64_addr_mask_14.data |= PAC_ADDR_MASK_14;
#endif

	return (true);
}

static void
ptrauth_disabled(const struct cpu_feat *feat __unused)
{
	/*
	 * Pointer authentication is disabled; mask out the ID fields we
	 * expose to userspace and the rest of the kernel so they don't
	 * try to use it.
	 */
	if (PCPU_GET(cpuid) == 0) {
		update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
		    ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
		    ID_AA64ISAR1_GPI_MASK, 0);
		update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
	}
}

CPU_FEAT(feat_pauth, "Pointer Authentication",
    ptrauth_check, NULL, ptrauth_enable, ptrauth_disabled,
    CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM);

/* Copy the keys when forking a new process */
void
ptrauth_fork(struct thread *new_td, struct thread *orig_td)
{
	if (!enable_ptrauth)
		return;

	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
	    sizeof(new_td->td_md.md_ptrauth_user));
}

/* Generate new userspace keys when executing a new process */
void
ptrauth_exec(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	arc4rand(&td->td_md.md_ptrauth_user, sizeof(td->td_md.md_ptrauth_user),
	    0);
}

/*
 * Copy the user keys when creating a new userspace thread until it's clear
 * how the ABI expects the various keys to be assigned.
 */
void
ptrauth_copy_thread(struct thread *new_td, struct thread *orig_td)
{
	if (!enable_ptrauth)
		return;

	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
	    sizeof(new_td->td_md.md_ptrauth_user));
}

/* Generate new kernel keys when executing a new kernel thread */
void
ptrauth_thread_alloc(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	arc4rand(&td->td_md.md_ptrauth_kern, sizeof(td->td_md.md_ptrauth_kern),
	    0);
}

/*
 * Load the userspace keys. We can't use WRITE_SPECIALREG as we need
 * to set the architecture extension.
 */
#define	LOAD_KEY(space, name, reg)					\
__asm __volatile(							\
	"msr	"__XSTRING(MRS_REG_ALT_NAME(reg ## KeyLo_EL1))", %0	\n"	\
	"msr	"__XSTRING(MRS_REG_ALT_NAME(reg ## KeyHi_EL1))", %1	\n"	\
	:: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo),		\
	   "r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))

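/* Initialize and load the kernel pointer authentication keys for thread0. */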
void
ptrauth_thread0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* TODO: Generate a random number here */
	memset(&td->td_md.md_ptrauth_kern, 0,
	    sizeof(td->td_md.md_ptrauth_kern));
	LOAD_KEY(kern, apia, APIA);
	/*
	 * No isb as this is called before ptrauth_start, so we can rely
	 * on the instruction barrier there.
	 */
}

/*
 * Enable pointer authentication. After this point userspace and the kernel
 * can sign return addresses, etc. based on their keys.
 *
 * This assumes either all or no CPUs have pointer authentication support,
 * and, if supported, all CPUs have the same algorithm.
 */
void
ptrauth_start(void)
{
	uint64_t sctlr;

	if (!enable_ptrauth)
		return;

	/* Enable pointer authentication */
	sctlr = READ_SPECIALREG(sctlr_el1);
	sctlr |= SCTLR_PTRAUTH;
	WRITE_SPECIALREG(sctlr_el1, sctlr);
	isb();
}

#ifdef SMP
void
ptrauth_mp_start(uint64_t cpu)
{
	struct ptrauth_key start_key;
	uint64_t sctlr;

	if (!enable_ptrauth)
		return;

	/*
	 * We need a key until we call sched_throw; however, we don't have
	 * a thread until then. Create a key just for use within
	 * init_secondary and whatever it calls. As init_secondary never
	 * returns, it is safe to do so from within it.
	 *
	 * As the key is only used for a short time, just derive it from
	 * the CPU number.
	 */
	start_key.pa_key_lo = cpu;
	start_key.pa_key_hi = ~cpu;

	__asm __volatile(
	    ".arch_extension pauth		\n"
	    "msr	"__XSTRING(APIAKeyLo_EL1_REG)", %0	\n"
	    "msr	"__XSTRING(APIAKeyHi_EL1_REG)", %1	\n"
	    ".arch_extension nopauth		\n"
	    :: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));

	/* Enable pointer authentication */
	sctlr = READ_SPECIALREG(sctlr_el1);
	sctlr |= SCTLR_PTRAUTH;
	WRITE_SPECIALREG(sctlr_el1, sctlr);
	isb();
}
#endif

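/*
 * Install the incoming thread's kernel key on context switch; called from
 * the assembly context-switch path.
 */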
struct thread *
ptrauth_switch(struct thread *td)
{
	if (enable_ptrauth) {
		LOAD_KEY(kern, apia, APIA);
		isb();
	}

	return (td);
}

/* Called when we are exiting userspace and entering the kernel */
void
ptrauth_exit_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	LOAD_KEY(kern, apia, APIA);
	isb();
}

/* Called when we are about to exit the kernel and enter userspace */
void
ptrauth_enter_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	LOAD_KEY(user, apia, APIA);
	LOAD_KEY(user, apib, APIB);
	LOAD_KEY(user, apda, APDA);
	LOAD_KEY(user, apdb, APDB);
	LOAD_KEY(user, apga, APGA);
	/*
	 * No isb as this is called from the exception handler, so we can
	 * rely on the eret instruction to be the needed context
	 * synchronizing event.
	 */
}
342