1 /*-
2 * Copyright (c) 2021 The FreeBSD Foundation
3 *
4 * This software was developed by Andrew Turner under sponsorship from
5 * the FreeBSD Foundation.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 /*
30 * This manages pointer authentication. As it needs to enable the use of
31 * pointer authentication and change the keys we must built this with
32 * pointer authentication disabled.
33 */
34 #ifdef __ARM_FEATURE_PAC_DEFAULT
35 #error Must be built with pointer authentication disabled
36 #endif
37
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/proc.h>
#include <sys/reboot.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/vmparam.h>
48
/* SCTLR_EL1 bits enabling the instruction (IA/IB) and data (DA/DB) keys. */
#define SCTLR_PTRAUTH (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)

/* Set once by ptrauth_init() when usable PAC hardware is found. */
static bool __read_mostly enable_ptrauth = false;

/* Functions called from assembly. */
void ptrauth_start(void);
struct thread *ptrauth_switch(struct thread *);
void ptrauth_exit_el0(struct thread *);
void ptrauth_enter_el0(struct thread *);
58
59 static bool
ptrauth_disable(void)60 ptrauth_disable(void)
61 {
62 const char *family, *maker, *product;
63
64 family = kern_getenv("smbios.system.family");
65 maker = kern_getenv("smbios.system.maker");
66 product = kern_getenv("smbios.system.product");
67 if (family == NULL || maker == NULL || product == NULL)
68 return (false);
69
70 /*
71 * The Dev Kit appears to be configured to trap upon access to PAC
72 * registers, but the kernel boots at EL1 and so we have no way to
73 * inspect or change this configuration. As a workaround, simply
74 * disable PAC on this platform.
75 */
76 if (strcmp(maker, "Microsoft Corporation") == 0 &&
77 strcmp(family, "Surface") == 0 &&
78 strcmp(product, "Windows Dev Kit 2023") == 0)
79 return (true);
80
81 return (false);
82 }
83
84 void
ptrauth_init(void)85 ptrauth_init(void)
86 {
87 uint64_t isar1;
88 int pac_enable;
89
90 /*
91 * Allow the sysadmin to disable pointer authentication globally,
92 * e.g. on broken hardware.
93 */
94 pac_enable = 1;
95 TUNABLE_INT_FETCH("hw.pac.enable", &pac_enable);
96 if (!pac_enable) {
97 if (boothowto & RB_VERBOSE)
98 printf("Pointer authentication is disabled\n");
99 return;
100 }
101
102 if (!get_kernel_reg(ID_AA64ISAR1_EL1, &isar1))
103 return;
104
105 if (ptrauth_disable())
106 return;
107
108 /*
109 * This assumes if there is pointer authentication on the boot CPU
110 * it will also be available on any non-boot CPUs. If this is ever
111 * not the case we will have to add a quirk.
112 */
113 if (ID_AA64ISAR1_APA_VAL(isar1) > 0 ||
114 ID_AA64ISAR1_API_VAL(isar1) > 0) {
115 enable_ptrauth = true;
116 elf64_addr_mask.code |= PAC_ADDR_MASK;
117 elf64_addr_mask.data |= PAC_ADDR_MASK;
118 }
119 }
120
121 /* Copy the keys when forking a new process */
122 void
ptrauth_fork(struct thread * new_td,struct thread * orig_td)123 ptrauth_fork(struct thread *new_td, struct thread *orig_td)
124 {
125 if (!enable_ptrauth)
126 return;
127
128 memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
129 sizeof(new_td->td_md.md_ptrauth_user));
130 }
131
132 /* Generate new userspace keys when executing a new process */
133 void
ptrauth_exec(struct thread * td)134 ptrauth_exec(struct thread *td)
135 {
136 if (!enable_ptrauth)
137 return;
138
139 arc4rand(&td->td_md.md_ptrauth_user, sizeof(td->td_md.md_ptrauth_user),
140 0);
141 }
142
143 /*
144 * Copy the user keys when creating a new userspace thread until it's clear
145 * how the ABI expects the various keys to be assigned.
146 */
147 void
ptrauth_copy_thread(struct thread * new_td,struct thread * orig_td)148 ptrauth_copy_thread(struct thread *new_td, struct thread *orig_td)
149 {
150 if (!enable_ptrauth)
151 return;
152
153 memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
154 sizeof(new_td->td_md.md_ptrauth_user));
155 }
156
157 /* Generate new kernel keys when executing a new kernel thread */
158 void
ptrauth_thread_alloc(struct thread * td)159 ptrauth_thread_alloc(struct thread *td)
160 {
161 if (!enable_ptrauth)
162 return;
163
164 arc4rand(&td->td_md.md_ptrauth_kern, sizeof(td->td_md.md_ptrauth_kern),
165 0);
166 }
167
168 /*
169 * Load the userspace keys. We can't use WRITE_SPECIALREG as we need
170 * to set the architecture extension.
171 */
172 #define LOAD_KEY(space, name, reg) \
173 __asm __volatile( \
174 "msr "__XSTRING(MRS_REG_ALT_NAME(reg ## KeyLo_EL1))", %0 \n" \
175 "msr "__XSTRING(MRS_REG_ALT_NAME(reg ## KeyHi_EL1))", %1 \n" \
176 :: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo), \
177 "r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))
178
/*
 * Install thread0's kernel APIA key.  Runs during early boot, before
 * ptrauth_start() turns the feature on, so the key takes effect lazily.
 */
void
ptrauth_thread0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* TODO: Generate a random number here */
	/* Zero key for now; random sources are not usable this early. */
	memset(&td->td_md.md_ptrauth_kern, 0,
	    sizeof(td->td_md.md_ptrauth_kern));
	LOAD_KEY(kern, apia, APIA);
	/*
	 * No isb as this is called before ptrauth_start so can rely on
	 * the instruction barrier there.
	 */
}
194
195 /*
196 * Enable pointer authentication. After this point userspace and the kernel
197 * can sign return addresses, etc. based on their keys
198 *
199 * This assumes either all or no CPUs have pointer authentication support,
200 * and, if supported, all CPUs have the same algorithm.
201 */
202 void
ptrauth_start(void)203 ptrauth_start(void)
204 {
205 uint64_t sctlr;
206
207 if (!enable_ptrauth)
208 return;
209
210 /* Enable pointer authentication */
211 sctlr = READ_SPECIALREG(sctlr_el1);
212 sctlr |= SCTLR_PTRAUTH;
213 WRITE_SPECIALREG(sctlr_el1, sctlr);
214 isb();
215 }
216
217 #ifdef SMP
218 void
ptrauth_mp_start(uint64_t cpu)219 ptrauth_mp_start(uint64_t cpu)
220 {
221 struct ptrauth_key start_key;
222 uint64_t sctlr;
223
224 if (!enable_ptrauth)
225 return;
226
227 /*
228 * We need a key until we call sched_throw, however we don't have
229 * a thread until then. Create a key just for use within
230 * init_secondary and whatever it calls. As init_secondary never
231 * returns it is safe to do so from within it.
232 *
233 * As it's only used for a short length of time just use the cpu
234 * as the key.
235 */
236 start_key.pa_key_lo = cpu;
237 start_key.pa_key_hi = ~cpu;
238
239 __asm __volatile(
240 ".arch_extension pauth \n"
241 "msr "__XSTRING(APIAKeyLo_EL1_REG)", %0 \n"
242 "msr "__XSTRING(APIAKeyHi_EL1_REG)", %1 \n"
243 ".arch_extension nopauth \n"
244 :: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));
245
246 /* Enable pointer authentication */
247 sctlr = READ_SPECIALREG(sctlr_el1);
248 sctlr |= SCTLR_PTRAUTH;
249 WRITE_SPECIALREG(sctlr_el1, sctlr);
250 isb();
251 }
252 #endif
253
254 struct thread *
ptrauth_switch(struct thread * td)255 ptrauth_switch(struct thread *td)
256 {
257 if (enable_ptrauth) {
258 LOAD_KEY(kern, apia, APIA);
259 isb();
260 }
261
262 return (td);
263 }
264
265 /* Called when we are exiting uerspace and entering the kernel */
266 void
ptrauth_exit_el0(struct thread * td)267 ptrauth_exit_el0(struct thread *td)
268 {
269 if (!enable_ptrauth)
270 return;
271
272 LOAD_KEY(kern, apia, APIA);
273 isb();
274 }
275
276 /* Called when we are about to exit the kernel and enter userspace */
277 void
ptrauth_enter_el0(struct thread * td)278 ptrauth_enter_el0(struct thread *td)
279 {
280 if (!enable_ptrauth)
281 return;
282
283 LOAD_KEY(user, apia, APIA);
284 LOAD_KEY(user, apib, APIB);
285 LOAD_KEY(user, apda, APDA);
286 LOAD_KEY(user, apdb, APDB);
287 LOAD_KEY(user, apga, APGA);
288 /*
289 * No isb as this is called from the exception handler so can rely
290 * on the eret instruction to be the needed context synchronizing event.
291 */
292 }
293