/*-
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This manages pointer authentication. As it needs to enable the use of
 * pointer authentication and change the keys we must build this with
 * pointer authentication disabled.
 */
/*
 * If the compiler is signing return addresses in this file the keys we are
 * about to program would invalidate our own stack; refuse to build.
 */
#ifdef __ARM_FEATURE_PAC_DEFAULT
#error Must be built with pointer authentication disabled
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/proc.h>
#include <sys/reboot.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/cpu_feat.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

/* SCTLR_EL1 enable bits for the IA, IB, DA and DB pointer auth keys. */
#define	SCTLR_PTRAUTH	(SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)

/*
 * Set by ptrauth_enable() once the boot CPU has decided the feature is
 * usable; every entry point below is a no-op while this is false.
 */
static bool __read_mostly enable_ptrauth = false;

/* Functions called from assembly. */
void ptrauth_start(void);
struct thread *ptrauth_switch(struct thread *);
void ptrauth_exit_el0(struct thread *);
void ptrauth_enter_el0(struct thread *);

/*
 * Platform quirk table: returns true when pointer authentication must be
 * forced off on the current machine, identified via SMBIOS strings.
 * Returns false when the SMBIOS data is missing or no quirk matches.
 */
static bool
ptrauth_disable(void)
{
	const char *family, *maker, *product;

	/*
	 * NOTE(review): kern_getenv() normally returns a dynamically
	 * allocated copy that is released with freeenv(); these three
	 * lookups appear to be leaked — confirm against kern_getenv(9).
	 */
	family = kern_getenv("smbios.system.family");
	maker = kern_getenv("smbios.system.maker");
	product = kern_getenv("smbios.system.product");
	if (family == NULL || maker == NULL || product == NULL)
		return (false);

	/*
	 * The Dev Kit appears to be configured to trap upon access to PAC
	 * registers, but the kernel boots at EL1 and so we have no way to
	 * inspect or change this configuration. As a workaround, simply
	 * disable PAC on this platform.
	 */
	if (strcmp(maker, "Microsoft Corporation") == 0 &&
	    strcmp(family, "Surface") == 0 &&
	    strcmp(product, "Windows Dev Kit 2023") == 0)
		return (true);

	return (false);
}

/*
 * cpu_feat "check" callback: decide whether pointer authentication should
 * be enabled. Returns FEAT_DEFAULT_ENABLE when the boot CPU advertises a
 * PAC algorithm, FEAT_ALWAYS_DISABLE when it doesn't, when the sysadmin
 * turned it off, or when a platform quirk applies.
 */
static cpu_feat_en
ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	uint64_t isar;
	int pac_enable;

	/*
	 * Allow the sysadmin to disable pointer authentication globally,
	 * e.g. on broken hardware.
	 */
	pac_enable = 1;
	TUNABLE_INT_FETCH("hw.pac.enable", &pac_enable);
	if (!pac_enable) {
		if (boothowto & RB_VERBOSE)
			printf("Pointer authentication is disabled\n");
		return (FEAT_ALWAYS_DISABLE);
	}

	if (ptrauth_disable())
		return (FEAT_ALWAYS_DISABLE);

	/*
	 * This assumes if there is pointer authentication on the boot CPU
	 * it will also be available on any non-boot CPUs. If this is ever
	 * not the case we will have to add a quirk.
	 */

	/*
	 * The QARMA5 or implementation defined algorithms are reported in
	 * ID_AA64ISAR1_EL1.
	 */
	get_kernel_reg(ID_AA64ISAR1_EL1, &isar);
	if (ID_AA64ISAR1_APA_VAL(isar) > 0 || ID_AA64ISAR1_API_VAL(isar) > 0) {
		return (FEAT_DEFAULT_ENABLE);
	}

	/* The QARMA3 algorithm is reported in ID_AA64ISAR2_EL1. */
	get_kernel_reg(ID_AA64ISAR2_EL1, &isar);
	if (ID_AA64ISAR2_APA3_VAL(isar) > 0) {
		return (FEAT_DEFAULT_ENABLE);
	}

	return (FEAT_ALWAYS_DISABLE);
}

/*
 * cpu_feat "enable" callback: record that pointer authentication is in use
 * and widen the ELF address masks so the PAC bits in user pointers are
 * accounted for (including the FreeBSD 14 compat masks when built in).
 */
static bool
ptrauth_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	enable_ptrauth = true;
	elf64_addr_mask.code |= PAC_ADDR_MASK;
	elf64_addr_mask.data |= PAC_ADDR_MASK;
#ifdef COMPAT_FREEBSD14
	elf64_addr_mask_14.code |= PAC_ADDR_MASK_14;
	elf64_addr_mask_14.data |= PAC_ADDR_MASK_14;
#endif

	return (true);
}

/* cpu_feat "disabled" callback, run when the feature will not be used. */
static void
ptrauth_disabled(const struct cpu_feat *feat __unused)
{
	/*
	 * Pointer authentication may be disabled, mask out the ID fields we
	 * expose to userspace and the rest of the kernel so they don't try
	 * to use it.  Only the boot CPU needs to adjust the shared view.
	 */
	if (PCPU_GET(cpuid) == 0) {
		update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
		    ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
		    ID_AA64ISAR1_GPI_MASK, 0);
		update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
	}

}

/* Register the feature so the callbacks above run during early boot. */
CPU_FEAT(feat_pauth, "Pointer Authentication",
    ptrauth_check, NULL, ptrauth_enable, ptrauth_disabled,
    CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM);

/* Copy the keys when forking a new process */
void
ptrauth_fork(struct thread *new_td, struct thread *orig_td)
{
	if (!enable_ptrauth)
		return;

	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
	    sizeof(new_td->td_md.md_ptrauth_user));
}

/* Generate new userspace keys when executing a new process */
void
ptrauth_exec(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	arc4rand(&td->td_md.md_ptrauth_user, sizeof(td->td_md.md_ptrauth_user),
	    0);
}

/*
 * Copy the user keys when creating a new userspace thread until it's clear
 * how the ABI expects the various keys to be assigned.
 */
void
ptrauth_copy_thread(struct thread *new_td, struct thread *orig_td)
{
	if (!enable_ptrauth)
		return;

	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
	    sizeof(new_td->td_md.md_ptrauth_user));
}

/* Generate new kernel keys when executing a new kernel thread */
void
ptrauth_thread_alloc(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	arc4rand(&td->td_md.md_ptrauth_kern, sizeof(td->td_md.md_ptrauth_kern),
	    0);
}

/*
 * Load the userspace keys. We can't use WRITE_SPECIALREG as we need
 * to set the architecture extension.
 */
/*
 * Write the Lo/Hi halves of one PAC key register pair from the given key
 * slot ("kern" or "user") of td->td_md.  Expects a `struct thread *td` in
 * scope.  No barrier here: callers add an isb() (or rely on eret) as
 * appropriate for their context.
 */
#define LOAD_KEY(space, name, reg)					\
__asm __volatile(							\
    "msr "__XSTRING(MRS_REG_ALT_NAME(reg ## KeyLo_EL1))", %0 \n"	\
    "msr "__XSTRING(MRS_REG_ALT_NAME(reg ## KeyHi_EL1))", %1 \n"	\
    :: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo),		\
       "r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))

/* Install the (currently all-zero) kernel APIA key for thread0. */
void
ptrauth_thread0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* TODO: Generate a random number here */
	memset(&td->td_md.md_ptrauth_kern, 0,
	    sizeof(td->td_md.md_ptrauth_kern));
	LOAD_KEY(kern, apia, APIA);
	/*
	 * No isb as this is called before ptrauth_start so can rely on
	 * the instruction barrier there.
	 */
}

/*
 * Enable pointer authentication. After this point userspace and the kernel
 * can sign return addresses, etc. based on their keys
 *
 * This assumes either all or no CPUs have pointer authentication support,
 * and, if supported, all CPUs have the same algorithm.
 */
void
ptrauth_start(void)
{
	uint64_t sctlr;

	if (!enable_ptrauth)
		return;

	/* Enable pointer authentication */
	sctlr = READ_SPECIALREG(sctlr_el1);
	sctlr |= SCTLR_PTRAUTH;
	WRITE_SPECIALREG(sctlr_el1, sctlr);
	isb();
}

#ifdef SMP
/* Give a booting secondary CPU a temporary key and turn PAC on. */
void
ptrauth_mp_start(uint64_t cpu)
{
	struct ptrauth_key start_key;
	uint64_t sctlr;

	if (!enable_ptrauth)
		return;

	/*
	 * We need a key until we call sched_throw, however we don't have
	 * a thread until then. Create a key just for use within
	 * init_secondary and whatever it calls. As init_secondary never
	 * returns it is safe to do so from within it.
	 *
	 * As it's only used for a short length of time just use the cpu
	 * as the key.
	 */
	start_key.pa_key_lo = cpu;
	start_key.pa_key_hi = ~cpu;

	__asm __volatile(
	    ".arch_extension pauth \n"
	    "msr "__XSTRING(APIAKeyLo_EL1_REG)", %0 \n"
	    "msr "__XSTRING(APIAKeyHi_EL1_REG)", %1 \n"
	    ".arch_extension nopauth \n"
	    :: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));

	/* Enable pointer authentication */
	sctlr = READ_SPECIALREG(sctlr_el1);
	sctlr |= SCTLR_PTRAUTH;
	WRITE_SPECIALREG(sctlr_el1, sctlr);
	isb();
}
#endif

/* Context switch: install the incoming thread's kernel APIA key. */
struct thread *
ptrauth_switch(struct thread *td)
{
	if (enable_ptrauth) {
		LOAD_KEY(kern, apia, APIA);
		isb();
	}

	return (td);
}

/* Called when we are exiting userspace and entering the kernel */
void
ptrauth_exit_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	LOAD_KEY(kern, apia, APIA);
	isb();
}

/* Called when we are about to exit the kernel and enter userspace */
void
ptrauth_enter_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* All five user keys are per-thread; load each of them. */
	LOAD_KEY(user, apia, APIA);
	LOAD_KEY(user, apib, APIB);
	LOAD_KEY(user, apda, APDA);
	LOAD_KEY(user, apdb, APDB);
	LOAD_KEY(user, apga, APGA);
	/*
	 * No isb as this is called from the exception handler so can rely
	 * on the eret instruction to be the needed context synchronizing event.
	 */
}