/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

#define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
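
/*
 * A sketch of how the masks above decompose (drawn from the pkey code,
 * not quoted from the ISA): AMR/IAMR hold two bits per key, with key 0
 * occupying the two most significant bits. The low bit of each pair
 * denies reads (AMR) or execution (IAMR), the high bit denies writes,
 * so:
 *
 *	0x5555555555555555	pairs of 0b01: read-block every key
 *	0xaaaaaaaaaaaaaaaa	pairs of 0b10: write-block every key
 *
 * The values above instead leave key 3's pair clear (hence the leading
 * 0x54../0xa8..), because hash translation implements KUAP/KUEP by
 * mapping kernel memory with key 3, which must stay usable.
 */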
#ifdef __ASSEMBLY__

.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If the kuap feature is not enabled, do the mtspr
	 * only if the AMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If the kuep feature is not enabled, do the mtspr
	 * only if the IAMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: // skip_restore_amr
	/* No isync required, see kuap_user_restore() */
#endif
.endm

.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see kuap_kernel_restore().
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif

/*
 * if (pkey) {
 *
 *	save AMR -> stack;
 *	if (kuap) {
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *	if (from_user) {
 *		save IAMR -> stack;
 *		if (kuep) {
 *			KUEP_BLOCKED -> IAMR;
 *		}
 *	}
 *	return;
 * }
 *
 * if (kuap) {
 *	if (from_kernel) {
 *		save AMR -> stack;
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 * }
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * if both pkey and kuap are disabled, nothing to do
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * if pkey is disabled and we are entering from userspace,
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing AMR outside the kernel,
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * pkey is enabled, or pkey is disabled but we are entering
	 * from the kernel
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * update kernel AMR with AMR_KUAP_BLOCKED only
	 * if the KUAP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an
	 * interrupt.
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * if entering from the kernel we don't need to save IAMR
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f  // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * update kernel IAMR with AMR_KUEP_BLOCKED only
	 * if the KUEP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_PKEY

#include <asm/mmu.h>
#include <asm/ptrace.h>

/*
 * For a kernel thread that doesn't have thread.regs, return the
 * default AMR/IAMR values.
 */
static inline u64 current_thread_amr(void)
{
	if (current->thread.regs)
		return current->thread.regs->amr;
	return AMR_KUAP_BLOCKED;
}

static inline u64 current_thread_iamr(void)
{
	if (current->thread.regs)
		return current->thread.regs->iamr;
	return AMR_KUEP_BLOCKED;
}
#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

static inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}

	if (restore_amr || restore_iamr) {
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}
	/*
	 * No isync required here because we are about to rfi
	 * back to the previous context before any user accesses
	 * would be made, which is a CSI.
	 */
}

static inline void kuap_kernel_restore(struct pt_regs *regs,
				       unsigned long amr)
{
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		if (unlikely(regs->amr != amr)) {
			isync();
			mtspr(SPRN_AMR, regs->amr);
			/*
			 * No isync required here because we are about
			 * to rfi back to the previous context before
			 * any user accesses would be made, which is a
			 * CSI.
			 */
		}
	}
	/*
	 * No need to restore IAMR when returning to kernel space.
	 */
}

static inline unsigned long kuap_get_and_check_amr(void)
{
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		unsigned long amr = mfspr(SPRN_AMR);
		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
		return amr;
	}
	return 0;
}

#else /* !CONFIG_PPC_KUAP */

static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
}

static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0;
}

#endif /* CONFIG_PPC_KUAP */

#ifdef CONFIG_PPC_KUAP

static inline void kuap_check_amr(void)
{
	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}

/*
 * We support individually allowing read or write, but we don't support nesting
 * because that would require an expensive read/modify/write of the AMR.
 */

static inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
	 * cause restore_user_access to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static inline void set_kuap(unsigned long value)
{
	if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}
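
/*
 * A sketch of how get_kuap()/set_kuap() pair up in practice (the real
 * callers are allow_read_from_user() and friends in asm/kup.h; this is
 * the shape, not the literal code):
 *
 *	allow_read_from_user(uptr, size);	// set_kuap(amr | BLOCK_WRITE)
 *	ret = __get_user(x, uptr);
 *	prevent_read_from_user(uptr, size);	// set_kuap(AMR_KUAP_BLOCKED)
 *
 * Each open/close of the window is an isync; mtspr; isync sequence, and
 * the close always writes the fully blocked value rather than some saved
 * outer state - that is the "no nesting" rule described above get_kuap().
 */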
static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
				  bool is_write)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return false;
	/*
	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
	 * For hash this will be a key fault (DSISR_KEYFAULT).
	 */
	/*
	 * We do have an exception table entry, but accessing
	 * userspace results in a fault. This could be because we
	 * didn't unlock the AMR or the access is denied by userspace
	 * using a key value that blocks access. We are only interested
	 * in catching the use case of accessing without unlocking
	 * the AMR. Hence check for BLOCK_WRITE/READ against the AMR.
	 */
	if (is_write)
		return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;

	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	unsigned long thread_amr = 0;

	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (mmu_has_feature(MMU_FTR_PKEY))
		thread_amr = current_thread_amr();

	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(thread_amr);
	else
		BUILD_BUG();
}
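
/*
 * Because dir must be a compile-time constant, a call such as
 *
 *	allow_user_access(dst, src, size, KUAP_READ_WRITE);
 *
 * constant-folds down to a single set_kuap(thread_amr) with no runtime
 * test of dir, while a variable direction trips the BUILD_BUG_ON()
 * instead of silently generating the chain of tests. (dst/src/size are
 * illustrative; this implementation doesn't use the to/from/size
 * arguments.)
 */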
#else /* CONFIG_PPC_KUAP */

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */

static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size, unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */