// SPDX-License-Identifier: GPL-2.0+
/*
 * PowerPC Memory Protection Keys management
 *
 * Copyright 2017, Ram Pai, IBM Corporation.
 */

#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/setup.h>
#include <linux/pkeys.h>
#include <linux/of_fdt.h>

int num_pkey;		/* Max number of pkeys supported */
/*
 * Keys marked in the reservation list cannot be allocated by userspace.
 */
u32 reserved_allocation_mask __ro_after_init;

/* Bits set for the initially allocated keys */
static u32 initial_allocation_mask __ro_after_init;

/*
 * Even if we allocate keys with sys_pkey_alloc(), we need to make sure
 * other threads still find access denied when using the same keys.
 * Hence the defaults: AMR denies read/write for every key, IAMR denies
 * execute for every key (0b01 in each 2-bit field).
 */
static u64 default_amr = ~0x0UL;
static u64 default_iamr = 0x5555555555555555UL;
u64 default_uamor __ro_after_init;
/*
 * Key used to implement PROT_EXEC mmap. Denies READ/WRITE.
 * We pick key 2 because 0 is a special key and 1 is reserved as per ISA.
 */
static int execute_only_key = 2;
static bool pkey_execute_disable_supported;


#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
#define AMR_WR_BIT 0x2UL
#define IAMR_EX_BIT 0x1UL
#define PKEY_REG_BITS (sizeof(u64) * 8)
/*
 * Shift (from the right) of a pkey's 2-bit field within AMR/UAMOR
 * (1-bit field within IAMR). Key 0 occupies the two most significant bits.
 */
#define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey+1) * AMR_BITS_PER_PKEY))

/*
 * Flat-device-tree scan callback: fetch "ibm,processor-storage-keys"
 * from the first "cpu" node found and store it through @data.
 *
 * Returns 1 to terminate the scan once the property is read, 0 to keep
 * scanning other nodes.
 */
static int __init dt_scan_storage_keys(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int *pkeys_total = (int *) data;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-storage-keys", NULL);
	if (!prop)
		return 0;
	*pkeys_total = be32_to_cpu(prop[0]);
	return 1;
}

/*
 * Determine how many storage keys the platform supports.
 *
 * Returns 0 when pkeys are unavailable (Radix translation, or no device
 * tree property and no bare-metal P8/P9 fallback applies), otherwise the
 * key count clamped to what the arch-neutral VM flag encoding can hold.
 */
static int scan_pkey_feature(void)
{
	int ret;
	int pkeys_total = 0;

	/*
	 * Pkey is not supported with Radix translation.
	 */
	if (early_radix_enabled())
		return 0;

	ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total);
	if (ret == 0) {
		/*
		 * Let's assume 32 pkeys on P8/P9 bare metal, if it's not defined
		 * by the device tree. We make this exception since some versions
		 * of skiboot forgot to expose this property on power8/9.
		 */
		if (!firmware_has_feature(FW_FEATURE_LPAR)) {
			unsigned long pvr = mfspr(SPRN_PVR);

			if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
			    PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
				pkeys_total = 32;
		}
	}

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * Adjust the upper limit, based on the number of bits supported by
	 * arch-neutral code.
	 */
	pkeys_total = min_t(int, pkeys_total,
			    ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1));
#endif
	return pkeys_total;
}

/*
 * Boot-time pkey setup: probe the device tree for the key count, set the
 * MMU feature bit, compute the default AMR/IAMR/UAMOR values (reserving
 * key 0, key 1, the execute-only key and all keys beyond num_pkey), and
 * program UAMOR on the boot CPU.
 */
void __init pkey_early_init_devtree(void)
{
	int pkeys_total, i;

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral
	 * generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE.
	 * Ensure that the bits are distinct.
	 */
	BUILD_BUG_ON(PKEY_DISABLE_EXECUTE &
		     (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));

	/*
	 * pkey_to_vmflag_bits() assumes that the pkey bits are contiguous
	 * in the vmaflag. Make sure that is really the case.
	 */
	BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) +
		     __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)
		     != (sizeof(u64) * BITS_PER_BYTE));
#endif
	/*
	 * Only P7 and above supports SPRN_AMR update with MSR[PR] = 1
	 */
	if (!early_cpu_has_feature(CPU_FTR_ARCH_206))
		return;

	/* scan the device tree for pkey feature */
	pkeys_total = scan_pkey_feature();
	if (!pkeys_total)
		goto out;

	/* Allow all keys to be modified by default */
	default_uamor = ~0x0UL;

	cur_cpu_spec->mmu_features |= MMU_FTR_PKEY;

	/*
	 * The device tree cannot be relied upon to indicate support for
	 * execute_disable support. Instead we use a PVR check.
	 */
	if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p))
		pkey_execute_disable_supported = false;
	else
		pkey_execute_disable_supported = true;

#ifdef CONFIG_PPC_4K_PAGES
	/*
	 * The OS can manage only 8 pkeys due to its inability to represent them
	 * in the Linux 4K PTE. Mark all other keys reserved.
	 */
	num_pkey = min(8, pkeys_total);
#else
	num_pkey = pkeys_total;
#endif

	if (unlikely(num_pkey <= execute_only_key) || !pkey_execute_disable_supported) {
		/*
		 * Insufficient number of keys to support
		 * execute only key. Mark it unavailable.
		 */
		execute_only_key = -1;
	} else {
		/*
		 * Mark the execute_only_pkey as not available for
		 * user allocation via pkey_alloc.
		 */
		reserved_allocation_mask |= (0x1 << execute_only_key);

		/*
		 * Deny READ/WRITE for execute_only_key.
		 * Allow execute in IAMR.
		 */
		default_amr |= (0x3ul << pkeyshift(execute_only_key));
		default_iamr &= ~(0x1ul << pkeyshift(execute_only_key));

		/*
		 * Clear the uamor bits for this key.
		 */
		default_uamor &= ~(0x3ul << pkeyshift(execute_only_key));
	}

	/*
	 * Allow access for only key 0. And prevent any other modification.
	 */
	default_amr &= ~(0x3ul << pkeyshift(0));
	default_iamr &= ~(0x1ul << pkeyshift(0));
	default_uamor &= ~(0x3ul << pkeyshift(0));
	/*
	 * key 0 is special in that we want to consider it an allocated
	 * key which is preallocated. We don't allow changing AMR bits
	 * w.r.t key 0. But one can pkey_free(key0)
	 */
	initial_allocation_mask |= (0x1 << 0);

	/*
	 * key 1 is recommended not to be used. PowerISA(3.0) page 1015,
	 * programming note.
	 */
	reserved_allocation_mask |= (0x1 << 1);
	default_uamor &= ~(0x3ul << pkeyshift(1));

	/*
	 * Prevent the usage of OS reserved keys. Update UAMOR
	 * for those keys. Also mark the rest of the bits in the
	 * 32 bit mask as reserved.
	 */
	for (i = num_pkey; i < 32 ; i++) {
		reserved_allocation_mask |= (0x1 << i);
		default_uamor &= ~(0x3ul << pkeyshift(i));
	}
	/*
	 * Prevent the allocation of reserved keys too.
	 */
	initial_allocation_mask |= reserved_allocation_mask;

	pr_info("Enabling pkeys with max key count %d\n", num_pkey);
out:
	/*
	 * Setup uamor on boot cpu
	 */
	mtspr(SPRN_UAMOR, default_uamor);

	return;
}

/* Read the Authority Mask Register (read/write permission per key). */
static inline u64 read_amr(void)
{
	return mfspr(SPRN_AMR);
}

static inline void write_amr(u64 value)
{
	mtspr(SPRN_AMR, value);
}

/*
 * Read the Instruction Authority Mask Register; reads as 0 (no execute
 * denial) when execute-disable is unsupported on this CPU.
 */
static inline u64 read_iamr(void)
{
	if (!likely(pkey_execute_disable_supported))
		return 0x0UL;

	return mfspr(SPRN_IAMR);
}

/* Write IAMR; silently a no-op when execute-disable is unsupported. */
static inline void write_iamr(u64 value)
{
	if (!likely(pkey_execute_disable_supported))
		return;

	mtspr(SPRN_IAMR, value);
}

#ifdef CONFIG_PPC_MEM_KEYS
/* Initialize a new mm's pkey state from the boot-computed defaults. */
void pkey_mm_init(struct mm_struct *mm)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;
	mm_pkey_allocation_map(mm) = initial_allocation_mask;
	mm->context.execute_only_pkey = execute_only_key;
}

/* Replace @pkey's 2-bit read/write field in AMR with @init_bits. */
static inline void init_amr(int pkey, u8 init_bits)
{
	u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
	u64 old_amr = read_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));

	write_amr(old_amr | new_amr_bits);
}

/* Replace @pkey's execute-disable bit in IAMR with @init_bits. */
static inline void init_iamr(int pkey, u8 init_bits)
{
	u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey));
	u64 old_iamr = read_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));

	write_iamr(old_iamr | new_iamr_bits);
}

/*
 * Set the access rights in AMR IAMR and UAMOR registers for @pkey to that
 * specified in @init_val.
 *
 * Returns 0 on success, -EINVAL if the key is locked down by UAMOR or if
 * PKEY_DISABLE_EXECUTE is requested without hardware support.
 */
int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
				unsigned long init_val)
{
	u64 new_amr_bits = 0x0ul;
	u64 new_iamr_bits = 0x0ul;
	u64 pkey_bits, uamor_pkey_bits;

	/*
	 * Check whether the key is disabled by UAMOR.
	 */
	pkey_bits = 0x3ul << pkeyshift(pkey);
	uamor_pkey_bits = (default_uamor & pkey_bits);

	/*
	 * Both the bits in UAMOR corresponding to the key should be set
	 */
	if (uamor_pkey_bits != pkey_bits)
		return -EINVAL;

	if (init_val & PKEY_DISABLE_EXECUTE) {
		if (!pkey_execute_disable_supported)
			return -EINVAL;
		new_iamr_bits |= IAMR_EX_BIT;
	}
	init_iamr(pkey, new_iamr_bits);

	/* Set the bits we need in AMR: */
	if (init_val & PKEY_DISABLE_ACCESS)
		new_amr_bits |= AMR_RD_BIT | AMR_WR_BIT;
	else if (init_val & PKEY_DISABLE_WRITE)
		new_amr_bits |= AMR_WR_BIT;

	init_amr(pkey, new_amr_bits);
	return 0;
}

/* Snapshot the current CPU's AMR/IAMR into @thread (context switch out). */
void thread_pkey_regs_save(struct thread_struct *thread)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	/*
	 * TODO: Skip saving registers if @thread hasn't used any keys yet.
	 */
	thread->amr = read_amr();
	thread->iamr = read_iamr();
}

/*
 * Install @new_thread's AMR/IAMR, skipping the SPR write when the value
 * matches what @old_thread already left in the register.
 */
void thread_pkey_regs_restore(struct thread_struct *new_thread,
			      struct thread_struct *old_thread)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (old_thread->amr != new_thread->amr)
		write_amr(new_thread->amr);
	if (old_thread->iamr != new_thread->iamr)
		write_iamr(new_thread->iamr);
}

/* Reset @thread and the live SPRs to the boot-computed default values. */
void thread_pkey_regs_init(struct thread_struct *thread)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	thread->amr = default_amr;
	thread->iamr = default_iamr;

	write_amr(default_amr);
	write_iamr(default_iamr);
}

/* Return the mm's execute-only pkey (-1 when unavailable). */
int execute_only_pkey(struct mm_struct *mm)
{
	return mm->context.execute_only_pkey;
}

/* True if @vma is execute-only and tagged with the execute-only pkey. */
static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
{
	/* Do this check first since the vm_flags should be hot */
	if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
		return false;

	return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
}

/*
 * This should only
 * be called for *plain* mprotect calls.
 *
 * Returns the pkey that @vma should use after an mprotect(@prot): the
 * execute-only key for PROT_EXEC-only requests (when available), 0 to
 * fall back to the default key, or the vma's current key otherwise.
 */
int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot,
				  int pkey)
{
	/*
	 * If the currently associated pkey is execute-only, but the requested
	 * protection is not execute-only, move it back to the default pkey.
	 */
	if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC))
		return 0;

	/*
	 * The requested protection is execute-only. Hence let's use an
	 * execute-only pkey.
	 */
	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(vma->vm_mm);
		if (pkey > 0)
			return pkey;
	}

	/* Nothing to override. */
	return vma_pkey(vma);
}

/*
 * Check the current CPU's AMR/IAMR to decide whether @pkey permits the
 * requested kind of access: IAMR for execute, AMR for read/write.
 */
static bool pkey_access_permitted(int pkey, bool write, bool execute)
{
	int pkey_shift;
	u64 amr;

	pkey_shift = pkeyshift(pkey);
	if (execute)
		return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));

	amr = read_amr();
	if (write)
		return !(amr & (AMR_WR_BIT << pkey_shift));

	return !(amr & (AMR_RD_BIT << pkey_shift));
}

/*
 * Permission check for the pkey embedded in a PTE. Always grants access
 * when the pkey MMU feature is absent.
 */
bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return true;

	return pkey_access_permitted(pte_to_pkey_bits(pte), write, execute);
}

/*
 * We only want to enforce protection keys on the current thread because we
 * effectively have no access to AMR/IAMR for other threads or any way to tell
 * which AMR/IAMR in a threaded process we could use.
 *
 * So do not enforce things if the VMA is not from the current mm, or if we are
 * in a kernel thread.
 */
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return true;
	/*
	 * Do not enforce our key-permissions on a foreign vma.
	 */
	if (foreign || vma_is_foreign(vma))
		return true;

	return pkey_access_permitted(vma_pkey(vma), write, execute);
}

/* Copy pkey allocation state from @oldmm to @mm on fork. */
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
}

#endif /* CONFIG_PPC_MEM_KEYS */