--- pkeys.c (05909cd9a0c8811731b38697af13075e8954314f)
+++ pkeys.c (227ae625522c65c4535cabe407f47abc058585ed)
 // SPDX-License-Identifier: GPL-2.0+
 /*
  * PowerPC Memory Protection Keys management
  *
  * Copyright 2017, Ram Pai, IBM Corporation.
  */
 
 #include <asm/mman.h>
[... 75 unchanged lines hidden ...]
 			unsigned long pvr = mfspr(SPRN_PVR);
 
 			if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
 			    PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
 				pkeys_total = 32;
 		}
 	}
 
+#ifdef CONFIG_PPC_MEM_KEYS
 	/*
 	 * Adjust the upper limit, based on the number of bits supported by
 	 * arch-neutral code.
 	 */
 	pkeys_total = min_t(int, pkeys_total,
 			    ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1));
+#endif
 	return pkeys_total;
 }
 
 void __init pkey_early_init_devtree(void)
 {
 	int pkeys_total, i;
 
+#ifdef CONFIG_PPC_MEM_KEYS
 	/*
 	 * We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral
 	 * generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE.
 	 * Ensure that the bits a distinct.
 	 */
 	BUILD_BUG_ON(PKEY_DISABLE_EXECUTE &
 		     (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
 
 	/*
 	 * pkey_to_vmflag_bits() assumes that the pkey bits are contiguous
 	 * in the vmaflag. Make sure that is really the case.
 	 */
 	BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) +
 		     __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)
 		     != (sizeof(u64) * BITS_PER_BYTE));
-
+#endif
 	/*
 	 * Only P7 and above supports SPRN_AMR update with MSR[PR] = 1
 	 */
 	if (!early_cpu_has_feature(CPU_FTR_ARCH_206))
 		return;
 
 	/* scan the device tree for pkey feature */
 	pkeys_total = scan_pkey_feature();
[... 89 unchanged lines hidden ...]
 	/*
 	 * Setup uamor on boot cpu
 	 */
 	mtspr(SPRN_UAMOR, default_uamor);
 
 	return;
 }
 
-void pkey_mm_init(struct mm_struct *mm)
-{
-	if (!mmu_has_feature(MMU_FTR_PKEY))
-		return;
-	mm_pkey_allocation_map(mm) = initial_allocation_mask;
-	mm->context.execute_only_pkey = execute_only_key;
-}
-
 static inline u64 read_amr(void)
 {
 	return mfspr(SPRN_AMR);
 }
 
 static inline void write_amr(u64 value)
 {
 	mtspr(SPRN_AMR, value);
[... 10 unchanged lines hidden ...]
 static inline void write_iamr(u64 value)
 {
 	if (!likely(pkey_execute_disable_supported))
 		return;
 
 	mtspr(SPRN_IAMR, value);
 }
 
+#ifdef CONFIG_PPC_MEM_KEYS
+void pkey_mm_init(struct mm_struct *mm)
+{
+	if (!mmu_has_feature(MMU_FTR_PKEY))
+		return;
+	mm_pkey_allocation_map(mm) = initial_allocation_mask;
+	mm->context.execute_only_pkey = execute_only_key;
+}
+
 static inline void init_amr(int pkey, u8 init_bits)
 {
 	u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
 	u64 old_amr = read_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));
 
 	write_amr(old_amr | new_amr_bits);
 }
[... 172 unchanged lines hidden ...]
 {
 	if (!mmu_has_feature(MMU_FTR_PKEY))
 		return;
 
 	/* Duplicate the oldmm pkey state in mm: */
 	mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
 	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
 }
+
+#endif /* CONFIG_PPC_MEM_KEYS */
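
The second BUILD_BUG_ON() in pkey_early_init_devtree() relies on a clz/popcount identity to assert that, once shifted down by VM_PKEY_SHIFT, the pkey bits of ARCH_VM_PKEY_FLAGS form one contiguous run starting at bit 0. A minimal userspace sketch of that identity, assuming a 64-bit unsigned long as the kernel check itself does; is_contiguous_from_bit0() is a name introduced here only for illustration:

/*
 * Sketch (not kernel code): for a non-zero 64-bit mask m,
 * __builtin_clzl(m) + __builtin_popcountl(m) == 64 holds exactly when
 * m == (1UL << k) - 1 for some k >= 1, i.e. the set bits are contiguous
 * and start at bit 0.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int is_contiguous_from_bit0(uint64_t m)
{
	/* Same expression the kernel check uses, as a runtime test. */
	return m && (__builtin_clzl(m) + __builtin_popcountl(m) == 64);
}

int main(void)
{
	assert(is_contiguous_from_bit0(0x7));	/* 0b00111: contiguous from bit 0 */
	assert(is_contiguous_from_bit0(0x1f));	/* 0b11111: contiguous from bit 0 */
	assert(!is_contiguous_from_bit0(0x6));	/* 0b00110: does not start at bit 0 */
	assert(!is_contiguous_from_bit0(0x14));	/* 0b10100: has a hole */
	printf("contiguity invariant holds for the test masks\n");
	return 0;
}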
|
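init_amr(), visible as context after the relocated pkey_mm_init(), updates one key's two-bit field in the AMR by clearing that field and OR-ing in the new bits. A rough sketch of that read-modify-write pattern, with a plain variable standing in for the SPR; demo_pkeyshift() is a hypothetical placement helper, not the kernel's pkeyshift():

#include <stdint.h>
#include <stdio.h>

#define DEMO_AMR_BITS_PER_PKEY	2	/* each key owns a two-bit field */

static uint64_t demo_amr;		/* stands in for SPRN_AMR */

/* Hypothetical stand-in; the real pkeyshift() is defined in the powerpc headers. */
static unsigned int demo_pkeyshift(int pkey)
{
	return pkey * DEMO_AMR_BITS_PER_PKEY;
}

/* Same shape as init_amr(): clear the key's 2-bit field, then set the new bits. */
static void demo_init_amr(int pkey, uint8_t init_bits)
{
	uint64_t new_amr_bits = ((uint64_t)init_bits & 0x3UL) << demo_pkeyshift(pkey);
	uint64_t old_amr = demo_amr & ~((uint64_t)0x3UL << demo_pkeyshift(pkey));

	demo_amr = old_amr | new_amr_bits;
}

int main(void)
{
	demo_init_amr(3, 0x3);	/* set both bits for key 3 */
	demo_init_amr(3, 0x1);	/* re-init key 3: previous bits are cleared first */
	demo_init_amr(5, 0x2);	/* set one bit for key 5 */
	printf("AMR image: 0x%016llx\n", (unsigned long long)demo_amr);
	return 0;
}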