/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
void __init hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
#endif
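
/*
 * Illustrative usage (not part of this header): on the hash MMU, a
 * caller mapping an effective address beyond the range covered by the
 * first context id can pair the two helpers above, roughly like this.
 * This is a hedged sketch only; the exact call site and error code are
 * hypothetical and vary by path.
 *
 *	if (need_extra_context(mm, ea)) {
 *		if (alloc_extended_context(mm, ea) < 0)
 *			return -ENOMEM;
 *	}
 */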

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-Book3S-64 platforms should never call this */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	VM_WARN_ON_ONCE(atomic_read(&mm->context.active_cpus) <= 0);
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global, so that
	 * they propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * radix__flush_all_mm() to determine the scope (local/global)
	 * of the TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		radix__flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
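
/*
 * Illustrative usage (not part of this header): a hypothetical
 * coprocessor driver brackets the period it uses an mm with the
 * add/remove calls above, e.g.:
 *
 *	mm_context_add_copro(mm);
 *	... attach the copro to mm's context and do work ...
 *	mm_context_remove_copro(mm);
 *
 * On radix, the flush in mm_context_remove_copro() runs before
 * 'copros' is decremented, so no invalidation can become local while
 * the nMMU may still hold stale entries.
 */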

/*
 * The vas_windows counter shows the number of open windows in the mm
 * context. During context switch, use this counter to clear the
 * foreign real address mapping (CP_ABORT) for the thread / process
 * that intends to use COPY/PASTE. When a process closes all windows,
 * disable CP_ABORT, which is expensive to run.
 *
 * For user context, register a copro so that TLBIs are seen by the
 * nest MMU. mm_context_add/remove_vas_window() are used only for user
 * space windows.
 */
static inline void mm_context_add_vas_window(struct mm_struct *mm)
{
	atomic_inc(&mm->context.vas_windows);
	mm_context_add_copro(mm);
}

static inline void mm_context_remove_vas_window(struct mm_struct *mm)
{
	int v;

	mm_context_remove_copro(mm);
	v = atomic_dec_if_positive(&mm->context.vas_windows);

	/* Detect imbalance between add and remove */
	WARN_ON(v < 0);
}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end);
#else
static inline void do_h_rpt_invalidate_prt(unsigned long pid,
					   unsigned long lpid,
					   unsigned long type,
					   unsigned long pg_sizes,
					   unsigned long start,
					   unsigned long end) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
#ifdef CONFIG_PPC_BOOK3E_64
#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = NULL;
}
#endif

extern void arch_exit_mmap(struct mm_struct *mm);

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	unsigned long vdso_base = (unsigned long)mm->context.vdso;

	if (start <= vdso_base && vdso_base < end)
		mm->context.vdso = NULL;
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#include <asm-generic/mmu_context.h>

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
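
/*
 * Illustrative usage (not part of this header): a hypothetical VAS
 * window driver would pair the window helpers over the lifetime of a
 * user-space window:
 *
 *	open:  mm_context_add_vas_window(current->mm);
 *	...
 *	close: mm_context_remove_vas_window(current->mm);
 *
 * The pairing keeps the copro count balanced, so CP_ABORT handling
 * and global TLBIs are switched off once the last window closes.
 */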