xref: /linux/arch/powerpc/include/asm/mmu_context.h (revision ec8a42e7343234802b9054874fe01810880289ce)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
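
/*
 * Defining these hooks as macros of their own names tells
 * <asm-generic/mmu_context.h> (included at the end of this file) not
 * to provide its default no-op implementations.
 */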
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
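
/*
 * Hedged usage sketch (illustrative only, not compiled): a VFIO-style
 * caller preregisters a range of userspace memory, translates within
 * it, then drops its reference. program_tce() is a made-up
 * placeholder; only the mm_iommu_* calls are real.
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (mm_iommu_new(mm, ua, entries, &mem))	// pin + preregister
 *		return -EFAULT;
 *	if (!mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa))
 *		program_tce(hpa);			// hypothetical consumer
 *	mm_iommu_put(mm, mem);				// drop the reference
 */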
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
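/*
 * Book3S-64 picks the context-switch flavour at run time: radix
 * switches by updating the PID register, hash by reloading the SLB
 * for the incoming mm.
 */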
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

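/*
 * On hash, one context id covers MAX_EA_BITS_PER_CONTEXT bits of
 * effective address. Accesses above that range need an extra id,
 * allocated per range and remembered in context.extended_id[].
 */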
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
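
/*
 * Hedged usage sketch (illustrative only): a fault path handling a
 * large effective address is expected to pair the helpers above
 * roughly like this:
 *
 *	if (need_extra_context(mm, ea)) {
 *		if (alloc_extended_context(mm, ea) < 0)
 *			return -EFAULT;		// no context id available
 *	}
 */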

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-Book3S-64 platforms should never call this */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count in
	 * order to force TLB invalidations to be global, so that they
	 * propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
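
/*
 * Hedged pairing sketch (illustrative only): a coprocessor driver
 * attaching to an mm is expected to bracket the attachment, e.g.:
 *
 *	mm_context_add_copro(mm);	// TLBIs become global
 *	...				// program the nMMU / device
 *	mm_context_remove_copro(mm);	// flush, then drop the count
 */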

/*
 * The vas_windows counter tracks the number of open windows in the mm
 * context. During context switch, use this counter to clear the
 * foreign real address mapping (CP_ABORT) for any thread / process
 * that intends to use COPY/PASTE. When a process closes all windows,
 * disable CP_ABORT, which is expensive to run.
 *
 * For user context, register a copro so that TLBIs are seen by the
 * nest MMU. mm_context_add/remove_vas_window() are used only for user
 * space windows.
 */
static inline void mm_context_add_vas_window(struct mm_struct *mm)
{
	atomic_inc(&mm->context.vas_windows);
	mm_context_add_copro(mm);
}

static inline void mm_context_remove_vas_window(struct mm_struct *mm)
{
	int v;

	mm_context_remove_copro(mm);
	v = atomic_dec_if_positive(&mm->context.vas_windows);

	/* Detect imbalance between add and remove */
	WARN_ON(v < 0);
}
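
/*
 * Hedged usage sketch (illustrative only): a VAS window open path and
 * its matching close path are expected to stay balanced, e.g.:
 *
 *	mm_context_add_vas_window(current->mm);		// on window open
 *	...
 *	mm_context_remove_vas_window(current->mm);	// on window close
 */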
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

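/* Irq-safe wrapper: disable interrupts around switch_mm_irqs_off(). */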
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
#ifdef CONFIG_PPC_BOOK3E_64
#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = NULL;
}
#endif

extern void arch_exit_mmap(struct mm_struct *mm);

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
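	/*
	 * mm->context.vdso points at the vDSO text; the data page sits
	 * one page below it, so the whole mapping starts at
	 * vdso - PAGE_SIZE. If an unmap takes out that first page,
	 * forget the vdso so it is no longer used (e.g. for signal
	 * trampolines).
	 */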
	unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;

	if (start <= vdso_base && vdso_base < end)
		mm->context.vdso = NULL;
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#include <asm-generic/mmu_context.h>

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */