/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#ifndef __ASSEMBLER__
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge);
#endif

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/*
 * _PAGE_CHG_MASK is the mask of bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
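
/*
 * For example, pte_modify() below relies on _PAGE_CHG_MASK to replace the
 * protection bits of a PTE while keeping the PFN and the DIRTY, ACCESSED
 * and SPECIAL software state (sketch with illustrative names):
 *
 *	new = __pte((pte_val(old) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 */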

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

#ifndef __ASSEMBLER__

#include <linux/page_table_check.h>

extern int icache_44x_need_flush;

#ifndef pte_huge_size
static inline unsigned long pte_huge_size(pte_t pte)
{
	return PAGE_SIZE;
}
#endif

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef pte_update
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	unsigned long sz;
	unsigned long pdsize;
	int i;

	if (new == old)
		return old;

	if (huge)
		sz = pte_huge_size(__pte(old));
	else
		sz = PAGE_SIZE;

	/*
	 * Work out how much address space a single entry covers at the
	 * page table level backing this mapping, so we know how many
	 * consecutive entries make up the mapping.
	 */
	if (sz < PMD_SIZE)
		pdsize = PAGE_SIZE;
	else if (sz < PUD_SIZE)
		pdsize = PMD_SIZE;
	else if (sz < P4D_SIZE)
		pdsize = PUD_SIZE;
	else if (sz < PGDIR_SIZE)
		pdsize = P4D_SIZE;
	else
		pdsize = PGDIR_SIZE;

	/* Replicate the new value into every entry, stepping the RPN as we go */
	for (i = 0; i < sz / pdsize; i++, p++) {
		*p = __pte(new);
		if (new)
			new += (unsigned long long)(pdsize / PAGE_SIZE) << PTE_RPN_SHIFT;
	}

	if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;

	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	return old;
}
#endif
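
/*
 * Typical use, as in the helpers below: clear or set individual bits and
 * act on the returned old value, e.g. (sketch)
 *
 *	old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 *	young = (old & _PAGE_ACCESSED) != 0;
 *
 * Passing clr == ~0UL clears the whole PTE, as ptep_get_and_clear() and
 * pte_clear() do.
 */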

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG

#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
#endif
#define __HAVE_ARCH_PTEP_SET_WRPROTECT

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));

	page_table_check_pte_clear(mm, addr, old_pte);

	return old_pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

/* Set the dirty and/or accessed bits atomically in a Linux PTE */
#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, 0, set, huge);

	flush_tlb_page(vma, address);
}
#endif
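
/*
 * Sketch of the intended call pattern (interpretation, not from the
 * original header): the arch's ptep_set_access_flags() is expected to
 * call this after a fault that only needs to upgrade an existing PTE,
 * roughly
 *
 *	if (changed)
 *		__ptep_set_access_flags(vma, ptep, entry, address, psize);
 *
 * Only the DIRTY/ACCESSED/RW/EXEC bits of 'entry' are applied, and the
 * TLB entry for 'address' is then flushed so the update takes effect.
 */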

/* Generic accessors to PTE bits */
#ifndef pte_mkwrite_novma
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif

#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}
#endif

#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}
#endif
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/*
 * Don't just check for any non-zero bits in _PAGE_READ, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_READ.  Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_read
static inline bool pte_read(pte_t pte)
{
	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
}
#endif
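
/*
 * Sketch of why the masked compare matters (assuming, per the comment
 * above, that _PAGE_READ is made of more than one bit on book3e/PTE_64BIT):
 *
 *	pte_val(pte) & _PAGE_READ		    - true if any of the bits is set
 *	(pte_val(pte) & _PAGE_READ) == _PAGE_READ   - true only if all of them are
 *
 * so PAGE_KERNEL_X, which carries _PAGE_BAP_SR but not _PAGE_BAP_UR, is
 * not reported as readable by pte_read().
 */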

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_READ bit.
	 * Write mappings also have _PAGE_READ set.
	 */
	if (!pte_present(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
251 
252 static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
253 {
254 	return pte_present(pte) && !is_kernel_addr(addr);
255 }
256 
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even though PTEs can be unsigned long long, a PFN is always an
 * unsigned long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot));
}
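
/*
 * Worked example (illustrative values only): pfn_pte() simply places the
 * PFN in the RPN field and ORs in the protection bits, i.e.
 *
 *	pte_val(pfn_pte(0x1234, PAGE_KERNEL))
 *		== ((pte_basic_t)0x1234 << PTE_RPN_SHIFT) | pgprot_val(PAGE_KERNEL)
 */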

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors; it's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * 32-bit with 64-bit PTE: we can just store, as long as we do the
	 * two halves in the right order with a barrier in between.
	 * In the percpu case, we also fall back to the simple update below.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			mbar\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte3 = ptep->pte2 = ptep->pte1 = ptep->pte = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}
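
/*
 * Note (interpretation, not from the original comment): in the 32-bit,
 * 64-bit-PTE path above, the half of the PTE carrying the valid and
 * protection bits is presumably the one stored last, with the barrier in
 * between, so that a concurrent walker can never observe a PTE that is
 * marked valid but still carries a stale physical page number.
 */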

/*
 * Macros to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_NO_CACHE))

#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
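
/*
 * Usage sketch (illustrative, not from the original header): these helpers
 * adjust only the cacheability bits of an existing protection. A driver
 * mapping device registers might, for instance, use
 *
 *	prot = pgprot_noncached(PAGE_KERNEL);
 *
 * which clears _PAGE_CACHE_CTL and then sets _PAGE_NO_CACHE | _PAGE_GUARDED
 * while leaving the permission bits of PAGE_KERNEL untouched.
 */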

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* __ASSEMBLER__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */
389