/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
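
/*
 * Worked example (illustrative only, not used by the kernel): with 52-bit
 * PAs (which imply 64K pages), the pte's low address field only reaches
 * bit 47, so PA bits [51:48] are stashed in pte bits [15:12] instead. For
 * a hypothetical physical address 0x000f0000_12340000:
 *
 *	pa[47:16] -> pte[47:16]		(PTE_ADDR_LOW)
 *	pa[51:48] -> pte[15:12]		(PTE_ADDR_HIGH, via pa >> 36)
 *
 * __phys_to_pte_val() ORs in (pa >> 36) and masks with PTE_ADDR_MASK;
 * __pte_to_phys() undoes it by shifting PTE_ADDR_HIGH left by 36.
 */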

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
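
/*
 * Worked example (illustrative): with 4K pages, CONT_PTE_SIZE is 64K, so
 * for addr = 0x12345000 the next contiguous-range boundary is 0x12350000.
 * The "- 1" on both sides of the comparison keeps the macros correct when
 * the boundary wraps to 0 at the top of the address space: 0 - 1 is
 * ULONG_MAX, which is never below (end) - 1, so (end) is returned rather
 * than the wrapped boundary.
 */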

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check) other than user execute-only which do not have the
 * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only synchronise if the new pte is valid and maps kernel memory;
	 * otherwise TLB maintenance or update_mmu_cache() provide the
	 * necessary barriers.
	 */
	if (pte_valid_not_user(pte))
		dsb(ishst);
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
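
/*
 * Illustrative walk-through (not kernel code): take a clean, writable pte
 * (PTE_RDONLY=1, PTE_WRITE=1, PTE_DIRTY=0). A hardware write clears
 * PTE_RDONLY, so pte_hw_dirty() becomes true even though PTE_DIRTY was
 * never set. A bare pte_wrprotect() would then destroy that dirty state
 * by setting PTE_RDONLY again, which is why ptep_set_wrprotect() below
 * calls pte_mkdirty() first whenever pte_hw_dirty() is set, migrating the
 * information into the software PTE_DIRTY bit before making the entry
 * read-only.
 */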

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

#define __HAVE_ARCH_PTE_SAME
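/*
 * PTE_RDONLY is masked out of present ptes before comparing: with
 * hardware DBM the CPU can clear it behind our back on a write, so two
 * ptes differing only in PTE_RDONLY still describe the same mapping.
 */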
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;

	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
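
/*
 * Worked example (illustrative): with 4K pages and PMD_SHIFT == 21,
 * HPAGE_SIZE is 2MB and HUGETLB_PAGE_ORDER is 21 - 12 = 9, i.e. a huge
 * page spans 512 base pages. With 64K pages, PMD_SHIFT is 29, giving
 * 512MB huge pages of 8192 base pages each.
 */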

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
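
/*
 * Typical use (illustrative sketch, not part of this header): a driver
 * mapping a write-combined framebuffer to userspace might do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	err = remap_pfn_range(vma, vma->vm_start, pfn,
 *			      vma->vm_end - vma->vm_start,
 *			      vma->vm_page_prot);
 *
 * which swaps the memory type for MT_NORMAL_NC while leaving the
 * permission bits of the original prot value intact.
 */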

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd))
		dsb(ishst);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline void pte_unmap(pte_t *pte) { }

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud))
		dsb(ishst);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, pgd);
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
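
/*
 * Illustrative sketch (assumes CONFIG_PGTABLE_LEVELS == 4; not kernel
 * code): walking to the pte that maps a kernel virtual address with the
 * offset helpers defined above:
 *
 *	pgd_t *pgdp = pgd_offset_k(addr);
 *	pud_t *pudp = pud_offset(pgdp, addr);
 *	pmd_t *pmdp = pmd_offset(pudp, addr);
 *	pte_t *ptep = pte_offset_kernel(pmdp, addr);
 *	pte_t pte = READ_ONCE(*ptep);
 *
 * A real walker must also check p?d_none()/p?d_bad() at each level and
 * stop early on section mappings (pud_sect()/pmd_sect()).
 */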

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
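
/*
 * Worked example (illustrative): __swp_entry(3, 0x1000) yields
 *
 *	(3 << 2) | (0x1000 << 8) = 0x10000c
 *
 * Bits 0-1 and bit 58 stay clear, so the resulting pte is neither valid
 * nor PROT_NONE and always faults; __swp_type() and __swp_offset() mask
 * and shift the fields back out.
 */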

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

static inline void pgtable_cache_init(void) { }

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

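/*
 * As with the pte conversion macros above, PA bits [51:48] do not fit in
 * the TTBR baddr field, so they are placed in TTBR bits [5:2]; the
 * ">> 46" (48 - 2) moves them into position before masking with
 * TTBR_BADDR_MASK_52.
 */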
#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */