xref: /linux/arch/arm64/include/asm/pgtable.h (revision 3623d138213aedf0e2c89720f6a3cd0e164cb310)
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 *
 * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
 *	(rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel VA space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
 *	fixed mappings and modules
 */
#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
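
/*
 * Worked example (illustrative values, not a build-time guarantee): with
 * 4KB pages (PAGE_SHIFT == 12), VA_BITS == 48 and a 64-byte struct page,
 * this is ALIGN((1UL << 36) * 64, PUD_SIZE), i.e. 4TB of virtual space
 * reserved for the struct page array.
 */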

#ifndef CONFIG_KASAN
#define VMALLOC_START		(VA_START)
#else
#include <asm/kasan.h>
#define VMALLOC_START		(KASAN_SHADOW_END + SZ_64K)
#endif

#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define VMEMMAP_START		(VMALLOC_END + SZ_64K)
#define vmemmap			((struct page *)VMEMMAP_START - \
				 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
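
/*
 * With this layout, sparsemem-vmemmap's __pfn_to_page(pfn) reduces to a
 * plain array index, vmemmap + pfn; the SECTION_ALIGN_DOWN() bias makes
 * the first pfn of the linear map correspond to VMEMMAP_START.
 */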

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)

#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
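
/*
 * Example (generic protection_map semantics, noted here for illustration):
 * a private PROT_READ|PROT_WRITE mmap() selects __P011 == PAGE_COPY, which
 * lacks PTE_WRITE, so the mapping starts out read-only and the first write
 * faults into copy-on-write; the shared equivalent selects __S011 ==
 * PAGE_SHARED, which carries PTE_WRITE from the start.
 */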

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))

#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only need the barriers if the new pte is valid and kernel;
	 * otherwise TLB maintenance or update_mmu_cache() provide the
	 * necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid(pte)) {
		if (pte_sw_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
		if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
			__sync_icache_dcache(pte, addr);
	}

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}
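
/*
 * Example, reading the table above: a clean, writable pte is installed
 * with PTE_RDONLY set. Without DBM the first write faults and the handler
 * sets the software PTE_DIRTY bit; with DBM the hardware clears PTE_RDONLY
 * directly, which pte_dirty() then observes via pte_hw_dirty().
 */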

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#ifdef CONFIG_ARM64_64K_PAGES
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
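
/*
 * Illustrative sketch (hypothetical helper, not used by the kernel):
 * compose the accessors above to build a young, dirty, writable user pte
 * from a struct page and install it. set_pte_at() then fixes up
 * PTE_RDONLY according to the DBM table and handles I-cache sync.
 */
static inline void __example_install_pte(struct mm_struct *mm,
					 unsigned long addr, pte_t *ptep,
					 struct page *page)
{
	pte_t pte = mk_pte(page, PAGE_SHARED);	/* user, writable, non-exec */

	pte = pte_mkyoung(pte);			/* set the access flag */
	pte = pte_mkdirty(pte);			/* set the sw dirty bit */
	set_pte_at(mm, addr, ptep, pte);
}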

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline pud_t *pgd_page_vaddr(pgd_t pgd)
{
	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
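
/*
 * Illustrative walk (hypothetical helper, assuming a 4-level configuration
 * with all levels present and no concurrent modification): resolve the pte
 * mapping a kernel virtual address by descending the levels above.
 */
static inline pte_t *__example_walk_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);
}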

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
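
/*
 * Example: an mprotect() from read-write to read-only reaches this as
 * pte_modify(pte, PAGE_READONLY); the mask swaps the permission bits
 * while PTE_DIRTY, PTE_AF and the output address are preserved, and a
 * just-hardware-dirtied pte is latched into PTE_DIRTY first.
 */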

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pteval_t pteval;
	unsigned int tmp, res;

	asm volatile("//	ptep_test_and_clear_young\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
	"	and	%0, %0, %4		// clear PTE_AF\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

	return res;
}
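
/*
 * Logically equivalent (but non-atomic) C, for illustration only; the
 * LL/SC loop above is what makes the read-modify-write safe against
 * concurrent hardware and software updates:
 *
 *	pte_t old = *ptep;
 *	*ptep = pte_mkold(old);
 *	return pte_young(old);
 */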

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pteval_t old_pteval;
	unsigned int tmp;

	asm volatile("//	ptep_get_and_clear\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	stxr	%w1, xzr, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

	return __pte(old_pteval);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	ptep_set_wrprotect\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
	: "cc");
}
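
/*
 * Non-atomic sketch of the transfer above, for illustration only:
 *
 *	pteval_t v = pte_val(*ptep);
 *	if (!(v & PTE_RDONLY))			// hardware-dirty
 *		v |= PTE_DIRTY | PTE_RDONLY;	// latch into the sw bit
 *	v &= ~PTE_WRITE;			// drop write/DBM permission
 *	pte_val(*ptep) = v;
 */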

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
#endif	/* CONFIG_ARM64_HW_AFDBM */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
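
/*
 * Worked example: __swp_entry(3, 0x1234) encodes to
 * (3 << 2) | (0x1234 << 8) == 0x12340c. Bits 0-1 remain zero, so the
 * resulting pte has neither PTE_VALID nor PTE_PROT_NONE set and
 * pte_present() is false, as required of swap entries.
 */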

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */