xref: /linux/arch/um/include/asm/pgtable-4level.h (revision 7f71507851fc7764b36a3221839607d3a45c2025)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2003 PathScale Inc
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_4LEVEL_H
#define __UM_PGTABLE_4LEVEL_H

#include <asm-generic/pgtable-nop4d.h>

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */

#define PGDIR_SHIFT	39
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
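
/* Each top-level (pgd) entry therefore covers 1UL << 39 bytes, i.e. 512 GiB
 * of virtual address space.
 */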

/* PUD_SHIFT determines the size of the area a third-level page table can
 * map
 */

#define PUD_SHIFT	30
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
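
/* Each pud entry therefore covers 1UL << 30 bytes, i.e. 1 GiB. */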

/* PMD_SHIFT determines the size of the area a second-level page table can
 * map
 */

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
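
/* Each pmd entry therefore covers 1UL << 21 bytes, i.e. 2 MiB; the last
 * level then maps individual pages (4 KiB, assuming the usual PAGE_SHIFT
 * of 12).
 */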

/*
 * entries per page directory level
 */

#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 512
#define PTRS_PER_PUD 512
#define PTRS_PER_PGD 512
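
/*
 * Illustration (not used by this header): with 512 entries per level and
 * 4 KiB pages, the generic index helpers split a virtual address roughly as
 *
 *	pgd index = (addr >> 39) & 511		bits 47..39
 *	pud index = (addr >> 30) & 511		bits 38..30
 *	pmd index = (addr >> 21) & 511		bits 29..21
 *	pte index = (addr >> 12) & 511		bits 20..12
 *	offset    =  addr & (PAGE_SIZE - 1)	bits 11..0
 *
 * i.e. 4 * 9 + 12 = 48 bits of virtual address space.
 */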

#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
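
/* USER_PTRS_PER_PGD is a ceiling division: any top-level slot that is only
 * partially covered by TASK_SIZE still gets its own pgd entry.
 */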

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pud_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pgd_val(e))

#define pud_none(x)	(!(pud_val(x) & ~_PAGE_NEEDSYNC))
#define pud_bad(x)	((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pud_present(x)	(pud_val(x) & _PAGE_PRESENT)
#define pud_populate(mm, pud, pmd) \
	set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))

#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))

#define p4d_none(x)	(!(p4d_val(x) & ~_PAGE_NEEDSYNC))
#define p4d_bad(x)	((p4d_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define p4d_present(x)	(p4d_val(x) & _PAGE_PRESENT)
#define p4d_populate(mm, p4d, pud) \
	set_p4d(p4d, __p4d(_PAGE_TABLE + __pa(pud)))

#define set_p4d(p4dptr, p4dval) (*(p4dptr) = (p4dval))
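
/*
 * Illustration only: the populate helpers above are what the generic
 * pud_alloc()/pmd_alloc() paths use to hook a freshly allocated lower-level
 * table into its parent, roughly
 *
 *	pud = pud_alloc(mm, p4d, addr);    allocates a pud table and
 *	                                   p4d_populate()s it into *p4d
 *	pmd = pmd_alloc(mm, pud, addr);    allocates a pmd table and
 *	                                   pud_populate()s it into *pud
 *
 * with _PAGE_TABLE supplying the present/user/rw bits a table entry needs.
 */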

static inline int pgd_needsync(pgd_t pgd)
{
	return pgd_val(pgd) & _PAGE_NEEDSYNC;
}

static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEEDSYNC; }
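
/*
 * Background note (not spelled out in this file): UML page tables are only
 * walked in software, so entries flagged _PAGE_NEEDSYNC still have to be
 * propagated to the host address space by the TLB/flush code, which marks
 * them up to date again afterwards.
 */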

#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

static inline void pud_clear(pud_t *pud)
{
	set_pud(pud, __pud(_PAGE_NEEDSYNC));
}

static inline void p4d_clear(p4d_t *p4d)
{
	set_p4d(p4d, __p4d(_PAGE_NEEDSYNC));
}

#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
#define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & PAGE_MASK))

#define p4d_page(p4d) phys_to_page(p4d_val(p4d) & PAGE_MASK)
#define p4d_pgtable(p4d) ((pud_t *) __va(p4d_val(p4d) & PAGE_MASK))
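
/* pud_pgtable()/p4d_pgtable() return the kernel virtual address (via __va())
 * of the next-level table whose physical address is stored in the entry;
 * pud_page()/p4d_page() return the corresponding struct page.
 */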

static inline unsigned long pte_pfn(pte_t pte)
{
	return phys_to_pfn(pte_val(pte));
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;
	phys_t phys = pfn_to_phys(page_nr);

	pte_set_val(pte, phys, pgprot);
	return pte;
}
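
/* pfn_pte() is the inverse of pte_pfn(): e.g. pfn_pte(pte_pfn(pte), newprot)
 * rebuilds a pte for the same page frame with different protection bits.
 */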

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
}

#endif