xref: /linux/arch/um/include/asm/pgtable-3level.h (revision 93df8a1ed6231727c5db94a80b1a6bd5ee67cec3)
1 /*
2  * Copyright 2003 PathScale Inc
3  * Derived from include/asm-i386/pgtable.h
4  * Licensed under the GPL
5  */
6 
7 #ifndef __UM_PGTABLE_3LEVEL_H
8 #define __UM_PGTABLE_3LEVEL_H
9 
10 #include <asm-generic/pgtable-nopud.h>
11 
/*
 * PGDIR_SHIFT determines what a third-level page table entry can map:
 * each top-level (pgd) entry covers 2^PGDIR_SHIFT bytes of virtual
 * address space — 1 GiB on 64-bit, 2 GiB on 32-bit.
 */

#ifdef CONFIG_64BIT
#define PGDIR_SHIFT	30
#else
#define PGDIR_SHIFT	31
#endif
/* Size of the region one pgd entry maps, and the mask for its base. */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
21 
/* PMD_SHIFT determines the size of the area a second-level page table can
 * map: each pmd entry covers 2^21 bytes (2 MiB).
 */

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
29 
/*
 * entries per page directory level
 */

#define PTRS_PER_PTE 512	/* 9 bits: 512 * 4 KiB pages = PMD_SIZE (2 MiB) */
#ifdef CONFIG_64BIT
#define PTRS_PER_PMD 512
#define PTRS_PER_PGD 512
#else
/* 32-bit: pmd and pgd each decode 10 bits (PGDIR_SHIFT 31, PMD_SHIFT 21).
 * NOTE(review): 1024 8-byte entries exceed one 4 KiB page — confirm the
 * pmd/pgd allocation size used on this configuration.
 */
#define PTRS_PER_PMD 1024
#define PTRS_PER_PGD 1024
#endif
42 
/* Number of pgd slots covering user space: TASK_SIZE rounded up to a
 * whole PGDIR_SIZE region.
 */
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
/* Lowest valid user-space address. */
#define FIRST_USER_ADDRESS	0UL
45 
/*
 * Report a malformed page-table entry: slot address and raw value.
 * NOTE(review): %016lx assumes the *_val() result is unsigned long;
 * verify this holds on the 32-bit (CONFIG_64BIT unset) configuration,
 * where 3-level entries may be wider than long.
 */
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pgd_val(e))
55 
/* A pud with only _PAGE_NEWPAGE set still counts as "none". */
#define pud_none(x)	(!(pud_val(x) & ~_PAGE_NEWPAGE))
/* A good pud must look like a kernel page-table entry (modulo _PAGE_USER). */
#define	pud_bad(x)	((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pud_present(x)	(pud_val(x) & _PAGE_PRESENT)
/* Point a pud slot at the pmd page table 'pmd' (stored as a physical address). */
#define pud_populate(mm, pud, pmd) \
	set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
61 
/*
 * Write a pud slot.  On 64-bit the entry is a 64-bit quantity and is
 * stored via set_64bit(); elsewhere a plain assignment suffices.
 */
#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
#else
#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
#endif
67 
68 static inline int pgd_newpage(pgd_t pgd)
69 {
70 	return(pgd_val(pgd) & _PAGE_NEWPAGE);
71 }
72 
/* Clear _PAGE_NEWPAGE in the entry.  NOTE(review): pgd is passed by value,
 * so only the local copy is modified — the caller's pgd slot is untouched;
 * verify against callers that this is the intended semantics.
 */
static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
74 
/*
 * Write a pmd slot.  Same scheme as set_pud(): a set_64bit() store of the
 * 64-bit entry on CONFIG_64BIT, a plain assignment otherwise.
 */
#ifdef CONFIG_64BIT
#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
#else
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#endif
80 
struct mm_struct;
/* Allocate one pmd page table; implemented elsewhere in the arch code. */
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
83 
/*
 * Clear a pud slot.  The entry is set to _PAGE_NEWPAGE rather than 0 —
 * presumably so later fault/flush handling knows the host mapping must be
 * re-established; confirm against the consumers of _PAGE_NEWPAGE.
 */
static inline void pud_clear (pud_t *pud)
{
	set_pud(pud, __pud(_PAGE_NEWPAGE));
}
88 
/* struct page / kernel virtual address of the pmd page a pud points at. */
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
			pmd_index(address))
95 
/* Page frame number of the page a pte maps.  The low flag bits of the raw
 * entry disappear in the phys->pfn shift — assumes all flags live below
 * PAGE_SHIFT (NOTE(review): confirm for any high software bits).
 */
static inline unsigned long pte_pfn(pte_t pte)
{
	return phys_to_pfn(pte_val(pte));
}
100 
101 static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
102 {
103 	pte_t pte;
104 	phys_t phys = pfn_to_phys(page_nr);
105 
106 	pte_set_val(pte, phys, pgprot);
107 	return pte;
108 }
109 
110 static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
111 {
112 	return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
113 }
114 
115 #endif
116 
117