/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, each pte is initialized to 0.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */
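
/*
 * Editor's illustration of the arithmetic above (a hedged sketch, kept
 * under "#if 0" so it is never compiled; the helper name is hypothetical
 * and not part of this header): with 4kB pages, every count in the
 * comment falls out of the table sizes directly.
 */
#if 0
static inline void __example_pgd_geometry(void)
{
	/* two 4K pgd pages of 8-byte entries: (2 * 4096) / 8 == 1024 */
	int pgd_ptrs = (2 * 4096) / 8;
	/* one 4K pmd page: 4096 / 8 == 512 entries, and likewise for ptes */
	int pmd_ptrs = 4096 / 8;
	int pte_ptrs = 4096 / 8;

	/* 1024 * 512 * 512 ptes, each mapping a 4kB page: 2^40 bytes */
	WARN_ON(((1UL * pgd_ptrs * pmd_ptrs * pte_ptrs) << 12) != (1UL << 40));
}
#endif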


/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

# ifdef __PAGETABLE_PUD_FOLDED
# define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3))
# endif
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT + PUD_TABLE_ORDER - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * For 4kB page size we use a 3 level page tree and an 8kB (order-1) pgd,
 * which permits mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level, but at
 * the moment there is no need for it.
 *
 * For 64kB page size we use a 2 level page tree for a total of 42 bits
 * of virtual address space.
 */
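
/*
 * Note that in the configurations below where a level is folded,
 * PUD_TABLE_ORDER (or PMD_TABLE_ORDER) deliberately expands to an
 * undeclared identifier.  Any code that nevertheless tries to size or
 * allocate such a table fails to build, with an error message naming
 * aieeee_attempt_to_allocate_pud/pmd, rather than silently misbehaving.
 */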
#ifdef CONFIG_PAGE_SIZE_4KB
# ifdef CONFIG_MIPS_VA_BITS_48
#  define PGD_TABLE_ORDER	0
#  define PUD_TABLE_ORDER	0
# else
#  define PGD_TABLE_ORDER	1
#  define PUD_TABLE_ORDER	aieeee_attempt_to_allocate_pud
# endif
#define PMD_TABLE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_TABLE_ORDER		0
#define PUD_TABLE_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_TABLE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_TABLE_ORDER		1
#else
#define PGD_TABLE_ORDER		0
#endif
#define PUD_TABLE_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_TABLE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_TABLE_ORDER		0
#define PUD_TABLE_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_TABLE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_TABLE_ORDER		0
#define PUD_TABLE_ORDER		aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_TABLE_ORDER		0
#else
#define PMD_TABLE_ORDER		aieeee_attempt_to_allocate_pmd
#endif
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_TABLE_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PUD	((PAGE_SIZE << PUD_TABLE_ORDER) / sizeof(pud_t))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
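
/*
 * Editor's worked example of the shift arithmetic (a sketch, kept under
 * "#if 0" so it is never built; if enabled it would also need the
 * generic static_assert() from <linux/build_bug.h>): for the common
 * 4kB-page, 3-level configuration (PAGE_SHIFT == 12, PGD_TABLE_ORDER
 * == 1, PMD_TABLE_ORDER == 0) the definitions above give the 40 bits
 * of virtual address space quoted earlier:
 *
 *   PMD_SHIFT    == 12 + (12 - 3)      == 21   (2MB per pmd entry)
 *   PGDIR_SHIFT  == 21 + (12 + 0 - 3)  == 30   (1GB per pgd entry)
 *   PTRS_PER_PGD == (4096 << 1) / 8    == 1024 (1024 * 1GB == 2^40)
 */
#if 0
static_assert(PMD_SHIFT == 21);
static_assert(PGDIR_SHIFT == 30);
static_assert(PTRS_PER_PGD == 1024);
#endif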

#define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))
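
/*
 * Editor's sketch (illustrative, never compiled; the helper name is
 * hypothetical): VMALLOC_END is MAP_BASE plus the smaller of what a
 * fully populated page-table tree can span and what the CPU implements
 * (1UL << cpu_vmbits), with the top 4GB of that range held back.
 */
#if 0
static inline bool __example_is_vmalloc_addr(unsigned long addr)
{
	/* the two pages below VMALLOC_START stay unmapped so that NULL
	 * pointer dereferences through xuseg still trap reliably */
	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
#endif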

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULES_VADDR	CKSSEG
#define MODULES_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * For 4-level pagetables we define these ourselves; for 3-level the
 * definitions are supplied by <asm-generic/pgtable-nopud.h> and for
 * 2-level by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	if (unlikely(p4d_val(p4d) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

#define p4d_phys(p4d)		virt_to_phys((void *)p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#define p4d_index(address)	(((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#endif
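
/*
 * Editor's sketch of how a walker would use the predicates above in the
 * 4-level configuration (illustrative, never compiled; the function
 * name is hypothetical and pud_index() comes from <linux/pgtable.h>):
 */
#if 0
static inline pud_t *__example_p4d_walk(p4d_t *p4dp, unsigned long addr)
{
	/* an empty slot holds the address of invalid_pud_table, so
	 * p4d_none() is a pointer compare, not a test against zero */
	if (p4d_none(*p4dp) || p4d_bad(*p4dp))
		return NULL;

	return p4d_pgtable(*p4dp) + pud_index(addr);
}
#endif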

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves, for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_leaf(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return pmd_val(pmd) & _PAGE_PRESENT;
#endif

	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
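
/*
 * Editor's note with a small sketch (illustrative, never compiled; the
 * helper name is hypothetical): because an empty pmd points at
 * invalid_pte_table instead of being zero, "none" and "present" are
 * genuinely different tests, and a huge-page pmd is recognised via
 * _PAGE_HUGE before the pointer comparisons apply.
 */
#if 0
static inline int __example_pmd_usable(pmd_t *pmdp)
{
	return pmd_present(*pmdp) && !pmd_bad(*pmdp);
}
#endif
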
#ifndef __PAGETABLE_PMD_FOLDED

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pte_pfn(x)		((unsigned long)((x).pte >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
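
/*
 * Editor's sketch of the pfn conversions (illustrative, never compiled;
 * the helper name is hypothetical): pfn_pte() packs the page frame
 * number above PFN_PTE_SHIFT and ORs in the protection bits, and
 * pte_pfn() recovers the pfn unchanged.
 */
#if 0
static inline void __example_pfn_roundtrip(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	WARN_ON(pte_pfn(pte) != pfn);
	WARN_ON(pte_page(pte) != pfn_to_page(pfn));
}
#endif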

#ifndef __PAGETABLE_PMD_FOLDED
static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
extern void pmd_init(void *addr);
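
/*
 * Editor's sketch of what these initialisers do (illustrative, never
 * compiled; the real implementations live under arch/mips/mm/ and this
 * loop is only an assumption about their effect): every slot of a fresh
 * table is pointed at the matching invalid_*_table, so the "none"
 * predicates above hold for it.
 */
#if 0
static inline void __example_pmd_init(void *addr)
{
	pmd_t *p = addr;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_val(p[i]) = (unsigned long)invalid_pte_table;
}
#endif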

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <-- type ---> <---------- zeroes ----------->
 *
 *  E is the exclusive marker that is not stored in swap entries.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24);

	return pte;
}

#define __swp_type(x)		(((x).val >> 16) & 0x7f)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	(1 << 23)
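
/*
 * Editor's sketch of the encode/decode round trip (illustrative, never
 * compiled; the type and offset values are made up): type and offset
 * survive conversion through the PTE representation, and neither field
 * overlaps the exclusive-marker bit 23.
 */
#if 0
static inline void __example_swap_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(5, 0x1234);
	pte_t pte = __swp_entry_to_pte(entry);

	WARN_ON(__swp_type(__pte_to_swp_entry(pte)) != 5);
	WARN_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x1234);
	WARN_ON(pte_val(pte) & _PAGE_SWP_EXCLUSIVE);
}
#endif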

#endif /* _ASM_PGTABLE_64_H */