/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

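/*
 * Each page-table level holds PAGE_SIZE / sizeof(pointer) entries, so every
 * level contributes (PAGE_SHIFT - PTRLOG) bits of virtual address on top of
 * the level below it.
 */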
#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

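/*
 * VA_BITS is the number of virtual address bits covered by the page tables:
 * on 64-bit, PGDIR_SHIFT plus the bits indexed within the PGD level itself.
 */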
#ifdef CONFIG_32BIT
#define VA_BITS		32
#else
#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif

#define PTRS_PER_PGD	(PAGE_SIZE >> PTRLOG)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> PTRLOG)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> PTRLOG)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> PTRLOG)

#ifdef CONFIG_32BIT
#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
#else
#define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#endif

#ifndef __ASSEMBLER__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)

#ifdef CONFIG_32BIT

#define VMALLOC_START	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define VMALLOC_END	(FIXADDR_START - (2 * PAGE_SIZE))

#endif

#ifdef CONFIG_64BIT

#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

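/*
 * Virtual space reserved for KFENCE: the pool itself is
 * (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 alternating object/guard pages,
 * plus two additional pages on top of that.
 */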
#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE	(((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE	0
#endif

#define VMALLOC_START	MODULES_END

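/*
 * The vmalloc area ends where the range covered by the page tables (or the
 * hardware virtual address range, whichever is smaller) ends, minus the
 * space reserved for vmemmap and the KFENCE area. With KASAN only half of
 * the virtual address range is used here (the remainder being reserved,
 * e.g. for the KASAN shadow).
 */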
#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define KFENCE_AREA_START	(VMEMMAP_END + 1)
#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)

#endif

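/*
 * Page-table entries are read and written with single, non-torn accesses:
 * they may be modified concurrently by other CPUs (and read by the hardware
 * page-table walker where one is present).
 */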
#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	WRITE_ONCE(*p4d, p4dval);
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

static inline void set_pud(pud_t *pud, pud_t pudval)
{
	WRITE_ONCE(*pud, pudval);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
}

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

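/*
 * A huge-page PMD is "present" based on its flag bits (including
 * _PAGE_PRESENT_INVALID, so an entry invalidated by pmd_mkinvalid() still
 * counts as present); a page-table PMD is present unless it points at
 * invalid_pte_table.
 */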
static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
	WRITE_ONCE(*pmd, pmdval);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
}

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
#define pud_init pud_init
extern void pmd_init(void *addr);
#define pmd_init pmd_init
extern void kernel_pte_init(void *addr);
#define kernel_pte_init kernel_pte_init

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of 32bit swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <------------ offset -------------> E <- type -> <-- zeroes -->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT.
 *
 * Format of 64bit swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <--- type ---> <---------- zeroes ---------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */

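/*
 * The "+ 1" in __SWP_OFFSET_SHIFT skips the bit holding the exclusive
 * marker (E), which sits between the type and offset fields.
 */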
#define __SWP_TYPE_BITS		(IS_ENABLED(CONFIG_32BIT) ? 5 : 7)
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_TYPE_SHIFT	(IS_ENABLED(CONFIG_32BIT) ? 8 : 16)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	pte_val(pte) = ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT);
	return pte;
}

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })

#define __swp_entry_to_pte(x)	__pte((x).val)
#define __swp_entry_to_pmd(x)	__pmd((x).val | _PAGE_HUGE)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);

#ifdef CONFIG_SMP
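	/*
	 * For global (kernel) mappings, order the PTE store so that other
	 * CPUs observe the updated entry.
	 */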
	if (pte_val(pteval) & _PAGE_GLOBAL)
		DBAR(0b11000); /* o_wrw = 0b11000 */
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	pte_val(pte) &= _PAGE_GLOBAL;
	set_pte(ptep, pte);
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

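/*
 * _PAGE_MODIFIED is the software dirty bit; _PAGE_DIRTY is the hardware
 * bit that actually permits writes, so it is only set while the PTE is
 * also writable. This way a write-protected page cannot be dirtied
 * silently by hardware.
 */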
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

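/*
 * A PTE may still be cached in a remote TLB if it is present, or if it is
 * PROT_NONE while a TLB flush for this mm is still pending.
 */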
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) |= _PAGE_MODIFIED;

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

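/*
 * Huge-page PMDs use _PAGE_HGLOBAL rather than _PAGE_GLOBAL, so move the
 * global bit to its huge-page position before setting _PAGE_HUGE.
 */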
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_val(pmd) & _PAGE_DIRTY)
		pmd_val(pmd) |= _PAGE_MODIFIED;

	return __pmd((pmd_val(pmd) & _HPAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_HPAGE_CHG_MASK));
}

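/*
 * pmd_mkinvalid() temporarily marks a huge PMD non-present (e.g. while it
 * is being split); _PAGE_PRESENT_INVALID keeps pmd_present() true so the
 * entry is not confused with a cleared one.
 */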
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = pmdp_get(pmdp);

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLER__ */

#endif /* _ASM_PGTABLE_H */