/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#ifdef CONFIG_32BIT
#define VA_BITS		32
#else
#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif

#define PTRS_PER_PGD	(PAGE_SIZE >> PTRLOG)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> PTRLOG)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> PTRLOG)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> PTRLOG)
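
/*
 * Worked example (illustrative only): with the default 16KB pages
 * (PAGE_SHIFT == 14) and 64-bit pointers (PTRLOG == 3), each table
 * holds PAGE_SIZE >> PTRLOG == 2048 entries, so each level translates
 * PAGE_SHIFT - PTRLOG == 11 address bits.  Three levels then give
 * PMD_SHIFT == 25, PGDIR_SHIFT == 36 and VA_BITS == 47.
 */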

#ifdef CONFIG_32BIT
#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
#else
#define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)
#endif

#ifndef __ASSEMBLER__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

#ifdef CONFIG_32BIT

#define VMALLOC_START	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define VMALLOC_END	(FIXADDR_START - (2 * PAGE_SIZE))

#endif

#ifdef CONFIG_64BIT

#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE	(((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE	0
#endif

#define VMALLOC_START	MODULES_END

#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define VMEMMAP_ALIGN	max(PMD_SIZE, MAX_FOLIO_VMEMMAP_ALIGN)
#define vmemmap		((struct page *)(ALIGN(VMALLOC_END, VMEMMAP_ALIGN)))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define KFENCE_AREA_START	(VMEMMAP_END + 1)
#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
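
/*
 * Rough sketch of the resulting 64-bit layout (illustrative only; the
 * concrete addresses depend on vm_map_base, cpu_vabits and the
 * page-table configuration):
 *
 *	MODULES_VADDR = vm_map_base + PCI_IOSIZE + 2 pages
 *	MODULES_END   = MODULES_VADDR + SZ_256M (== VMALLOC_START)
 *	VMALLOC_END   leaves a PMD_SIZE gap plus room for vmemmap and KFENCE
 *	vmemmap       = ALIGN(VMALLOC_END, VMEMMAP_ALIGN), VMEMMAP_SIZE bytes
 *	KFENCE pool   = VMEMMAP_END + 1, KFENCE_AREA_SIZE bytes
 */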

#endif

#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	WRITE_ONCE(*p4d, p4dval);
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

static inline void set_pud(pud_t *pud, pud_t pudval)
{
	WRITE_ONCE(*pud, pudval);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
}

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return pmd_val(pmd) & ~PAGE_MASK;
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}
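
/*
 * A regular pmd holds the address of a PTE table, so "none" means
 * "points at invalid_pte_table".  A huge (leaf) pmd instead holds page
 * attribute bits, so presence is judged from _PAGE_PRESENT/_PAGE_PROTNONE
 * there; _PAGE_PRESENT_INVALID additionally keeps a temporarily
 * invalidated THP counted as present (see pmd_mkinvalid() below).
 */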

static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
	WRITE_ONCE(*pmd, pmdval);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
}

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
#define pud_init pud_init
extern void pmd_init(void *addr);
#define pmd_init pmd_init
extern void kernel_pte_init(void *addr);
#define kernel_pte_init kernel_pte_init

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of 32bit swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <------------ offset -------------> E <- type -> <-- zeroes -->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT.
 *
 * Format of 64bit swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <--- type ---> <---------- zeroes ---------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */

#define __SWP_TYPE_BITS		(IS_ENABLED(CONFIG_32BIT) ? 5 : 7)
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_TYPE_SHIFT	(IS_ENABLED(CONFIG_32BIT) ? 8 : 16)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT);
	return pte;
}
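
/*
 * Worked example (illustrative only, 64-bit layout): mk_swap_pte(3, 0x10)
 * yields (3UL << 16) | (0x10UL << 24), i.e. type 3 in bits 16..22 and
 * offset 0x10 from bit 24 upward, with bit 23 left free for the
 * exclusive marker and the low 16 bits (including _PAGE_PRESENT) zero;
 * the result is thus !pte_none() && !pte_present(), as required of a
 * swap PTE.
 */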

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })

#define __swp_entry_to_pte(x)	__pte((x).val)
#define __swp_entry_to_pmd(x)	__pmd((x).val | _PAGE_HUGE)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);

#ifdef CONFIG_SMP
	if (pte_val(pteval) & _PAGE_GLOBAL)
		DBAR(0b11000); /* o_wrw = 0b11000 */
#endif
}
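
/*
 * Note: the DBAR above is only issued for global PTEs, which back
 * mappings shared by all cores (e.g. kernel mappings); the intent
 * appears to be to make the new entry observable to other cores'
 * page-table walkers without waiting for an explicit TLB flush.
 * Non-global (per-mm) entries rely on the normal TLB maintenance
 * paths instead.
 */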

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	pte_val(pte) &= _PAGE_GLOBAL;
	set_pte(ptep, pte);
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
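
/*
 * Note on the dirty-bit scheme (as inherited from MIPS): the hardware
 * never sets dirty/accessed bits itself.  _PAGE_DIRTY is the bit that
 * actually makes a mapping hardware-writable, while _PAGE_MODIFIED is
 * the software dirty bit.  Hence pte_mkdirty()/pte_mkwrite_novma() only
 * set _PAGE_DIRTY once the PTE is both written to (_PAGE_MODIFIED) and
 * writable (_PAGE_WRITE), and pte_wrprotect() clears it again so the
 * next write traps and can be tracked.
 */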

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) |= _PAGE_MODIFIED;

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
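
/*
 * pte_modify() keeps the bits in _PAGE_CHG_MASK (the pfn plus retained
 * attribute state) and replaces the rest from the new protection.  The
 * dirty state is latched into the software _PAGE_MODIFIED bit first,
 * presumably because _PAGE_DIRTY itself is not preserved across the
 * mask-and-replace when the new protection drops write permission.
 */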

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, so generic_pmdp_establish() is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
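
/*
 * In a huge (leaf) pmd the global attribute lives at a different bit
 * position (_PAGE_HGLOBAL) than in a regular pte (_PAGE_GLOBAL), so
 * pmd_mkhuge() above relocates any global bit upward by
 * _PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT before setting _PAGE_HUGE.
 */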

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_val(pmd) & _PAGE_DIRTY)
		pmd_val(pmd) |= _PAGE_MODIFIED;

	return __pmd((pmd_val(pmd) & _HPAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_HPAGE_CHG_MASK));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}
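
/*
 * pmd_mkinvalid() clears the bits the hardware honours (_PAGE_PRESENT,
 * _PAGE_VALID) so that no new TLB entry can be formed, while
 * _PAGE_PRESENT_INVALID keeps pmd_present() returning true; this lets
 * generic code (e.g. pmdp_invalidate() during THP splitting) treat the
 * entry as still present while it is being modified.
 */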

/*
 * The generic pmdp_huge_get_and_clear() uses a version of pmd_clear()
 * with a different prototype, so provide our own.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = pmdp_get(pmdp);

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)

626  * We provide our own get_unmapped area to cope with the virtual aliasing
627  * constraints placed on us by the cache architecture.
628  */
629 #define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLER__ */

#endif /* _ASM_PGTABLE_H */