/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))
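/*
 * Each table level is one page of 8-byte entries, so each level
 * translates (PAGE_SHIFT - 3) bits of virtual address.  As a worked
 * example: with 16KB pages (PAGE_SHIFT == 14) and
 * CONFIG_PGTABLE_LEVELS == 3, PMD_SHIFT == 25, PGDIR_SHIFT == 36 and
 * VA_BITS == 47, i.e. 128TB of virtual address space.
 */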

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)
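/*
 * TASK_SIZE64 can be smaller than a single PGDIR_SIZE span, in which
 * case the division yields 0; clamp to at least one user PGD entry.
 */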

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE	(((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE	0
#endif
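/*
 * Pool sizing mirrors the generic KFENCE layout (assumption): every
 * object takes a data page plus a guard page, hence the "* 2", with a
 * little extra room around the edges of the pool.
 */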

#define VMALLOC_START	MODULES_END

#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define KFENCE_AREA_START	(VMEMMAP_END + 1)
#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
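
/*
 * Resulting layout inside vm_map_base, from low to high addresses:
 * PCI I/O ports, a two-page NULL-trap gap, modules, vmalloc, vmemmap,
 * then the KFENCE pool.
 */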

#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
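/*
 * READ_ONCE() prevents the compiler from tearing or re-reading page
 * table entries that other CPUs may be updating concurrently.
 */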

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	WRITE_ONCE(*p4d, p4dval);
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

static inline void set_pud(pud_t *pud, pud_t pudval)
{
	WRITE_ONCE(*pud, pudval);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
}

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}
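
/*
 * The _PAGE_HUGE path above also treats a pmd carrying
 * _PAGE_PRESENT_INVALID as present, so a huge mapping that has been
 * invalidated (e.g. for a THP split) still reads as present while it
 * is temporarily unusable by the hardware walker.
 */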

static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
	WRITE_ONCE(*pmd, pmdval);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
}

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
#define pud_init pud_init
extern void pmd_init(void *addr);
#define pmd_init pmd_init
extern void kernel_pte_init(void *addr);
#define kernel_pte_init kernel_pte_init

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <--- type ---> <---------- zeroes ---------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0x7f)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
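
/*
 * Round-trip example with hypothetical values: __swp_entry(3, 0x10)
 * stores (3 << 16) | (0x10 << 24), so __swp_type() recovers 3 from
 * bits 16-22 and __swp_offset() recovers 0x10 from bits 24 and up,
 * leaving bit 23 free for the E (exclusive) marker.
 */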

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);

#ifdef CONFIG_SMP
	if (pte_val(pteval) & _PAGE_GLOBAL)
		DBAR(0b11000); /* o_wrw = 0b11000 */
#endif
}
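
/*
 * The barrier is presumably needed so that, on SMP, a store to a
 * global (kernel) PTE becomes visible to page table walks on other
 * cores before any dependent access; user PTEs are serialized by the
 * usual TLB flush paths instead.
 */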

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	pte_val(pte) &= _PAGE_GLOBAL;
	set_pte(ptep, pte);
}
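
/*
 * _PAGE_GLOBAL is deliberately preserved: as on MIPS, the even/odd
 * pages sharing one TLB entry pair are assumed to need matching G
 * bits, so the bit is kept even in an otherwise empty PTE.
 */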

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
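
/*
 * Dirtiness is tracked with two bits: _PAGE_DIRTY backs the TLB's D
 * bit (a store faults while it is clear), while _PAGE_MODIFIED is
 * software-only bookkeeping.  pte_mkdirty() sets _PAGE_DIRTY only for
 * writable PTEs, so a write to a write-protected page still faults
 * and can be caught.
 */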

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

static inline int pte_devmap(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DEVMAP); }
static inline pte_t pte_mkdevmap(pte_t pte)	{ pte_val(pte) |= _PAGE_DEVMAP; return pte; }

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}
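
/*
 * A PTE is "accessible" if a remote TLB may still cache it: always
 * true for present PTEs, and for PROT_NONE PTEs only while a deferred
 * TLB flush is still pending on the mm.
 */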

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
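/*
 * The loop above runs once per PTE because __update_tlb() takes a
 * single address; generic code calls this after installing a batch of
 * PTEs with set_ptes().
 */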
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
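
/*
 * Note how pmd_mkhuge() relocates an existing G bit from the per-page
 * position (_PAGE_GLOBAL) to the huge-page position (_PAGE_HGLOBAL),
 * where the hardware expects it for huge TLB entries.
 */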

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_DEVMAP;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}
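
/*
 * pmd_mkinvalid() is what the generic pmdp_invalidate() applies while
 * splitting a huge mapping: _PAGE_PRESENT_INVALID keeps pmd_present()
 * true (see above) even though the entry is no longer usable by the
 * hardware walker.
 */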

/*
 * The generic version of pmdp_huge_get_and_clear() uses a pmd_clear()
 * with a different prototype, so provide our own.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = pmdp_get(pmdp);

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pud_devmap(pud)		(0)
#define pgd_devmap(pgd)		(0)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * We provide our own get_unmapped_area() to cope with the virtual
 * aliasing constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */