/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#ifdef CONFIG_32BIT
#define VA_BITS		32
#else
#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif
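
/*
 * For illustration: with 16KB pages and 64-bit pointers (PAGE_SHIFT == 14,
 * PTRLOG == 3 -- a common configuration; other page sizes change these
 * numbers), each level translates PAGE_SHIFT - PTRLOG == 11 bits. Three
 * levels then give PMD_SHIFT == 25 (32MB per PMD entry), PGDIR_SHIFT == 36
 * (64GB per PGD entry) and VA_BITS == 47, i.e. a 128TB virtual space.
 */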

#define PTRS_PER_PGD	(PAGE_SIZE >> PTRLOG)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> PTRLOG)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> PTRLOG)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> PTRLOG)

#ifdef CONFIG_32BIT
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#else
#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)
#endif
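
/*
 * The 64-bit variant clamps to at least 1 so that a TASK_SIZE64 smaller
 * than PGDIR_SIZE still yields one user PGD entry.
 */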

#ifndef __ASSEMBLER__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

#ifdef CONFIG_32BIT

#define VMALLOC_START	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))

#ifdef CONFIG_HIGHMEM
#define VMALLOC_END	(PKMAP_BASE - (2 * PAGE_SIZE))
#else
#define VMALLOC_END	(FIXADDR_START - (2 * PAGE_SIZE))
#endif

#define PKMAP_BASE	(PKMAP_END - (PAGE_SIZE * LAST_PKMAP))
#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))

#endif

#ifdef CONFIG_64BIT

#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE	(((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE	0
#endif

#define VMALLOC_START	MODULES_END

#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define VMEMMAP_ALIGN	max(PMD_SIZE, MAX_FOLIO_VMEMMAP_ALIGN)
#define vmemmap		((struct page *)(ALIGN(VMALLOC_END, VMEMMAP_ALIGN)))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define KFENCE_AREA_START	(VMEMMAP_END + 1)
#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)

#endif
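
/*
 * Sketch of the resulting 64-bit layout, as derived from the macros above
 * (all areas live above vm_map_base):
 *
 *   MODULES_VADDR .. MODULES_END          256MB module area
 *   VMALLOC_START .. VMALLOC_END          vmalloc area (== MODULES_END ..)
 *   vmemmap       .. VMEMMAP_END          struct page array (VMEMMAP_ALIGN-aligned)
 *   KFENCE_AREA_START .. KFENCE_AREA_END  KFENCE pool (empty without KFENCE)
 */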

#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	WRITE_ONCE(*p4d, p4dval);
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

static inline void set_pud(pud_t *pud, pud_t pudval)
{
	WRITE_ONCE(*pud, pudval);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
}

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}
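
/*
 * A huge pmd can be temporarily invalidated (see pmd_mkinvalid() below)
 * while remaining logically present: _PAGE_PRESENT_INVALID keeps
 * pmd_present() true for that window.
 */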

static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
	WRITE_ONCE(*pmd, pmdval);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
}

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
#define pud_init pud_init
extern void pmd_init(void *addr);
#define pmd_init pmd_init
extern void kernel_pte_init(void *addr);
#define kernel_pte_init kernel_pte_init

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of 32bit swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <------------ offset -------------> E <- type -> <-- zeroes -->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zeroed bits include _PAGE_PRESENT.
 *
 * Format of 64bit swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <--- type ---> <---------- zeroes ---------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zeroed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */

#define __SWP_TYPE_BITS		(IS_ENABLED(CONFIG_32BIT) ? 5 : 7)
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_TYPE_SHIFT	(IS_ENABLED(CONFIG_32BIT) ? 8 : 16)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	pte_val(pte) = ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT);
	return pte;
}

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })

#define __swp_entry_to_pte(x)	__pte((x).val)
#define __swp_entry_to_pmd(x)	__pmd((x).val | _PAGE_HUGE)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
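
/*
 * Bit accounting for the encoding above: the type occupies __SWP_TYPE_BITS
 * bits starting at __SWP_TYPE_SHIFT, the offset sits at
 * __SWP_OFFSET_SHIFT == __SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1, and the
 * "+ 1" leaves a gap for the exclusive marker ("E" in the diagrams), i.e.
 * _PAGE_SWP_EXCLUSIVE. On 64-bit that is the type in bits 16..22, E in
 * bit 23 and the offset from bit 24 upwards.
 */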

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);

#ifdef CONFIG_SMP
	if (pte_val(pteval) & _PAGE_GLOBAL)
		DBAR(0b11000); /* o_wrw = 0b11000 */
#endif
}
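
/*
 * The DBAR hint 0b11000 ("o_wrw") appears to order the PTE store ahead
 * of subsequent loads and stores, which matters for global (kernel)
 * PTEs that other cores' hardware page walkers may consume immediately.
 */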

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	pte_val(pte) &= _PAGE_GLOBAL;
	set_pte(ptep, pte);
}
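
/*
 * pte_clear() keeps _PAGE_GLOBAL in an otherwise empty pte (and
 * pte_none() above masks it off). Presumably, as on MIPS, both ptes of
 * a TLB entry pair must agree on the global attribute, so the bit
 * survives clearing.
 */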

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
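
/*
 * Two dirty bits cooperate in the helpers above: _PAGE_MODIFIED is the
 * software dirty bit, while _PAGE_DIRTY is what the hardware checks on
 * stores. mkdirty()/mkwrite() set _PAGE_DIRTY only once the pte is both
 * written and writable, and wrprotect() clears it again, so dirty state
 * is never lost while write permission is withdrawn.
 */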

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}
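
/*
 * This mirrors the x86 logic: a PROT_NONE pte may still sit in remote
 * TLBs until a pending flush completes, so it counts as accessible
 * while mm->tlb_flush_pending is set.
 */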

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) |= _PAGE_MODIFIED;

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, so generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
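
/*
 * In a huge pmd the global attribute lives at a different bit position
 * (_PAGE_HGLOBAL), so pmd_mkhuge() relocates an existing _PAGE_GLOBAL
 * bit before setting _PAGE_HUGE.
 */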

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_val(pmd) & _PAGE_DIRTY)
		pmd_val(pmd) |= _PAGE_MODIFIED;

	return __pmd((pmd_val(pmd) & _HPAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_HPAGE_CHG_MASK));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}

/*
 * The generic pmdp_huge_get_and_clear() uses a version of pmd_clear()
 * with a different prototype, so provide our own.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = pmdp_get(pmdp);

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLER__ */

#endif /* _ASM_PGTABLE_H */