xref: /linux/arch/riscv/include/asm/pgtable.h (revision b6a1af0362b3232c7b474b9b46e49b862602018c)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 Regents of the University of California
4  */
5 
6 #ifndef _ASM_RISCV_PGTABLE_H
7 #define _ASM_RISCV_PGTABLE_H
8 
9 #include <linux/mmzone.h>
10 #include <linux/sizes.h>
11 
12 #include <asm/pgtable-bits.h>
13 
14 #ifndef CONFIG_MMU
15 #define KERNEL_LINK_ADDR	PAGE_OFFSET
16 #define KERN_VIRT_SIZE		(UL(-1))
17 #else
18 
19 #define ADDRESS_SPACE_END	(UL(-1))
20 
21 #ifdef CONFIG_64BIT
22 /* Leave 2GB for kernel and BPF at the end of the address space */
23 #define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
24 #else
25 #define KERNEL_LINK_ADDR	PAGE_OFFSET
26 #endif
27 
28 /* Number of entries in the page global directory */
29 #define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
30 /* Number of entries in the page table */
31 #define PTRS_PER_PTE    (PAGE_SIZE / sizeof(pte_t))
32 
33 /*
34  * Half of the kernel address space (a quarter of the entries of the page
35  * global directory) is for the direct mapping.
36  */
37 #define KERN_VIRT_SIZE          ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
38 
39 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
40 #define VMALLOC_END      PAGE_OFFSET
41 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
42 
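/*
 * Illustrative example, assuming Sv39 with 4 KiB pages (so PGDIR_SIZE is
 * 1 GiB and PTRS_PER_PGD is 512); the actual values depend on the paging
 * mode chosen at boot:
 *
 *   KERN_VIRT_SIZE = (512 / 2) * 1 GiB / 2 = 128 GiB
 *   VMALLOC_SIZE   = KERN_VIRT_SIZE / 2    =  64 GiB
 *   VMALLOC_END    = PAGE_OFFSET
 *   VMALLOC_START  = PAGE_OFFSET - 64 GiB
 */
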
43 #define BPF_JIT_REGION_SIZE	(SZ_128M)
44 #ifdef CONFIG_64BIT
45 #define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
46 #define BPF_JIT_REGION_END	(MODULES_END)
47 #else
48 #define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
49 #define BPF_JIT_REGION_END	(VMALLOC_END)
50 #endif
51 
52 /* Modules always live before the kernel */
53 #ifdef CONFIG_64BIT
54 /* This is used to define the end of the KASAN shadow region */
55 #define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
56 #define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
57 #define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
58 #endif
59 
60 /*
61  * Roughly size the vmemmap space so it is large enough to hold the
62  * struct pages needed to map half the virtual address space, then
63  * position vmemmap directly below the VMALLOC region.
64  */
65 #ifdef CONFIG_64BIT
66 #define VA_BITS		(pgtable_l5_enabled ? \
67 				57 : (pgtable_l4_enabled ? 48 : 39))
68 #else
69 #define VA_BITS		32
70 #endif
71 
72 #define VMEMMAP_SHIFT \
73 	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
74 #define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
75 #define VMEMMAP_END	VMALLOC_START
76 #define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
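
/*
 * Worked example, assuming Sv39 (VA_BITS = 39, PAGE_SHIFT = 12) and a
 * STRUCT_PAGE_MAX_SHIFT of 6 (i.e. sizeof(struct page) <= 64 bytes):
 *
 *   half the VA space  = 2^38 bytes = 2^26 pages
 *   VMEMMAP_SHIFT      = 39 - 12 - 1 + 6 = 32
 *   VMEMMAP_SIZE       = 2^32 bytes = 4 GiB = 2^26 * 64-byte struct pages
 *
 * so the region just below VMALLOC_START can hold one struct page for
 * every page frame in that half of the address space.
 */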
77 
78 /*
79  * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if the
80  * kernel is built with CONFIG_SPARSEMEM_VMEMMAP enabled.
81  */
82 #define vmemmap		((struct page *)VMEMMAP_START)
83 
84 #define PCI_IO_SIZE      SZ_16M
85 #define PCI_IO_END       VMEMMAP_START
86 #define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
87 
88 #define FIXADDR_TOP      PCI_IO_START
89 #ifdef CONFIG_64BIT
90 #define FIXADDR_SIZE     PMD_SIZE
91 #else
92 #define FIXADDR_SIZE     PGDIR_SIZE
93 #endif
94 #define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
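
/*
 * A rough map of how the pieces above fit together on 64-bit, from low to
 * high kernel virtual addresses (see Documentation/riscv/vm-layout.rst for
 * exact numbers):
 *
 *   FIXADDR_START    .. FIXADDR_TOP      fixmap
 *   PCI_IO_START     .. PCI_IO_END       PCI I/O space
 *   VMEMMAP_START    .. VMEMMAP_END      vmemmap (struct page array)
 *   VMALLOC_START    .. VMALLOC_END      vmalloc/ioremap space
 *   PAGE_OFFSET      ..                  linear (direct) mapping of RAM
 *   MODULES_VADDR    .. MODULES_END      modules and BPF JIT images
 *   KERNEL_LINK_ADDR ..                  kernel image, in the top 2 GiB
 */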
95 
96 #endif
97 
98 #ifdef CONFIG_XIP_KERNEL
99 #define XIP_OFFSET		SZ_32M
100 #define XIP_OFFSET_MASK		(SZ_32M - 1)
101 #else
102 #define XIP_OFFSET		0
103 #endif
104 
105 #ifndef __ASSEMBLY__
106 
107 #include <asm/page.h>
108 #include <asm/tlbflush.h>
109 #include <linux/mm_types.h>
110 
111 #define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
112 
113 #ifdef CONFIG_64BIT
114 #include <asm/pgtable-64.h>
115 #else
116 #include <asm/pgtable-32.h>
117 #endif /* CONFIG_64BIT */
118 
119 #include <linux/page_table_check.h>
120 
121 #ifdef CONFIG_XIP_KERNEL
122 #define XIP_FIXUP(addr) ({							\
123 	uintptr_t __a = (uintptr_t)(addr);					\
124 	(__a >= CONFIG_XIP_PHYS_ADDR && \
125 	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?	\
126 		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
127 		__a;								\
128 	})
129 #else
130 #define XIP_FIXUP(addr)		(addr)
131 #endif /* CONFIG_XIP_KERNEL */
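
/*
 * Worked example with purely illustrative values: assume
 * CONFIG_XIP_PHYS_ADDR = 0x21000000 (flash) and CONFIG_PHYS_RAM_BASE =
 * 0x80000000. A symbol whose link-time address is 48 MiB into the XIP
 * window, i.e. 16 MiB past the XIP_OFFSET = 32 MiB boundary, is fixed up
 * to
 *
 *   0x21000000 + 48M - 0x21000000 + 0x80000000 - 32M = 0x81000000
 *
 * i.e. 16 MiB into RAM, which is where the writable part of the image gets
 * copied at boot. Addresses outside the 2 * XIP_OFFSET window pass through
 * unchanged.
 */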
132 
133 struct pt_alloc_ops {
134 	pte_t *(*get_pte_virt)(phys_addr_t pa);
135 	phys_addr_t (*alloc_pte)(uintptr_t va);
136 #ifndef __PAGETABLE_PMD_FOLDED
137 	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
138 	phys_addr_t (*alloc_pmd)(uintptr_t va);
139 	pud_t *(*get_pud_virt)(phys_addr_t pa);
140 	phys_addr_t (*alloc_pud)(uintptr_t va);
141 	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
142 	phys_addr_t (*alloc_p4d)(uintptr_t va);
143 #endif
144 };
145 
146 extern struct pt_alloc_ops pt_ops __initdata;
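
/*
 * A hypothetical usage sketch (the real callers live in
 * arch/riscv/mm/init.c): early mapping code that needs the PTE table for
 * some virtual address @va goes through the ops instead of a fixed
 * allocator, so the same code works whether it runs in the early, fixmap,
 * or final page-table setup phase:
 *
 *	phys_addr_t pte_phys = pt_ops.alloc_pte(va);
 *	pte_t *ptep = pt_ops.get_pte_virt(pte_phys);
 */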
147 
148 #ifdef CONFIG_MMU
149 /* Number of PGD entries that a user-mode program can use */
150 #define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
151 
152 /* Page protection bits */
153 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
154 
155 #define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
156 #define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
157 #define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
158 #define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
159 #define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
160 #define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
161 					 _PAGE_EXEC | _PAGE_WRITE)
162 
163 #define PAGE_COPY		PAGE_READ
164 #define PAGE_COPY_EXEC		PAGE_EXEC
165 #define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
166 #define PAGE_SHARED		PAGE_WRITE
167 #define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC
168 
169 #define _PAGE_KERNEL		(_PAGE_READ \
170 				| _PAGE_WRITE \
171 				| _PAGE_PRESENT \
172 				| _PAGE_ACCESSED \
173 				| _PAGE_DIRTY \
174 				| _PAGE_GLOBAL)
175 
176 #define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
177 #define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
178 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
179 #define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
180 					 | _PAGE_EXEC)
181 
182 #define PAGE_TABLE		__pgprot(_PAGE_TABLE)
183 
184 #define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
185 #define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)
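
/*
 * _PAGE_IOREMAP is the normal kernel mapping with the cacheable memory
 * type (_PAGE_MTMASK) swapped for the I/O attribute; the generic
 * ioremap() path uses it when mapping device MMIO.
 */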
186 
187 extern pgd_t swapper_pg_dir[];
188 
189 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
190 static inline int pmd_present(pmd_t pmd)
191 {
192 	/*
193 	 * Checking for _PAGE_LEAF is needed too because:
194 	 * When splitting a THP, split_huge_page() will temporarily clear
195 	 * the present bit; in this situation, pmd_present() and
196 	 * pmd_trans_huge() still need to return true.
197 	 */
198 	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
199 }
200 #else
201 static inline int pmd_present(pmd_t pmd)
202 {
203 	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
204 }
205 #endif
206 
207 static inline int pmd_none(pmd_t pmd)
208 {
209 	return (pmd_val(pmd) == 0);
210 }
211 
212 static inline int pmd_bad(pmd_t pmd)
213 {
214 	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
215 }
216 
217 #define pmd_leaf	pmd_leaf
218 static inline int pmd_leaf(pmd_t pmd)
219 {
220 	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
221 }
222 
223 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
224 {
225 	*pmdp = pmd;
226 }
227 
228 static inline void pmd_clear(pmd_t *pmdp)
229 {
230 	set_pmd(pmdp, __pmd(0));
231 }
232 
233 static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
234 {
235 	unsigned long prot_val = pgprot_val(prot);
236 
237 	ALT_THEAD_PMA(prot_val);
238 
239 	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
240 }
241 
242 static inline unsigned long _pgd_pfn(pgd_t pgd)
243 {
244 	return __page_val_to_pfn(pgd_val(pgd));
245 }
246 
247 static inline struct page *pmd_page(pmd_t pmd)
248 {
249 	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
250 }
251 
252 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
253 {
254 	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
255 }
256 
257 static inline pte_t pmd_pte(pmd_t pmd)
258 {
259 	return __pte(pmd_val(pmd));
260 }
261 
262 static inline pte_t pud_pte(pud_t pud)
263 {
264 	return __pte(pud_val(pud));
265 }
266 
267 /* Yields the page frame number (PFN) of a page table entry */
268 static inline unsigned long pte_pfn(pte_t pte)
269 {
270 	return __page_val_to_pfn(pte_val(pte));
271 }
272 
273 #define pte_page(x)     pfn_to_page(pte_pfn(x))
274 
275 /* Constructs a page table entry */
276 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
277 {
278 	unsigned long prot_val = pgprot_val(prot);
279 
280 	ALT_THEAD_PMA(prot_val);
281 
282 	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
283 }
284 
285 #define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)
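
/*
 * Worked example, assuming the standard RV64 PTE layout (flag bits 0-9,
 * PFN from bit 10 up): mapping physical address 0x80200000 (pfn 0x80200)
 * with PAGE_KERNEL, i.e. V|R|W|G|A|D = 0xe7, gives
 *
 *   pte_val = (0x80200 << 10) | 0xe7 = 0x200800e7
 *
 * and __page_val_to_pfn() recovers 0x80200 by masking off the flags and
 * shifting back down.
 */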
286 
287 static inline int pte_present(pte_t pte)
288 {
289 	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
290 }
291 
292 static inline int pte_none(pte_t pte)
293 {
294 	return (pte_val(pte) == 0);
295 }
296 
297 static inline int pte_write(pte_t pte)
298 {
299 	return pte_val(pte) & _PAGE_WRITE;
300 }
301 
302 static inline int pte_exec(pte_t pte)
303 {
304 	return pte_val(pte) & _PAGE_EXEC;
305 }
306 
307 static inline int pte_user(pte_t pte)
308 {
309 	return pte_val(pte) & _PAGE_USER;
310 }
311 
312 static inline int pte_huge(pte_t pte)
313 {
314 	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
315 }
316 
317 static inline int pte_dirty(pte_t pte)
318 {
319 	return pte_val(pte) & _PAGE_DIRTY;
320 }
321 
322 static inline int pte_young(pte_t pte)
323 {
324 	return pte_val(pte) & _PAGE_ACCESSED;
325 }
326 
327 static inline int pte_special(pte_t pte)
328 {
329 	return pte_val(pte) & _PAGE_SPECIAL;
330 }
331 
332 /* static inline pte_t pte_rdprotect(pte_t pte) */
333 
334 static inline pte_t pte_wrprotect(pte_t pte)
335 {
336 	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
337 }
338 
339 /* static inline pte_t pte_mkread(pte_t pte) */
340 
341 static inline pte_t pte_mkwrite(pte_t pte)
342 {
343 	return __pte(pte_val(pte) | _PAGE_WRITE);
344 }
345 
346 /* static inline pte_t pte_mkexec(pte_t pte) */
347 
348 static inline pte_t pte_mkdirty(pte_t pte)
349 {
350 	return __pte(pte_val(pte) | _PAGE_DIRTY);
351 }
352 
353 static inline pte_t pte_mkclean(pte_t pte)
354 {
355 	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
356 }
357 
358 static inline pte_t pte_mkyoung(pte_t pte)
359 {
360 	return __pte(pte_val(pte) | _PAGE_ACCESSED);
361 }
362 
363 static inline pte_t pte_mkold(pte_t pte)
364 {
365 	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
366 }
367 
368 static inline pte_t pte_mkspecial(pte_t pte)
369 {
370 	return __pte(pte_val(pte) | _PAGE_SPECIAL);
371 }
372 
373 static inline pte_t pte_mkhuge(pte_t pte)
374 {
375 	return pte;
376 }
377 
378 #ifdef CONFIG_NUMA_BALANCING
379 /*
380  * See the comment in include/asm-generic/pgtable.h
381  */
382 static inline int pte_protnone(pte_t pte)
383 {
384 	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
385 }
386 
387 static inline int pmd_protnone(pmd_t pmd)
388 {
389 	return pte_protnone(pmd_pte(pmd));
390 }
391 #endif
392 
393 /* Modify page protection bits */
394 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
395 {
396 	unsigned long newprot_val = pgprot_val(newprot);
397 
398 	ALT_THEAD_PMA(newprot_val);
399 
400 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
401 }
402 
403 #define pgd_ERROR(e) \
404 	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
405 
406 
407 /* Commit new configuration to MMU hardware */
408 static inline void update_mmu_cache(struct vm_area_struct *vma,
409 	unsigned long address, pte_t *ptep)
410 {
411 	/*
412 	 * The kernel assumes that TLBs don't cache invalid entries, but
413 	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
414 	 * cache flush; it is necessary even after writing invalid entries.
415 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
416 	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
417 	 */
418 	flush_tlb_page(vma, address);
419 }
420 
421 #define __HAVE_ARCH_UPDATE_MMU_TLB
422 #define update_mmu_tlb update_mmu_cache
423 
424 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
425 		unsigned long address, pmd_t *pmdp)
426 {
427 	pte_t *ptep = (pte_t *)pmdp;
428 
429 	update_mmu_cache(vma, address, ptep);
430 }
431 
432 #define __HAVE_ARCH_PTE_SAME
433 static inline int pte_same(pte_t pte_a, pte_t pte_b)
434 {
435 	return pte_val(pte_a) == pte_val(pte_b);
436 }
437 
438 /*
439  * Certain architectures need to do special things when PTEs within
440  * a page table are directly modified.  Thus, the following hook is
441  * made available.
442  */
443 static inline void set_pte(pte_t *ptep, pte_t pteval)
444 {
445 	*ptep = pteval;
446 }
447 
448 void flush_icache_pte(pte_t pte);
449 
450 static inline void __set_pte_at(struct mm_struct *mm,
451 	unsigned long addr, pte_t *ptep, pte_t pteval)
452 {
453 	if (pte_present(pteval) && pte_exec(pteval))
454 		flush_icache_pte(pteval);
455 
456 	set_pte(ptep, pteval);
457 }
458 
459 static inline void set_pte_at(struct mm_struct *mm,
460 	unsigned long addr, pte_t *ptep, pte_t pteval)
461 {
462 	page_table_check_pte_set(mm, addr, ptep, pteval);
463 	__set_pte_at(mm, addr, ptep, pteval);
464 }
465 
466 static inline void pte_clear(struct mm_struct *mm,
467 	unsigned long addr, pte_t *ptep)
468 {
469 	__set_pte_at(mm, addr, ptep, __pte(0));
470 }
471 
472 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
473 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
474 					unsigned long address, pte_t *ptep,
475 					pte_t entry, int dirty)
476 {
477 	if (!pte_same(*ptep, entry))
478 		set_pte_at(vma->vm_mm, address, ptep, entry);
479 	/*
480 	 * update_mmu_cache will unconditionally execute, handling both
481 	 * the case that the PTE changed and the spurious fault case.
482 	 */
483 	return true;
484 }
485 
486 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
487 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
488 				       unsigned long address, pte_t *ptep)
489 {
490 	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
491 
492 	page_table_check_pte_clear(mm, address, pte);
493 
494 	return pte;
495 }
496 
497 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
498 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
499 					    unsigned long address,
500 					    pte_t *ptep)
501 {
502 	if (!pte_young(*ptep))
503 		return 0;
504 	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
505 }
506 
507 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
508 static inline void ptep_set_wrprotect(struct mm_struct *mm,
509 				      unsigned long address, pte_t *ptep)
510 {
511 	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
512 }
513 
514 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
515 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
516 					 unsigned long address, pte_t *ptep)
517 {
518 	/*
519 	 * This comment is borrowed from x86, but applies equally to RISC-V:
520 	 *
521 	 * Clearing the accessed bit without a TLB flush
522 	 * doesn't cause data corruption. [ It could cause incorrect
523 	 * page aging and the (mistaken) reclaim of hot pages, but the
524 	 * chance of that should be relatively low. ]
525 	 *
526 	 * So as a performance optimization don't flush the TLB when
527 	 * clearing the accessed bit, it will eventually be flushed by
528 	 * a context switch or a VM operation anyway. [ In the rare
529 	 * event of it not getting flushed for a long time the delay
530 	 * shouldn't really matter because there's no real memory
531 	 * pressure for swapout to react to. ]
532 	 */
533 	return ptep_test_and_clear_young(vma, address, ptep);
534 }
535 
536 #define pgprot_noncached pgprot_noncached
537 static inline pgprot_t pgprot_noncached(pgprot_t _prot)
538 {
539 	unsigned long prot = pgprot_val(_prot);
540 
541 	prot &= ~_PAGE_MTMASK;
542 	prot |= _PAGE_IO;
543 
544 	return __pgprot(prot);
545 }
546 
547 #define pgprot_writecombine pgprot_writecombine
548 static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
549 {
550 	unsigned long prot = pgprot_val(_prot);
551 
552 	prot &= ~_PAGE_MTMASK;
553 	prot |= _PAGE_NOCACHE;
554 
555 	return __pgprot(prot);
556 }
557 
558 /*
559  * THP functions
560  */
561 static inline pmd_t pte_pmd(pte_t pte)
562 {
563 	return __pmd(pte_val(pte));
564 }
565 
566 static inline pmd_t pmd_mkhuge(pmd_t pmd)
567 {
568 	return pmd;
569 }
570 
571 static inline pmd_t pmd_mkinvalid(pmd_t pmd)
572 {
573 	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
574 }
575 
576 #define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)
577 
578 static inline unsigned long pmd_pfn(pmd_t pmd)
579 {
580 	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
581 }
582 
583 #define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)
584 
585 static inline unsigned long pud_pfn(pud_t pud)
586 {
587 	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
588 }
589 
590 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
591 {
592 	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
593 }
594 
595 #define pmd_write pmd_write
596 static inline int pmd_write(pmd_t pmd)
597 {
598 	return pte_write(pmd_pte(pmd));
599 }
600 
601 static inline int pmd_dirty(pmd_t pmd)
602 {
603 	return pte_dirty(pmd_pte(pmd));
604 }
605 
606 #define pmd_young pmd_young
607 static inline int pmd_young(pmd_t pmd)
608 {
609 	return pte_young(pmd_pte(pmd));
610 }
611 
612 static inline int pmd_user(pmd_t pmd)
613 {
614 	return pte_user(pmd_pte(pmd));
615 }
616 
617 static inline pmd_t pmd_mkold(pmd_t pmd)
618 {
619 	return pte_pmd(pte_mkold(pmd_pte(pmd)));
620 }
621 
622 static inline pmd_t pmd_mkyoung(pmd_t pmd)
623 {
624 	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
625 }
626 
627 static inline pmd_t pmd_mkwrite(pmd_t pmd)
628 {
629 	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
630 }
631 
632 static inline pmd_t pmd_wrprotect(pmd_t pmd)
633 {
634 	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
635 }
636 
637 static inline pmd_t pmd_mkclean(pmd_t pmd)
638 {
639 	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
640 }
641 
642 static inline pmd_t pmd_mkdirty(pmd_t pmd)
643 {
644 	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
645 }
646 
647 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
648 				pmd_t *pmdp, pmd_t pmd)
649 {
650 	page_table_check_pmd_set(mm, addr, pmdp, pmd);
651 	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
652 }
653 
654 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
655 				pud_t *pudp, pud_t pud)
656 {
657 	page_table_check_pud_set(mm, addr, pudp, pud);
658 	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
659 }
660 
661 #ifdef CONFIG_PAGE_TABLE_CHECK
662 static inline bool pte_user_accessible_page(pte_t pte)
663 {
664 	return pte_present(pte) && pte_user(pte);
665 }
666 
667 static inline bool pmd_user_accessible_page(pmd_t pmd)
668 {
669 	return pmd_leaf(pmd) && pmd_user(pmd);
670 }
671 
672 static inline bool pud_user_accessible_page(pud_t pud)
673 {
674 	return pud_leaf(pud) && pud_user(pud);
675 }
676 #endif
677 
678 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
679 static inline int pmd_trans_huge(pmd_t pmd)
680 {
681 	return pmd_leaf(pmd);
682 }
683 
684 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
685 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
686 					unsigned long address, pmd_t *pmdp,
687 					pmd_t entry, int dirty)
688 {
689 	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
690 }
691 
692 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
693 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
694 					unsigned long address, pmd_t *pmdp)
695 {
696 	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
697 }
698 
699 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
700 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
701 					unsigned long address, pmd_t *pmdp)
702 {
703 	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));
704 
705 	page_table_check_pmd_clear(mm, address, pmd);
706 
707 	return pmd;
708 }
709 
710 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
711 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
712 					unsigned long address, pmd_t *pmdp)
713 {
714 	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
715 }
716 
717 #define pmdp_establish pmdp_establish
718 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
719 				unsigned long address, pmd_t *pmdp, pmd_t pmd)
720 {
721 	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
722 	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
723 }
724 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
725 
726 /*
727  * Encode and decode a swap entry
728  *
729  * Format of swap PTE:
730  *	bit            0:	_PAGE_PRESENT (zero)
731  *	bits      1 to 3:	_PAGE_LEAF (zero)
732  *	bit            5:	_PAGE_PROT_NONE (zero)
733  *	bits      6 to 10:	swap type
734  *	bits 11 to XLEN-1:	swap offset
735  */
736 #define __SWP_TYPE_SHIFT	6
737 #define __SWP_TYPE_BITS		5
738 #define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
739 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
740 
741 #define MAX_SWAPFILES_CHECK()	\
742 	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
743 
744 #define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
745 #define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
746 #define __swp_entry(type, offset) ((swp_entry_t) \
747 	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
748 
749 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
750 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
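
/*
 * Worked example: __swp_entry(type = 3, offset = 0x1234) produces
 *
 *   val = (3 << 6) | (0x1234 << 11) = 0xc0 | 0x91a000 = 0x91a0c0
 *
 * Bit 0 (_PAGE_PRESENT) stays clear, so the entry is never treated as a
 * present mapping, and __swp_type()/__swp_offset() mask and shift the
 * fields back out.
 */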
751 
752 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
753 #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
754 #define __swp_entry_to_pmd(swp) __pmd((swp).val)
755 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
756 
757 /*
758  * In the RV64 Linux scheme, we give the user half of the virtual-address space
759  * and give the kernel the other (upper) half.
760  */
761 #ifdef CONFIG_64BIT
762 #define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
763 #else
764 #define KERN_VIRT_START	FIXADDR_START
765 #endif
766 
767 /*
768  * TASK_SIZE is the amount of virtual address space available to user
769  * mode:
770  * -       0x9fc00000 (~2.5GB) for RV32.
771  * -     0x4000000000 ( 256GB) for RV64 using SV39 mmu
772  * -   0x800000000000 ( 128TB) for RV64 using SV48 mmu
773  * - 0x100000000000000 (  64PB) for RV64 using SV57 mmu
774  *
775  * Note that PGDIR_SIZE must evenly divide TASK_SIZE since the "RISC-V
776  * Instruction Set Manual Volume II: Privileged Architecture" states that
777  * "load and store effective addresses, which are 64 bits, must have bits
778  * 63–48 all equal to bit 47, or else a page-fault exception will occur."
779  */
780 #ifdef CONFIG_64BIT
781 #define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
782 #define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
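
/*
 * Worked example, assuming Sv39 (PGDIR_SIZE = 1 GiB, PTRS_PER_PGD = 512):
 *
 *   TASK_SIZE_64 = 1 GiB * 512 / 2 = 256 GiB = 0x4000000000
 *
 * i.e. user space gets the lower half of the 39-bit virtual address
 * space, matching the list above. TASK_SIZE_MIN uses PGDIR_SIZE_L3, the
 * Sv39 PGD size, so it stays at the Sv39 value whatever paging mode is
 * actually in use.
 */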
783 
784 #ifdef CONFIG_COMPAT
785 #define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
786 #define TASK_SIZE	(test_thread_flag(TIF_32BIT) ? \
787 			 TASK_SIZE_32 : TASK_SIZE_64)
788 #else
789 #define TASK_SIZE	TASK_SIZE_64
790 #endif
791 
792 #else
793 #define TASK_SIZE	FIXADDR_START
794 #define TASK_SIZE_MIN	TASK_SIZE
795 #endif
796 
797 #else /* CONFIG_MMU */
798 
799 #define PAGE_SHARED		__pgprot(0)
800 #define PAGE_KERNEL		__pgprot(0)
801 #define swapper_pg_dir		NULL
802 #define TASK_SIZE		0xffffffffUL
803 #define VMALLOC_START		0
804 #define VMALLOC_END		TASK_SIZE
805 
806 #endif /* !CONFIG_MMU */
807 
808 extern char _start[];
809 extern void *_dtb_early_va;
810 extern uintptr_t _dtb_early_pa;
811 #if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
812 #define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
813 #define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
814 #else
815 #define dtb_early_va	_dtb_early_va
816 #define dtb_early_pa	_dtb_early_pa
817 #endif /* CONFIG_XIP_KERNEL */
818 extern u64 satp_mode;
819 extern bool pgtable_l4_enabled;
820 
821 void paging_init(void);
822 void misc_mem_init(void);
823 
824 /*
825  * ZERO_PAGE is a global shared page that is always zero,
826  * used for zero-mapped memory areas, etc.
827  */
828 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
829 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
830 
831 #endif /* !__ASSEMBLY__ */
832 
833 #endif /* _ASM_RISCV_PGTABLE_H */
834