xref: /linux/arch/riscv/include/asm/pgtable.h (revision 8c994eff8fcfe8ecb1f1dbebed25b4d7bb75be12)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE    (PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE          ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END      PAGE_OFFSET
#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
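
/*
 * Worked example (illustrative, assuming Sv39 with 4K pages):
 * PTRS_PER_PGD = 4096 / 8 = 512 and PGDIR_SIZE = 1 GiB, so
 * KERN_VIRT_SIZE = (512 / 2 * 1 GiB) / 2 = 128 GiB and
 * VMALLOC_SIZE = 64 GiB, i.e. vmalloc occupies
 * [PAGE_OFFSET - 64 GiB, PAGE_OFFSET).
 */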

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#endif
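/*
 * The 2 GiB window above is not arbitrary: RISC-V auipc+jalr sequences
 * and PC-relative relocations can only reach +/- 2 GiB, so keeping
 * modules within 2 GiB below the kernel image guarantees module code
 * can always reach kernel symbols.
 */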

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39 39
#define VA_BITS_SV48 48
#define VA_BITS_SV57 57

#define VA_BITS		(pgtable_l5_enabled ? \
				VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS		VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
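
/*
 * Worked example (illustrative, assuming Sv39 and
 * sizeof(struct page) == 64, i.e. STRUCT_PAGE_MAX_SHIFT == 6):
 * half the address space holds 2^(39 - 1 - 12) = 2^26 pages, so
 * VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32 and VMEMMAP_SIZE = 4 GiB,
 * sitting immediately below VMALLOC_START.
 */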

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)

#define PCI_IO_SIZE      SZ_16M
#define PCI_IO_END       VMEMMAP_START
#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP      PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	 PMD_SIZE
#define FIX_FDT_SIZE	 (MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE     (PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	 PGDIR_SIZE
#define FIX_FDT_SIZE	 MAX_FDT_SIZE
#define FIXADDR_SIZE     (PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
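
/*
 * Putting the definitions above together, these regions stack directly
 * below the linear map, descending from PAGE_OFFSET (a layout sketch;
 * absolute values depend on the paging mode chosen at boot):
 *
 *	PAGE_OFFSET = VMALLOC_END
 *	 | vmalloc |
 *	VMALLOC_START = VMEMMAP_END
 *	 | vmemmap |
 *	VMEMMAP_START = PCI_IO_END
 *	 | PCI I/O |
 *	PCI_IO_START = FIXADDR_TOP
 *	 | fixmap  |
 *	FIXADDR_START
 */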

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_32M
#define XIP_OFFSET_MASK		(SZ_32M - 1)
#else
#define XIP_OFFSET		0
#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
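/*
 * Illustrative only: on RV64 the PFN sits in bits [53:10] of the PTE
 * (_PAGE_PFN_SHIFT == 10), so a page at physical address 0x80200000
 * (PFN 0x80200) contributes 0x80200UL << 10 to the raw PTE value, and
 * __page_val_to_pfn() recovers 0x80200 from it.
 */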

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define VA_USER_SV39 (UL(1) << (VA_BITS_SV39 - 1))
#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))

#ifdef CONFIG_COMPAT
#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
#endif /* CONFIG_COMPAT */

#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({							\
	uintptr_t __a = (uintptr_t)(addr);					\
	(__a >= CONFIG_XIP_PHYS_ADDR && \
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?	\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
		__a;								\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
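
/*
 * Worked example of the fixup (hypothetical addresses): with
 * CONFIG_XIP_PHYS_ADDR = 0x20000000 (flash) and
 * CONFIG_PHYS_RAM_BASE = 0x80000000, a symbol at 0x22000000 falls in
 * the writable second XIP_OFFSET window and is redirected to
 * 0x22000000 - 0x20000000 + 0x80000000 - SZ_32M = 0x80000000, i.e. its
 * copy in RAM; addresses outside the 2 * XIP_OFFSET window pass through
 * unchanged.
 */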

struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __initdata;
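
/*
 * Early boot swaps these ops as the MMU comes up; a usage sketch, with
 * the values purely illustrative:
 *
 *	phys_addr_t pa = pt_ops.alloc_pte(va);	// allocate a PTE page
 *	pte_t *ptep = pt_ops.get_pte_virt(pa);	// map it somewhere usable
 *	set_pte(&ptep[pte_index(va)], pfn_pte(pfn, PAGE_KERNEL));
 */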

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because, when splitting
	 * a THP, split_huge_page() temporarily clears the present bit;
	 * in that situation pmd_present() and pmd_trans_huge() still
	 * need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}
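
/*
 * Example (illustrative): for a 64 KiB NAPOT mapping (order 4 with 4K
 * pages), pos = 4 - 1 + _PAGE_PFN_SHIFT, so the low four PFN bits
 * become 0b1000 and _PAGE_NAPOT is set, which matches the Svnapot
 * encoding for a naturally aligned 16-page region.
 */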

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

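	/*
	 * For a NAPOT mapping the low PFN bits carry the size encoding
	 * (e.g. 0b1000 for 16 pages); res & (res - 1) clears the lowest
	 * set bit and so recovers the PFN of the region's base page.
	 */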
	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}

#define pte_page(x)     pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pteval, nr);

	for (;;) {
		__set_pte_at(ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
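		/* Step the PFN so each successive PTE maps the next page */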
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	__set_pte_at(ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		__set_pte_at(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
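	/*
	 * The atomic exchange (rather than a read followed by a write)
	 * ensures that a concurrent hardware or remote-CPU update of the
	 * accessed/dirty bits is not lost between the two steps.
	 */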
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
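	/* Atomic AND so that concurrent A/D-bit updates are preserved */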
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at((pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bits      1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
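
/*
 * Worked example (hypothetical values): __swp_entry(2, 0x1234) yields
 * val = (2 << 7) | (0x1234 << 12); bit 0 (_PAGE_PRESENT) and bit 5
 * (_PAGE_PROT_NONE) remain zero, so the result is !pte_none() &&
 * !pte_present(), as a swap PTE must be.
 */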

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif
/*
 * Task size is:
 * -        0x9fc00000	(~2.5GB) for RV32.
 * -      0x4000000000	( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000	( 128TB) for RV64 using SV48 mmu
 * - 0x100000000000000	(  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE, since the "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64 bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63–57 must be equal to bit 56.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(test_thread_flag(TIF_32BIT) ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#define TASK_SIZE_MIN	TASK_SIZE
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;
extern bool pgtable_l4_enabled;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */