/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#ifdef CONFIG_RELOCATABLE
#define KERNEL_LINK_ADDR	UL(0)
#else
#define KERNEL_LINK_ADDR	_AC(CONFIG_PHYS_RAM_BASE, UL)
#endif
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD		(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE		(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE		((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
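
/*
 * Worked example (illustrative): with Sv39, PGDIR_SIZE is 1GiB and
 * PTRS_PER_PGD is 512, so the direct mapping gets (512 / 2 * 1GiB) / 2
 * = 128GiB out of the 256GiB kernel half.
 */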

#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END		PAGE_OFFSET
#define VMALLOC_START		(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#else
#define MODULES_VADDR		VMALLOC_START
#define MODULES_END		VMALLOC_END
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39 39
#define VA_BITS_SV48 48
#define VA_BITS_SV57 57

#define VA_BITS		(pgtable_l5_enabled ? \
				VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS		VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
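
/*
 * Derivation (informal): half the virtual address space holds
 * 2^(VA_BITS - 1 - PAGE_SHIFT) pages, and each struct page occupies at
 * most 2^STRUCT_PAGE_MAX_SHIFT bytes, hence VMEMMAP_SHIFT above.
 */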

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START - vmemmap_start_pfn)

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
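
/*
 * Putting the definitions above together, the 64-bit kernel half looks
 * roughly like this (ascending addresses; an illustrative sketch, not to
 * scale):
 *
 *   FIXADDR_START  .. FIXADDR_TOP   fixmap (including the early FDT map)
 *   PCI_IO_START   .. PCI_IO_END    PCI I/O window
 *   VMEMMAP_START  .. VMEMMAP_END   vmemmap
 *   VMALLOC_START  .. VMALLOC_END   vmalloc area
 *   PAGE_OFFSET    ..               linear (direct) mapping
 *   MODULES_VADDR  .. MODULES_END   modules and BPF JIT
 *   KERNEL_LINK_ADDR ..             kernel image, in the top 2GB
 */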

#endif

#ifndef __ASSEMBLER__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define VA_USER_SV39	(UL(1) << (VA_BITS_SV39 - 1))
#define VA_USER_SV48	(UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57	(UL(1) << (VA_BITS_SV57 - 1))

#define MMAP_VA_BITS_64		((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64	(VA_BITS_SV39)
#define MMAP_VA_BITS		(is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS	(is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	extern char _sdata[], _start[], _end[];				\
	uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR		\
				+ (uintptr_t)&_sdata - (uintptr_t)&_start; \
	uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR			\
				+ (uintptr_t)&_end - (uintptr_t)&_start; \
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= __rom_start_data && __a < __rom_end_data) ?		\
		__a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a;	\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
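
/*
 * XIP_FIXUP() translates the link-time (ROM) address of a writable object
 * in .data to the address of its copy in RAM, and passes any other address
 * through unchanged; see the dtb_early_va/dtb_early_pa definitions near
 * the end of this file for an example use.
 */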

struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __meminitdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_KERNEL_NC	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_NOCACHE)
#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO	__pgprot(_PAGE_IOREMAP)
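
/*
 * Note (informal): _PAGE_MTMASK, _PAGE_NOCACHE and _PAGE_IO are the
 * memory-type fields (Svpbmt encodings on 64-bit, converted to the T-Head
 * PMA equivalents by ALT_THEAD_PMA in pfn_pte()/pte_modify() below); they
 * evaluate to zero when no such extension is available, which makes the
 * masking above a no-op there.
 */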

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * When splitting a THP, split_huge_page() will temporarily clear
	 * the present bit, in this situation, pmd_present() and
	 * pmd_trans_huge() still needs to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}
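
/*
 * Worked example (illustrative): for a 64KiB NAPOT range of 4KiB pages,
 * order == 4, so the low four PFN bits are replaced by 0b1000: the range
 * size is encoded in the position of the lowest set PFN bit, as Svnapot
 * specifies.
 */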

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

	/*
	 * For a NAPOT mapping, the lowest set bit of the PFN field is the
	 * NAPOT marker; clearing it recovers the base PFN of the range.
	 */
	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	/* XOR away the PFN-only pte, leaving just the protection bits */
	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROT_NONE) &&
	    atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pgtable_supports_uffd_wp() \
	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B)

static inline bool pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(__pte(pte_val(pte) | _PAGE_UFFD_WP));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_UFFD_WP));
}

static inline bool pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pgtable_supports_soft_dirty()				\
	(IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) &&			\
	 riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B))

static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SOFT_DIRTY));
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SWP_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte)	(pte_napot(pte) ?				\
					napot_cont_size(napot_cont_order(pte)) :\
					PAGE_SIZE)
#endif

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/*
	 * Svvptc guarantees that the new valid pte will be visible within
	 * a bounded timeframe, so when the uarch does not cache invalid
	 * entries, we don't have to do anything.
	 */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
		return;

	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
}

void flush_icache_pte(struct mm_struct *mm, pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(mm, pteval);

	set_pte(ptep, pteval);
}

#define PFN_PTE_SHIFT		_PAGE_PFN_SHIFT

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pteval, nr);

	for (;;) {
		__set_pte_at(mm, ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
		/* Advance the PFN field to the next page of the range */
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG	/* defined in mm/pgtable.c */
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
				     pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
	return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

#define pgprot_dmacoherent pgprot_writecombine

/*
 * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
 * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in
 * DT.
 */
#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	return pte_pgprot(pmd_pte(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	return pte_pgprot(pud_pte(pud));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pte_write(pud_pte(pud));
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
	return pte_special(pmd_pte(pmd));
}

static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
}
#endif

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
	return pte_special(pud_pte(pud));
}

static inline pud_t pud_mkspecial(pud_t pud)
{
	return pte_pud(pte_mkspecial(pud_pte(pud)));
}
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline bool pmd_uffd_wp(pmd_t pmd)
{
	return pte_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)));
}

static inline bool pmd_swp_uffd_wp(pmd_t pmd)
{
	return pte_swp_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pmd_soft_dirty(pmd_t pmd)
{
	return pte_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline bool pmd_swp_soft_dirty(pmd_t pmd)
{
	return pte_swp_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)));
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pte_pud(pte_wrprotect(pud_pte(pud)));
}

static inline int pud_trans_huge(pud_t pud)
{
	return pud_leaf(pud);
}

static inline int pud_dirty(pud_t pud)
{
	return pte_dirty(pud_pte(pud));
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pte_pud(pte_mkyoung(pud_pte(pud)));
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pte_pud(pte_mkold(pud_pte(pud)));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pte_pud(pte_mkdirty(pud_pte(pud)));
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pte_pud(pte_mkclean(pud_pte(pud)));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pte_pud(pte_mkwrite_novma(pud_pte(pud)));
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud;
}

static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pudp, pud_pte(entry), dirty);
}

static inline int pudp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pud_t *pudp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pudp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pud_t *pudp)
{
#ifdef CONFIG_SMP
	pud_t pud = __pud(xchg(&pudp->pud, 0));
#else
	pud_t pud = *pudp;

	pud_clear(pudp);
#endif

	page_table_check_pud_clear(mm, pud);

	return pud;
}

static inline int pud_young(pud_t pud)
{
	return pte_young(pud_pte(pud));
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp)
{
	pte_t *ptep = (pte_t *)pudp;

	update_mmu_cache(vma, address, ptep);
}

static inline pud_t pudp_establish(struct vm_area_struct *vma,
				   unsigned long address, pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(vma->vm_mm, pudp, pud);
	return __pud(atomic_long_xchg((atomic_long_t *)pudp, pud_val(pud)));
}

static inline pud_t pud_mkinvalid(pud_t pud)
{
	return __pud(pud_val(pud) & ~(_PAGE_PRESENT | _PAGE_PROT_NONE));
}

extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pud_t *pudp);

static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	return pte_pud(pte_modify(pud_pte(pud), newprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit       1 to 2:	(zero)
 *	bit            3:	_PAGE_SWP_SOFT_DIRTY
 *	bit            4:	_PAGE_SWP_UFFD_WP
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
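
/*
 * Worked example (illustrative): __swp_entry(3, 0x10) yields
 * (3 << 7) | (0x10 << 12) = 0x10180, i.e. the type lands in bits 7-11 and
 * the offset in bits 12 and up, leaving the present/prot-none bits clear
 * so the resulting pte is !pte_none() && !pte_present().
 */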

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * -        0x9fc00000	(~2.5GB) for RV32.
 * -      0x4000000000	( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000	( 128TB) for RV64 using SV48 mmu
 * - 0x100000000000000	(  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63-48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63-57 must be equal to bit 56.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
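
/*
 * e.g. Sv39 (illustrative): PGDIR_SIZE = 1GiB and PTRS_PER_PGD = 512, so
 * TASK_SIZE_64 = 512GiB / 2 = 0x4000000000 (256GB), matching the table
 * above.
 */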

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(is_compat_task() ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		_AC(-1, UL)
#define VMALLOC_START		_AC(0, UL)
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
#endif /* !__ASSEMBLER__ */

#endif /* _ASM_RISCV_PGTABLE_H */