/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#ifdef CONFIG_RELOCATABLE
#define KERNEL_LINK_ADDR	UL(0)
#else
#define KERNEL_LINK_ADDR	_AC(CONFIG_PHYS_RAM_BASE, UL)
#endif
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE	((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	PAGE_OFFSET
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)
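
/*
 * Worked example (illustrative only): with Sv39, 4KiB pages and 8-byte
 * PGD entries, PTRS_PER_PGD = 512 and PGDIR_SIZE = 1GiB, so the kernel
 * half of the address space is 512 / 2 * 1GiB = 256GiB. The direct
 * mapping then gets KERN_VIRT_SIZE = 256GiB / 2 = 128GiB, and
 * VMALLOC_SIZE = 128GiB >> 1 = 64GiB, ending at PAGE_OFFSET.
 */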

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#else
#define MODULES_VADDR		VMALLOC_START
#define MODULES_END		VMALLOC_END
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39 39
#define VA_BITS_SV48 48
#define VA_BITS_SV57 57

#define VA_BITS		(pgtable_l5_enabled ? \
				VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS		VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
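
/*
 * Worked example (illustrative only, assuming Sv39, 4KiB pages and a
 * 64-byte struct page, i.e. STRUCT_PAGE_MAX_SHIFT == 6, which depends
 * on the configuration): VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32, so
 * VMEMMAP_SIZE is 4GiB - one 64-byte struct page for each of the 2^26
 * page frames in half of the 2^39-byte address space.
 */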

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START - vmemmap_start_pfn)

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
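
/*
 * Putting the regions above together, the 64-bit kernel half is laid
 * out, from low to high addresses (a sketch derived purely from the
 * definitions above):
 *
 *   FIXADDR_START .. FIXADDR_TOP : fixmap (incl. the early FDT slot)
 *   PCI_IO_START  .. PCI_IO_END  : PCI I/O space (16MiB)
 *   VMEMMAP_START .. VMEMMAP_END : vmemmap (struct page array)
 *   VMALLOC_START .. VMALLOC_END : vmalloc space, ending at PAGE_OFFSET
 *   PAGE_OFFSET   ..             : linear (direct) mapping
 *   MODULES_VADDR .. MODULES_END : modules, just below the kernel image
 *   KERNEL_LINK_ADDR             : kernel image, 2GiB below the top
 */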

#endif

#ifndef __ASSEMBLER__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define MMAP_VA_BITS_64		((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64	(VA_BITS_SV39)
#define MMAP_VA_BITS		(is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS	(is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __meminitdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | \
					 _PAGE_EXEC | _PAGE_WRITE)
#define PAGE_SHADOWSTACK	__pgprot(_PAGE_BASE | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				 | _PAGE_WRITE \
				 | _PAGE_PRESENT \
				 | _PAGE_ACCESSED \
				 | _PAGE_DIRTY \
				 | _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_KERNEL_NC	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_NOCACHE)
#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() temporarily clears
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}
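
/*
 * Illustrative example (assuming 4KiB base pages): a 64KiB NAPOT range
 * covers 16 pages, so order == 4. pte_mknapot() clears the low four
 * PFN bits, sets PFN bit (order - 1) and sets _PAGE_NAPOT, so the low
 * PFN bits become 0b1000 - the size encoding Svnapot requires.
 */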

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}
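
/*
 * Note on the NAPOT case above: the size is encoded as a single set
 * bit in the otherwise-zero low PFN bits (0b1000 for order 4), so
 * `res & (res - 1)` clears that lowest set bit and recovers the
 * naturally aligned base PFN.
 */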

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}
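
/*
 * The XOR above is a compact way to strip the PFN: pfn_pte(pfn, 0)
 * rebuilds a PTE containing only the PFN bits (plus whatever
 * ALT_THEAD_PMA folds into a zero protection value), so XORing it with
 * the original value leaves just the protection bits behind.
 */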

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROT_NONE) &&
	    atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte((pte_val(pte) & ~(_PAGE_WRITE)) | (_PAGE_READ));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pgtable_supports_uffd_wp() \
	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B)

static inline bool pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(__pte(pte_val(pte) | _PAGE_UFFD_WP));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_UFFD_WP));
}

static inline bool pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

/* static inline pte_t pte_mkread(pte_t pte) */

struct vm_area_struct;
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma);
#define pte_mkwrite pte_mkwrite

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

static inline pte_t pte_mkwrite_shstk(pte_t pte)
{
	return __pte((pte_val(pte) & ~(_PAGE_LEAF)) | _PAGE_WRITE);
}
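
/*
 * Note: _PAGE_LEAF covers the R/W/X bits, so the expression above first
 * drops all three and then sets W alone, producing the XWR = 010 shadow
 * stack encoding that the comment in ptep_set_wrprotect() below refers
 * to.
 */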

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pgtable_supports_soft_dirty() \
	(IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && \
	 riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B))

static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SOFT_DIRTY));
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SWP_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte)	(pte_napot(pte) ?				\
					napot_cont_size(napot_cont_order(pte)) :\
					PAGE_SIZE)
#endif

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/*
	 * Svvptc guarantees that the new valid pte will be visible within
	 * a bounded timeframe, so when the uarch does not cache invalid
	 * entries, we don't have to do anything.
	 */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
		return;

	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
}

void flush_icache_pte(struct mm_struct *mm, pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(mm, pteval);

	set_pte(ptep, pteval);
}

#define PFN_PTE_SHIFT		_PAGE_PFN_SHIFT

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, addr, ptep, pteval, nr);

	for (;;) {
		__set_pte_at(mm, ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
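
/*
 * Note: adding 1 << _PAGE_PFN_SHIFT to the raw PTE value advances the
 * encoded PFN by exactly one, so each successive entry maps the next
 * base page while the protection bits stay unchanged.
 */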
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG	/* defined in mm/pgtable.c */
bool ptep_test_and_clear_young(struct vm_area_struct *vma,
			       unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
#ifdef CONFIG_SMP
	pte_t pte = __pte(xchg(&ptep->pte, 0));
#else
	pte_t pte = *ptep;

	set_pte(ptep, __pte(0));
#endif

	page_table_check_pte_clear(mm, address, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	pte_t read_pte = READ_ONCE(*ptep);
	/*
	 * ptep_set_wrprotect() can be called for shadow stack ranges too.
	 * Shadow stack memory is encoded as XWR = 010, so clearing
	 * _PAGE_WRITE alone would produce the encoding 000 with V = 1,
	 * which is invalid. Accesses would fault either way, but we
	 * don't want that wrong configuration written to the page
	 * tables, so set _PAGE_READ as well.
	 */
	atomic_long_set((atomic_long_t *)ptep,
			((pte_val(read_pte) & ~(unsigned long)_PAGE_WRITE) | _PAGE_READ));
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline bool ptep_clear_flush_young(struct vm_area_struct *vma,
					  unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
	return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

#define pgprot_dmacoherent pgprot_writecombine

/*
 * Both Svade and Svadu control the hardware behavior when the PTE A/D bits
 * need to be set. By default the M-mode firmware enables the hardware
 * updating scheme when only Svadu is present in DT.
 */
#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT | _PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	return pte_pgprot(pmd_pte(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	return pte_pgprot(pud_pte(pud));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pte_write(pud_pte(pud));
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
#define pmd_mkwrite pmd_mkwrite

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
{
	return __pmd((pmd_val(pmd) & ~(_PAGE_LEAF)) | _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
	return pte_special(pmd_pte(pmd));
}

static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
}
#endif

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
	return pte_special(pud_pte(pud));
}

static inline pud_t pud_mkspecial(pud_t pud)
{
	return pte_pud(pte_mkspecial(pud_pte(pud)));
}
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline bool pmd_uffd_wp(pmd_t pmd)
{
	return pte_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)));
}

static inline bool pmd_swp_uffd_wp(pmd_t pmd)
{
	return pte_swp_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pmd_soft_dirty(pmd_t pmd)
{
	return pte_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline bool pmd_swp_soft_dirty(pmd_t pmd)
{
	return pte_swp_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)));
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(struct mm_struct *mm, unsigned long addr, pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(struct mm_struct *mm, unsigned long addr, pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(struct mm_struct *mm, unsigned long addr, pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline bool pmdp_test_and_clear_young(struct vm_area_struct *vma,
					     unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
#ifdef CONFIG_SMP
	pmd_t pmd = __pmd(xchg(&pmdp->pmd, 0));
#else
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);
#endif

	page_table_check_pmd_clear(mm, address, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pte_pud(pte_wrprotect(pud_pte(pud)));
}

static inline int pud_trans_huge(pud_t pud)
{
	return pud_leaf(pud);
}

static inline int pud_dirty(pud_t pud)
{
	return pte_dirty(pud_pte(pud));
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pte_pud(pte_mkyoung(pud_pte(pud)));
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pte_pud(pte_mkold(pud_pte(pud)));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pte_pud(pte_mkdirty(pud_pte(pud)));
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pte_pud(pte_mkclean(pud_pte(pud)));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pte_pud(pte_mkwrite_novma(pud_pte(pud)));
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud;
}

static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pudp, pud_pte(entry), dirty);
}

static inline bool pudp_test_and_clear_young(struct vm_area_struct *vma,
					     unsigned long address, pud_t *pudp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pudp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pud_t *pudp)
{
#ifdef CONFIG_SMP
	pud_t pud = __pud(xchg(&pudp->pud, 0));
#else
	pud_t pud = *pudp;

	pud_clear(pudp);
#endif

	page_table_check_pud_clear(mm, address, pud);

	return pud;
}

static inline int pud_young(pud_t pud)
{
	return pte_young(pud_pte(pud));
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp)
{
	pte_t *ptep = (pte_t *)pudp;

	update_mmu_cache(vma, address, ptep);
}

static inline pud_t pudp_establish(struct vm_area_struct *vma,
				   unsigned long address, pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(vma->vm_mm, address, pudp, pud);
	return __pud(atomic_long_xchg((atomic_long_t *)pudp, pud_val(pud)));
}

static inline pud_t pud_mkinvalid(pud_t pud)
{
	return __pud(pud_val(pud) & ~(_PAGE_PRESENT | _PAGE_PROT_NONE));
}

extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pud_t *pudp);

static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	return pte_pud(pte_modify(pud_pte(pud), newprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit       1 to 2:	(zero)
 *	bit            3:	_PAGE_SWP_SOFT_DIRTY
 *	bit            4:	_PAGE_SWP_UFFD_WP
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
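
/*
 * Worked example (illustrative only): __swp_entry(3, 0x1234) yields
 * (3 << 7) | (0x1234 << 12) = 0x1234180, which decodes back via
 * __swp_type() == 3 and __swp_offset() == 0x1234. Bits 0 and 5 stay
 * zero, so the resulting PTE is neither pte_none() nor pte_present().
 */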

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif
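
/*
 * Worked example (illustrative only): on Sv39, -(BIT(39)) is
 * 0xffffff8000000000 and TASK_SIZE is 0x4000000000 (256GB), so
 * KERN_VIRT_START == 0xffffffc000000000 - the bottom of the kernel
 * half of a 39-bit virtual address space.
 */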

/*
 * Task size is:
 * -        0x9fc00000	(~2.5GB) for RV32.
 * -      0x4000000000	( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000	( 128TB) for RV64 using SV48 mmu
 * -  0x100000000000000	(  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63–57 must be equal to bit 56.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
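
/*
 * Worked example (illustrative only): with Sv39, PGDIR_SIZE is 1GiB and
 * PTRS_PER_PGD is 512, so TASK_SIZE_64 = 1GiB * 512 / 2 = 256GB
 * (0x4000000000), matching the table above.
 */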

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(is_compat_task() ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		_AC(-1, UL)
#define VMALLOC_START		_AC(0, UL)
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
extern u64 satp_mode;

void paging_init(void);
void misc_mem_init(void);

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
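
/*
 * Usage sketch (hypothetical caller, not taken from in-tree code): when
 * populating a pgd slot known to be none, a caller can write
 * set_pgd_safe(pgdp, pfn_pgd(pfn, PAGE_TABLE)) and skip the flush,
 * since no hart can hold a stale translation for an entry that was
 * never present.
 */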
#endif /* !__ASSEMBLER__ */

#endif /* _ASM_RISCV_PGTABLE_H */