/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#ifdef CONFIG_RELOCATABLE
#define KERNEL_LINK_ADDR	UL(0)
#else
#define KERNEL_LINK_ADDR	_AC(CONFIG_PHYS_RAM_BASE, UL)
#endif
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE	((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	PAGE_OFFSET
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)
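
/*
 * A worked example (a sketch, assuming Sv39 with 4 KiB pages): PGDIR_SIZE
 * is 1 GiB and PTRS_PER_PGD is 512, so KERN_VIRT_SIZE is
 * (512 / 2 * 1 GiB) / 2 = 128 GiB, and VMALLOC_SIZE is the 64 GiB half of
 * it that sits directly below PAGE_OFFSET.
 */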

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
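
/*
 * Placing modules in the 2 GiB window right below the kernel image keeps
 * them within reach of RISC-V's +/-2 GiB PC-relative (auipc-based)
 * addressing of kernel symbols.
 */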
#else
#define MODULES_VADDR		VMALLOC_START
#define MODULES_END		VMALLOC_END
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32	32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39	39
#define VA_BITS_SV48	48
#define VA_BITS_SV57	57

#define VA_BITS		(pgtable_l5_enabled ? \
			 VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS		VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
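
/*
 * For example, under Sv39 with 4 KiB pages and a 64-byte struct page
 * (i.e. STRUCT_PAGE_MAX_SHIFT == 6, an assumption for illustration):
 * VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32, so VMEMMAP_SIZE is 4 GiB, enough
 * struct pages to describe the 2^38 bytes of the kernel half of the
 * address space.
 */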

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if the kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START - vmemmap_start_pfn)

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifndef __ASSEMBLER__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>

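/*
 * The PFN field occupies PTE bits [53:10] in the RV64 Sv39/Sv48/Sv57
 * formats (bits [31:10] under Sv32); _PAGE_PFN_MASK and _PAGE_PFN_SHIFT,
 * from asm/pgtable-bits.h, describe that field for the configured mode.
 */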
#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define MMAP_VA_BITS_64		((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64	(VA_BITS_SV39)
#define MMAP_VA_BITS		(is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS	(is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	extern char _sdata[], _start[], _end[];				\
	uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR		\
				+ (uintptr_t)&_sdata - (uintptr_t)&_start; \
	uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR			\
				+ (uintptr_t)&_end - (uintptr_t)&_start; \
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= __rom_start_data && __a < __rom_end_data) ?		\
		__a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a;	\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
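
/*
 * For instance, with the (hypothetical) values CONFIG_XIP_PHYS_ADDR ==
 * 0x20000000 and CONFIG_PHYS_RAM_BASE == 0x80000000, an address that
 * falls inside the ROM copy of .data is rebased onto its writable copy
 * in RAM; any other address is returned unchanged.
 */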

struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __meminitdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)
#define PAGE_SHADOWSTACK	__pgprot(_PAGE_BASE | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_KERNEL_NC	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_NOCACHE)
#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() will temporarily clear
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}
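
/*
 * E.g. (a sketch, assuming 4 KiB base pages and _PAGE_PFN_SHIFT == 10):
 * pte_mknapot(pte, 4) marks a 64 KiB NAPOT range by clearing PFN bits
 * [2:0] and setting PFN bit 3, the 0b1000 low-PFN pattern that Svnapot
 * defines for 64 KiB regions.
 */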

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

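	/*
	 * A NAPOT PTE encodes its size in the low PFN bits (the napot_bit
	 * set by pte_mknapot()); clearing the lowest set bit recovers the
	 * base PFN of the range.
	 */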
	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

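	/*
	 * Rebuilding a PTE from the PFN alone and XORing it with the
	 * original leaves the protection bits (everything that differs
	 * from a bare PFN mapping).
	 */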
	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROT_NONE) &&
	    atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte((pte_val(pte) & ~(_PAGE_WRITE)) | (_PAGE_READ));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pgtable_supports_uffd_wp() \
	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B)

static inline bool pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(__pte(pte_val(pte) | _PAGE_UFFD_WP));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_UFFD_WP));
}

static inline bool pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

/* static inline pte_t pte_mkread(pte_t pte) */

struct vm_area_struct;
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma);
#define pte_mkwrite pte_mkwrite

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

static inline pte_t pte_mkwrite_shstk(pte_t pte)
{
	return __pte((pte_val(pte) & ~(_PAGE_LEAF)) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pgtable_supports_soft_dirty()				\
	(IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) &&			\
	 riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B))

static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SOFT_DIRTY));
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SWP_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte)	(pte_napot(pte) ?				\
					napot_cont_size(napot_cont_order(pte)) :\
					PAGE_SIZE)
#endif

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/*
	 * Svvptc guarantees that the new valid pte will be visible within
	 * a bounded timeframe, so when the uarch does not cache invalid
	 * entries, we don't have to do anything.
	 */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
		return;

	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
}

void flush_icache_pte(struct mm_struct *mm, pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(mm, pteval);

	set_pte(ptep, pteval);
}

#define PFN_PTE_SHIFT	_PAGE_PFN_SHIFT

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, addr, ptep, pteval, nr);

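	/*
	 * Consecutive PTEs map consecutive pages of the same folio, so
	 * step the PFN field by one page per iteration.
	 */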
	for (;;) {
		__set_pte_at(mm, ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG	/* defined in mm/pgtable.c */
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
				     pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
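	/*
	 * On SMP the entry is cleared with an atomic xchg so that a
	 * hardware A/D-bit update racing with the clear cannot be lost.
	 */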
#ifdef CONFIG_SMP
	pte_t pte = __pte(xchg(&ptep->pte, 0));
#else
	pte_t pte = *ptep;

	set_pte(ptep, __pte(0));
#endif

	page_table_check_pte_clear(mm, address, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	pte_t read_pte = READ_ONCE(*ptep);
	/*
	 * ptep_set_wrprotect() can be called for shadow stack ranges too.
	 * Shadow stack memory is XWR = 010; simply clearing _PAGE_WRITE
	 * would produce the encoding 000 with V = 1, which is not a valid
	 * leaf encoding. That would lead to a page fault, but we don't
	 * want this wrong configuration to ever be set in the page tables,
	 * so set _PAGE_READ as well.
	 */
	atomic_long_set((atomic_long_t *)ptep,
			((pte_val(read_pte) & ~(unsigned long)_PAGE_WRITE) | _PAGE_READ));
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
	return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

#define pgprot_dmacoherent pgprot_writecombine

/*
 * Both Svade and Svadu control the hardware behavior when the PTE A/D bits
 * need to be set. By default the M-mode firmware enables the hardware
 * updating scheme when only Svadu is present in DT.
 */
#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	return pte_pgprot(pmd_pte(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	return pte_pgprot(pud_pte(pud));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pte_write(pud_pte(pud));
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
#define pmd_mkwrite pmd_mkwrite

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
{
	return __pmd((pmd_val(pmd) & ~(_PAGE_LEAF)) | _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
	return pte_special(pmd_pte(pmd));
}

static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
}
#endif

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
	return pte_special(pud_pte(pud));
}

static inline pud_t pud_mkspecial(pud_t pud)
{
	return pte_pud(pte_mkspecial(pud_pte(pud)));
}
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline bool pmd_uffd_wp(pmd_t pmd)
{
	return pte_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)));
}

static inline bool pmd_swp_uffd_wp(pmd_t pmd)
{
	return pte_swp_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pmd_soft_dirty(pmd_t pmd)
{
	return pte_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline bool pmd_swp_soft_dirty(pmd_t pmd)
{
	return pte_swp_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)));
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
#ifdef CONFIG_SMP
	pmd_t pmd = __pmd(xchg(&pmdp->pmd, 0));
#else
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);
#endif

	page_table_check_pmd_clear(mm, address, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pte_pud(pte_wrprotect(pud_pte(pud)));
}

static inline int pud_trans_huge(pud_t pud)
{
	return pud_leaf(pud);
}

static inline int pud_dirty(pud_t pud)
{
	return pte_dirty(pud_pte(pud));
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pte_pud(pte_mkyoung(pud_pte(pud)));
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pte_pud(pte_mkold(pud_pte(pud)));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pte_pud(pte_mkdirty(pud_pte(pud)));
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pte_pud(pte_mkclean(pud_pte(pud)));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pte_pud(pte_mkwrite_novma(pud_pte(pud)));
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud;
}

static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pudp, pud_pte(entry), dirty);
}

static inline int pudp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pud_t *pudp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pudp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pud_t *pudp)
{
#ifdef CONFIG_SMP
	pud_t pud = __pud(xchg(&pudp->pud, 0));
#else
	pud_t pud = *pudp;

	pud_clear(pudp);
#endif

	page_table_check_pud_clear(mm, address, pud);

	return pud;
}

static inline int pud_young(pud_t pud)
{
	return pte_young(pud_pte(pud));
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp)
{
	pte_t *ptep = (pte_t *)pudp;

	update_mmu_cache(vma, address, ptep);
}

static inline pud_t pudp_establish(struct vm_area_struct *vma,
				unsigned long address, pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(vma->vm_mm, address, pudp, pud);
	return __pud(atomic_long_xchg((atomic_long_t *)pudp, pud_val(pud)));
}

static inline pud_t pud_mkinvalid(pud_t pud)
{
	return __pud(pud_val(pud) & ~(_PAGE_PRESENT | _PAGE_PROT_NONE));
}

extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pud_t *pudp);

static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	return pte_pud(pte_modify(pud_pte(pud), newprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bits      1 to 2:	(zero)
 *	bit            3:	_PAGE_SWP_SOFT_DIRTY
 *	bit            4:	_PAGE_SWP_UFFD_WP
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })
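
/*
 * For example, __swp_entry(2, 0x1234) yields (2 << 7) | (0x1234 << 12) =
 * 0x1234100: the type sits in bits 7-11, the offset from bit 12 up, and
 * bit 0 (_PAGE_PRESENT) stays clear so the PTE is never seen as present.
 */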

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * -        0x9fc00000	(~2.5GB) for RV32.
 * -      0x4000000000	( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000	( 128TB) for RV64 using SV48 mmu
 * - 0x100000000000000	(  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since the "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64 bits, must have bits
 * 63-48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63-57 must be equal to bit 56.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(is_compat_task() ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		_AC(-1, UL)
#define VMALLOC_START		_AC(0, UL)
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
#endif /* !__ASSEMBLER__ */

#endif /* _ASM_RISCV_PGTABLE_H */