/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#ifdef CONFIG_RELOCATABLE
#define KERNEL_LINK_ADDR	UL(0)
#else
#define KERNEL_LINK_ADDR	_AC(CONFIG_PHYS_RAM_BASE, UL)
#endif
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE	((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	PAGE_OFFSET
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#else
#define MODULES_VADDR		VMALLOC_START
#define MODULES_END		VMALLOC_END
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32	32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39	39
#define VA_BITS_SV48	48
#define VA_BITS_SV57	57

#define VA_BITS		(pgtable_l5_enabled ? \
			 VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS		VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START - vmemmap_start_pfn)
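
/*
 * Illustrative Sv39 layout figures (a sketch only; it assumes 4 KiB pages
 * and a 64-byte struct page, i.e. STRUCT_PAGE_MAX_SHIFT == 6):
 *   PGDIR_SIZE = 1 GiB, PTRS_PER_PGD = 512
 *   KERN_VIRT_SIZE = (512 / 2 * 1 GiB) / 2 = 128 GiB (direct mapping)
 *   VMALLOC_SIZE   = KERN_VIRT_SIZE / 2    =  64 GiB
 *   VMEMMAP_SHIFT  = 39 - 12 - 1 + 6 = 32, so VMEMMAP_SIZE = 4 GiB,
 *   which is exactly enough struct pages to describe half of the
 *   2^39-byte virtual address space (2^38 / 2^12 pages * 2^6 bytes each).
 */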

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define VA_USER_SV39	(UL(1) << (VA_BITS_SV39 - 1))
#define VA_USER_SV48	(UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57	(UL(1) << (VA_BITS_SV57 - 1))

#define MMAP_VA_BITS_64		((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64	(VA_BITS_SV39)
#define MMAP_VA_BITS		(is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS	(is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>
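
/*
 * For execute-in-place (XIP) kernels the writable data is copied from the
 * flash image into RAM at boot. XIP_FIXUP() takes an address and, if it
 * still points into the ROM copy of the [_sdata, _end) window, translates
 * it to the corresponding RAM address; anything outside that window is
 * returned unchanged.
 */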
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	extern char _sdata[], _start[], _end[];				\
	uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR		\
				+ (uintptr_t)&_sdata - (uintptr_t)&_start; \
	uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR			\
				+ (uintptr_t)&_end - (uintptr_t)&_start; \
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= __rom_start_data && __a < __rom_end_data) ?		\
		__a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a;	\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */

struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __meminitdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)
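
/*
 * Default attributes for ioremap(): a kernel RW mapping with the memory
 * type bits replaced by the I/O type (non-cacheable, strongly ordered).
 * Note that _PAGE_MTMASK and _PAGE_IO typically come from the Svpbmt PBMT
 * encoding or a vendor equivalent (e.g. T-Head); on hardware without such
 * an extension they patch out to zero, making this a plain kernel mapping.
 */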
#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO	__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * When splitting a THP, split_huge_page() will temporarily clear
	 * the present bit, in this situation, pmd_present() and
	 * pmd_trans_huge() still needs to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}
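
/*
 * Note that pte_present() below also reports PROT_NONE mappings as present:
 * such PTEs have _PAGE_PRESENT clear (so the hardware faults on any access)
 * but keep the software _PAGE_PROT_NONE bit set, which is what distinguishes
 * them from swap entries.
 */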
static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROT_NONE) &&
	    atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return pte_val(pte) & _PAGE_DEVMAP;
}
#endif

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DEVMAP);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}
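
/*
 * With Svnapot a single leaf PTE can map a naturally aligned power-of-two
 * region larger than one page. pte_leaf_size() reports that contiguous
 * size instead of PAGE_SIZE so that generic callers querying the mapping
 * granule see the real NAPOT size.
 */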
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte)	(pte_napot(pte) ?			\
					napot_cont_size(napot_cont_order(pte)) :\
					PAGE_SIZE)
#endif

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
		 : : : : svvptc);

	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);

svvptc:;
	/*
	 * Svvptc guarantees that the new valid pte will be visible within
	 * a bounded timeframe, so when the uarch does not cache invalid
	 * entries, we don't have to do anything.
	 */
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
}

void flush_icache_pte(struct mm_struct *mm, pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(mm, pteval);

	set_pte(ptep, pteval);
}

#define PFN_PTE_SHIFT	_PAGE_PFN_SHIFT

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pteval, nr);

	for (;;) {
		__set_pte_at(mm, ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG	/* defined in mm/pgtable.c */
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
				     pte_t *ptep);
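
/*
 * The helpers below modify live PTEs with atomic read-modify-write
 * operations (xchg/and on the raw entry value) so that concurrent A/D bit
 * updates, whether performed by hardware (Svadu) or by the software fault
 * path, are not lost while the entry is being cleared or write-protected.
 */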
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
	return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

/*
 * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
 * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in
 * DT.
 */
#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	return pte_pgprot(pmd_pte(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	return pte_pgprot(pud_pte(pud));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pte_write(pud_pte(pud));
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(pte_mkdevmap(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
	return pte_special(pmd_pte(pmd));
}

static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
}
#endif

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
	return pte_special(pud_pte(pud));
}

static inline pud_t pud_mkspecial(pud_t pud)
{
	return pte_pud(pte_mkspecial(pud_pte(pud)));
}
#endif

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pte_pud(pte_wrprotect(pud_pte(pud)));
}

static inline int pud_trans_huge(pud_t pud)
{
	return pud_leaf(pud);
}

static inline int pud_dirty(pud_t pud)
{
	return pte_dirty(pud_pte(pud));
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pte_pud(pte_mkyoung(pud_pte(pud)));
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pte_pud(pte_mkold(pud_pte(pud)));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pte_pud(pte_mkdirty(pud_pte(pud)));
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pte_pud(pte_mkclean(pud_pte(pud)));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pte_pud(pte_mkwrite_novma(pud_pte(pud)));
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud;
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pte_pud(pte_mkdevmap(pud_pte(pud)));
}

static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pudp, pud_pte(entry), dirty);
}

static inline int pudp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pud_t *pudp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pudp);
}

static inline int pud_young(pud_t pud)
{
	return pte_young(pud_pte(pud));
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp)
{
	pte_t *ptep = (pte_t *)pudp;

	update_mmu_cache(vma, address, ptep);
}

static inline pud_t pudp_establish(struct vm_area_struct *vma,
				   unsigned long address, pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(vma->vm_mm, pudp, pud);
	return __pud(atomic_long_xchg((atomic_long_t *)pudp, pud_val(pud)));
}

static inline pud_t pud_mkinvalid(pud_t pud)
{
	return __pud(pud_val(pud) & ~(_PAGE_PRESENT | _PAGE_PROT_NONE));
}

extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pud_t *pudp);

static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	return pte_pud(pte_modify(pud_pte(pud), newprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit       1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
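
/*
 * Illustrative encoding: __swp_entry(3, 0x1234) puts type 3 in bits 7-11
 * and offset 0x1234 starting at bit 12, giving the raw value 0x1234180.
 * Bit 0 (_PAGE_PRESENT) stays clear, so the entry is !pte_present() and
 * is therefore recognised as a swap PTE rather than a mapped page.
 */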

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * -        0x9fc00000	(~2.5GB) for RV32.
 * -      0x4000000000	( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000	( 128TB) for RV64 using SV48 mmu
 * - 0x100000000000000	(  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63–57 must be equal to bit 56.
 */
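/*
 * For example, under Sv39 PGDIR_SIZE is 1 GiB and PTRS_PER_PGD is 512, so
 * TASK_SIZE_64 = PGDIR_SIZE * PTRS_PER_PGD / 2 = 0x4000000000 (256 GiB),
 * matching the table above; Sv48 and Sv57 scale the same way through the
 * larger PGDIR_SIZE.
 */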
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MAX	LONG_MAX

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(is_compat_task() ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		_AC(-1, UL)
#define VMALLOC_START		_AC(0, UL)
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */