/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
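/*
 * Usage sketch (illustrative, not part of the original header): callers
 * typically wrap an existing protection value when mapping device memory,
 * e.g.
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *
 * On family > 3 (i486 and later) this ORs in the UC- cache mode; on a
 * 386 the protection value is returned unchanged.
 */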
#ifndef __ASSEMBLER__
#include <linux/spinlock.h>
#include <asm/x86_init.h>
#include <asm/pkru.h>
#include <asm/fpu/api.h>
#include <asm/coco.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

struct seq_file;
void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
bool ptdump_walk_pgd_level_checkwx(void);
#define ptdump_check_wx ptdump_walk_pgd_level_checkwx
void ptdump_walk_user_pgd_level_checkwx(void);

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(cc_mkenc(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(cc_mkdec(pgprot_val(prot)))

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)
static inline void arch_flush_lazy_mmu_mode(void) {}
#endif	/* CONFIG_PARAVIRT_XXL */

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline bool pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY_BITS;
}

static inline bool pte_shstk(pte_t pte)
{
	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
	       (pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
}
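/*
 * Illustrative encoding table (not from the original header): with
 * X86_FEATURE_SHSTK enabled, the Write and Dirty bits together mean:
 *
 *	Write=1, Dirty=x	ordinary writable memory
 *	Write=0, Dirty=0	ordinary read-only memory
 *	Write=0, Dirty=1	shadow stack memory
 *
 * hence the (_PAGE_RW | _PAGE_DIRTY) == _PAGE_DIRTY test above.
 */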
static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline bool pte_decrypted(pte_t pte)
{
	return cc_mkdec(pte_val(pte)) == pte_val(pte);
}

#define pmd_dirty pmd_dirty
static inline bool pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY_BITS;
}

static inline bool pmd_shstk(pmd_t pmd)
{
	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
	       (pmd_flags(pmd) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
	       (_PAGE_DIRTY | _PAGE_PSE);
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline bool pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY_BITS;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline bool pud_shstk(pud_t pud)
{
	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
	       (pud_flags(pud) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
	       (_PAGE_DIRTY | _PAGE_PSE);
}

static inline int pte_write(pte_t pte)
{
	/*
	 * Shadow stack pages are logically writable, but do not have
	 * _PAGE_RW. Check for them separately from _PAGE_RW itself.
	 */
	return (pte_flags(pte) & _PAGE_RW) || pte_shstk(pte);
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	/*
	 * Shadow stack pages are logically writable, but do not have
	 * _PAGE_RW. Check for them separately from _PAGE_RW itself.
	 */
	return (pmd_flags(pmd) & _PAGE_RW) || pmd_shstk(pmd);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

#define PFN_PTE_SHIFT	PAGE_SHIFT

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf pmd_leaf
static inline bool pmd_leaf(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PSE) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & _PAGE_PSE) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SPECIAL;
}

static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SPECIAL);
}
#endif	/* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SPECIAL;
}

static inline pud_t pud_mkspecial(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SPECIAL);
}
#endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}
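/*
 * Usage sketch (illustrative only): pte_set_flags()/pte_clear_flags()
 * are the building blocks for the pte_mk*() modifiers below. E.g. a
 * hypothetical helper that marks a PTE global and accessed in one go:
 *
 *	static inline pte_t pte_mkglobal_young(pte_t pte)
 *	{
 *		return pte_set_flags(pte, _PAGE_GLOBAL | _PAGE_ACCESSED);
 *	}
 *
 * Note these deliberately operate on the native value, bypassing any
 * paravirt hooks.
 */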
/*
 * Write protection operations can result in Dirty=1,Write=0 PTEs. But in the
 * case of X86_FEATURE_USER_SHSTK, these PTEs denote shadow stack memory. So
 * when creating dirty, write-protected memory, a software bit is used:
 * _PAGE_BIT_SAVED_DIRTY. The following functions take a PTE and transition the
 * Dirty bit to SavedDirty, and vice versa.
 *
 * This shifting is only done if needed. In the case of shifting
 * Dirty->SavedDirty, the condition is if the PTE is Write=0. In the case of
 * shifting SavedDirty->Dirty, the condition is Write=1.
 */
static inline pgprotval_t mksaveddirty_shift(pgprotval_t v)
{
	pgprotval_t cond = (~v >> _PAGE_BIT_RW) & 1;

	v |= ((v >> _PAGE_BIT_DIRTY) & cond) << _PAGE_BIT_SAVED_DIRTY;
	v &= ~(cond << _PAGE_BIT_DIRTY);

	return v;
}

static inline pgprotval_t clear_saveddirty_shift(pgprotval_t v)
{
	pgprotval_t cond = (v >> _PAGE_BIT_RW) & 1;

	v |= ((v >> _PAGE_BIT_SAVED_DIRTY) & cond) << _PAGE_BIT_DIRTY;
	v &= ~(cond << _PAGE_BIT_SAVED_DIRTY);

	return v;
}
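/*
 * Worked example (illustrative, not from the original header), tracking
 * only Write/Dirty/SavedDirty through mksaveddirty_shift():
 *
 *	in:   Write=0, Dirty=1, SavedDirty=0
 *	cond = (~v >> _PAGE_BIT_RW) & 1 = 1	(Write=0, so shift)
 *	out:  Write=0, Dirty=0, SavedDirty=1	(no longer looks like shstk)
 *
 *	in:   Write=1, Dirty=1, SavedDirty=0
 *	cond = 0				(Write=1, leave as-is)
 *	out:  unchanged
 *
 * clear_saveddirty_shift() performs the inverse move when Write=1.
 */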
static inline pte_t pte_mksaveddirty(pte_t pte)
{
	pteval_t v = native_pte_val(pte);

	v = mksaveddirty_shift(v);
	return native_make_pte(v);
}

static inline pte_t pte_clear_saveddirty(pte_t pte)
{
	pteval_t v = native_pte_val(pte);

	v = clear_saveddirty_shift(v);
	return native_make_pte(v);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = pte_clear_flags(pte, _PAGE_RW);

	/*
	 * Blindly clearing _PAGE_RW might accidentally create
	 * a shadow stack PTE (Write=0,Dirty=1). Move the hardware
	 * dirty value to the software bit, if present.
	 */
	return pte_mksaveddirty(pte);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_UFFD_WP;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(pte_set_flags(pte, _PAGE_UFFD_WP));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY_BITS);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);

	return pte_mksaveddirty(pte);
}

static inline pte_t pte_mkwrite_shstk(pte_t pte)
{
	pte = pte_clear_flags(pte, _PAGE_RW);

	return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

struct vm_area_struct;
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma);
#define pte_mkwrite pte_mkwrite

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

/* See comments above mksaveddirty_shift() */
static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
{
	pmdval_t v = native_pmd_val(pmd);

	v = mksaveddirty_shift(v);
	return native_make_pmd(v);
}

/* See comments above mksaveddirty_shift() */
static inline pmd_t pmd_clear_saveddirty(pmd_t pmd)
{
	pmdval_t v = native_pmd_val(pmd);

	v = clear_saveddirty_shift(v);
	return native_make_pmd(v);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd = pmd_clear_flags(pmd, _PAGE_RW);

	/*
	 * Blindly clearing _PAGE_RW might accidentally create
	 * a shadow stack PMD (RW=0, Dirty=1). Move the hardware
	 * dirty value to the software bit.
	 */
	return pmd_mksaveddirty(pmd);
}
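/*
 * Illustrative sequence (not from the original header): write-protecting
 * a dirty, writable PTE with the helpers above keeps the dirty
 * information (soft-dirty omitted for brevity):
 *
 *	pte = pte_mkdirty(pte);		// Write=1, Dirty=1
 *	pte = pte_wrprotect(pte);	// Write=0, Dirty=0, SavedDirty=1
 *	pte_dirty(pte);			// still true: _PAGE_DIRTY_BITS
 *					// covers SavedDirty too
 *
 * and the result is not mistaken for shadow stack, since pte_shstk()
 * requires the hardware Dirty bit.
 */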
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_wrprotect(pmd_set_flags(pmd, _PAGE_UFFD_WP));
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY_BITS);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd = pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);

	return pmd_mksaveddirty(pmd);
}

static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
{
	pmd = pmd_clear_flags(pmd, _PAGE_RW);

	return pmd_set_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
#define pmd_mkwrite pmd_mkwrite

/* See comments above mksaveddirty_shift() */
static inline pud_t pud_mksaveddirty(pud_t pud)
{
	pudval_t v = native_pud_val(pud);

	v = mksaveddirty_shift(v);
	return native_make_pud(v);
}

/* See comments above mksaveddirty_shift() */
static inline pud_t pud_clear_saveddirty(pud_t pud)
{
	pudval_t v = native_pud_val(pud);

	v = clear_saveddirty_shift(v);
	return native_make_pud(v);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY_BITS);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud = pud_clear_flags(pud, _PAGE_RW);

	/*
	 * Blindly clearing _PAGE_RW might accidentally create
	 * a shadow stack PUD (RW=0, Dirty=1). Move the hardware
	 * dirty value to the software bit.
	 */
	return pud_mksaveddirty(pud);
}
static inline pud_t pud_mkdirty(pud_t pud)
{
	pud = pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);

	return pud_mksaveddirty(pud);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud = pud_set_flags(pud, _PAGE_RW);

	return pud_clear_saveddirty(pud);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot. Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
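/*
 * Example (illustrative only): on hardware without NX support,
 * __supported_pte_mask has _PAGE_NX clear, so for a present pgprot:
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_NX))
 *		== (_PAGE_PRESENT | _PAGE_RW)
 *
 * A non-present pgprot passes through untouched, since its bits may be
 * repurposed (e.g. for swap entries).
 */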
static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	/* This bit combination is used to mark shadow stacks */
	WARN_ON_ONCE((pgprot_val(pgprot) & (_PAGE_DIRTY | _PAGE_RW)) ==
		     _PAGE_DIRTY);
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline pud_t pud_mkinvalid(pud_t pud)
{
	return pfn_pud(pud_pfn(pud),
		       __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;
	pte_t pte_result;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);

	pte_result = __pte(val);

	/*
	 * To avoid creating Write=0,Dirty=1 PTEs, pte_modify() needs to avoid:
	 * 1. Marking Write=0 PTEs Dirty=1
	 * 2. Marking Dirty=1 PTEs Write=0
	 *
	 * The first case cannot happen because the _PAGE_CHG_MASK will filter
	 * out any Dirty bit passed in newprot. Handle the second case by
	 * going through the mksaveddirty exercise. Only do this if the old
	 * value was Write=1 to avoid doing this on Shadow Stack PTEs.
	 */
	if (oldval & _PAGE_RW)
		pte_result = pte_mksaveddirty(pte_result);
	else
		pte_result = pte_clear_saveddirty(pte_result);

	return pte_result;
}
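/*
 * Walk-through (illustrative, not from the original header): changing a
 * dirty, writable PTE to read-only protections:
 *
 *	old:          Write=1, Dirty=1
 *	val &= _PAGE_CHG_MASK		keeps PFN + Dirty, drops Write
 *	intermediate: Write=0, Dirty=1	(would look like shadow stack)
 *	pte_mksaveddirty():
 *	result:       Write=0, Dirty=0, SavedDirty=1
 */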
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;
	pmd_t pmd_result;

	val &= (_HPAGE_CHG_MASK & ~_PAGE_DIRTY);
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);

	pmd_result = __pmd(val);

	/*
	 * Avoid creating shadow stack PMD by accident. See comment in
	 * pte_modify().
	 */
	if (oldval & _PAGE_RW)
		pmd_result = pmd_mksaveddirty(pmd_result);
	else
		pmd_result = pmd_clear_saveddirty(pmd_result);

	return pmd_result;
}

static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	pudval_t val = pud_val(pud), oldval = val;
	pud_t pud_result;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PUD_PAGE_MASK);

	pud_result = __pud(val);

	/*
	 * Avoid creating shadow stack PUD by accident. See comment in
	 * pte_modify().
	 */
	if (oldval & _PAGE_RW)
		pud_result = pud_mksaveddirty(pud_result);
	else
		pud_result = pud_clear_saveddirty(pud_result);

	return pud_result;
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
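/*
 * Summary table (illustrative restatement of the checks above):
 *
 *	requested (pcm)		disallowed result (new_pcm)
 *	UC-			WB
 *	WC			WB
 *	WT			WB, WC
 *	anything else		none
 *
 * i.e. a more cacheable memory type must never be substituted for the
 * stronger one a caller asked for.
 */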
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user and returns the resulting PGD that must be set in
 * the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */

#endif	/* __ASSEMBLER__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLER__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	if (__pte_needs_invert(pte_val(pte)))
		return __pte(pte_val(pte) - (nr << PFN_PTE_SHIFT));
	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
}
#define pte_advance_pfn	pte_advance_pfn

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}
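/*
 * Background note (an assumption about the erratum, kept brief):
 * _PAGE_KNL_ERRATUM_MASK covers the Accessed and Dirty bits, which
 * Knights Landing CPUs may set spuriously. Masking them in the *_none()
 * checks means e.g. a raw value of _PAGE_ACCESSED alone still counts
 * as "none".
 */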
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
	       (_KERNPG_TABLE & ~_PAGE_ACCESSED);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

#define pud_leaf pud_leaf
static inline bool pud_leaf(pud_t pud)
{
	return pud_val(pud) & _PAGE_PSE;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}
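/*
 * Illustrative note (not from the original header): with 4-level paging
 * the p4d level is folded, so p4d_offset() just reinterprets the pgd
 * pointer:
 *
 *	p4d_t *p4d = p4d_offset(pgd, addr);	// == (p4d_t *)pgd
 *
 * With 5-level paging it indexes the table that *pgd points to, using
 * p4d_index(addr).
 */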
static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here. PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLER__ */

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLER__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
				  unsigned long end, pgprot_t prot);

#ifdef CONFIG_X86_64
extern pgd_t trampoline_pgd_entry;
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	page_table_check_pte_clear(mm, addr, pte);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
		page_table_check_pte_clear(mm, addr, pte);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	/*
	 * Avoid accidentally creating shadow stack PTEs
	 * (Write=0,Dirty=1). Use cmpxchg() to prevent races with
	 * the hardware setting Dirty=1.
	 */
	pte_t old_pte, new_pte;

	old_pte = READ_ONCE(*ptep);
	do {
		new_pte = pte_wrprotect(old_pte);
	} while (!try_cmpxchg((long *)&ptep->pte, (long *)&old_pte, *(long *)&new_pte));
}
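/*
 * Why the loop above (illustrative expansion, not from the original
 * header): between reading *ptep and writing it back, a hardware page
 * walk on another CPU may set Dirty=1. A plain read-modify-write could
 * then clear _PAGE_RW around a stale value and publish Write=0,Dirty=1,
 * i.e. what pte_shstk() considers shadow stack. try_cmpxchg() refreshes
 * old_pte on failure, so the wrprotect + SavedDirty shift is always
 * applied to the latest value.
 */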
1234 */ 1235 struct vm_area_struct; 1236 1237 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 1238 extern int ptep_set_access_flags(struct vm_area_struct *vma, 1239 unsigned long address, pte_t *ptep, 1240 pte_t entry, int dirty); 1241 1242 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 1243 extern int ptep_test_and_clear_young(struct vm_area_struct *vma, 1244 unsigned long addr, pte_t *ptep); 1245 1246 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 1247 extern int ptep_clear_flush_young(struct vm_area_struct *vma, 1248 unsigned long address, pte_t *ptep); 1249 1250 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 1251 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 1252 pte_t *ptep) 1253 { 1254 pte_t pte = native_ptep_get_and_clear(ptep); 1255 page_table_check_pte_clear(mm, addr, pte); 1256 return pte; 1257 } 1258 1259 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 1260 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, 1261 unsigned long addr, pte_t *ptep, 1262 int full) 1263 { 1264 pte_t pte; 1265 if (full) { 1266 /* 1267 * Full address destruction in progress; paravirt does not 1268 * care about updates and native needs no locking 1269 */ 1270 pte = native_local_ptep_get_and_clear(ptep); 1271 page_table_check_pte_clear(mm, addr, pte); 1272 } else { 1273 pte = ptep_get_and_clear(mm, addr, ptep); 1274 } 1275 return pte; 1276 } 1277 1278 #define __HAVE_ARCH_PTEP_SET_WRPROTECT 1279 static inline void ptep_set_wrprotect(struct mm_struct *mm, 1280 unsigned long addr, pte_t *ptep) 1281 { 1282 /* 1283 * Avoid accidentally creating shadow stack PTEs 1284 * (Write=0,Dirty=1). Use cmpxchg() to prevent races with 1285 * the hardware setting Dirty=1. 1286 */ 1287 pte_t old_pte, new_pte; 1288 1289 old_pte = READ_ONCE(*ptep); 1290 do { 1291 new_pte = pte_wrprotect(old_pte); 1292 } while (!try_cmpxchg((long *)&ptep->pte, (long *)&old_pte, *(long *)&new_pte)); 1293 } 1294 1295 #define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0) 1296 1297 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 1298 extern int pmdp_set_access_flags(struct vm_area_struct *vma, 1299 unsigned long address, pmd_t *pmdp, 1300 pmd_t entry, int dirty); 1301 extern int pudp_set_access_flags(struct vm_area_struct *vma, 1302 unsigned long address, pud_t *pudp, 1303 pud_t entry, int dirty); 1304 1305 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 1306 extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, 1307 unsigned long addr, pmd_t *pmdp); 1308 extern int pudp_test_and_clear_young(struct vm_area_struct *vma, 1309 unsigned long addr, pud_t *pudp); 1310 1311 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH 1312 extern int pmdp_clear_flush_young(struct vm_area_struct *vma, 1313 unsigned long address, pmd_t *pmdp); 1314 1315 1316 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 1317 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, 1318 pmd_t *pmdp) 1319 { 1320 pmd_t pmd = native_pmdp_get_and_clear(pmdp); 1321 1322 page_table_check_pmd_clear(mm, addr, pmd); 1323 1324 return pmd; 1325 } 1326 1327 #define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR 1328 static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, 1329 unsigned long addr, pud_t *pudp) 1330 { 1331 pud_t pud = native_pudp_get_and_clear(pudp); 1332 1333 page_table_check_pud_clear(mm, addr, pud); 1334 1335 return pud; 1336 } 1337 1338 #define __HAVE_ARCH_PMDP_SET_WRPROTECT 1339 static inline void pmdp_set_wrprotect(struct mm_struct *mm, 1340 unsigned long addr, pmd_t *pmdp) 1341 { 1342 /* 1343 * Avoid 
#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline pud_t pudp_establish(struct vm_area_struct *vma,
		unsigned long address, pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(vma->vm_mm, address, pudp, pud);
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pudp, pud);
	} else {
		pud_t old = *pudp;
		WRITE_ONCE(*pudp, pud);
		return old;
	}
}
#endif

#define __HAVE_ARCH_PMDP_INVALIDATE_AD
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp);

pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pud_t *pudp);

/*
 * Page table pages are page-aligned. The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
 * All top-level MITIGATION_PAGE_TABLE_ISOLATION page tables are order-1
 * pages (8k-aligned and 8k in size). The kernel one is at the beginning
 * 4k and the user one is in the last 4k. To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT
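/*
 * Worked example (illustrative, hypothetical addresses): the PGD pair is
 * 8k-aligned, so a kernel half at 0xffff888001c0c000 has its user half
 * at 0xffff888001c0d000. kernel_to_user_pgdp() below just sets bit 12
 * (PAGE_SHIFT) of the pointer:
 *
 *	0x...c000 | BIT(12) == 0x...d000
 */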
1419 */ 1420 static inline void *ptr_set_bit(void *ptr, int bit) 1421 { 1422 unsigned long __ptr = (unsigned long)ptr; 1423 1424 __ptr |= BIT(bit); 1425 return (void *)__ptr; 1426 } 1427 static inline void *ptr_clear_bit(void *ptr, int bit) 1428 { 1429 unsigned long __ptr = (unsigned long)ptr; 1430 1431 __ptr &= ~BIT(bit); 1432 return (void *)__ptr; 1433 } 1434 1435 static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp) 1436 { 1437 return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); 1438 } 1439 1440 static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp) 1441 { 1442 return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); 1443 } 1444 1445 static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp) 1446 { 1447 return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); 1448 } 1449 1450 static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp) 1451 { 1452 return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); 1453 } 1454 #endif /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */ 1455 1456 /* 1457 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); 1458 * 1459 * dst - pointer to pgd range anywhere on a pgd page 1460 * src - "" 1461 * count - the number of pgds to copy. 1462 * 1463 * dst and src can be on the same page, but the range must not overlap, 1464 * and must not cross a page boundary. 1465 */ 1466 static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) 1467 { 1468 memcpy(dst, src, count * sizeof(pgd_t)); 1469 #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION 1470 if (!static_cpu_has(X86_FEATURE_PTI)) 1471 return; 1472 /* Clone the user space pgd as well */ 1473 memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src), 1474 count * sizeof(pgd_t)); 1475 #endif 1476 } 1477 1478 #define PTE_SHIFT ilog2(PTRS_PER_PTE) 1479 static inline int page_level_shift(enum pg_level level) 1480 { 1481 return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT; 1482 } 1483 static inline unsigned long page_level_size(enum pg_level level) 1484 { 1485 return 1UL << page_level_shift(level); 1486 } 1487 static inline unsigned long page_level_mask(enum pg_level level) 1488 { 1489 return ~(page_level_size(level) - 1); 1490 } 1491 1492 /* 1493 * The x86 doesn't have any external MMU info: the kernel page 1494 * tables contain all the necessary information. 
1495 */ 1496 static inline void update_mmu_cache(struct vm_area_struct *vma, 1497 unsigned long addr, pte_t *ptep) 1498 { 1499 } 1500 static inline void update_mmu_cache_range(struct vm_fault *vmf, 1501 struct vm_area_struct *vma, unsigned long addr, 1502 pte_t *ptep, unsigned int nr) 1503 { 1504 } 1505 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, 1506 unsigned long addr, pmd_t *pmd) 1507 { 1508 } 1509 static inline void update_mmu_cache_pud(struct vm_area_struct *vma, 1510 unsigned long addr, pud_t *pud) 1511 { 1512 } 1513 static inline pte_t pte_swp_mkexclusive(pte_t pte) 1514 { 1515 return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE); 1516 } 1517 1518 static inline bool pte_swp_exclusive(pte_t pte) 1519 { 1520 return pte_flags(pte) & _PAGE_SWP_EXCLUSIVE; 1521 } 1522 1523 static inline pte_t pte_swp_clear_exclusive(pte_t pte) 1524 { 1525 return pte_clear_flags(pte, _PAGE_SWP_EXCLUSIVE); 1526 } 1527 1528 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY 1529 static inline pte_t pte_swp_mksoft_dirty(pte_t pte) 1530 { 1531 return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); 1532 } 1533 1534 static inline int pte_swp_soft_dirty(pte_t pte) 1535 { 1536 return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; 1537 } 1538 1539 static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) 1540 { 1541 return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); 1542 } 1543 1544 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1545 static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) 1546 { 1547 return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY); 1548 } 1549 1550 static inline int pmd_swp_soft_dirty(pmd_t pmd) 1551 { 1552 return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY; 1553 } 1554 1555 static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) 1556 { 1557 return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY); 1558 } 1559 #endif 1560 #endif 1561 1562 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP 1563 static inline pte_t pte_swp_mkuffd_wp(pte_t pte) 1564 { 1565 return pte_set_flags(pte, _PAGE_SWP_UFFD_WP); 1566 } 1567 1568 static inline int pte_swp_uffd_wp(pte_t pte) 1569 { 1570 return pte_flags(pte) & _PAGE_SWP_UFFD_WP; 1571 } 1572 1573 static inline pte_t pte_swp_clear_uffd_wp(pte_t pte) 1574 { 1575 return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP); 1576 } 1577 1578 static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd) 1579 { 1580 return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP); 1581 } 1582 1583 static inline int pmd_swp_uffd_wp(pmd_t pmd) 1584 { 1585 return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP; 1586 } 1587 1588 static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd) 1589 { 1590 return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP); 1591 } 1592 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */ 1593 1594 static inline u16 pte_flags_pkey(unsigned long pte_flags) 1595 { 1596 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 1597 /* ifdef to avoid doing 59-bit shift on 32-bit values */ 1598 return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0; 1599 #else 1600 return 0; 1601 #endif 1602 } 1603 1604 static inline bool __pkru_allows_pkey(u16 pkey, bool write) 1605 { 1606 u32 pkru = read_pkru(); 1607 1608 if (!__pkru_allows_read(pkru, pkey)) 1609 return false; 1610 if (write && !__pkru_allows_write(pkru, pkey)) 1611 return false; 1612 1613 return true; 1614 } 1615 1616 /* 1617 * 'pteval' can come from a PTE, PMD or PUD. We only check 1618 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the 1619 * same value on all 3 types. 
1620 */ 1621 static inline bool __pte_access_permitted(unsigned long pteval, bool write) 1622 { 1623 unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER; 1624 1625 /* 1626 * Write=0,Dirty=1 PTEs are shadow stack, which the kernel 1627 * shouldn't generally allow access to, but since they 1628 * are already Write=0, the below logic covers both cases. 1629 */ 1630 if (write) 1631 need_pte_bits |= _PAGE_RW; 1632 1633 if ((pteval & need_pte_bits) != need_pte_bits) 1634 return 0; 1635 1636 return __pkru_allows_pkey(pte_flags_pkey(pteval), write); 1637 } 1638 1639 #define pte_access_permitted pte_access_permitted 1640 static inline bool pte_access_permitted(pte_t pte, bool write) 1641 { 1642 return __pte_access_permitted(pte_val(pte), write); 1643 } 1644 1645 #define pmd_access_permitted pmd_access_permitted 1646 static inline bool pmd_access_permitted(pmd_t pmd, bool write) 1647 { 1648 return __pte_access_permitted(pmd_val(pmd), write); 1649 } 1650 1651 #define pud_access_permitted pud_access_permitted 1652 static inline bool pud_access_permitted(pud_t pud, bool write) 1653 { 1654 return __pte_access_permitted(pud_val(pud), write); 1655 } 1656 1657 #define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1 1658 extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot); 1659 1660 static inline bool arch_has_pfn_modify_check(void) 1661 { 1662 return boot_cpu_has_bug(X86_BUG_L1TF); 1663 } 1664 1665 #define arch_check_zapped_pte arch_check_zapped_pte 1666 void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte); 1667 1668 #define arch_check_zapped_pmd arch_check_zapped_pmd 1669 void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd); 1670 1671 #define arch_check_zapped_pud arch_check_zapped_pud 1672 void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud); 1673 1674 #ifdef CONFIG_XEN_PV 1675 #define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young 1676 static inline bool arch_has_hw_nonleaf_pmd_young(void) 1677 { 1678 return !cpu_feature_enabled(X86_FEATURE_XENPV); 1679 } 1680 #endif 1681 1682 #ifdef CONFIG_PAGE_TABLE_CHECK 1683 static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr) 1684 { 1685 return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER); 1686 } 1687 1688 static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr) 1689 { 1690 return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) && (pmd_val(pmd) & _PAGE_USER); 1691 } 1692 1693 static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr) 1694 { 1695 return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) && (pud_val(pud) & _PAGE_USER); 1696 } 1697 #endif 1698 1699 #ifdef CONFIG_X86_SGX 1700 int arch_memory_failure(unsigned long pfn, int flags); 1701 #define arch_memory_failure arch_memory_failure 1702 1703 bool arch_is_platform_page(u64 paddr); 1704 #define arch_is_platform_page arch_is_platform_page 1705 #endif 1706 1707 /* 1708 * Use set_p*_safe(), and elide TLB flushing, when confident that *no* 1709 * TLB flush will be required as a result of the "set". For example, use 1710 * in scenarios where it is known ahead of time that the routine is 1711 * setting non-present entries, or re-setting an existing entry to the 1712 * same value. Otherwise, use the typical "set" helpers and flush the 1713 * TLB. 
1714 */ 1715 #define set_pte_safe(ptep, pte) \ 1716 ({ \ 1717 WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \ 1718 set_pte(ptep, pte); \ 1719 }) 1720 1721 #define set_pmd_safe(pmdp, pmd) \ 1722 ({ \ 1723 WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ 1724 set_pmd(pmdp, pmd); \ 1725 }) 1726 1727 #define set_pud_safe(pudp, pud) \ 1728 ({ \ 1729 WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \ 1730 set_pud(pudp, pud); \ 1731 }) 1732 1733 #define set_p4d_safe(p4dp, p4d) \ 1734 ({ \ 1735 WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ 1736 set_p4d(p4dp, p4d); \ 1737 }) 1738 1739 #define set_pgd_safe(pgdp, pgd) \ 1740 ({ \ 1741 WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ 1742 set_pgd(pgdp, pgd); \ 1743 }) 1744 #endif /* __ASSEMBLER__ */ 1745 1746 #endif /* _ASM_X86_PGTABLE_H */ 1747