#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while (0)

#endif	/* CONFIG_PARAVIRT */
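
/*
 * Illustrative sketch, not part of this header: whether the set_*()
 * names above resolve to the native_*() helpers or to paravirt hooks,
 * callers always go through the generic names.  A hypothetical
 * fault-handler tail would look like:
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, address, ptep, pte);
 *	update_mmu_cache(vma, address, ptep);
 */
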
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}
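
/*
 * Illustrative sketch, not part of this header: the pte_mk*() and
 * pte_clr*() helpers above operate on a pte_t by value, so they
 * compose freely.  Write-protecting a clean copy of a PTE is just:
 *
 *	pte_t wp = pte_wrprotect(pte_mkclean(pte));
 *
 * Nothing hits the page table until the caller writes the result
 * back with set_pte_at() (or a paravirt equivalent).
 */
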
static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
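
/*
 * Illustrative sketch of a hypothetical caller, not from this file:
 * the soft-dirty helpers above are used pairwise by write-tracking
 * machinery such as clear_refs.  Clearing, then later testing, a
 * PTE's soft-dirty state looks roughly like:
 *
 *	ptent = pte_wrprotect(ptent);
 *	ptent = pte_clear_soft_dirty(ptent);
 *	...
 *	if (pte_soft_dirty(ptent))
 *		...	(the page was written to since the clear)
 */
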
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))
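
/*
 * Illustrative sketch, hypothetical values: pte_modify() keeps the
 * pfn plus the bits in _PAGE_CHG_MASK and takes everything else from
 * the new protection, so downgrading a mapping to read-only without
 * disturbing its dirty/accessed state is simply:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */
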
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
	    mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */
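
/*
 * Illustrative sketch, hypothetical fault-path fragment: a NUMA
 * "prot-none" entry is deliberately not present to the hardware,
 * which is exactly the combination the helpers above test for:
 *
 *	if (pte_protnone(pte))
 *		...	(route to the NUMA hinting-fault handler)
 */
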
static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
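
/*
 * Illustrative sketch, hypothetical kernel-address walk on a
 * four-level configuration: the index and offset helpers in this
 * file chain together, each selecting the entry controlling
 * @address at its level:
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *	pud_t *pud = pud_offset(pgd, address);
 *	pmd_t *pmd = pmd_offset(pud, address);
 *	pte_t *pte = pte_offset_kernel(pmd, address);
 *
 * Each step assumes the level above was checked with the matching
 * none()/bad() helpers and is not a large mapping.
 */
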
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}
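
/*
 * Illustrative sketch, hypothetical teardown path: the local variants
 * above are only safe when no other CPU can race on the entry, e.g.
 * while the whole address space is being destroyed:
 *
 *	pte_t old = native_local_ptep_get_and_clear(ptep);
 *	if (pte_dirty(old))
 *		set_page_dirty(pte_page(old));
 */
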
#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}
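
/*
 * Illustrative sketch of a simplified fork-style copy, not from this
 * file: ptep_set_wrprotect() is the classic way to write-protect the
 * parent's PTE; the atomic clear_bit() cannot lose a racing hardware
 * dirty/accessed update:
 *
 *	ptep_set_wrprotect(src_mm, addr, src_pte);
 *	pte = pte_wrprotect(*src_pte);
 *	set_pte_at(dst_mm, addr, dst_pte, pte);
 */
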
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */