/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
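/*
 * A worked example of the arithmetic above, assuming the common 16KB
 * page size (PAGE_SHIFT = 14) and CONFIG_PGTABLE_LEVELS = 3: each
 * table packs PAGE_SIZE >> 3 = 2048 entries, i.e. 11 bits of index
 * per level, so:
 *
 *   PMD_SHIFT   = 14 + 11 = 25  ->  PMD_SIZE   = 32MB
 *   PGDIR_SHIFT = 25 + 11 = 36  ->  PGDIR_SIZE = 64GB
 *   VA_BITS     = 36 + 11 = 47
 */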

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE	(((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE	0
#endif

#define VMALLOC_START	MODULES_END

#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define KFENCE_AREA_START	(VMEMMAP_END + 1)
#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
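/*
 * Putting the definitions above together, the kernel mapping range
 * starting at vm_map_base is laid out roughly as follows (a sketch;
 * exact sizes depend on the configuration):
 *
 *   vm_map_base
 *     +-- PCI I/O space (PCI_IOSIZE)
 *     +-- two guard pages
 *     +-- module area   [MODULES_VADDR, MODULES_END), 256MB
 *     +-- vmalloc area  [VMALLOC_START, VMALLOC_END)
 *     +-- vmemmap       (PMD_SIZE-aligned, VMEMMAP_SIZE bytes)
 *     +-- KFENCE pool   [KFENCE_AREA_START, KFENCE_AREA_END]
 */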

#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	WRITE_ONCE(*p4d, p4dval);
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

static inline void set_pud(pud_t *pud, pud_t pudval)
{
	WRITE_ONCE(*pud, pudval);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
}

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
	WRITE_ONCE(*pmd, pmdval);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
}

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
extern void pmd_init(void *addr);

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 * <--------------------------- offset ---------------------------
 *
 * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * --------------> E <--- type ---> <---------- zeroes ---------->
 *
 * E is the exclusive marker that is not stored in swap entries.
 * The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */
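/*
 * For example, under the layout above a swap entry with type 5 and
 * offset 0x1234 is encoded as (5 << 16) | (0x1234 << 24); __swp_type()
 * and __swp_offset() below simply reverse those shifts, and the
 * exclusive marker (E, bit 23 in the diagram) is managed separately via
 * pte_swp_mkexclusive()/pte_swp_clear_exclusive().
 */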
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0x7f)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
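/*
 * set_pte() and pte_clear() below keep the _PAGE_GLOBAL bit consistent
 * across a pair of adjacent PTEs ("buddies"): both halves of such a
 * pair end up in a single TLB entry, so a global mapping in one half
 * must be mirrored in the other even while that other half is
 * otherwise none. This mirrors the long-standing behaviour of the MIPS
 * code this file is derived from.
 */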
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);

	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(ptep_get(buddy))) {
#ifdef CONFIG_SMP
			/*
			 * For SMP, multiple CPUs can race, so we need
			 * to do this atomically.
			 */
			__asm__ __volatile__(
			__AMOR "$zero, %[global], %[buddy] \n"
			: [buddy] "+ZB" (buddy->pte)
			: [global] "r" (_PAGE_GLOBAL)
			: "memory");

			DBAR(0b11000); /* o_wrw = 0b11000 */
#else /* !CONFIG_SMP */
			WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
#endif /* CONFIG_SMP */
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte(ptep, __pte(0));
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
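/*
 * Two bits cooperate to track dirtiness: _PAGE_MODIFIED is the software
 * dirty bit, while _PAGE_DIRTY is the bit that actually lets stores
 * proceed without faulting. The helpers below therefore set _PAGE_DIRTY
 * only when the page is also writable (and vice versa), and
 * pte_wrprotect()/pte_mkclean() clear it again, so a write-protected or
 * clean page always faults on the first store.
 */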
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

static inline int pte_devmap(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DEVMAP); }
static inline pte_t pte_mkdevmap(pte_t pte)	{ pte_val(pte) |= _PAGE_DEVMAP; return pte; }

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}
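/*
 * In a huge (leaf) PMD the global bit lives at a different position,
 * _PAGE_HGLOBAL, than in a regular PTE, so pmd_mkhuge() below relocates
 * an existing _PAGE_GLOBAL bit up to _PAGE_HGLOBAL while setting
 * _PAGE_HUGE.
 */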
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_DEVMAP;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = pmdp_get(pmdp);

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pud_devmap(pud)		(0)
#define pgd_devmap(pgd)		(0)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */