/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
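/*
 * Worked example (illustrative, not normative): each table occupies one
 * page and holds PAGE_SIZE >> 3 eight-byte entries.  With 16KB pages
 * (PAGE_SHIFT == 14) and CONFIG_PGTABLE_LEVELS == 3 that is 2048 entries
 * per table, giving PMD_SHIFT == 25, PGDIR_SHIFT == 36 and VA_BITS == 47.
 * With 4KB pages (PAGE_SHIFT == 12) and 4 levels it is 512 entries per
 * table, giving PMD_SHIFT == 21, PUD_SHIFT == 30, PGDIR_SHIFT == 39 and
 * VA_BITS == 48.
 */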
#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#define VMALLOC_START	MODULES_END
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
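/*
 * Empty entries at each level point at the next level's invalid_*_table
 * rather than at NULL (see the comments below).  Presumably this lets a
 * page table walk, e.g. the TLB refill path, descend through an unmapped
 * region without any NULL checks and still terminate on an invalid PTE.
 */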
#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
extern void pmd_init(void *addr);

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <--- type ---> <---------- zeroes ---------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0x7f)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
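/*
 * Worked example (illustrative only): __swp_entry(2, 0x30) encodes
 * (2 << 16) | (0x30 << 24) == 0x30020000; __swp_type() then recovers 2
 * and __swp_offset() recovers 0x30.  Bits 0-15 stay zero, so such a PTE
 * is neither pte_none() nor pte_present().
 */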
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
		"1:"	__LL	"%[tmp], %[buddy]	\n"
		"	bnez	%[tmp], 2f		\n"
		"	 or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]	\n"
		"	beqz	%[tmp], 1b		\n"
		"	nop				\n"
		"2:					\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}
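/*
 * The buddy handling above exists because the TLB holds PTEs in even/odd
 * pairs, so the global bit presumably has to agree across a pair for the
 * shared TLB entry to behave consistently.  For the same reason
 * pte_clear() below keeps _PAGE_GLOBAL set when the buddy has it, and
 * pte_none() above deliberately ignores _PAGE_GLOBAL when testing for an
 * empty PTE.
 */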
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte(ptep, __pte(0));
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
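/*
 * On the two dirty bits (a scheme inherited from MIPS): _PAGE_MODIFIED
 * is the software "page has been written" bit, while _PAGE_DIRTY is the
 * bit the hardware checks before allowing a store (a store with it clear
 * faults).  That is why pte_mkdirty() above only sets _PAGE_DIRTY when
 * the PTE is also writable, and pte_wrprotect() clears both _PAGE_WRITE
 * and _PAGE_DIRTY so that the next write faults and can be tracked.
 */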
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}
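/*
 * Note that pmd_mkinvalid() above leaves _PAGE_PRESENT_INVALID set, so
 * pmd_present() still reports an invalidated huge pmd as present even
 * though its hardware-valid bits are cleared, presumably so the THP code
 * can still see the mapping while it is temporarily invalidated.
 */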
/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * We provide our own get_unmapped area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */