/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means..
 */

extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PFN_PTE_SHIFT;
}

#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

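/*
 * htw_stop()/htw_start() disable and re-enable the hardware page table
 * walker (on CPUs that have one) around software updates of the page
 * tables. The per-CPU htw_seq count lets the pair nest: the walker is
 * switched off by the first htw_stop() and only switched back on by
 * the matching outermost htw_start().
 */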
#define htw_stop()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while(0)

#define htw_start()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while(0)

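/*
 * Two PTE layouts follow. When a 32-bit kernel uses 64-bit physical
 * addresses (optionally with XPA), each PTE is split into pte_low and
 * pte_high halves; otherwise a PTE is a single word. In both layouts
 * MIPS TLB entries cover an even/odd pair of pages, so whenever one
 * entry of a pair is global its buddy must carry _PAGE_GLOBAL too -
 * hence the buddy handling in set_pte() and pte_clear() below.
 */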
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte(ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte(ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned int i;
	bool do_sync = false;

	for (i = 0; i < nr; i++) {
		if (!pte_present(pte))
			continue;
		if (pte_present(ptep[i]) &&
		    (pte_pfn(ptep[i]) == pte_pfn(pte)))
			continue;
		do_sync = true;
	}

	if (do_sync)
		__update_cache(addr, pte);

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}
#define set_ptes set_ptes

/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
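/*
 * MIPS has no hardware-managed accessed/dirty bits: _PAGE_ACCESSED and
 * _PAGE_MODIFIED are software bits, while _PAGE_SILENT_READ and
 * _PAGE_SILENT_WRITE are the bits the TLB actually honours. The
 * helpers below only set the "silent" bits once the corresponding
 * software and protection bits agree, so the first read or write of a
 * page still faults and can be accounted.
 */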
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#define pte_sw_mkyoung	pte_mkyoung

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_val(pmd) & _PAGE_HUGE)
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

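/*
 * _PAGE_SOFT_DIRTY is a pure software bit used by the soft-dirty page
 * tracking interface; pte_mkdirty() above sets it alongside
 * _PAGE_MODIFIED, and the pte_swp_* aliases reuse the same helpers for
 * swap entries.
 */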
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

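/*
 * pte_modify() keeps the page frame number and the software state of a
 * PTE while replacing its protection bits with those from newprot.
 * Three variants are needed: the XPA and non-XPA split (pte_low/
 * pte_high) layouts and the plain single-word layout.
 */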
#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte.pte_low & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte.pte_low |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte.pte_low &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#else
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		pte_t pte = *ptep;
		__update_tlb(vma, address, pte);
		if (--nr == 0)
			break;
		ptep++;
		address += PAGE_SIZE;
	}
}
#define update_mmu_cache(vma, address, ptep) \
	update_mmu_cache_range(NULL, vma, address, ptep, 1)

#define update_mmu_tlb_range(vma, address, ptep, nr) \
	update_mmu_cache_range(NULL, vma, address, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */

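/*
 * Transparent huge pages are mapped by a single PMD entry carrying
 * _PAGE_HUGE, so the pmd_* helpers below mirror the pte_* helpers
 * above at PMD granularity.
 */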
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd);

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
		unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

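/*
 * pmd_leaf()/pud_leaf() report whether an entry maps a huge page
 * directly rather than pointing to a lower-level table; on MIPS this
 * is signalled by _PAGE_HUGE in the entry itself.
 */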
#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */