/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means..
 */

extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
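
/*
 * Illustrative note on ZERO_PAGE() (a sketch of how it is consumed, not an
 * interface defined here): because __HAVE_COLOR_ZERO_PAGE is set, the macro
 * picks one of several zero pages so that the page returned has the same
 * cache colour as vaddr, avoiding virtual aliases on read faults of
 * anonymous memory.  Generic mm code uses it roughly along the lines of
 *
 *	entry = mk_pte(ZERO_PAGE(vmf->address), vma->vm_page_prot);
 *
 * zero_page_mask selects among the coloured copies and is zero when a
 * single zero page suffices.
 */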

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PFN_PTE_SHIFT;
}

#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while(0)

#define htw_start()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while(0)

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte(ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
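
/*
 * Background on the "buddy" handling in set_pte()/pte_clear() below (an
 * explanatory note, not an interface of this header): a MIPS TLB entry maps
 * an even/odd pair of virtual pages through EntryLo0/EntryLo1, and the entry
 * is only treated as global when the G bit is set in both halves.
 * ptep_buddy() points at the other PTE of that pair, so whenever one half
 * becomes global the still-empty buddy has to carry _PAGE_GLOBAL as well or
 * the shared TLB entry would lose its global attribute.  For the single-word
 * PTE layout the invariant being preserved is roughly
 *
 *	if (pte_val(*ptep) & _PAGE_GLOBAL)
 *		WARN_ON(!(pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL));
 */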

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte(ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned int i;
	bool do_sync = false;

	for (i = 0; i < nr; i++) {
		if (!pte_present(pte))
			continue;
		if (pte_present(ptep[i]) &&
		    (pte_pfn(ptep[i]) == pte_pfn(pte)))
			continue;
		do_sync = true;
	}

	if (do_sync)
		__update_cache(addr, pte);

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}
#define set_ptes set_ptes

/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
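
/*
 * For context on set_ptes() above (an assumption about the generic layer
 * rather than something defined in this file): common code maps
 * set_pte_at() onto set_ptes() with nr == 1, so the batched helper is also
 * the single-PTE write path.  The second loop steps the target pfn by one
 * page per iteration simply by adding (1UL << PFN_PTE_SHIFT) to the pte
 * value; filling a physically contiguous range therefore looks roughly like
 *
 *	set_ptes(mm, addr, ptep, mk_pte(page, prot), nr);
 *
 * which installs nr consecutive PTEs referencing nr consecutive pfns.
 */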

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#define pte_sw_mkyoung	pte_mkyoung
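
/*
 * Background on the pte_mk*() helpers above (explanatory, not a new
 * interface): MIPS has no hardware-managed dirty or accessed bits, so
 * _PAGE_MODIFIED and _PAGE_ACCESSED are pure software state, while
 * _PAGE_SILENT_WRITE and _PAGE_SILENT_READ are the bits the TLB actually
 * honours.  A "silent" bit is only set once both the permission and the
 * software state allow it, so a writable but clean page is entered into the
 * TLB read-only; the first store takes a TLB modified fault whose handler
 * ends up doing, roughly,
 *
 *	entry = pte_mkyoung(pte_mkdirty(pte));
 *	ptep_set_access_flags(vma, address, ptep, entry, 1);
 *
 * after which the mapping is writable for the hardware as well.
 */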

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_val(pmd) & _PAGE_HUGE)
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
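
/*
 * The pte_modify() variants below exchange the protection bits of an
 * existing PTE while preserving the pfn and, where applicable, the cache
 * attributes.  Purely for illustration (the caller lives in generic mm
 * code, not in this header), an mprotect()-style protection change boils
 * down to roughly
 *
 *	ptent = pte_modify(old_pte, newprot);
 *	set_pte_at(mm, addr, ptep, ptent);
 *
 * which is why _PAGE_CHG_MASK has to cover everything that must survive a
 * protection change, such as the pfn and the software dirty/accessed state.
 */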

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte.pte_low & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte.pte_low |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte.pte_low &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#else
static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		pte_t pte = *ptep;
		__update_tlb(vma, address, pte);
		if (--nr == 0)
			break;
		ptep++;
		address += PAGE_SIZE;
	}
}
#define update_mmu_cache(vma, address, ptep) \
	update_mmu_cache_range(NULL, vma, address, ptep, 1)

#define update_mmu_tlb_range(vma, address, ptep, nr) \
	update_mmu_cache_range(NULL, vma, address, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}
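
/*
 * Illustrative use of the pgprot helpers defined earlier (a sketch of a
 * typical caller, not something provided by this header): a driver mapping
 * device memory into userspace normally rewrites the cache attribute before
 * remapping, roughly
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 *
 * Only the _CACHE_MASK field is replaced; the permission bits from the
 * original vm_page_prot are left intact.
 */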

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}
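
/*
 * For context (behaviour of the generic THP code, assumed rather than
 * defined here): pmd_mkinvalid() is used on the pmdp_invalidate() path to
 * make a huge mapping non-present in place, along the lines of
 *
 *	old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
 *	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 *
 * so the hardware can no longer use the entry while the kernel can still
 * tell it apart from pmd_none() during a split.
 */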

/*
 * The generic version of pmdp_huge_get_and_clear() uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
		unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)

/*
 * We provide our own get_unmapped area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */