/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE		__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
					 _page_cachable_default)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
					 _page_cachable_default)
#define PAGE_COPY		__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
					 _page_cachable_default)
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | \
					 _page_cachable_default)
#define PAGE_KERNEL		__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
					 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC		__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
					 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO		__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
					 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED	__pgprot(_PAGE_PRESENT | __READABLE | \
					 __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read.  Also, write
 * permissions imply read permissions.  This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values will be generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
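/*
 * Explanatory note (not in the original source): because MIPS data
 * caches can be virtually indexed, the zero page is replicated once
 * per cache colour, and ZERO_PAGE(vaddr) above picks the copy whose
 * colour matches the faulting user address, e.g. in a read fault on
 * an anonymous mapping:
 *
 *	struct page *zp = ZERO_PAGE(vmf->address);
 *
 * On CPUs without cache aliases zero_page_mask is 0, so every address
 * resolves to the same single page.
 */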
extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
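/*
 * Explanatory note (not in the original source): a MIPS TLB entry
 * maps an even/odd pair of virtual pages (EntryLo0/EntryLo1), and the
 * hardware treats the entry as global only when the G bit is set in
 * both halves.  Both set_pte() variants therefore propagate
 * _PAGE_GLOBAL to ptep_buddy(ptep) whenever the buddy is still none,
 * and pte_clear() preserves it, so a kernel (global) mapping is never
 * paired with a non-global half.  This is also why pte_none() masks
 * out _PAGE_GLOBAL: an entry carrying only that bit still counts as
 * empty.
 */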
/*
 * Certain architectures need to do special things when ptes
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
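/*
 * Explanatory note (not in the original source): MIPS has no
 * hardware-updated accessed/dirty bits.  _PAGE_ACCESSED and
 * _PAGE_MODIFIED below are pure software bits, while
 * _PAGE_SILENT_READ and _PAGE_SILENT_WRITE alias the hardware
 * valid/dirty TLB bits.  The mk* helpers below only set a hardware
 * bit once its software counterpart allows it, so e.g.
 *
 *	pte = pte_mkdirty(pte);
 *
 * records _PAGE_MODIFIED unconditionally but makes the page
 * hardware-writeable only if _PAGE_WRITE is already set; otherwise
 * the next write takes a TLB fault that lets the kernel update the
 * software bits first.
 */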
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
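/*
 * Explanatory note (not in the original source): the soft-dirty bit
 * below backs the generic write-tracking interface used by e.g. CRIU:
 * clearing it through /proc/<pid>/clear_refs and reading it back via
 * /proc/<pid>/pagemap reveals which pages were written in between.
 * pte_mkdirty() above also sets _PAGE_SOFT_DIRTY so that dirtying a
 * page is always observable there.
 */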
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)
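/*
 * Illustrative example (not part of this header): a driver mapping
 * device registers into user space would typically clear the cache
 * attribute with pgprot_noncached() above before remapping, e.g. in
 * its mmap handler:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * pgprot_writecombine() is the analogous choice for framebuffer-style
 * memory where gathered writes are acceptable.
 */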
/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}
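/*
 * Explanatory note (not in the original source): pmd_page() above has
 * two cases because a pmd entry holds different things depending on
 * its type: a huge (leaf) pmd stores the pfn of the mapped area
 * directly, while a regular pmd stores the kernel virtual address of
 * its pte page, which must first be converted back to a physical
 * address via pmd_phys().  pmd_mknotpresent() clears only the present
 * bit and the hardware valid/dirty bits, keeping _PAGE_HUGE and the
 * software dirty/accessed bits intact across THP invalidation.
 */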
/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)

#include <asm-generic/pgtable.h>

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */