/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read.  Also, write
 * permissions imply read permissions.  This is the closest we can get
 * by reasonable means.
 */

extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
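
/*
 * Note (illustrative, hypothetical numbers): zero_page_mask selects one of
 * several consecutive zero pages so that, on CPUs with virtually aliasing
 * D-caches, the page handed out shares the cache colour of vaddr.  E.g. if
 * eight 4KB zero pages were allocated, zero_page_mask would be 0x7000 and
 * ZERO_PAGE(v) would return the copy at offset (v & 0x7000) from
 * empty_zero_page.  With a single zero page the mask is simply 0.
 */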

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()						\
do {								\
	unsigned long __flags;					\
								\
	if (cpu_has_htw) {					\
		local_irq_save(__flags);			\
		if (!raw_current_cpu_data.htw_seq++) {		\
			write_c0_pwctl(read_c0_pwctl() &	\
				~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();		\
		}						\
		local_irq_restore(__flags);			\
	}							\
} while (0)

#define htw_start()						\
do {								\
	unsigned long __flags;					\
								\
	if (cpu_has_htw) {					\
		local_irq_save(__flags);			\
		if (!--raw_current_cpu_data.htw_seq) {		\
			write_c0_pwctl(read_c0_pwctl() |	\
				(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();		\
		}						\
		local_irq_restore(__flags);			\
	}							\
} while (0)
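
/*
 * Illustrative usage: the hardware page table walker (HTW) must not observe
 * page tables in a half-updated state, so updates are bracketed like
 *
 *	htw_stop();
 *	... modify the page tables ...
 *	htw_start();
 *
 * The per-CPU htw_seq counter lets these calls nest: the walker is disabled
 * on the first htw_stop() and re-enabled only when the outermost matching
 * htw_start() brings the count back to zero.  Interrupts are disabled around
 * the count/pwctl update so the sequence isn't torn by a preempting user.
 */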

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif
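
/*
 * Why the "buddy" dance above: a MIPS TLB entry maps a pair of adjacent
 * virtual pages through EntryLo0/EntryLo1, and the entry behaves as global
 * only when the G bit is set in both halves.  So whenever one PTE of a pair
 * is global, set_pte()/pte_clear() propagate _PAGE_GLOBAL to the other PTE
 * (its buddy, found with ptep_buddy()) even when it is otherwise empty.
 * This is also why the pte_none() definitions above mask out _PAGE_GLOBAL:
 * a "none" PTE may still carry G on behalf of its buddy.
 */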

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
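
/*
 * A note on the accessor/mutator pairs below: MIPS has no hardware-managed
 * accessed or dirty bits.  _PAGE_ACCESSED and _PAGE_MODIFIED are pure
 * software state, while _PAGE_SILENT_READ and _PAGE_SILENT_WRITE back the
 * hardware valid (V) and dirty/write-enable (D) bits of the TLB entry.
 * The mk* helpers therefore set a "silent" bit only once the matching
 * software bit and permission agree, so the first read or write of a page
 * still faults and lets the kernel record that it was referenced or dirtied.
 */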

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#define pte_sw_mkyoung	pte_mkyoung

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_val(pmd) & _PAGE_HUGE)
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
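
/*
 * Illustrative use from a driver's mmap() handler (a sketch, not a
 * definition from this file):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * Only the _CACHE_MASK field of the protection value is rewritten, so the
 * permission bits of the original pgprot are preserved.
 */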

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte.pte_low & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte.pte_low |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte.pte_low &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#else
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
			 pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}
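
/*
 * Because the MIPS TLB is refilled and maintained by software, a stale
 * entry for a just-faulted address may still sit in the TLB when the fault
 * handler returns.  update_mmu_cache() has __update_tlb() rewrite any such
 * entry with the new PTE so the faulting access does not immediately trap
 * again; update_mmu_tlb is the same operation for the fresh-PTE case.
 */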

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}
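
/*
 * Note that pmd_modify() keeps _PAGE_HUGE in addition to the bits covered
 * by _PAGE_CHG_MASK, so a transparent huge page mapping stays huge across
 * a protection change such as mprotect().
 */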

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */