/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means..
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
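
/*
 * Explanatory note: the zero placeholders above are only there to satisfy
 * the generic protection_map[] table in mm/mmap.c; on MIPS the real values
 * are computed at boot (see setup_protection_map() in arch/mips/mm/cache.c)
 * so that RIXI-capable CPUs get read-inhibit/execute-inhibit aware
 * protections while older CPUs fall back to _PAGE_READ based ones.
 */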

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()						\
do {								\
	if (cpu_has_htw)					\
		write_c0_pwctl(read_c0_pwctl() &		\
			       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
} while(0)

#define htw_start()						\
do {								\
	if (cpu_has_htw)					\
		write_c0_pwctl(read_c0_pwctl() |		\
			       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
} while(0)


#define htw_reset()						\
do {								\
	if (cpu_has_htw) {					\
		htw_stop();					\
		back_to_back_c0_hazard();			\
		htw_start();					\
		back_to_back_c0_hazard();			\
	}							\
} while(0)

extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
	pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low	|= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
	htw_reset();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_reset();
}
#endif
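
/*
 * Explanatory note on the "buddy" handling above: a MIPS TLB entry maps a
 * pair of adjacent virtual pages through EntryLo0/EntryLo1, and the entry is
 * only global when the G bit is set in both halves. That is why set_pte()
 * propagates _PAGE_GLOBAL into a still-none buddy PTE, and why pte_clear()
 * writes _PAGE_GLOBAL instead of 0 when the buddy is global -- clearing one
 * half to zero would otherwise drop the global attribute of the shared
 * TLB entry.
 */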

/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}

#ifdef _PAGE_HUGE
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
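
/*
 * Explanatory note on the helpers above: MIPS has no hardware-managed
 * accessed or dirty bits. _PAGE_ACCESSED and _PAGE_MODIFIED are purely
 * software flags, while _PAGE_SILENT_READ and _PAGE_SILENT_WRITE alias the
 * hardware valid (V) and dirty (D) TLB bits. The pte_mk*() helpers only set
 * the hardware bit once the matching software flag allows it, so the first
 * access or write to a page still faults and lets the kernel record the
 * young/dirty state.
 */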

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
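
/*
 * Illustrative use only (not part of this header's API): a driver that needs
 * an uncached user mapping would typically apply these helpers in its mmap()
 * handler along the lines of
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
 *
 * with pfn being whatever physical page the driver exposes.
 */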

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif


extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
	pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
	unsigned long address,
	pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (cpu_has_rixi) {
		if (!(pmd_val(pmd) & _PAGE_NO_READ))
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	} else {
		if (pmd_val(pmd) & _PAGE_READ)
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	}

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version pmdp_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
	unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
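
/*
 * Explanatory note: with virtually indexed, physically tagged caches the same
 * physical page can land in different cache sets depending on the virtual
 * address it is mapped at, so shared mappings have to be laid out with a
 * consistent cache colour. The MIPS arch_get_unmapped_area() and
 * arch_get_unmapped_area_topdown() in arch/mips/mm/mmap.c enforce this by
 * aligning MAP_SHARED mappings to shm_align_mask.
 */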

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */