#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)                                  \
        ((boot_cpu_data.x86 > 3)                                \
         ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))  \
         : (prot))

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)                               \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
        native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
        native_pagetable_setup_done(base);
}

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do {} while (0)

#endif  /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
        return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
        return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}
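/*
 * Note that the pte_mk*()/pte_clr*() helpers above are pure value
 * transformers: each takes a pte_t by value and returns a new one, so
 * they compose freely and nothing is written to the page tables until
 * set_pte()/set_pte_at() is called.  An illustrative sketch (hypothetical
 * usage, not an interface defined here), in the style of a write-fault
 * handler making a COW page writable again:
 *
 *      pte_t entry = *ptep;
 *      entry = pte_mkwrite(pte_mkdirty(pte_mkyoung(entry)));
 *      set_pte_at(mm, addr, ptep, entry);
 */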
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))
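/*
 * A worked example of the split in pte_modify() above (illustrative
 * only): the bits in _PAGE_CHG_MASK -- the pfn, the PAT/cache attribute
 * bits and the hardware-maintained accessed/dirty state -- are kept,
 * and everything else comes from the new protection.  So for a dirty,
 * writable anonymous PTE,
 *
 *      pte = pte_modify(pte, PAGE_READONLY);
 *
 * clears _PAGE_RW (absent from the new pgprot) but preserves the pfn,
 * _PAGE_DIRTY and _PAGE_ACCESSED, which is what mprotect() relies on.
 */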
static inline int is_new_memtype_allowed(unsigned long flags,
                                         unsigned long new_flags)
{
        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
        if ((flags == _PAGE_CACHE_UC_MINUS &&
             new_flags == _PAGE_CACHE_WB) ||
            (flags == _PAGE_CACHE_WC &&
             new_flags == _PAGE_CACHE_WB)) {
                return 0;
        }

        return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check the low word on 32-bit platforms, since it might be
           out of sync with the upper half. */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)   pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
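/*
 * A worked example of the index arithmetic (illustrative; assumes
 * x86-64 with 4K pages, i.e. PAGE_SHIFT == 12, PMD_SHIFT == 21 and
 * PTRS_PER_PTE == PTRS_PER_PMD == 512).  For the hypothetical address
 * 0xffff880000403042:
 *
 *      pmd_index(addr) == (addr >> 21) & 511 == 2
 *      pte_index(addr) == (addr >> 12) & 511 == 3
 *
 * so the address is controlled by entry 2 of its pmd page, and by
 * entry 3 of the pte page that entry points to.
 */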
static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)           pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)           pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* PAGETABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
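/*
 * Putting the lookup helpers together: a sketch of a full four-level
 * walk (illustrative only; the local names are hypothetical, and a real
 * walk would also have to stop early at pud_large()/pmd_large() huge
 * mappings):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);      (or pgd_offset(mm, addr))
 *      if (pgd_none(*pgd) || pgd_bad(*pgd))
 *              return NULL;
 *      pud_t *pud = pud_offset(pgd, addr);
 *      if (pud_none(*pud) || pud_bad(*pud))
 *              return NULL;
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      if (pmd_none(*pmd) || pmd_bad(*pmd))
 *              return NULL;
 *      return pte_offset_kernel(pmd, addr);
 *
 * Each *_offset() step masks the pfn out of the current entry with
 * PTE_PFN_MASK, maps it back to a virtual address via __va(), and
 * indexes the next table with the matching *_index(addr) bits.
 */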
#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}
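/*
 * Illustrative use (a sketch, not a definition from this file): on x86,
 * creating a new pgd copies the kernel half of a reference pgd, e.g.
 *
 *      clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *                      KERNEL_PGD_PTRS);
 *
 * where new_pgd is a hypothetical freshly allocated pgd page; this way
 * every process shares the kernel mappings at and above PAGE_OFFSET
 * while keeping private user-space entries below KERNEL_PGD_BOUNDARY.
 */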

#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif  /* _ASM_X86_PGTABLE_H */