/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 *
 * VMEMMAP_SIZE: allows the whole VA space to be covered by a struct page array
 *	(rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel VA space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
 *	fixed mappings and modules
 */
#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)

#ifndef CONFIG_KASAN
#define VMALLOC_START		(VA_START)
#else
#include <asm/kasan.h>
#define VMALLOC_START		(KASAN_SHADOW_END + SZ_64K)
#endif

#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))
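/*
 * Worked example (a sketch, not normative; assumes 4KB pages, VA_BITS == 48
 * and a 64-byte struct page): 1UL << (VA_BITS - PAGE_SHIFT) is 2^36 pages,
 * so VMEMMAP_SIZE is ALIGN(2^36 * 64, PUD_SIZE) = 4TB. With
 * SPARSEMEM_VMEMMAP, the struct page for a page frame is then reached by
 * plain pointer arithmetic:
 *
 *	struct page *page = vmemmap + pfn;	// what pfn_to_page() expands to
 */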
#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)

#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))

#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
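/*
 * Note the valid/present distinction above. A minimal sketch, assuming a
 * pfn at hand: a PROT_NONE pte keeps bit 0 clear so the MMU faults on any
 * access, but PTE_PROT_NONE still marks the page as mapped to software:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_NONE);
 *	pte_valid(pte);		// 0: hardware treats the entry as invalid
 *	pte_present(pte);	// 1: the kernel still considers it mapped
 */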
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |     1          0          0
 *   0      1      |     1          1          0
 *   1      0      |     1          0          1
 *   1      1      |     0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid_user(pte)) {
		if (!pte_special(pte) && pte_exec(pte))
			__sync_icache_dcache(pte, addr);
		if (pte_sw_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
	}

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep)) {
		BUG_ON(!pte_young(pte));
		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
	}

	set_pte(ptep, pte);
}
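/*
 * A minimal sketch of the dirty check implied by the table above, i.e. what
 * pte_dirty()/pte_sw_dirty()/pte_hw_dirty() expand to with
 * CONFIG_ARM64_HW_AFDBM enabled (the helper name is purely illustrative):
 */
static inline int example_pte_dirty(pte_t pte)
{
	/* dirtied by software via the page fault mechanism */
	if (pte_val(pte) & PTE_DIRTY)
		return 1;
	/* dirtied by hardware DBM: writable (DBM set) with RDONLY cleared */
	return (pte_val(pte) & PTE_WRITE) && !(pte_val(pte) & PTE_RDONLY);
}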
/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		2
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd)	pte_special(pmd_pte(pmd))
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
struct vm_area_struct;
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp);
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
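/*
 * Usage sketch for the pgprot_*() modifiers above (illustrative only; the
 * vma and pfn come from a hypothetical driver's mmap handler):
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn,
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */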
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#ifdef CONFIG_ARM64_64K_PAGES
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline pud_t *pgd_page_vaddr(pgd_t pgd)
{
	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
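/*
 * A minimal walk sketch, assuming a mapped kernel address and the folded
 * pud/pmd helpers from the generic headers when CONFIG_PGTABLE_LEVELS < 4:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step masks the table entry with PHYS_MASK to get the next-level
 * table and indexes it with the relevant VA bit field; the usual
 * pgd_none()/pud_none()/pmd_none() checks are elided for brevity.
 */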
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pteval_t pteval;
	unsigned int tmp, res;

	asm volatile("//	ptep_test_and_clear_young\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
	"	and	%0, %0, %4		// clear PTE_AF\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

	return res;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pteval_t old_pteval;
	unsigned int tmp;

	asm volatile("//	ptep_get_and_clear\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	stxr	%w1, xzr, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

	return __pte(old_pteval);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	ptep_set_wrprotect\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
	: "cc");
}
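/*
 * The ldxr/stxr loop above is the atomic equivalent of the following
 * non-atomic sketch (illustrative only); the load/modify/store must be one
 * exclusive sequence so a concurrent hardware DBM update cannot be lost
 * between the read and the write:
 *
 *	pte_t pte = *ptep;
 *	if (pte_hw_dirty(pte))
 *		pte = pte_mkdirty(pte);		// latch hw dirty into PTE_DIRTY
 *	pte_val(pte) |= PTE_RDONLY;		// make the entry read-only
 *	pte = pte_wrprotect(pte);		// clear PTE_WRITE/PTE_DBM
 *	*ptep = pte;
 */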
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
#endif /* CONFIG_ARM64_HW_AFDBM */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
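/*
 * Worked example of the encoding above (illustrative values):
 *
 *	swp_entry_t ent = __swp_entry(3, 0x1234);
 *	// ent.val == (3 << 2) | (0x1234 << 8)
 *	__swp_type(ent);	// 3
 *	__swp_offset(ent);	// 0x1234
 *
 * Bits 0-1 stay zero, so a swapped-out pte is never seen as valid by the
 * hardware walker.
 */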
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

#define pgtable_cache_init() do { } while (0)

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */