1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * pgtable.h: SpitFire page table operations. 4 * 5 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu) 6 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 7 */ 8 9 #ifndef _SPARC64_PGTABLE_H 10 #define _SPARC64_PGTABLE_H 11 12 /* This file contains the functions and defines necessary to modify and use 13 * the SpitFire page tables. 14 */ 15 16 #include <asm-generic/pgtable-nop4d.h> 17 #include <linux/compiler.h> 18 #include <linux/const.h> 19 #include <asm/types.h> 20 #include <asm/spitfire.h> 21 #include <asm/asi.h> 22 #include <asm/adi.h> 23 #include <asm/page.h> 24 #include <asm/processor.h> 25 26 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB). 27 * The page copy blockops can use 0x6000000 to 0x8000000. 28 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range. 29 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range. 30 * The PROM resides in an area spanning 0xf0000000 to 0x100000000. 31 * The vmalloc area spans 0x100000000 to 0x200000000. 32 * Since modules need to be in the lowest 32-bits of the address space, 33 * we place them right before the OBP area from 0x10000000 to 0xf0000000. 34 * There is a single static kernel PMD which maps from 0x0 to address 35 * 0x400000000. 36 */ 37 #define TLBTEMP_BASE _AC(0x0000000006000000,UL) 38 #define TSBMAP_8K_BASE _AC(0x0000000008000000,UL) 39 #define TSBMAP_4M_BASE _AC(0x0000000008400000,UL) 40 #define MODULES_VADDR _AC(0x0000000010000000,UL) 41 #define MODULES_LEN _AC(0x00000000e0000000,UL) 42 #define MODULES_END _AC(0x00000000f0000000,UL) 43 #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) 44 #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) 45 #define VMALLOC_START _AC(0x0000000100000000,UL) 46 #define VMEMMAP_BASE VMALLOC_END 47 48 /* PMD_SHIFT determines the size of the area a second-level page 49 * table can map 50 */ 51 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) 52 #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) 53 #define PMD_MASK (~(PMD_SIZE-1)) 54 #define PMD_BITS (PAGE_SHIFT - 3) 55 56 /* PUD_SHIFT determines the size of the area a third-level page 57 * table can map 58 */ 59 #define PUD_SHIFT (PMD_SHIFT + PMD_BITS) 60 #define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) 61 #define PUD_MASK (~(PUD_SIZE-1)) 62 #define PUD_BITS (PAGE_SHIFT - 3) 63 64 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */ 65 #define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS) 66 #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) 67 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 68 #define PGDIR_BITS (PAGE_SHIFT - 3) 69 70 #if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS) 71 #error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support 72 #endif 73 74 #if (PGDIR_SHIFT + PGDIR_BITS) != 53 75 #error Page table parameters do not cover virtual address space properly. 76 #endif 77 78 #if (PMD_SHIFT != HPAGE_SHIFT) 79 #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages. 80 #endif 81 82 #ifndef __ASSEMBLY__ 83 84 extern unsigned long VMALLOC_END; 85 86 #define vmemmap ((struct page *)VMEMMAP_BASE) 87 88 #include <linux/sched.h> 89 #include <asm/tlbflush.h> 90 91 bool kern_addr_valid(unsigned long addr); 92 93 /* Entries per page directory level. */ 94 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) 95 #define PTRS_PER_PMD (1UL << PMD_BITS) 96 #define PTRS_PER_PUD (1UL << PUD_BITS) 97 #define PTRS_PER_PGD (1UL << PGDIR_BITS) 98 99 #define pmd_ERROR(e) \ 100 pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \ 101 __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0)) 102 #define pud_ERROR(e) \ 103 pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \ 104 __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0)) 105 #define pgd_ERROR(e) \ 106 pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \ 107 __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0)) 108 109 #endif /* !(__ASSEMBLY__) */ 110 111 /* PTE bits which are the same in SUN4U and SUN4V format. */ 112 #define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ 113 #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/ 114 #define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */ 115 #define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */ 116 #define _PAGE_PUD_HUGE _PAGE_PMD_HUGE 117 118 /* SUN4U pte bits... */ 119 #define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */ 120 #define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */ 121 #define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */ 122 #define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */ 123 #define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */ 124 #define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */ 125 #define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ 126 #define _PAGE_SPECIAL_4U _AC(0x0200000000000000,UL) /* Special page */ 127 #define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page */ 128 #define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */ 129 #define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ 130 #define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ 131 #define _PAGE_SZALL_4U _AC(0x6001000000000000,UL) /* All pgsz bits */ 132 #define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ 133 #define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */ 134 #define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */ 135 #define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */ 136 #define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */ 137 #define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */ 138 #define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ 139 #define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */ 140 #define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */ 141 #define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */ 142 #define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */ 143 #define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ 144 #define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ 145 #define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */ 146 #define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */ 147 #define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */ 148 149 /* SUN4V pte bits... */ 150 #define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */ 151 #define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */ 152 #define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */ 153 #define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */ 154 #define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */ 155 #define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */ 156 #define _PAGE_SPECIAL_4V _AC(0x0200000000000000,UL) /* Special page */ 157 #define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page */ 158 #define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */ 159 #define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */ 160 #define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */ 161 #define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */ 162 #define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */ 163 /* Bit 9 is used to enable MCD corruption detection instead on M7 */ 164 #define _PAGE_MCD_4V _AC(0x0000000000000200,UL) /* Memory Corruption */ 165 #define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */ 166 #define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */ 167 #define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */ 168 #define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */ 169 #define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */ 170 #define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */ 171 #define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */ 172 #define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */ 173 #define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */ 174 #define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */ 175 #define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */ 176 #define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */ 177 #define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */ 178 #define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */ 179 #define _PAGE_SZALL_4V _AC(0x0000000000000007,UL) /* All pgsz bits */ 180 181 #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U 182 #define _PAGE_SZBITS_4V _PAGE_SZ8K_4V 183 184 #if REAL_HPAGE_SHIFT != 22 185 #error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up 186 #endif 187 188 #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U 189 #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V 190 191 /* We borrow bit 20 to store the exclusive marker in swap PTEs. */ 192 #define _PAGE_SWP_EXCLUSIVE _AC(0x0000000000100000, UL) 193 194 #ifndef __ASSEMBLY__ 195 196 pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long); 197 198 unsigned long pte_sz_bits(unsigned long size); 199 200 extern pgprot_t PAGE_KERNEL; 201 extern pgprot_t PAGE_KERNEL_LOCKED; 202 extern pgprot_t PAGE_COPY; 203 extern pgprot_t PAGE_SHARED; 204 205 /* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */ 206 extern unsigned long _PAGE_IE; 207 extern unsigned long _PAGE_E; 208 extern unsigned long _PAGE_CACHE; 209 210 extern unsigned long pg_iobits; 211 extern unsigned long _PAGE_ALL_SZ_BITS; 212 213 extern struct page *mem_map_zero; 214 #define ZERO_PAGE(vaddr) (mem_map_zero) 215 216 /* PFNs are real physical page numbers. However, mem_map only begins to record 217 * per-page information starting at pfn_base. This is to handle systems where 218 * the first physical page in the machine is at some huge physical address, 219 * such as 4GB. This is common on a partitioned E10000, for example. 220 */ 221 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) 222 { 223 unsigned long paddr = pfn << PAGE_SHIFT; 224 225 BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL); 226 return __pte(paddr | pgprot_val(prot)); 227 } 228 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 229 230 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 231 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) 232 { 233 pte_t pte = pfn_pte(page_nr, pgprot); 234 235 return __pmd(pte_val(pte)); 236 } 237 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) 238 #endif 239 240 /* This one can be done with two shifts. */ 241 static inline unsigned long pte_pfn(pte_t pte) 242 { 243 unsigned long ret; 244 245 __asm__ __volatile__( 246 "\n661: sllx %1, %2, %0\n" 247 " srlx %0, %3, %0\n" 248 " .section .sun4v_2insn_patch, \"ax\"\n" 249 " .word 661b\n" 250 " sllx %1, %4, %0\n" 251 " srlx %0, %5, %0\n" 252 " .previous\n" 253 : "=r" (ret) 254 : "r" (pte_val(pte)), 255 "i" (21), "i" (21 + PAGE_SHIFT), 256 "i" (8), "i" (8 + PAGE_SHIFT)); 257 258 return ret; 259 } 260 #define pte_page(x) pfn_to_page(pte_pfn(x)) 261 262 static inline pte_t pte_modify(pte_t pte, pgprot_t prot) 263 { 264 unsigned long mask, tmp; 265 266 /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7) 267 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8) 268 * 269 * Even if we use negation tricks the result is still a 6 270 * instruction sequence, so don't try to play fancy and just 271 * do the most straightforward implementation. 272 * 273 * Note: We encode this into 3 sun4v 2-insn patch sequences. 274 */ 275 276 BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL); 277 __asm__ __volatile__( 278 "\n661: sethi %%uhi(%2), %1\n" 279 " sethi %%hi(%2), %0\n" 280 "\n662: or %1, %%ulo(%2), %1\n" 281 " or %0, %%lo(%2), %0\n" 282 "\n663: sllx %1, 32, %1\n" 283 " or %0, %1, %0\n" 284 " .section .sun4v_2insn_patch, \"ax\"\n" 285 " .word 661b\n" 286 " sethi %%uhi(%3), %1\n" 287 " sethi %%hi(%3), %0\n" 288 " .word 662b\n" 289 " or %1, %%ulo(%3), %1\n" 290 " or %0, %%lo(%3), %0\n" 291 " .word 663b\n" 292 " sllx %1, 32, %1\n" 293 " or %0, %1, %0\n" 294 " .previous\n" 295 " .section .sun_m7_2insn_patch, \"ax\"\n" 296 " .word 661b\n" 297 " sethi %%uhi(%4), %1\n" 298 " sethi %%hi(%4), %0\n" 299 " .word 662b\n" 300 " or %1, %%ulo(%4), %1\n" 301 " or %0, %%lo(%4), %0\n" 302 " .word 663b\n" 303 " sllx %1, 32, %1\n" 304 " or %0, %1, %0\n" 305 " .previous\n" 306 : "=r" (mask), "=r" (tmp) 307 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | 308 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | 309 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U), 310 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | 311 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | 312 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V), 313 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | 314 _PAGE_CP_4V | _PAGE_E_4V | 315 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V)); 316 317 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); 318 } 319 320 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 321 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 322 { 323 pte_t pte = __pte(pmd_val(pmd)); 324 325 pte = pte_modify(pte, newprot); 326 327 return __pmd(pte_val(pte)); 328 } 329 #endif 330 331 static inline pgprot_t pgprot_noncached(pgprot_t prot) 332 { 333 unsigned long val = pgprot_val(prot); 334 335 __asm__ __volatile__( 336 "\n661: andn %0, %2, %0\n" 337 " or %0, %3, %0\n" 338 " .section .sun4v_2insn_patch, \"ax\"\n" 339 " .word 661b\n" 340 " andn %0, %4, %0\n" 341 " or %0, %5, %0\n" 342 " .previous\n" 343 " .section .sun_m7_2insn_patch, \"ax\"\n" 344 " .word 661b\n" 345 " andn %0, %6, %0\n" 346 " or %0, %5, %0\n" 347 " .previous\n" 348 : "=r" (val) 349 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), 350 "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V), 351 "i" (_PAGE_CP_4V)); 352 353 return __pgprot(val); 354 } 355 /* Various pieces of code check for platform support by ifdef testing 356 * on "pgprot_noncached". That's broken and should be fixed, but for 357 * now... 358 */ 359 #define pgprot_noncached pgprot_noncached 360 361 static inline unsigned long pte_dirty(pte_t pte) 362 { 363 unsigned long mask; 364 365 __asm__ __volatile__( 366 "\n661: mov %1, %0\n" 367 " nop\n" 368 " .section .sun4v_2insn_patch, \"ax\"\n" 369 " .word 661b\n" 370 " sethi %%uhi(%2), %0\n" 371 " sllx %0, 32, %0\n" 372 " .previous\n" 373 : "=r" (mask) 374 : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V)); 375 376 return (pte_val(pte) & mask); 377 } 378 379 static inline unsigned long pte_write(pte_t pte) 380 { 381 unsigned long mask; 382 383 __asm__ __volatile__( 384 "\n661: mov %1, %0\n" 385 " nop\n" 386 " .section .sun4v_2insn_patch, \"ax\"\n" 387 " .word 661b\n" 388 " sethi %%uhi(%2), %0\n" 389 " sllx %0, 32, %0\n" 390 " .previous\n" 391 : "=r" (mask) 392 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); 393 394 return (pte_val(pte) & mask); 395 } 396 397 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 398 pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags); 399 #define arch_make_huge_pte arch_make_huge_pte 400 static inline unsigned long __pte_default_huge_mask(void) 401 { 402 unsigned long mask; 403 404 __asm__ __volatile__( 405 "\n661: sethi %%uhi(%1), %0\n" 406 " sllx %0, 32, %0\n" 407 " .section .sun4v_2insn_patch, \"ax\"\n" 408 " .word 661b\n" 409 " mov %2, %0\n" 410 " nop\n" 411 " .previous\n" 412 : "=r" (mask) 413 : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V)); 414 415 return mask; 416 } 417 418 static inline pte_t pte_mkhuge(pte_t pte) 419 { 420 return __pte(pte_val(pte) | __pte_default_huge_mask()); 421 } 422 423 static inline bool is_default_hugetlb_pte(pte_t pte) 424 { 425 unsigned long mask = __pte_default_huge_mask(); 426 427 return (pte_val(pte) & mask) == mask; 428 } 429 430 static inline bool is_hugetlb_pmd(pmd_t pmd) 431 { 432 return !!(pmd_val(pmd) & _PAGE_PMD_HUGE); 433 } 434 435 static inline bool is_hugetlb_pud(pud_t pud) 436 { 437 return !!(pud_val(pud) & _PAGE_PUD_HUGE); 438 } 439 440 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 441 static inline pmd_t pmd_mkhuge(pmd_t pmd) 442 { 443 pte_t pte = __pte(pmd_val(pmd)); 444 445 pte = pte_mkhuge(pte); 446 pte_val(pte) |= _PAGE_PMD_HUGE; 447 448 return __pmd(pte_val(pte)); 449 } 450 #endif 451 #else 452 static inline bool is_hugetlb_pte(pte_t pte) 453 { 454 return false; 455 } 456 #endif 457 458 static inline pte_t __pte_mkhwwrite(pte_t pte) 459 { 460 unsigned long val = pte_val(pte); 461 462 /* 463 * Note: we only want to set the HW writable bit if the SW writable bit 464 * and the SW dirty bit are set. 465 */ 466 __asm__ __volatile__( 467 "\n661: or %0, %2, %0\n" 468 " .section .sun4v_1insn_patch, \"ax\"\n" 469 " .word 661b\n" 470 " or %0, %3, %0\n" 471 " .previous\n" 472 : "=r" (val) 473 : "0" (val), "i" (_PAGE_W_4U), "i" (_PAGE_W_4V)); 474 475 return __pte(val); 476 } 477 478 static inline pte_t pte_mkdirty(pte_t pte) 479 { 480 unsigned long val = pte_val(pte), mask; 481 482 __asm__ __volatile__( 483 "\n661: mov %1, %0\n" 484 " nop\n" 485 " .section .sun4v_2insn_patch, \"ax\"\n" 486 " .word 661b\n" 487 " sethi %%uhi(%2), %0\n" 488 " sllx %0, 32, %0\n" 489 " .previous\n" 490 : "=r" (mask) 491 : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V)); 492 493 pte = __pte(val | mask); 494 return pte_write(pte) ? __pte_mkhwwrite(pte) : pte; 495 } 496 497 static inline pte_t pte_mkclean(pte_t pte) 498 { 499 unsigned long val = pte_val(pte), tmp; 500 501 __asm__ __volatile__( 502 "\n661: andn %0, %3, %0\n" 503 " nop\n" 504 "\n662: nop\n" 505 " nop\n" 506 " .section .sun4v_2insn_patch, \"ax\"\n" 507 " .word 661b\n" 508 " sethi %%uhi(%4), %1\n" 509 " sllx %1, 32, %1\n" 510 " .word 662b\n" 511 " or %1, %%lo(%4), %1\n" 512 " andn %0, %1, %0\n" 513 " .previous\n" 514 : "=r" (val), "=r" (tmp) 515 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U), 516 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V)); 517 518 return __pte(val); 519 } 520 521 static inline pte_t pte_mkwrite_novma(pte_t pte) 522 { 523 unsigned long val = pte_val(pte), mask; 524 525 __asm__ __volatile__( 526 "\n661: mov %1, %0\n" 527 " nop\n" 528 " .section .sun4v_2insn_patch, \"ax\"\n" 529 " .word 661b\n" 530 " sethi %%uhi(%2), %0\n" 531 " sllx %0, 32, %0\n" 532 " .previous\n" 533 : "=r" (mask) 534 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); 535 536 pte = __pte(val | mask); 537 return pte_dirty(pte) ? __pte_mkhwwrite(pte) : pte; 538 } 539 540 static inline pte_t pte_wrprotect(pte_t pte) 541 { 542 unsigned long val = pte_val(pte), tmp; 543 544 __asm__ __volatile__( 545 "\n661: andn %0, %3, %0\n" 546 " nop\n" 547 "\n662: nop\n" 548 " nop\n" 549 " .section .sun4v_2insn_patch, \"ax\"\n" 550 " .word 661b\n" 551 " sethi %%uhi(%4), %1\n" 552 " sllx %1, 32, %1\n" 553 " .word 662b\n" 554 " or %1, %%lo(%4), %1\n" 555 " andn %0, %1, %0\n" 556 " .previous\n" 557 : "=r" (val), "=r" (tmp) 558 : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U), 559 "i" (_PAGE_WRITE_4V | _PAGE_W_4V)); 560 561 return __pte(val); 562 } 563 564 static inline pte_t pte_mkold(pte_t pte) 565 { 566 unsigned long mask; 567 568 __asm__ __volatile__( 569 "\n661: mov %1, %0\n" 570 " nop\n" 571 " .section .sun4v_2insn_patch, \"ax\"\n" 572 " .word 661b\n" 573 " sethi %%uhi(%2), %0\n" 574 " sllx %0, 32, %0\n" 575 " .previous\n" 576 : "=r" (mask) 577 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); 578 579 mask |= _PAGE_R; 580 581 return __pte(pte_val(pte) & ~mask); 582 } 583 584 static inline pte_t pte_mkyoung(pte_t pte) 585 { 586 unsigned long mask; 587 588 __asm__ __volatile__( 589 "\n661: mov %1, %0\n" 590 " nop\n" 591 " .section .sun4v_2insn_patch, \"ax\"\n" 592 " .word 661b\n" 593 " sethi %%uhi(%2), %0\n" 594 " sllx %0, 32, %0\n" 595 " .previous\n" 596 : "=r" (mask) 597 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); 598 599 mask |= _PAGE_R; 600 601 return __pte(pte_val(pte) | mask); 602 } 603 604 static inline pte_t pte_mkspecial(pte_t pte) 605 { 606 pte_val(pte) |= _PAGE_SPECIAL; 607 return pte; 608 } 609 610 static inline pte_t pte_mkmcd(pte_t pte) 611 { 612 pte_val(pte) |= _PAGE_MCD_4V; 613 return pte; 614 } 615 616 static inline pte_t pte_mknotmcd(pte_t pte) 617 { 618 pte_val(pte) &= ~_PAGE_MCD_4V; 619 return pte; 620 } 621 622 static inline unsigned long pte_young(pte_t pte) 623 { 624 unsigned long mask; 625 626 __asm__ __volatile__( 627 "\n661: mov %1, %0\n" 628 " nop\n" 629 " .section .sun4v_2insn_patch, \"ax\"\n" 630 " .word 661b\n" 631 " sethi %%uhi(%2), %0\n" 632 " sllx %0, 32, %0\n" 633 " .previous\n" 634 : "=r" (mask) 635 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); 636 637 return (pte_val(pte) & mask); 638 } 639 640 static inline unsigned long pte_exec(pte_t pte) 641 { 642 unsigned long mask; 643 644 __asm__ __volatile__( 645 "\n661: sethi %%hi(%1), %0\n" 646 " .section .sun4v_1insn_patch, \"ax\"\n" 647 " .word 661b\n" 648 " mov %2, %0\n" 649 " .previous\n" 650 : "=r" (mask) 651 : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V)); 652 653 return (pte_val(pte) & mask); 654 } 655 656 static inline unsigned long pte_present(pte_t pte) 657 { 658 unsigned long val = pte_val(pte); 659 660 __asm__ __volatile__( 661 "\n661: and %0, %2, %0\n" 662 " .section .sun4v_1insn_patch, \"ax\"\n" 663 " .word 661b\n" 664 " and %0, %3, %0\n" 665 " .previous\n" 666 : "=r" (val) 667 : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V)); 668 669 return val; 670 } 671 672 #define pte_accessible pte_accessible 673 static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) 674 { 675 return pte_val(a) & _PAGE_VALID; 676 } 677 678 static inline unsigned long pte_special(pte_t pte) 679 { 680 return pte_val(pte) & _PAGE_SPECIAL; 681 } 682 683 #define pmd_leaf pmd_leaf 684 static inline bool pmd_leaf(pmd_t pmd) 685 { 686 pte_t pte = __pte(pmd_val(pmd)); 687 688 return pte_val(pte) & _PAGE_PMD_HUGE; 689 } 690 691 static inline unsigned long pmd_pfn(pmd_t pmd) 692 { 693 pte_t pte = __pte(pmd_val(pmd)); 694 695 return pte_pfn(pte); 696 } 697 698 #define pmd_write pmd_write 699 static inline unsigned long pmd_write(pmd_t pmd) 700 { 701 pte_t pte = __pte(pmd_val(pmd)); 702 703 return pte_write(pte); 704 } 705 706 #define pud_write(pud) pte_write(__pte(pud_val(pud))) 707 708 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 709 #define pmd_dirty pmd_dirty 710 static inline unsigned long pmd_dirty(pmd_t pmd) 711 { 712 pte_t pte = __pte(pmd_val(pmd)); 713 714 return pte_dirty(pte); 715 } 716 717 #define pmd_young pmd_young 718 static inline unsigned long pmd_young(pmd_t pmd) 719 { 720 pte_t pte = __pte(pmd_val(pmd)); 721 722 return pte_young(pte); 723 } 724 725 static inline unsigned long pmd_trans_huge(pmd_t pmd) 726 { 727 pte_t pte = __pte(pmd_val(pmd)); 728 729 return pte_val(pte) & _PAGE_PMD_HUGE; 730 } 731 732 static inline pmd_t pmd_mkold(pmd_t pmd) 733 { 734 pte_t pte = __pte(pmd_val(pmd)); 735 736 pte = pte_mkold(pte); 737 738 return __pmd(pte_val(pte)); 739 } 740 741 static inline pmd_t pmd_wrprotect(pmd_t pmd) 742 { 743 pte_t pte = __pte(pmd_val(pmd)); 744 745 pte = pte_wrprotect(pte); 746 747 return __pmd(pte_val(pte)); 748 } 749 750 static inline pmd_t pmd_mkdirty(pmd_t pmd) 751 { 752 pte_t pte = __pte(pmd_val(pmd)); 753 754 pte = pte_mkdirty(pte); 755 756 return __pmd(pte_val(pte)); 757 } 758 759 static inline pmd_t pmd_mkclean(pmd_t pmd) 760 { 761 pte_t pte = __pte(pmd_val(pmd)); 762 763 pte = pte_mkclean(pte); 764 765 return __pmd(pte_val(pte)); 766 } 767 768 static inline pmd_t pmd_mkyoung(pmd_t pmd) 769 { 770 pte_t pte = __pte(pmd_val(pmd)); 771 772 pte = pte_mkyoung(pte); 773 774 return __pmd(pte_val(pte)); 775 } 776 777 static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) 778 { 779 pte_t pte = __pte(pmd_val(pmd)); 780 781 pte = pte_mkwrite_novma(pte); 782 783 return __pmd(pte_val(pte)); 784 } 785 786 #define pmd_pgprot pmd_pgprot 787 static inline pgprot_t pmd_pgprot(pmd_t entry) 788 { 789 unsigned long val = pmd_val(entry); 790 791 return __pgprot(val); 792 } 793 #endif 794 795 static inline int pmd_present(pmd_t pmd) 796 { 797 return pmd_val(pmd) != 0UL; 798 } 799 800 #define pmd_none(pmd) (!pmd_val(pmd)) 801 802 /* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is 803 * very simple, it's just the physical address. PTE tables are of 804 * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and 805 * the top bits outside of the range of any physical address size we 806 * support are clear as well. We also validate the physical itself. 807 */ 808 #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) 809 810 #define pud_none(pud) (!pud_val(pud)) 811 812 #define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK) 813 814 #define p4d_none(p4d) (!p4d_val(p4d)) 815 816 #define p4d_bad(p4d) (p4d_val(p4d) & ~PAGE_MASK) 817 818 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 819 void set_pmd_at(struct mm_struct *mm, unsigned long addr, 820 pmd_t *pmdp, pmd_t pmd); 821 #else 822 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 823 pmd_t *pmdp, pmd_t pmd) 824 { 825 *pmdp = pmd; 826 } 827 #endif 828 829 static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) 830 { 831 unsigned long val = __pa((unsigned long) (ptep)); 832 833 pmd_val(*pmdp) = val; 834 } 835 836 #define pud_set(pudp, pmdp) \ 837 (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)))) 838 static inline unsigned long pmd_page_vaddr(pmd_t pmd) 839 { 840 pte_t pte = __pte(pmd_val(pmd)); 841 unsigned long pfn; 842 843 pfn = pte_pfn(pte); 844 845 return ((unsigned long) __va(pfn << PAGE_SHIFT)); 846 } 847 848 static inline pmd_t *pud_pgtable(pud_t pud) 849 { 850 pte_t pte = __pte(pud_val(pud)); 851 unsigned long pfn; 852 853 pfn = pte_pfn(pte); 854 855 return ((pmd_t *) __va(pfn << PAGE_SHIFT)); 856 } 857 858 #define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd)) 859 #define pud_page(pud) virt_to_page((void *)pud_pgtable(pud)) 860 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) 861 #define pud_present(pud) (pud_val(pud) != 0U) 862 #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) 863 #define p4d_pgtable(p4d) \ 864 ((pud_t *) __va(p4d_val(p4d))) 865 #define p4d_present(p4d) (p4d_val(p4d) != 0U) 866 #define p4d_clear(p4dp) (p4d_val(*(p4dp)) = 0UL) 867 868 /* only used by the stubbed out hugetlb gup code, should never be called */ 869 #define p4d_page(p4d) NULL 870 871 #define pud_leaf pud_leaf 872 static inline bool pud_leaf(pud_t pud) 873 { 874 pte_t pte = __pte(pud_val(pud)); 875 876 return pte_val(pte) & _PAGE_PMD_HUGE; 877 } 878 879 #define pud_pfn pud_pfn 880 static inline unsigned long pud_pfn(pud_t pud) 881 { 882 pte_t pte = __pte(pud_val(pud)); 883 884 return pte_pfn(pte); 885 } 886 887 /* Same in both SUN4V and SUN4U. */ 888 #define pte_none(pte) (!pte_val(pte)) 889 890 #define p4d_set(p4dp, pudp) \ 891 (p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp)))) 892 893 /* We cannot include <linux/mm_types.h> at this point yet: */ 894 extern struct mm_struct init_mm; 895 896 /* Actual page table PTE updates. */ 897 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, 898 pte_t *ptep, pte_t orig, int fullmm, 899 unsigned int hugepage_shift); 900 901 static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, 902 pte_t *ptep, pte_t orig, int fullmm, 903 unsigned int hugepage_shift) 904 { 905 /* It is more efficient to let flush_tlb_kernel_range() 906 * handle init_mm tlb flushes. 907 * 908 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U 909 * and SUN4V pte layout, so this inline test is fine. 910 */ 911 if (likely(mm != &init_mm) && pte_accessible(mm, orig)) 912 tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift); 913 } 914 915 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 916 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 917 unsigned long addr, 918 pmd_t *pmdp) 919 { 920 pmd_t pmd = *pmdp; 921 set_pmd_at(mm, addr, pmdp, __pmd(0UL)); 922 return pmd; 923 } 924 925 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, 926 pte_t *ptep, pte_t pte, int fullmm) 927 { 928 pte_t orig = *ptep; 929 930 *ptep = pte; 931 maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT); 932 } 933 934 #define PFN_PTE_SHIFT PAGE_SHIFT 935 936 static inline void set_ptes(struct mm_struct *mm, unsigned long addr, 937 pte_t *ptep, pte_t pte, unsigned int nr) 938 { 939 arch_enter_lazy_mmu_mode(); 940 for (;;) { 941 __set_pte_at(mm, addr, ptep, pte, 0); 942 if (--nr == 0) 943 break; 944 ptep++; 945 pte_val(pte) += PAGE_SIZE; 946 addr += PAGE_SIZE; 947 } 948 arch_leave_lazy_mmu_mode(); 949 } 950 #define set_ptes set_ptes 951 952 #define pte_clear(mm,addr,ptep) \ 953 set_pte_at((mm), (addr), (ptep), __pte(0UL)) 954 955 #define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL 956 #define pte_clear_not_present_full(mm,addr,ptep,fullmm) \ 957 __set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm)) 958 959 #ifdef DCACHE_ALIASING_POSSIBLE 960 #define __HAVE_ARCH_MOVE_PTE 961 #define move_pte(pte, old_addr, new_addr) \ 962 ({ \ 963 pte_t newpte = (pte); \ 964 if (tlb_type != hypervisor && pte_present(pte)) { \ 965 unsigned long this_pfn = pte_pfn(pte); \ 966 \ 967 if (pfn_valid(this_pfn) && \ 968 (((old_addr) ^ (new_addr)) & (1 << 13))) \ 969 flush_dcache_folio_all(current->mm, \ 970 page_folio(pfn_to_page(this_pfn))); \ 971 } \ 972 newpte; \ 973 }) 974 #endif 975 976 extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 977 978 void paging_init(void); 979 unsigned long find_ecache_flush_span(unsigned long size); 980 981 struct seq_file; 982 void mmu_info(struct seq_file *); 983 984 struct vm_area_struct; 985 void update_mmu_cache_range(struct vm_fault *, struct vm_area_struct *, 986 unsigned long addr, pte_t *ptep, unsigned int nr); 987 #define update_mmu_cache(vma, addr, ptep) \ 988 update_mmu_cache_range(NULL, vma, addr, ptep, 1) 989 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 990 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, 991 pmd_t *pmd); 992 993 #define __HAVE_ARCH_PMDP_INVALIDATE 994 extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 995 pmd_t *pmdp); 996 997 #define __HAVE_ARCH_PGTABLE_DEPOSIT 998 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 999 pgtable_t pgtable); 1000 1001 #define __HAVE_ARCH_PGTABLE_WITHDRAW 1002 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); 1003 #endif 1004 1005 /* 1006 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that 1007 * are !pte_none() && !pte_present(). 1008 * 1009 * Format of swap PTEs: 1010 * 1011 * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 1012 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1013 * <--------------------------- offset --------------------------- 1014 * 1015 * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 1016 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 1017 * --------------------> E <-- type ---> <------- zeroes --------> 1018 */ 1019 #define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0x7fUL) 1020 #define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL)) 1021 #define __swp_entry(type, offset) \ 1022 ( (swp_entry_t) \ 1023 { \ 1024 ((((long)(type) & 0x7fUL) << PAGE_SHIFT) | \ 1025 ((long)(offset) << (PAGE_SHIFT + 8UL))) \ 1026 } ) 1027 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 1028 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 1029 1030 static inline int pte_swp_exclusive(pte_t pte) 1031 { 1032 return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; 1033 } 1034 1035 static inline pte_t pte_swp_mkexclusive(pte_t pte) 1036 { 1037 return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE); 1038 } 1039 1040 static inline pte_t pte_swp_clear_exclusive(pte_t pte) 1041 { 1042 return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE); 1043 } 1044 1045 int page_in_phys_avail(unsigned long paddr); 1046 1047 /* 1048 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in 1049 * its high 4 bits. These macros/functions put it there or get it from there. 1050 */ 1051 #define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4))) 1052 #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4)) 1053 #define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL) 1054 1055 int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long, 1056 unsigned long, pgprot_t); 1057 1058 void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma, 1059 unsigned long addr, pte_t pte); 1060 1061 int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma, 1062 unsigned long addr, pte_t oldpte); 1063 1064 #define __HAVE_ARCH_DO_SWAP_PAGE 1065 static inline void arch_do_swap_page(struct mm_struct *mm, 1066 struct vm_area_struct *vma, 1067 unsigned long addr, 1068 pte_t pte, pte_t oldpte) 1069 { 1070 /* If this is a new page being mapped in, there can be no 1071 * ADI tags stored away for this page. Skip looking for 1072 * stored tags 1073 */ 1074 if (pte_none(oldpte)) 1075 return; 1076 1077 if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V)) 1078 adi_restore_tags(mm, vma, addr, pte); 1079 } 1080 1081 #define __HAVE_ARCH_UNMAP_ONE 1082 static inline int arch_unmap_one(struct mm_struct *mm, 1083 struct vm_area_struct *vma, 1084 unsigned long addr, pte_t oldpte) 1085 { 1086 if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V)) 1087 return adi_save_tags(mm, vma, addr, oldpte); 1088 return 0; 1089 } 1090 1091 static inline int io_remap_pfn_range(struct vm_area_struct *vma, 1092 unsigned long from, unsigned long pfn, 1093 unsigned long size, pgprot_t prot) 1094 { 1095 unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT; 1096 int space = GET_IOSPACE(pfn); 1097 unsigned long phys_base; 1098 1099 phys_base = offset | (((unsigned long) space) << 32UL); 1100 1101 return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); 1102 } 1103 #define io_remap_pfn_range io_remap_pfn_range 1104 1105 static inline unsigned long __untagged_addr(unsigned long start) 1106 { 1107 if (adi_capable()) { 1108 long addr = start; 1109 1110 /* If userspace has passed a versioned address, kernel 1111 * will not find it in the VMAs since it does not store 1112 * the version tags in the list of VMAs. Storing version 1113 * tags in list of VMAs is impractical since they can be 1114 * changed any time from userspace without dropping into 1115 * kernel. Any address search in VMAs will be done with 1116 * non-versioned addresses. Ensure the ADI version bits 1117 * are dropped here by sign extending the last bit before 1118 * ADI bits. IOMMU does not implement version tags. 1119 */ 1120 return (addr << (long)adi_nbits()) >> (long)adi_nbits(); 1121 } 1122 1123 return start; 1124 } 1125 #define untagged_addr(addr) \ 1126 ((__typeof__(addr))(__untagged_addr((unsigned long)(addr)))) 1127 1128 static inline bool pte_access_permitted(pte_t pte, bool write) 1129 { 1130 u64 prot; 1131 1132 if (tlb_type == hypervisor) { 1133 prot = _PAGE_PRESENT_4V | _PAGE_P_4V; 1134 if (write) 1135 prot |= _PAGE_WRITE_4V; 1136 } else { 1137 prot = _PAGE_PRESENT_4U | _PAGE_P_4U; 1138 if (write) 1139 prot |= _PAGE_WRITE_4U; 1140 } 1141 1142 return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot; 1143 } 1144 #define pte_access_permitted pte_access_permitted 1145 1146 /* We provide our own get_unmapped_area to cope with VA holes and 1147 * SHM area cache aliasing for userland. 1148 */ 1149 #define HAVE_ARCH_UNMAPPED_AREA 1150 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 1151 1152 /* We provide a special get_unmapped_area for framebuffer mmaps to try and use 1153 * the largest alignment possible such that larget PTEs can be used. 1154 */ 1155 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, 1156 unsigned long, unsigned long, 1157 unsigned long); 1158 #define HAVE_ARCH_FB_UNMAPPED_AREA 1159 1160 void sun4v_register_fault_status(void); 1161 void sun4v_ktsb_register(void); 1162 void __init cheetah_ecache_flush_init(void); 1163 void sun4v_patch_tlb_handlers(void); 1164 1165 extern unsigned long cmdline_memory_size; 1166 1167 asmlinkage void do_sparc64_fault(struct pt_regs *regs); 1168 1169 #define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD)) 1170 1171 #ifdef CONFIG_HUGETLB_PAGE 1172 1173 #define pud_leaf_size pud_leaf_size 1174 extern unsigned long pud_leaf_size(pud_t pud); 1175 1176 #define pmd_leaf_size pmd_leaf_size 1177 extern unsigned long pmd_leaf_size(pmd_t pmd); 1178 1179 #define pte_leaf_size pte_leaf_size 1180 extern unsigned long pte_leaf_size(pte_t pte); 1181 1182 #endif /* CONFIG_HUGETLB_PAGE */ 1183 1184 #endif /* !(__ASSEMBLY__) */ 1185 1186 #endif /* !(_SPARC64_PGTABLE_H) */ 1187