/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

#endif /* !__ASSEMBLY__ */
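/*
 * For illustration only (sketch, not part of this interface; the helper
 * name is made up): with __HAVE_COLOR_ZERO_PAGE there are several copies
 * of the zero page starting at empty_zero_page, and zero_page_mask picks
 * the copy whose cache color matches the faulting address. All copies
 * read as zeroes.
 */
#if 0	/* illustrative sketch, not compiled */
static inline struct page *example_zero_page_for(unsigned long vaddr)
{
	/* Resolves to one of the colored zero pages behind empty_zero_page */
	return ZERO_PAGE(vaddr);
}
#endif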
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
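/*
 * For illustration only: the geometry above works out as follows on 64 bit.
 * A page table maps 256 * 4KB = 1MB (PMD_SHIFT 20), a segment table maps
 * 2048 * 1MB = 2GB (PUD_SHIFT 31), a region third table maps 2048 * 2GB =
 * 4TB (PGDIR_SHIFT 42). On 31 bit the pud/pmd levels are folded and every
 * level covers the same 1MB. Compile-time sanity checks, as a sketch
 * (BUILD_BUG_ON is from <linux/bug.h>):
 */
#if 0	/* illustrative sketch, not compiled */
static inline void example_check_table_geometry(void)
{
	BUILD_BUG_ON(PMD_SIZE   != PTRS_PER_PTE * PAGE_SIZE);
	BUILD_BUG_ON(PUD_SIZE   != PTRS_PER_PMD * PMD_SIZE);
	BUILD_BUG_ON(PGDIR_SIZE != PTRS_PER_PUD * PUD_SIZE);
}
#endif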
#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                        PFRA                        |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
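/*
 * For illustration only (hypothetical helper): picking a storage key apart
 * according to the format above, using the page_get_storage_key() accessor
 * that the pgste code below relies on.
 */
#if 0	/* illustrative sketch, not compiled */
static inline void example_decode_storage_key(unsigned long address)
{
	unsigned char skey = page_get_storage_key(address);
	unsigned char acc     = skey >> 4;		/* ACC: access key */
	unsigned char fetch   = (skey >> 3) & 1;	/* F: fetch protection */
	unsigned char refd    = (skey >> 2) & 1;	/* R: referenced */
	unsigned char changed = (skey >> 1) & 1;	/* C: changed */
}
#endif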
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit */
#define _PAGE_SWR	0x008		/* SW pte referenced bit */
#define _PAGE_SWW	0x010		/* SW pte write bit */
#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
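/*
 * For illustration only (hypothetical helper): the table above in code
 * form. "irxt" stands for the _PAGE_INVALID, _PAGE_RO, _PAGE_SWX and
 * _PAGE_SWT bits of a pte, in that order.
 */
#if 0	/* illustrative sketch, not compiled */
static inline const char *example_classify_pte(pte_t pte)
{
	unsigned long v = pte_val(pte);

	if (!(v & _PAGE_INVALID))
		return "valid: rw (0000) or ro (0100)";
	/* hw invalid bit is set: one of the software-defined types */
	if ((v & (_PAGE_RO | _PAGE_SWT)) == (_PAGE_RO | _PAGE_SWT))
		return "file (11?1)";
	if ((v & (_PAGE_SWT | _PAGE_SWX)) == (_PAGE_SWT | _PAGE_SWX))
		return "swap (1011)";
	if (v & _PAGE_SWT)
		return "none (1001)";
	return "empty (1000) or an ipte'd rw/ro pte (1000/1100)";
}
#endif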
#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_RO	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number	    */
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */
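/*
 * For illustration only (hypothetical helper, 64 bit names): splitting a
 * region table entry into the fields defined above. The reading of the
 * length field as "(TL+1) * 512 entries" is an assumption based on the
 * architecture description, not something this header states.
 */
#if 0	/* illustrative sketch, not compiled */
#ifdef CONFIG_64BIT
static inline void example_decode_region_entry(unsigned long entry)
{
	unsigned long origin = entry & _REGION_ENTRY_ORIGIN;	/* next table */
	unsigned int type    = entry & _REGION_ENTRY_TYPE_MASK;	/* R1/R2/R3 */
	unsigned int length  = entry & _REGION_ENTRY_LENGTH;	/* (TL+1)*512 entries */
	int invalid          = (entry & _REGION_ENTRY_INV) != 0;
}
#endif
#endif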
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
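/*
 * For illustration only (hypothetical helper): roughly what a user asce
 * built from a top-level table at "origin" looks like on 64 bit with three
 * levels in use. The real composition lives in the mm context handling,
 * not in this header.
 */
#if 0	/* illustrative sketch, not compiled */
#ifdef CONFIG_64BIT
static inline unsigned long example_user_asce(unsigned long origin)
{
	return origin | _ASCE_TABLE_LENGTH | _ASCE_TYPE_REGION3 |
	       _ASCE_USER_BITS;
}
#endif
#endif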
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)

#define PAGE_KERNEL	PAGE_RWC
#define PAGE_SHARED	PAGE_KERNEL
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW

static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
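/*
 * For illustration only (hypothetical helper, mirrors what generic mm code
 * does with these values as far as I understand it): the __Pxxx/__Sxxx
 * entries above end up in protection_map[], indexed by the rwx/shared bits
 * of vm_flags. Because read implies execute here, a PROT_EXEC-only private
 * mapping simply resolves to PAGE_RO.
 */
#if 0	/* illustrative sketch, not compiled */
static inline pgprot_t example_prot_for(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
#endif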
/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_large(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
#else
	return 0;
#endif
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}
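/*
 * For illustration only (hypothetical helper): the csg loop above is a bit
 * spinlock on RCP_PCL_BIT in the pgste, which lives one page-table size
 * behind the ptes (ptep[PTRS_PER_PTE]). A rough C equivalent, assuming the
 * kernel's cmpxchg():
 */
#if 0	/* illustrative sketch, not compiled */
static inline unsigned long example_pgste_lock(pte_t *ptep)
{
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	unsigned long old, new;

	preempt_disable();
	do {
		old = *pgste & ~RCP_PCL_BIT;	/* spin while the bit is set */
		new = old | RCP_PCL_BIT;	/* take the lock */
	} while (cmpxchg(pgste, old, new) != old);
	return new;
}
#endif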
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 0);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Transfer page changed & referenced bit to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Clear relevant host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer referenced bit to pte */
	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	/* Get referenced bit from storage key */
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	if (young)
		pgste_val(pgste) |= RCP_GR_BIT;
	/* Get host referenced bit from pgste */
	if (pgste_val(pgste) & RCP_HR_BIT) {
		pgste_val(pgste) &= ~RCP_HR_BIT;
		young = 1;
	}
	/* Transfer referenced bit to kvm user bits and pte */
	if (young) {
		pgste_val(pgste) |= KVM_UR_BIT;
		pte_val(*ptep) |= _PAGE_SWR;
	}
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 0);
#endif
}

static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
		pte_val(entry) |= _PAGE_SWC;
		pte_val(entry) &= ~_PAGE_RO;
	}
	*ptep = entry;
}

/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @next: pointer to the next gmap_rmap structure in the list
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWW) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWC) != 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWW;
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWW;
	if (pte_val(pte) & _PAGE_SWC)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWC;
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWC;
	if (pte_val(pte) & _PAGE_SWW)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits, there are no SW bits in a segment table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
			  _PAGE_SWR | _PAGE_SWW);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif
/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB
	 * With virtualization we handle the reference bit, without it
	 * we can simply return */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
		/* pto must point to the start of the page table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	if (mm_has_pgste(mm)) {
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
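/*
 * For illustration only (hypothetical helper): the common code sequence
 * the comment above refers to, roughly as change_pte_range() uses it when
 * changing the protection of a live mapping.
 */
#if 0	/* illustrative sketch, not compiled */
static inline void example_change_prot(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep,
				       pgprot_t newprot)
{
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);	/* flushes on s390 */
	set_pte_at(mm, addr, ptep, pte_modify(pte, newprot));
	/* the following flush_tlb_range() in common code is a nop here */
}
#endif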
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste_set_pte(ptep, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
		pte_val(__pte) |= _PAGE_SWC;
		pte_val(__pte) &= ~_PAGE_RO;
	}
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) ({ BUG(); 0UL; })
#define pgd_deref(pgd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
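/*
 * For illustration only (hypothetical helper): a full software walk with
 * the helpers above. On 31 bit the pud/pmd levels are folded, so
 * pud_offset()/pmd_offset() just cast the pointer through.
 */
#if 0	/* illustrative sketch, not compiled */
static inline pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;	/* no pte level below a large pmd */
	return pte_offset_map(pmd, addr);
}
#endif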
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
			: "cc"
		);
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
	*pmdp = entry;
}

static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	return pgprot_val(SEGMENT_RW);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	/* Do not clobber _HPAGE_TYPE_NONE pages! */
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	/* No dirty bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
	long tmp, rc;
	int counter;

	rc = 0;
	if (MACHINE_HAS_RRBM) {
		/* Reset the reference bits of 64 pages per rrbm and
		 * accumulate the returned bit mask in rc. */
		counter = PTRS_PER_PTE >> 6;
		asm volatile(
			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
			"	ogr	%1,%0\n"
			"	la	%3,0(%4,%3)\n"
			"	brct	%2,0b\n"
			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
			  "+a" (pmd_addr)
			: "a" (64 * 4096UL) : "cc");
		rc = !!rc;
	} else {
		/* Fall back to one rrbe per 4K page. */
		counter = PTRS_PER_PTE;
		asm volatile(
			"0:	rrbe	0,%2\n"
			"	la	%2,0(%3,%2)\n"
			"	brc	12,1f\n"	/* cc 0/1: bit was zero */
			"	lhi	%0,1\n"
			"1:	brct	%1,0b\n"
			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
			: "a" (4096UL) : "cc");
	}
	return rc;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	__pmd_idte(address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	__pmd_idte(address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		__pmd_idte(address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
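/*
 * For illustration only (hypothetical helper): encoding and decoding a swap
 * entry with the macros above. Type and offset survive the roundtrip, and
 * the resulting pte carries _PAGE_TYPE_SWAP, so it is neither pte_present()
 * nor pte_none().
 */
#if 0	/* illustrative sketch, not compiled */
static inline void example_swap_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);	/* type 3, offset 0x1234 */
	pte_t pte = __swp_entry_to_pte(entry);

	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != 3);
	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x1234);
}
#endif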
#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */