/*
 * include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use up to four of the five
 * levels the hardware provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))

#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
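/*
 * Worked example (64 bit, full four-level setup): a virtual address is
 * split according to the shifts above into
 *
 *	pgd index: address bits 42-52	(region-second table, 2048 entries)
 *	pud index: address bits 31-41	(region-third table, 2048 entries)
 *	pmd index: address bits 20-30	(segment table, 2048 entries)
 *	pte index: address bits 12-19	(page table, 256 entries)
 *
 * so one segment table entry maps PMD_SIZE = 1 MB and one pgd entry
 * maps PGDIR_SIZE = 4 TB. With dynamic page table levels the upper
 * tables may be folded away, see pgd_present() below.
 */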
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * On 31 bit the pmd and pud levels are folded: the pgd is the 2048
 * entry segment table, and each segment table entry points to a page
 * table with 256 pte entries.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as a system call address.
 */

extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(128UL << 30)
#define VMALLOC_END	0x3e000000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */
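/*
 * Example (64 bit): with the values above the vmalloc area ends at
 * 0x3e000000000 and, assuming the full VMALLOC_SIZE of 128 GB is
 * reserved, starts at 0x3c000000000. The exact VMALLOC_START is
 * computed by the setup code at boot, which is why it is a variable
 * rather than a constant.
 */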
/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)

/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                         PFRA                       |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
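/*
 * Illustrative example: a valid, write-protected pte for the physical
 * page at 0x12345000 (a made-up address) reads 0x12345200, i.e. the
 * page frame address plus _PAGE_RO. Invalidating it with ipte sets the
 * 0x400 bit, yielding 0x12345600.
 */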
/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit (for KVM) */
#define _PAGE_SWR	0x008		/* SW pte referenced bit (for KVM) */
#define _PAGE_SPECIAL	0x010		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses ipte to invalidate a
 * given pte. ipte sets the hw invalid bit and clears all tlbs for the page.
 * The page table entry is set to _PAGE_TYPE_EMPTY afterwards. This change
 * is done while holding the lock, but the intermediate step of a previously
 * valid pte with the hw invalid bit set can be observed by handle_pte_fault.
 * That makes it necessary that all valid pte types with the hw invalid bit
 * set must be distinguishable from the four pte types empty, none, swap and
 * file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
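/*
 * For instance, with i = _PAGE_INVALID, r = _PAGE_RO, x = _PAGE_SWX and
 * t = _PAGE_SWT as used by the query functions below:
 *
 *	_PAGE_TYPE_RO   = 0x200: i=0               -> pte_present
 *	_PAGE_TYPE_NONE = 0x401: i=1, t=1, x=0,r=0 -> pte_present (PROT_NONE)
 *	_PAGE_TYPE_FILE = 0x601: i=1, t=1, r=1     -> pte_file (x holds a
 *						      file offset bit)
 *	_PAGE_TYPE_SWAP = 0x403: i=1, t=1, x=1     -> swap entry
 */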
#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* __s390x__ */
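/*
 * Example (64 bit, a sketch of roughly what pgd_populate() in
 * asm/pgalloc.h does): a pgd entry pointing to a pud (region-third)
 * table at physical address "pud" is built as
 *
 *	pgd_val(entry) = _REGION2_ENTRY | __pa(pud);
 *
 * i.e. table origin plus type 10 binary and the maximum table length.
 */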
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW

static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
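/*
 * With CONFIG_PGSTE every page table is allocated together with a
 * shadow "page status table" of PTRS_PER_PTE entries placed directly
 * behind it, so the pgste that belongs to a pte is found at
 * ptep + PTRS_PER_PTE (see pgste_get_lock() below).
 */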
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits) {
		skey ^= bits;
		page_set_storage_key(address, skey, 1);
	}
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Clear host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer changed and referenced to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Transfer changed & referenced to pte software bits */
	pte_val(*ptep) |= bits << 1;		/* _PAGE_SWR & _PAGE_SWC */
#endif
	return pgste;
}
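/*
 * The shift counts above line up with the storage key layout: the key's
 * referenced bit (0x04) and changed bit (0x02) map to RCP_GR_BIT and
 * RCP_GC_BIT with "<< 48", to KVM_UR_BIT and KVM_UC_BIT with "<< 45",
 * and to the software bits _PAGE_SWR and _PAGE_SWC with "<< 1".
 */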
static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	/* Transfer page referenced bit to pte software bit (host view) */
	if (young || (pgste_val(pgste) & RCP_HR_BIT))
		pte_val(*ptep) |= _PAGE_SWR;
	/* Clear host referenced bit in pgste. */
	pgste_val(pgste) &= ~RCP_HR_BIT;
	/* Transfer page referenced bit to guest bit in pgste */
	pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
#endif
	return pgste;
}

static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	address = pte_val(*ptep) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 1);
#endif
}

/**
 * struct gmap - guest address space
 * @list: list head used to chain the gmap structures of an mm
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @list: list head of the reverse mapping chain
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
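/*
 * Typical gmap life cycle (illustrative sketch; uaddr, gaddr and size
 * are made-up names):
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *	gmap_map_segment(gmap, uaddr, gaddr, size);
 *	gmap_enable(gmap);	- run with the guest address space
 *	...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */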
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_pte(ptep, pgste);
		*ptep = entry;
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWC)
		return 1;
#endif
	return 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef __s390x__
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef __s390x__
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}
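/*
 * The check above matters: a _PAGE_TYPE_NONE pte is 0x401, and setting
 * _PAGE_RO on it would yield 0x601, which is _PAGE_TYPE_FILE.
 */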
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWC;
#endif
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * No need to flush the TLB: on s390 the reference bits are in
	 * the storage key and never in the TLB. With virtualization we
	 * handle the reference bit, without it we can simply return.
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the page table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
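/*
 * The ipte instruction takes the page table origin and the virtual
 * address, sets the hardware invalid bit in the addressed pte and
 * purges the matching TLB entries on all CPUs in the configuration.
 */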
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	*ptep = pte;
	if (mm_has_pgste(mm))
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
}
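/*
 * ptep_modify_prot_start/commit form a transaction; common code uses
 * them roughly like this (sketch):
 *
 *	pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_modify(pte, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);
 *
 * Between start and commit the old pte has been invalidated (unless
 * the mm is exclusively attached), so no CPU can install a stale TLB
 * entry while the pte is rewritten.
 */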
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		*ptep = pte_wrprotect(pte);

		if (mm_has_pgste(mm))
			pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);
	*ptep = entry;

	if (mm_has_pgste(vma->vm_mm))
		pgste_set_unlock(ptep, pgste);
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) ({ BUG(); 0UL; })
#define pgd_deref(pgd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */
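/*
 * Putting it together, a software page table walk for a user address
 * looks like this (sketch, no locking shown; pte_offset_map() is
 * defined just below):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *
 * On 31 bit pud_offset/pmd_offset merely cast; on 64 bit they follow
 * the region table entries, honouring folded levels.
 */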
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                         |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
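/*
 * Worked example (64 bit): __swp_entry(5, 0x1234) builds the pte value
 *
 *	_PAGE_TYPE_SWAP | (5 << 2) | ((0x1234 & 1) << 7) |
 *		((0x1234 & ~1UL) << 11)
 *
 * and __swp_type()/__swp_offset() invert exactly this encoding.
 */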
#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)	(1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */