1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * S390 version 4 * Copyright IBM Corp. 1999 5 * Author(s): Hartmut Penner (hp@de.ibm.com) 6 * Ulrich Weigand (uweigand@de.ibm.com) 7 * 8 * Derived from "arch/i386/mm/fault.c" 9 * Copyright (C) 1995 Linus Torvalds 10 */ 11 12 #include <linux/kernel_stat.h> 13 #include <linux/perf_event.h> 14 #include <linux/signal.h> 15 #include <linux/sched.h> 16 #include <linux/sched/debug.h> 17 #include <linux/kernel.h> 18 #include <linux/errno.h> 19 #include <linux/string.h> 20 #include <linux/types.h> 21 #include <linux/ptrace.h> 22 #include <linux/mman.h> 23 #include <linux/mm.h> 24 #include <linux/compat.h> 25 #include <linux/smp.h> 26 #include <linux/kdebug.h> 27 #include <linux/init.h> 28 #include <linux/console.h> 29 #include <linux/extable.h> 30 #include <linux/hardirq.h> 31 #include <linux/kprobes.h> 32 #include <linux/uaccess.h> 33 #include <linux/hugetlb.h> 34 #include <asm/asm-offsets.h> 35 #include <asm/diag.h> 36 #include <asm/pgtable.h> 37 #include <asm/gmap.h> 38 #include <asm/irq.h> 39 #include <asm/mmu_context.h> 40 #include <asm/facility.h> 41 #include <asm/uv.h> 42 #include "../kernel/entry.h" 43 44 #define __FAIL_ADDR_MASK -4096L 45 #define __SUBCODE_MASK 0x0600 46 #define __PF_RES_FIELD 0x8000000000000000ULL 47 48 #define VM_FAULT_BADCONTEXT ((__force vm_fault_t) 0x010000) 49 #define VM_FAULT_BADMAP ((__force vm_fault_t) 0x020000) 50 #define VM_FAULT_BADACCESS ((__force vm_fault_t) 0x040000) 51 #define VM_FAULT_SIGNAL ((__force vm_fault_t) 0x080000) 52 #define VM_FAULT_PFAULT ((__force vm_fault_t) 0x100000) 53 54 enum fault_type { 55 KERNEL_FAULT, 56 USER_FAULT, 57 VDSO_FAULT, 58 GMAP_FAULT, 59 }; 60 61 static unsigned long store_indication __read_mostly; 62 63 static int __init fault_init(void) 64 { 65 if (test_facility(75)) 66 store_indication = 0xc00; 67 return 0; 68 } 69 early_initcall(fault_init); 70 71 /* 72 * Find out which address space caused the exception. 73 */ 74 static enum fault_type get_fault_type(struct pt_regs *regs) 75 { 76 unsigned long trans_exc_code; 77 78 trans_exc_code = regs->int_parm_long & 3; 79 if (likely(trans_exc_code == 0)) { 80 /* primary space exception */ 81 if (IS_ENABLED(CONFIG_PGSTE) && 82 test_pt_regs_flag(regs, PIF_GUEST_FAULT)) 83 return GMAP_FAULT; 84 if (current->thread.mm_segment == USER_DS) 85 return USER_FAULT; 86 return KERNEL_FAULT; 87 } 88 if (trans_exc_code == 2) { 89 /* secondary space exception */ 90 if (current->thread.mm_segment & 1) { 91 if (current->thread.mm_segment == USER_DS_SACF) 92 return USER_FAULT; 93 return KERNEL_FAULT; 94 } 95 return VDSO_FAULT; 96 } 97 if (trans_exc_code == 1) { 98 /* access register mode, not used in the kernel */ 99 return USER_FAULT; 100 } 101 /* home space exception -> access via kernel ASCE */ 102 return KERNEL_FAULT; 103 } 104 105 static int bad_address(void *p) 106 { 107 unsigned long dummy; 108 109 return probe_kernel_address((unsigned long *)p, dummy); 110 } 111 112 static void dump_pagetable(unsigned long asce, unsigned long address) 113 { 114 unsigned long *table = __va(asce & _ASCE_ORIGIN); 115 116 pr_alert("AS:%016lx ", asce); 117 switch (asce & _ASCE_TYPE_MASK) { 118 case _ASCE_TYPE_REGION1: 119 table += (address & _REGION1_INDEX) >> _REGION1_SHIFT; 120 if (bad_address(table)) 121 goto bad; 122 pr_cont("R1:%016lx ", *table); 123 if (*table & _REGION_ENTRY_INVALID) 124 goto out; 125 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 126 fallthrough; 127 case _ASCE_TYPE_REGION2: 128 table += (address & _REGION2_INDEX) >> _REGION2_SHIFT; 129 if (bad_address(table)) 130 goto bad; 131 pr_cont("R2:%016lx ", *table); 132 if (*table & _REGION_ENTRY_INVALID) 133 goto out; 134 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 135 fallthrough; 136 case _ASCE_TYPE_REGION3: 137 table += (address & _REGION3_INDEX) >> _REGION3_SHIFT; 138 if (bad_address(table)) 139 goto bad; 140 pr_cont("R3:%016lx ", *table); 141 if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE)) 142 goto out; 143 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 144 fallthrough; 145 case _ASCE_TYPE_SEGMENT: 146 table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; 147 if (bad_address(table)) 148 goto bad; 149 pr_cont("S:%016lx ", *table); 150 if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE)) 151 goto out; 152 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); 153 } 154 table += (address & _PAGE_INDEX) >> _PAGE_SHIFT; 155 if (bad_address(table)) 156 goto bad; 157 pr_cont("P:%016lx ", *table); 158 out: 159 pr_cont("\n"); 160 return; 161 bad: 162 pr_cont("BAD\n"); 163 } 164 165 static void dump_fault_info(struct pt_regs *regs) 166 { 167 unsigned long asce; 168 169 pr_alert("Failing address: %016lx TEID: %016lx\n", 170 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long); 171 pr_alert("Fault in "); 172 switch (regs->int_parm_long & 3) { 173 case 3: 174 pr_cont("home space "); 175 break; 176 case 2: 177 pr_cont("secondary space "); 178 break; 179 case 1: 180 pr_cont("access register "); 181 break; 182 case 0: 183 pr_cont("primary space "); 184 break; 185 } 186 pr_cont("mode while using "); 187 switch (get_fault_type(regs)) { 188 case USER_FAULT: 189 asce = S390_lowcore.user_asce; 190 pr_cont("user "); 191 break; 192 case VDSO_FAULT: 193 asce = S390_lowcore.vdso_asce; 194 pr_cont("vdso "); 195 break; 196 case GMAP_FAULT: 197 asce = ((struct gmap *) S390_lowcore.gmap)->asce; 198 pr_cont("gmap "); 199 break; 200 case KERNEL_FAULT: 201 asce = S390_lowcore.kernel_asce; 202 pr_cont("kernel "); 203 break; 204 default: 205 unreachable(); 206 } 207 pr_cont("ASCE.\n"); 208 dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK); 209 } 210 211 int show_unhandled_signals = 1; 212 213 void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault) 214 { 215 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) 216 return; 217 if (!unhandled_signal(current, signr)) 218 return; 219 if (!printk_ratelimit()) 220 return; 221 printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ", 222 regs->int_code & 0xffff, regs->int_code >> 17); 223 print_vma_addr(KERN_CONT "in ", regs->psw.addr); 224 printk(KERN_CONT "\n"); 225 if (is_mm_fault) 226 dump_fault_info(regs); 227 show_regs(regs); 228 } 229 230 /* 231 * Send SIGSEGV to task. This is an external routine 232 * to keep the stack usage of do_page_fault small. 233 */ 234 static noinline void do_sigsegv(struct pt_regs *regs, int si_code) 235 { 236 report_user_fault(regs, SIGSEGV, 1); 237 force_sig_fault(SIGSEGV, si_code, 238 (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK)); 239 } 240 241 const struct exception_table_entry *s390_search_extables(unsigned long addr) 242 { 243 const struct exception_table_entry *fixup; 244 245 fixup = search_extable(__start_dma_ex_table, 246 __stop_dma_ex_table - __start_dma_ex_table, 247 addr); 248 if (!fixup) 249 fixup = search_exception_tables(addr); 250 return fixup; 251 } 252 253 static noinline void do_no_context(struct pt_regs *regs) 254 { 255 const struct exception_table_entry *fixup; 256 257 /* Are we prepared to handle this kernel fault? */ 258 fixup = s390_search_extables(regs->psw.addr); 259 if (fixup) { 260 regs->psw.addr = extable_fixup(fixup); 261 return; 262 } 263 264 /* 265 * Oops. The kernel tried to access some bad page. We'll have to 266 * terminate things with extreme prejudice. 267 */ 268 if (get_fault_type(regs) == KERNEL_FAULT) 269 printk(KERN_ALERT "Unable to handle kernel pointer dereference" 270 " in virtual kernel address space\n"); 271 else 272 printk(KERN_ALERT "Unable to handle kernel paging request" 273 " in virtual user address space\n"); 274 dump_fault_info(regs); 275 die(regs, "Oops"); 276 do_exit(SIGKILL); 277 } 278 279 static noinline void do_low_address(struct pt_regs *regs) 280 { 281 /* Low-address protection hit in kernel mode means 282 NULL pointer write access in kernel mode. */ 283 if (regs->psw.mask & PSW_MASK_PSTATE) { 284 /* Low-address protection hit in user mode 'cannot happen'. */ 285 die (regs, "Low-address protection"); 286 do_exit(SIGKILL); 287 } 288 289 do_no_context(regs); 290 } 291 292 static noinline void do_sigbus(struct pt_regs *regs) 293 { 294 /* 295 * Send a sigbus, regardless of whether we were in kernel 296 * or user mode. 297 */ 298 force_sig_fault(SIGBUS, BUS_ADRERR, 299 (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK)); 300 } 301 302 static noinline int signal_return(struct pt_regs *regs) 303 { 304 u16 instruction; 305 int rc; 306 307 rc = __get_user(instruction, (u16 __user *) regs->psw.addr); 308 if (rc) 309 return rc; 310 if (instruction == 0x0a77) { 311 set_pt_regs_flag(regs, PIF_SYSCALL); 312 regs->int_code = 0x00040077; 313 return 0; 314 } else if (instruction == 0x0aad) { 315 set_pt_regs_flag(regs, PIF_SYSCALL); 316 regs->int_code = 0x000400ad; 317 return 0; 318 } 319 return -EACCES; 320 } 321 322 static noinline void do_fault_error(struct pt_regs *regs, int access, 323 vm_fault_t fault) 324 { 325 int si_code; 326 327 switch (fault) { 328 case VM_FAULT_BADACCESS: 329 if (access == VM_EXEC && signal_return(regs) == 0) 330 break; 331 fallthrough; 332 case VM_FAULT_BADMAP: 333 /* Bad memory access. Check if it is kernel or user space. */ 334 if (user_mode(regs)) { 335 /* User mode accesses just cause a SIGSEGV */ 336 si_code = (fault == VM_FAULT_BADMAP) ? 337 SEGV_MAPERR : SEGV_ACCERR; 338 do_sigsegv(regs, si_code); 339 break; 340 } 341 fallthrough; 342 case VM_FAULT_BADCONTEXT: 343 case VM_FAULT_PFAULT: 344 do_no_context(regs); 345 break; 346 case VM_FAULT_SIGNAL: 347 if (!user_mode(regs)) 348 do_no_context(regs); 349 break; 350 default: /* fault & VM_FAULT_ERROR */ 351 if (fault & VM_FAULT_OOM) { 352 if (!user_mode(regs)) 353 do_no_context(regs); 354 else 355 pagefault_out_of_memory(); 356 } else if (fault & VM_FAULT_SIGSEGV) { 357 /* Kernel mode? Handle exceptions or die */ 358 if (!user_mode(regs)) 359 do_no_context(regs); 360 else 361 do_sigsegv(regs, SEGV_MAPERR); 362 } else if (fault & VM_FAULT_SIGBUS) { 363 /* Kernel mode? Handle exceptions or die */ 364 if (!user_mode(regs)) 365 do_no_context(regs); 366 else 367 do_sigbus(regs); 368 } else 369 BUG(); 370 break; 371 } 372 } 373 374 /* 375 * This routine handles page faults. It determines the address, 376 * and the problem, and then passes it off to one of the appropriate 377 * routines. 378 * 379 * interruption code (int_code): 380 * 04 Protection -> Write-Protection (suprression) 381 * 10 Segment translation -> Not present (nullification) 382 * 11 Page translation -> Not present (nullification) 383 * 3b Region third trans. -> Not present (nullification) 384 */ 385 static inline vm_fault_t do_exception(struct pt_regs *regs, int access) 386 { 387 struct gmap *gmap; 388 struct task_struct *tsk; 389 struct mm_struct *mm; 390 struct vm_area_struct *vma; 391 enum fault_type type; 392 unsigned long trans_exc_code; 393 unsigned long address; 394 unsigned int flags; 395 vm_fault_t fault; 396 397 tsk = current; 398 /* 399 * The instruction that caused the program check has 400 * been nullified. Don't signal single step via SIGTRAP. 401 */ 402 clear_pt_regs_flag(regs, PIF_PER_TRAP); 403 404 if (kprobe_page_fault(regs, 14)) 405 return 0; 406 407 mm = tsk->mm; 408 trans_exc_code = regs->int_parm_long; 409 410 /* 411 * Verify that the fault happened in user space, that 412 * we are not in an interrupt and that there is a 413 * user context. 414 */ 415 fault = VM_FAULT_BADCONTEXT; 416 type = get_fault_type(regs); 417 switch (type) { 418 case KERNEL_FAULT: 419 goto out; 420 case VDSO_FAULT: 421 fault = VM_FAULT_BADMAP; 422 goto out; 423 case USER_FAULT: 424 case GMAP_FAULT: 425 if (faulthandler_disabled() || !mm) 426 goto out; 427 break; 428 } 429 430 address = trans_exc_code & __FAIL_ADDR_MASK; 431 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 432 flags = FAULT_FLAG_DEFAULT; 433 if (user_mode(regs)) 434 flags |= FAULT_FLAG_USER; 435 if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) 436 flags |= FAULT_FLAG_WRITE; 437 down_read(&mm->mmap_sem); 438 439 gmap = NULL; 440 if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) { 441 gmap = (struct gmap *) S390_lowcore.gmap; 442 current->thread.gmap_addr = address; 443 current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE); 444 current->thread.gmap_int_code = regs->int_code & 0xffff; 445 address = __gmap_translate(gmap, address); 446 if (address == -EFAULT) { 447 fault = VM_FAULT_BADMAP; 448 goto out_up; 449 } 450 if (gmap->pfault_enabled) 451 flags |= FAULT_FLAG_RETRY_NOWAIT; 452 } 453 454 retry: 455 fault = VM_FAULT_BADMAP; 456 vma = find_vma(mm, address); 457 if (!vma) 458 goto out_up; 459 460 if (unlikely(vma->vm_start > address)) { 461 if (!(vma->vm_flags & VM_GROWSDOWN)) 462 goto out_up; 463 if (expand_stack(vma, address)) 464 goto out_up; 465 } 466 467 /* 468 * Ok, we have a good vm_area for this memory access, so 469 * we can handle it.. 470 */ 471 fault = VM_FAULT_BADACCESS; 472 if (unlikely(!(vma->vm_flags & access))) 473 goto out_up; 474 475 if (is_vm_hugetlb_page(vma)) 476 address &= HPAGE_MASK; 477 /* 478 * If for any reason at all we couldn't handle the fault, 479 * make sure we exit gracefully rather than endlessly redo 480 * the fault. 481 */ 482 fault = handle_mm_fault(vma, address, flags); 483 if (fault_signal_pending(fault, regs)) { 484 fault = VM_FAULT_SIGNAL; 485 if (flags & FAULT_FLAG_RETRY_NOWAIT) 486 goto out_up; 487 goto out; 488 } 489 if (unlikely(fault & VM_FAULT_ERROR)) 490 goto out_up; 491 492 /* 493 * Major/minor page fault accounting is only done on the 494 * initial attempt. If we go through a retry, it is extremely 495 * likely that the page will be found in page cache at that point. 496 */ 497 if (flags & FAULT_FLAG_ALLOW_RETRY) { 498 if (fault & VM_FAULT_MAJOR) { 499 tsk->maj_flt++; 500 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 501 regs, address); 502 } else { 503 tsk->min_flt++; 504 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 505 regs, address); 506 } 507 if (fault & VM_FAULT_RETRY) { 508 if (IS_ENABLED(CONFIG_PGSTE) && gmap && 509 (flags & FAULT_FLAG_RETRY_NOWAIT)) { 510 /* FAULT_FLAG_RETRY_NOWAIT has been set, 511 * mmap_sem has not been released */ 512 current->thread.gmap_pfault = 1; 513 fault = VM_FAULT_PFAULT; 514 goto out_up; 515 } 516 flags &= ~FAULT_FLAG_RETRY_NOWAIT; 517 flags |= FAULT_FLAG_TRIED; 518 down_read(&mm->mmap_sem); 519 goto retry; 520 } 521 } 522 if (IS_ENABLED(CONFIG_PGSTE) && gmap) { 523 address = __gmap_link(gmap, current->thread.gmap_addr, 524 address); 525 if (address == -EFAULT) { 526 fault = VM_FAULT_BADMAP; 527 goto out_up; 528 } 529 if (address == -ENOMEM) { 530 fault = VM_FAULT_OOM; 531 goto out_up; 532 } 533 } 534 fault = 0; 535 out_up: 536 up_read(&mm->mmap_sem); 537 out: 538 return fault; 539 } 540 541 void do_protection_exception(struct pt_regs *regs) 542 { 543 unsigned long trans_exc_code; 544 int access; 545 vm_fault_t fault; 546 547 trans_exc_code = regs->int_parm_long; 548 /* 549 * Protection exceptions are suppressing, decrement psw address. 550 * The exception to this rule are aborted transactions, for these 551 * the PSW already points to the correct location. 552 */ 553 if (!(regs->int_code & 0x200)) 554 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); 555 /* 556 * Check for low-address protection. This needs to be treated 557 * as a special case because the translation exception code 558 * field is not guaranteed to contain valid data in this case. 559 */ 560 if (unlikely(!(trans_exc_code & 4))) { 561 do_low_address(regs); 562 return; 563 } 564 if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) { 565 regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) | 566 (regs->psw.addr & PAGE_MASK); 567 access = VM_EXEC; 568 fault = VM_FAULT_BADACCESS; 569 } else { 570 access = VM_WRITE; 571 fault = do_exception(regs, access); 572 } 573 if (unlikely(fault)) 574 do_fault_error(regs, access, fault); 575 } 576 NOKPROBE_SYMBOL(do_protection_exception); 577 578 void do_dat_exception(struct pt_regs *regs) 579 { 580 int access; 581 vm_fault_t fault; 582 583 access = VM_ACCESS_FLAGS; 584 fault = do_exception(regs, access); 585 if (unlikely(fault)) 586 do_fault_error(regs, access, fault); 587 } 588 NOKPROBE_SYMBOL(do_dat_exception); 589 590 #ifdef CONFIG_PFAULT 591 /* 592 * 'pfault' pseudo page faults routines. 593 */ 594 static int pfault_disable; 595 596 static int __init nopfault(char *str) 597 { 598 pfault_disable = 1; 599 return 1; 600 } 601 602 __setup("nopfault", nopfault); 603 604 struct pfault_refbk { 605 u16 refdiagc; 606 u16 reffcode; 607 u16 refdwlen; 608 u16 refversn; 609 u64 refgaddr; 610 u64 refselmk; 611 u64 refcmpmk; 612 u64 reserved; 613 } __attribute__ ((packed, aligned(8))); 614 615 static struct pfault_refbk pfault_init_refbk = { 616 .refdiagc = 0x258, 617 .reffcode = 0, 618 .refdwlen = 5, 619 .refversn = 2, 620 .refgaddr = __LC_LPP, 621 .refselmk = 1ULL << 48, 622 .refcmpmk = 1ULL << 48, 623 .reserved = __PF_RES_FIELD 624 }; 625 626 int pfault_init(void) 627 { 628 int rc; 629 630 if (pfault_disable) 631 return -1; 632 diag_stat_inc(DIAG_STAT_X258); 633 asm volatile( 634 " diag %1,%0,0x258\n" 635 "0: j 2f\n" 636 "1: la %0,8\n" 637 "2:\n" 638 EX_TABLE(0b,1b) 639 : "=d" (rc) 640 : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc"); 641 return rc; 642 } 643 644 static struct pfault_refbk pfault_fini_refbk = { 645 .refdiagc = 0x258, 646 .reffcode = 1, 647 .refdwlen = 5, 648 .refversn = 2, 649 }; 650 651 void pfault_fini(void) 652 { 653 654 if (pfault_disable) 655 return; 656 diag_stat_inc(DIAG_STAT_X258); 657 asm volatile( 658 " diag %0,0,0x258\n" 659 "0: nopr %%r7\n" 660 EX_TABLE(0b,0b) 661 : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc"); 662 } 663 664 static DEFINE_SPINLOCK(pfault_lock); 665 static LIST_HEAD(pfault_list); 666 667 #define PF_COMPLETE 0x0080 668 669 /* 670 * The mechanism of our pfault code: if Linux is running as guest, runs a user 671 * space process and the user space process accesses a page that the host has 672 * paged out we get a pfault interrupt. 673 * 674 * This allows us, within the guest, to schedule a different process. Without 675 * this mechanism the host would have to suspend the whole virtual cpu until 676 * the page has been paged in. 677 * 678 * So when we get such an interrupt then we set the state of the current task 679 * to uninterruptible and also set the need_resched flag. Both happens within 680 * interrupt context(!). If we later on want to return to user space we 681 * recognize the need_resched flag and then call schedule(). It's not very 682 * obvious how this works... 683 * 684 * Of course we have a lot of additional fun with the completion interrupt (-> 685 * host signals that a page of a process has been paged in and the process can 686 * continue to run). This interrupt can arrive on any cpu and, since we have 687 * virtual cpus, actually appear before the interrupt that signals that a page 688 * is missing. 689 */ 690 static void pfault_interrupt(struct ext_code ext_code, 691 unsigned int param32, unsigned long param64) 692 { 693 struct task_struct *tsk; 694 __u16 subcode; 695 pid_t pid; 696 697 /* 698 * Get the external interruption subcode & pfault initial/completion 699 * signal bit. VM stores this in the 'cpu address' field associated 700 * with the external interrupt. 701 */ 702 subcode = ext_code.subcode; 703 if ((subcode & 0xff00) != __SUBCODE_MASK) 704 return; 705 inc_irq_stat(IRQEXT_PFL); 706 /* Get the token (= pid of the affected task). */ 707 pid = param64 & LPP_PID_MASK; 708 rcu_read_lock(); 709 tsk = find_task_by_pid_ns(pid, &init_pid_ns); 710 if (tsk) 711 get_task_struct(tsk); 712 rcu_read_unlock(); 713 if (!tsk) 714 return; 715 spin_lock(&pfault_lock); 716 if (subcode & PF_COMPLETE) { 717 /* signal bit is set -> a page has been swapped in by VM */ 718 if (tsk->thread.pfault_wait == 1) { 719 /* Initial interrupt was faster than the completion 720 * interrupt. pfault_wait is valid. Set pfault_wait 721 * back to zero and wake up the process. This can 722 * safely be done because the task is still sleeping 723 * and can't produce new pfaults. */ 724 tsk->thread.pfault_wait = 0; 725 list_del(&tsk->thread.list); 726 wake_up_process(tsk); 727 put_task_struct(tsk); 728 } else { 729 /* Completion interrupt was faster than initial 730 * interrupt. Set pfault_wait to -1 so the initial 731 * interrupt doesn't put the task to sleep. 732 * If the task is not running, ignore the completion 733 * interrupt since it must be a leftover of a PFAULT 734 * CANCEL operation which didn't remove all pending 735 * completion interrupts. */ 736 if (tsk->state == TASK_RUNNING) 737 tsk->thread.pfault_wait = -1; 738 } 739 } else { 740 /* signal bit not set -> a real page is missing. */ 741 if (WARN_ON_ONCE(tsk != current)) 742 goto out; 743 if (tsk->thread.pfault_wait == 1) { 744 /* Already on the list with a reference: put to sleep */ 745 goto block; 746 } else if (tsk->thread.pfault_wait == -1) { 747 /* Completion interrupt was faster than the initial 748 * interrupt (pfault_wait == -1). Set pfault_wait 749 * back to zero and exit. */ 750 tsk->thread.pfault_wait = 0; 751 } else { 752 /* Initial interrupt arrived before completion 753 * interrupt. Let the task sleep. 754 * An extra task reference is needed since a different 755 * cpu may set the task state to TASK_RUNNING again 756 * before the scheduler is reached. */ 757 get_task_struct(tsk); 758 tsk->thread.pfault_wait = 1; 759 list_add(&tsk->thread.list, &pfault_list); 760 block: 761 /* Since this must be a userspace fault, there 762 * is no kernel task state to trample. Rely on the 763 * return to userspace schedule() to block. */ 764 __set_current_state(TASK_UNINTERRUPTIBLE); 765 set_tsk_need_resched(tsk); 766 set_preempt_need_resched(); 767 } 768 } 769 out: 770 spin_unlock(&pfault_lock); 771 put_task_struct(tsk); 772 } 773 774 static int pfault_cpu_dead(unsigned int cpu) 775 { 776 struct thread_struct *thread, *next; 777 struct task_struct *tsk; 778 779 spin_lock_irq(&pfault_lock); 780 list_for_each_entry_safe(thread, next, &pfault_list, list) { 781 thread->pfault_wait = 0; 782 list_del(&thread->list); 783 tsk = container_of(thread, struct task_struct, thread); 784 wake_up_process(tsk); 785 put_task_struct(tsk); 786 } 787 spin_unlock_irq(&pfault_lock); 788 return 0; 789 } 790 791 static int __init pfault_irq_init(void) 792 { 793 int rc; 794 795 rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt); 796 if (rc) 797 goto out_extint; 798 rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; 799 if (rc) 800 goto out_pfault; 801 irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); 802 cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead", 803 NULL, pfault_cpu_dead); 804 return 0; 805 806 out_pfault: 807 unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt); 808 out_extint: 809 pfault_disable = 1; 810 return rc; 811 } 812 early_initcall(pfault_irq_init); 813 814 #endif /* CONFIG_PFAULT */ 815 816 #if IS_ENABLED(CONFIG_PGSTE) 817 void do_secure_storage_access(struct pt_regs *regs) 818 { 819 unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK; 820 struct vm_area_struct *vma; 821 struct mm_struct *mm; 822 struct page *page; 823 int rc; 824 825 switch (get_fault_type(regs)) { 826 case USER_FAULT: 827 mm = current->mm; 828 down_read(&mm->mmap_sem); 829 vma = find_vma(mm, addr); 830 if (!vma) { 831 up_read(&mm->mmap_sem); 832 do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP); 833 break; 834 } 835 page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET); 836 if (IS_ERR_OR_NULL(page)) { 837 up_read(&mm->mmap_sem); 838 break; 839 } 840 if (arch_make_page_accessible(page)) 841 send_sig(SIGSEGV, current, 0); 842 put_page(page); 843 up_read(&mm->mmap_sem); 844 break; 845 case KERNEL_FAULT: 846 page = phys_to_page(addr); 847 if (unlikely(!try_get_page(page))) 848 break; 849 rc = arch_make_page_accessible(page); 850 put_page(page); 851 if (rc) 852 BUG(); 853 break; 854 case VDSO_FAULT: 855 case GMAP_FAULT: 856 default: 857 do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP); 858 WARN_ON_ONCE(1); 859 } 860 } 861 NOKPROBE_SYMBOL(do_secure_storage_access); 862 863 void do_non_secure_storage_access(struct pt_regs *regs) 864 { 865 unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK; 866 struct gmap *gmap = (struct gmap *)S390_lowcore.gmap; 867 868 if (get_fault_type(regs) != GMAP_FAULT) { 869 do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP); 870 WARN_ON_ONCE(1); 871 return; 872 } 873 874 if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL) 875 send_sig(SIGSEGV, current, 0); 876 } 877 NOKPROBE_SYMBOL(do_non_secure_storage_access); 878 879 #else 880 void do_secure_storage_access(struct pt_regs *regs) 881 { 882 default_trap_handler(regs); 883 } 884 885 void do_non_secure_storage_access(struct pt_regs *regs) 886 { 887 default_trap_handler(regs); 888 } 889 #endif 890