1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * S390 version 4 * Copyright IBM Corp. 1999 5 * Author(s): Hartmut Penner (hp@de.ibm.com) 6 * Ulrich Weigand (uweigand@de.ibm.com) 7 * 8 * Derived from "arch/i386/mm/fault.c" 9 * Copyright (C) 1995 Linus Torvalds 10 */ 11 12 #include <linux/kernel_stat.h> 13 #include <linux/perf_event.h> 14 #include <linux/signal.h> 15 #include <linux/sched.h> 16 #include <linux/sched/debug.h> 17 #include <linux/kernel.h> 18 #include <linux/errno.h> 19 #include <linux/string.h> 20 #include <linux/types.h> 21 #include <linux/ptrace.h> 22 #include <linux/mman.h> 23 #include <linux/mm.h> 24 #include <linux/compat.h> 25 #include <linux/smp.h> 26 #include <linux/kdebug.h> 27 #include <linux/init.h> 28 #include <linux/console.h> 29 #include <linux/extable.h> 30 #include <linux/hardirq.h> 31 #include <linux/kprobes.h> 32 #include <linux/uaccess.h> 33 #include <linux/hugetlb.h> 34 #include <asm/asm-offsets.h> 35 #include <asm/diag.h> 36 #include <asm/pgtable.h> 37 #include <asm/gmap.h> 38 #include <asm/irq.h> 39 #include <asm/mmu_context.h> 40 #include <asm/facility.h> 41 #include "../kernel/entry.h" 42 43 #define __FAIL_ADDR_MASK -4096L 44 #define __SUBCODE_MASK 0x0600 45 #define __PF_RES_FIELD 0x8000000000000000ULL 46 47 #define VM_FAULT_BADCONTEXT 0x010000 48 #define VM_FAULT_BADMAP 0x020000 49 #define VM_FAULT_BADACCESS 0x040000 50 #define VM_FAULT_SIGNAL 0x080000 51 #define VM_FAULT_PFAULT 0x100000 52 53 enum fault_type { 54 KERNEL_FAULT, 55 USER_FAULT, 56 VDSO_FAULT, 57 GMAP_FAULT, 58 }; 59 60 static unsigned long store_indication __read_mostly; 61 62 static int __init fault_init(void) 63 { 64 if (test_facility(75)) 65 store_indication = 0xc00; 66 return 0; 67 } 68 early_initcall(fault_init); 69 70 static inline int notify_page_fault(struct pt_regs *regs) 71 { 72 int ret = 0; 73 74 /* kprobe_running() needs smp_processor_id() */ 75 if (kprobes_built_in() && !user_mode(regs)) { 76 preempt_disable(); 77 if (kprobe_running() && kprobe_fault_handler(regs, 14)) 78 ret = 1; 79 preempt_enable(); 80 } 81 return ret; 82 } 83 84 85 /* 86 * Unlock any spinlocks which will prevent us from getting the 87 * message out. 88 */ 89 void bust_spinlocks(int yes) 90 { 91 if (yes) { 92 oops_in_progress = 1; 93 } else { 94 int loglevel_save = console_loglevel; 95 console_unblank(); 96 oops_in_progress = 0; 97 /* 98 * OK, the message is on the console. Now we call printk() 99 * without oops_in_progress set so that printk will give klogd 100 * a poke. Hold onto your hats... 101 */ 102 console_loglevel = 15; 103 printk(" "); 104 console_loglevel = loglevel_save; 105 } 106 } 107 108 /* 109 * Find out which address space caused the exception. 110 * Access register mode is impossible, ignore space == 3. 111 */ 112 static inline enum fault_type get_fault_type(struct pt_regs *regs) 113 { 114 unsigned long trans_exc_code; 115 116 trans_exc_code = regs->int_parm_long & 3; 117 if (likely(trans_exc_code == 0)) { 118 /* primary space exception */ 119 if (IS_ENABLED(CONFIG_PGSTE) && 120 test_pt_regs_flag(regs, PIF_GUEST_FAULT)) 121 return GMAP_FAULT; 122 if (current->thread.mm_segment == USER_DS) 123 return USER_FAULT; 124 return KERNEL_FAULT; 125 } 126 if (trans_exc_code == 2) { 127 /* secondary space exception */ 128 if (current->thread.mm_segment & 1) { 129 if (current->thread.mm_segment == USER_DS_SACF) 130 return USER_FAULT; 131 return KERNEL_FAULT; 132 } 133 return VDSO_FAULT; 134 } 135 /* home space exception -> access via kernel ASCE */ 136 return KERNEL_FAULT; 137 } 138 139 static int bad_address(void *p) 140 { 141 unsigned long dummy; 142 143 return probe_kernel_address((unsigned long *)p, dummy); 144 } 145 146 static void dump_pagetable(unsigned long asce, unsigned long address) 147 { 148 unsigned long *table = __va(asce & _ASCE_ORIGIN); 149 150 pr_alert("AS:%016lx ", asce); 151 switch (asce & _ASCE_TYPE_MASK) { 152 case _ASCE_TYPE_REGION1: 153 table += (address & _REGION1_INDEX) >> _REGION1_SHIFT; 154 if (bad_address(table)) 155 goto bad; 156 pr_cont("R1:%016lx ", *table); 157 if (*table & _REGION_ENTRY_INVALID) 158 goto out; 159 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 160 /* fallthrough */ 161 case _ASCE_TYPE_REGION2: 162 table += (address & _REGION2_INDEX) >> _REGION2_SHIFT; 163 if (bad_address(table)) 164 goto bad; 165 pr_cont("R2:%016lx ", *table); 166 if (*table & _REGION_ENTRY_INVALID) 167 goto out; 168 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 169 /* fallthrough */ 170 case _ASCE_TYPE_REGION3: 171 table += (address & _REGION3_INDEX) >> _REGION3_SHIFT; 172 if (bad_address(table)) 173 goto bad; 174 pr_cont("R3:%016lx ", *table); 175 if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE)) 176 goto out; 177 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 178 /* fallthrough */ 179 case _ASCE_TYPE_SEGMENT: 180 table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; 181 if (bad_address(table)) 182 goto bad; 183 pr_cont("S:%016lx ", *table); 184 if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE)) 185 goto out; 186 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); 187 } 188 table += (address & _PAGE_INDEX) >> _PAGE_SHIFT; 189 if (bad_address(table)) 190 goto bad; 191 pr_cont("P:%016lx ", *table); 192 out: 193 pr_cont("\n"); 194 return; 195 bad: 196 pr_cont("BAD\n"); 197 } 198 199 static void dump_fault_info(struct pt_regs *regs) 200 { 201 unsigned long asce; 202 203 pr_alert("Failing address: %016lx TEID: %016lx\n", 204 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long); 205 pr_alert("Fault in "); 206 switch (regs->int_parm_long & 3) { 207 case 3: 208 pr_cont("home space "); 209 break; 210 case 2: 211 pr_cont("secondary space "); 212 break; 213 case 1: 214 pr_cont("access register "); 215 break; 216 case 0: 217 pr_cont("primary space "); 218 break; 219 } 220 pr_cont("mode while using "); 221 switch (get_fault_type(regs)) { 222 case USER_FAULT: 223 asce = S390_lowcore.user_asce; 224 pr_cont("user "); 225 break; 226 case VDSO_FAULT: 227 asce = S390_lowcore.vdso_asce; 228 pr_cont("vdso "); 229 break; 230 case GMAP_FAULT: 231 asce = ((struct gmap *) S390_lowcore.gmap)->asce; 232 pr_cont("gmap "); 233 break; 234 case KERNEL_FAULT: 235 asce = S390_lowcore.kernel_asce; 236 pr_cont("kernel "); 237 break; 238 } 239 pr_cont("ASCE.\n"); 240 dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK); 241 } 242 243 int show_unhandled_signals = 1; 244 245 void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault) 246 { 247 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) 248 return; 249 if (!unhandled_signal(current, signr)) 250 return; 251 if (!printk_ratelimit()) 252 return; 253 printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ", 254 regs->int_code & 0xffff, regs->int_code >> 17); 255 print_vma_addr(KERN_CONT "in ", regs->psw.addr); 256 printk(KERN_CONT "\n"); 257 if (is_mm_fault) 258 dump_fault_info(regs); 259 show_regs(regs); 260 } 261 262 /* 263 * Send SIGSEGV to task. This is an external routine 264 * to keep the stack usage of do_page_fault small. 265 */ 266 static noinline void do_sigsegv(struct pt_regs *regs, int si_code) 267 { 268 report_user_fault(regs, SIGSEGV, 1); 269 force_sig_fault(SIGSEGV, si_code, 270 (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK), 271 current); 272 } 273 274 static noinline void do_no_context(struct pt_regs *regs) 275 { 276 const struct exception_table_entry *fixup; 277 278 /* Are we prepared to handle this kernel fault? */ 279 fixup = search_exception_tables(regs->psw.addr); 280 if (fixup) { 281 regs->psw.addr = extable_fixup(fixup); 282 return; 283 } 284 285 /* 286 * Oops. The kernel tried to access some bad page. We'll have to 287 * terminate things with extreme prejudice. 288 */ 289 if (get_fault_type(regs) == KERNEL_FAULT) 290 printk(KERN_ALERT "Unable to handle kernel pointer dereference" 291 " in virtual kernel address space\n"); 292 else 293 printk(KERN_ALERT "Unable to handle kernel paging request" 294 " in virtual user address space\n"); 295 dump_fault_info(regs); 296 die(regs, "Oops"); 297 do_exit(SIGKILL); 298 } 299 300 static noinline void do_low_address(struct pt_regs *regs) 301 { 302 /* Low-address protection hit in kernel mode means 303 NULL pointer write access in kernel mode. */ 304 if (regs->psw.mask & PSW_MASK_PSTATE) { 305 /* Low-address protection hit in user mode 'cannot happen'. */ 306 die (regs, "Low-address protection"); 307 do_exit(SIGKILL); 308 } 309 310 do_no_context(regs); 311 } 312 313 static noinline void do_sigbus(struct pt_regs *regs) 314 { 315 /* 316 * Send a sigbus, regardless of whether we were in kernel 317 * or user mode. 318 */ 319 force_sig_fault(SIGBUS, BUS_ADRERR, 320 (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK), 321 current); 322 } 323 324 static noinline int signal_return(struct pt_regs *regs) 325 { 326 u16 instruction; 327 int rc; 328 329 rc = __get_user(instruction, (u16 __user *) regs->psw.addr); 330 if (rc) 331 return rc; 332 if (instruction == 0x0a77) { 333 set_pt_regs_flag(regs, PIF_SYSCALL); 334 regs->int_code = 0x00040077; 335 return 0; 336 } else if (instruction == 0x0aad) { 337 set_pt_regs_flag(regs, PIF_SYSCALL); 338 regs->int_code = 0x000400ad; 339 return 0; 340 } 341 return -EACCES; 342 } 343 344 static noinline void do_fault_error(struct pt_regs *regs, int access, int fault) 345 { 346 int si_code; 347 348 switch (fault) { 349 case VM_FAULT_BADACCESS: 350 if (access == VM_EXEC && signal_return(regs) == 0) 351 break; 352 case VM_FAULT_BADMAP: 353 /* Bad memory access. Check if it is kernel or user space. */ 354 if (user_mode(regs)) { 355 /* User mode accesses just cause a SIGSEGV */ 356 si_code = (fault == VM_FAULT_BADMAP) ? 357 SEGV_MAPERR : SEGV_ACCERR; 358 do_sigsegv(regs, si_code); 359 break; 360 } 361 case VM_FAULT_BADCONTEXT: 362 case VM_FAULT_PFAULT: 363 do_no_context(regs); 364 break; 365 case VM_FAULT_SIGNAL: 366 if (!user_mode(regs)) 367 do_no_context(regs); 368 break; 369 default: /* fault & VM_FAULT_ERROR */ 370 if (fault & VM_FAULT_OOM) { 371 if (!user_mode(regs)) 372 do_no_context(regs); 373 else 374 pagefault_out_of_memory(); 375 } else if (fault & VM_FAULT_SIGSEGV) { 376 /* Kernel mode? Handle exceptions or die */ 377 if (!user_mode(regs)) 378 do_no_context(regs); 379 else 380 do_sigsegv(regs, SEGV_MAPERR); 381 } else if (fault & VM_FAULT_SIGBUS) { 382 /* Kernel mode? Handle exceptions or die */ 383 if (!user_mode(regs)) 384 do_no_context(regs); 385 else 386 do_sigbus(regs); 387 } else 388 BUG(); 389 break; 390 } 391 } 392 393 /* 394 * This routine handles page faults. It determines the address, 395 * and the problem, and then passes it off to one of the appropriate 396 * routines. 397 * 398 * interruption code (int_code): 399 * 04 Protection -> Write-Protection (suprression) 400 * 10 Segment translation -> Not present (nullification) 401 * 11 Page translation -> Not present (nullification) 402 * 3b Region third trans. -> Not present (nullification) 403 */ 404 static inline int do_exception(struct pt_regs *regs, int access) 405 { 406 struct gmap *gmap; 407 struct task_struct *tsk; 408 struct mm_struct *mm; 409 struct vm_area_struct *vma; 410 enum fault_type type; 411 unsigned long trans_exc_code; 412 unsigned long address; 413 unsigned int flags; 414 int fault; 415 416 tsk = current; 417 /* 418 * The instruction that caused the program check has 419 * been nullified. Don't signal single step via SIGTRAP. 420 */ 421 clear_pt_regs_flag(regs, PIF_PER_TRAP); 422 423 if (notify_page_fault(regs)) 424 return 0; 425 426 mm = tsk->mm; 427 trans_exc_code = regs->int_parm_long; 428 429 /* 430 * Verify that the fault happened in user space, that 431 * we are not in an interrupt and that there is a 432 * user context. 433 */ 434 fault = VM_FAULT_BADCONTEXT; 435 type = get_fault_type(regs); 436 switch (type) { 437 case KERNEL_FAULT: 438 goto out; 439 case VDSO_FAULT: 440 fault = VM_FAULT_BADMAP; 441 goto out; 442 case USER_FAULT: 443 case GMAP_FAULT: 444 if (faulthandler_disabled() || !mm) 445 goto out; 446 break; 447 } 448 449 address = trans_exc_code & __FAIL_ADDR_MASK; 450 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 451 flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 452 if (user_mode(regs)) 453 flags |= FAULT_FLAG_USER; 454 if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) 455 flags |= FAULT_FLAG_WRITE; 456 down_read(&mm->mmap_sem); 457 458 gmap = NULL; 459 if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) { 460 gmap = (struct gmap *) S390_lowcore.gmap; 461 current->thread.gmap_addr = address; 462 current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE); 463 current->thread.gmap_int_code = regs->int_code & 0xffff; 464 address = __gmap_translate(gmap, address); 465 if (address == -EFAULT) { 466 fault = VM_FAULT_BADMAP; 467 goto out_up; 468 } 469 if (gmap->pfault_enabled) 470 flags |= FAULT_FLAG_RETRY_NOWAIT; 471 } 472 473 retry: 474 fault = VM_FAULT_BADMAP; 475 vma = find_vma(mm, address); 476 if (!vma) 477 goto out_up; 478 479 if (unlikely(vma->vm_start > address)) { 480 if (!(vma->vm_flags & VM_GROWSDOWN)) 481 goto out_up; 482 if (expand_stack(vma, address)) 483 goto out_up; 484 } 485 486 /* 487 * Ok, we have a good vm_area for this memory access, so 488 * we can handle it.. 489 */ 490 fault = VM_FAULT_BADACCESS; 491 if (unlikely(!(vma->vm_flags & access))) 492 goto out_up; 493 494 if (is_vm_hugetlb_page(vma)) 495 address &= HPAGE_MASK; 496 /* 497 * If for any reason at all we couldn't handle the fault, 498 * make sure we exit gracefully rather than endlessly redo 499 * the fault. 500 */ 501 fault = handle_mm_fault(vma, address, flags); 502 /* No reason to continue if interrupted by SIGKILL. */ 503 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { 504 fault = VM_FAULT_SIGNAL; 505 if (flags & FAULT_FLAG_RETRY_NOWAIT) 506 goto out_up; 507 goto out; 508 } 509 if (unlikely(fault & VM_FAULT_ERROR)) 510 goto out_up; 511 512 /* 513 * Major/minor page fault accounting is only done on the 514 * initial attempt. If we go through a retry, it is extremely 515 * likely that the page will be found in page cache at that point. 516 */ 517 if (flags & FAULT_FLAG_ALLOW_RETRY) { 518 if (fault & VM_FAULT_MAJOR) { 519 tsk->maj_flt++; 520 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 521 regs, address); 522 } else { 523 tsk->min_flt++; 524 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 525 regs, address); 526 } 527 if (fault & VM_FAULT_RETRY) { 528 if (IS_ENABLED(CONFIG_PGSTE) && gmap && 529 (flags & FAULT_FLAG_RETRY_NOWAIT)) { 530 /* FAULT_FLAG_RETRY_NOWAIT has been set, 531 * mmap_sem has not been released */ 532 current->thread.gmap_pfault = 1; 533 fault = VM_FAULT_PFAULT; 534 goto out_up; 535 } 536 /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk 537 * of starvation. */ 538 flags &= ~(FAULT_FLAG_ALLOW_RETRY | 539 FAULT_FLAG_RETRY_NOWAIT); 540 flags |= FAULT_FLAG_TRIED; 541 down_read(&mm->mmap_sem); 542 goto retry; 543 } 544 } 545 if (IS_ENABLED(CONFIG_PGSTE) && gmap) { 546 address = __gmap_link(gmap, current->thread.gmap_addr, 547 address); 548 if (address == -EFAULT) { 549 fault = VM_FAULT_BADMAP; 550 goto out_up; 551 } 552 if (address == -ENOMEM) { 553 fault = VM_FAULT_OOM; 554 goto out_up; 555 } 556 } 557 fault = 0; 558 out_up: 559 up_read(&mm->mmap_sem); 560 out: 561 return fault; 562 } 563 564 void do_protection_exception(struct pt_regs *regs) 565 { 566 unsigned long trans_exc_code; 567 int access, fault; 568 569 trans_exc_code = regs->int_parm_long; 570 /* 571 * Protection exceptions are suppressing, decrement psw address. 572 * The exception to this rule are aborted transactions, for these 573 * the PSW already points to the correct location. 574 */ 575 if (!(regs->int_code & 0x200)) 576 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); 577 /* 578 * Check for low-address protection. This needs to be treated 579 * as a special case because the translation exception code 580 * field is not guaranteed to contain valid data in this case. 581 */ 582 if (unlikely(!(trans_exc_code & 4))) { 583 do_low_address(regs); 584 return; 585 } 586 if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) { 587 regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) | 588 (regs->psw.addr & PAGE_MASK); 589 access = VM_EXEC; 590 fault = VM_FAULT_BADACCESS; 591 } else { 592 access = VM_WRITE; 593 fault = do_exception(regs, access); 594 } 595 if (unlikely(fault)) 596 do_fault_error(regs, access, fault); 597 } 598 NOKPROBE_SYMBOL(do_protection_exception); 599 600 void do_dat_exception(struct pt_regs *regs) 601 { 602 int access, fault; 603 604 access = VM_READ | VM_EXEC | VM_WRITE; 605 fault = do_exception(regs, access); 606 if (unlikely(fault)) 607 do_fault_error(regs, access, fault); 608 } 609 NOKPROBE_SYMBOL(do_dat_exception); 610 611 #ifdef CONFIG_PFAULT 612 /* 613 * 'pfault' pseudo page faults routines. 614 */ 615 static int pfault_disable; 616 617 static int __init nopfault(char *str) 618 { 619 pfault_disable = 1; 620 return 1; 621 } 622 623 __setup("nopfault", nopfault); 624 625 struct pfault_refbk { 626 u16 refdiagc; 627 u16 reffcode; 628 u16 refdwlen; 629 u16 refversn; 630 u64 refgaddr; 631 u64 refselmk; 632 u64 refcmpmk; 633 u64 reserved; 634 } __attribute__ ((packed, aligned(8))); 635 636 int pfault_init(void) 637 { 638 struct pfault_refbk refbk = { 639 .refdiagc = 0x258, 640 .reffcode = 0, 641 .refdwlen = 5, 642 .refversn = 2, 643 .refgaddr = __LC_LPP, 644 .refselmk = 1ULL << 48, 645 .refcmpmk = 1ULL << 48, 646 .reserved = __PF_RES_FIELD }; 647 int rc; 648 649 if (pfault_disable) 650 return -1; 651 diag_stat_inc(DIAG_STAT_X258); 652 asm volatile( 653 " diag %1,%0,0x258\n" 654 "0: j 2f\n" 655 "1: la %0,8\n" 656 "2:\n" 657 EX_TABLE(0b,1b) 658 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); 659 return rc; 660 } 661 662 void pfault_fini(void) 663 { 664 struct pfault_refbk refbk = { 665 .refdiagc = 0x258, 666 .reffcode = 1, 667 .refdwlen = 5, 668 .refversn = 2, 669 }; 670 671 if (pfault_disable) 672 return; 673 diag_stat_inc(DIAG_STAT_X258); 674 asm volatile( 675 " diag %0,0,0x258\n" 676 "0: nopr %%r7\n" 677 EX_TABLE(0b,0b) 678 : : "a" (&refbk), "m" (refbk) : "cc"); 679 } 680 681 static DEFINE_SPINLOCK(pfault_lock); 682 static LIST_HEAD(pfault_list); 683 684 #define PF_COMPLETE 0x0080 685 686 /* 687 * The mechanism of our pfault code: if Linux is running as guest, runs a user 688 * space process and the user space process accesses a page that the host has 689 * paged out we get a pfault interrupt. 690 * 691 * This allows us, within the guest, to schedule a different process. Without 692 * this mechanism the host would have to suspend the whole virtual cpu until 693 * the page has been paged in. 694 * 695 * So when we get such an interrupt then we set the state of the current task 696 * to uninterruptible and also set the need_resched flag. Both happens within 697 * interrupt context(!). If we later on want to return to user space we 698 * recognize the need_resched flag and then call schedule(). It's not very 699 * obvious how this works... 700 * 701 * Of course we have a lot of additional fun with the completion interrupt (-> 702 * host signals that a page of a process has been paged in and the process can 703 * continue to run). This interrupt can arrive on any cpu and, since we have 704 * virtual cpus, actually appear before the interrupt that signals that a page 705 * is missing. 706 */ 707 static void pfault_interrupt(struct ext_code ext_code, 708 unsigned int param32, unsigned long param64) 709 { 710 struct task_struct *tsk; 711 __u16 subcode; 712 pid_t pid; 713 714 /* 715 * Get the external interruption subcode & pfault initial/completion 716 * signal bit. VM stores this in the 'cpu address' field associated 717 * with the external interrupt. 718 */ 719 subcode = ext_code.subcode; 720 if ((subcode & 0xff00) != __SUBCODE_MASK) 721 return; 722 inc_irq_stat(IRQEXT_PFL); 723 /* Get the token (= pid of the affected task). */ 724 pid = param64 & LPP_PID_MASK; 725 rcu_read_lock(); 726 tsk = find_task_by_pid_ns(pid, &init_pid_ns); 727 if (tsk) 728 get_task_struct(tsk); 729 rcu_read_unlock(); 730 if (!tsk) 731 return; 732 spin_lock(&pfault_lock); 733 if (subcode & PF_COMPLETE) { 734 /* signal bit is set -> a page has been swapped in by VM */ 735 if (tsk->thread.pfault_wait == 1) { 736 /* Initial interrupt was faster than the completion 737 * interrupt. pfault_wait is valid. Set pfault_wait 738 * back to zero and wake up the process. This can 739 * safely be done because the task is still sleeping 740 * and can't produce new pfaults. */ 741 tsk->thread.pfault_wait = 0; 742 list_del(&tsk->thread.list); 743 wake_up_process(tsk); 744 put_task_struct(tsk); 745 } else { 746 /* Completion interrupt was faster than initial 747 * interrupt. Set pfault_wait to -1 so the initial 748 * interrupt doesn't put the task to sleep. 749 * If the task is not running, ignore the completion 750 * interrupt since it must be a leftover of a PFAULT 751 * CANCEL operation which didn't remove all pending 752 * completion interrupts. */ 753 if (tsk->state == TASK_RUNNING) 754 tsk->thread.pfault_wait = -1; 755 } 756 } else { 757 /* signal bit not set -> a real page is missing. */ 758 if (WARN_ON_ONCE(tsk != current)) 759 goto out; 760 if (tsk->thread.pfault_wait == 1) { 761 /* Already on the list with a reference: put to sleep */ 762 goto block; 763 } else if (tsk->thread.pfault_wait == -1) { 764 /* Completion interrupt was faster than the initial 765 * interrupt (pfault_wait == -1). Set pfault_wait 766 * back to zero and exit. */ 767 tsk->thread.pfault_wait = 0; 768 } else { 769 /* Initial interrupt arrived before completion 770 * interrupt. Let the task sleep. 771 * An extra task reference is needed since a different 772 * cpu may set the task state to TASK_RUNNING again 773 * before the scheduler is reached. */ 774 get_task_struct(tsk); 775 tsk->thread.pfault_wait = 1; 776 list_add(&tsk->thread.list, &pfault_list); 777 block: 778 /* Since this must be a userspace fault, there 779 * is no kernel task state to trample. Rely on the 780 * return to userspace schedule() to block. */ 781 __set_current_state(TASK_UNINTERRUPTIBLE); 782 set_tsk_need_resched(tsk); 783 set_preempt_need_resched(); 784 } 785 } 786 out: 787 spin_unlock(&pfault_lock); 788 put_task_struct(tsk); 789 } 790 791 static int pfault_cpu_dead(unsigned int cpu) 792 { 793 struct thread_struct *thread, *next; 794 struct task_struct *tsk; 795 796 spin_lock_irq(&pfault_lock); 797 list_for_each_entry_safe(thread, next, &pfault_list, list) { 798 thread->pfault_wait = 0; 799 list_del(&thread->list); 800 tsk = container_of(thread, struct task_struct, thread); 801 wake_up_process(tsk); 802 put_task_struct(tsk); 803 } 804 spin_unlock_irq(&pfault_lock); 805 return 0; 806 } 807 808 static int __init pfault_irq_init(void) 809 { 810 int rc; 811 812 rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt); 813 if (rc) 814 goto out_extint; 815 rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; 816 if (rc) 817 goto out_pfault; 818 irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); 819 cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead", 820 NULL, pfault_cpu_dead); 821 return 0; 822 823 out_pfault: 824 unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt); 825 out_extint: 826 pfault_disable = 1; 827 return rc; 828 } 829 early_initcall(pfault_irq_init); 830 831 #endif /* CONFIG_PFAULT */ 832