// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

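/*
 * Architecture-private pseudo fault results, used only to route
 * control through do_fault_error(). do_exception() returns them
 * instead of a real handle_mm_fault() result when the fault cannot
 * or must not be resolved by the generic fault handler.
 */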
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

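/*
 * With the access-exception fetch/store-indication facility (facility
 * bit 75) the translation-exception identification contains two bits
 * that report whether the failing access was a fetch or a store. The
 * 0xc00 mask below selects those bits; do_exception() treats the
 * value 0x400 as a store.
 */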
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

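/* Test whether a kernel address can be read without taking a fault. */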
static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

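/*
 * Walk the page table for @address, starting from the table type
 * encoded in @asce, and print one entry per level. The walk stops
 * early at an invalid or large (huge page) entry.
 */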
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

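/*
 * Report the failing address, the TEID, the address space the fault
 * occurred in and a walk of the page table behind the ASCE in use.
 */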
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

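/*
 * Search the dma exception table first, then fall back to the regular
 * kernel and module exception tables.
 */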
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault?  */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

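/*
 * A fault on one of the two signal return trampolines is replayed as
 * the corresponding system call: 0x0a77 is "svc 119" (sigreturn) and
 * 0x0aad is "svc 173" (rt_sigreturn). Anything else at the faulting
 * PSW address is a genuine bad access.
 */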
static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}

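/*
 * Translate a (pseudo) fault result from do_exception() into a signal
 * for user space or, for faults in kernel context, into an exception
 * table fixup or an oops via do_no_context().
 */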
static noinline void do_fault_error(struct pt_regs *regs, int access,
					vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
		fallthrough;
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address and
 * the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

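	/*
	 * For GMAP_FAULT the guest address has been translated to a
	 * host address by now, so from here on the fault is resolved
	 * like an ordinary user space fault: find the vma, check the
	 * access and call handle_mm_fault(), retrying once if the
	 * first attempt asked for it.
	 */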
retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/*
				 * FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_lock has not been released
				 */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
			flags |= FAULT_FLAG_TRIED;
			mmap_read_lock(mm);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

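/*
 * A protection exception is raised for a write to a write-protected
 * page, for an instruction fetch from a no-execute page if the machine
 * has the instruction-execution-protection facility, or for an access
 * that hits low-address protection.
 */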
void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule is an aborted transaction, for
	 * which the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
					(regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

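/*
 * A DAT exception (segment/page/region translation) only says that
 * the page is not present; any of read, write or execute may have
 * triggered it, so check the vma against all of VM_ACCESS_FLAGS and
 * let the TEID store indication select FAULT_FLAG_WRITE.
 */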
void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

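/*
 * Parameter block for DIAGNOSE 0x258. reffcode selects the operation:
 * function code 0 is used by pfault_init() to establish the pfault
 * handshake, function code 1 by pfault_fini() to cancel it. refgaddr
 * names the lowcore field that carries the per-task token, and
 * refselmk/refcmpmk describe how the host selects and compares that
 * token when it reports pfault events.
 */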
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

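/*
 * Issue DIAGNOSE 0x258 with the init parameter block. If the diagnose
 * is not available, the exception table entry turns the resulting
 * program check into rc = 8, so a failure simply leaves pfault
 * handshaking disabled.
 */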
int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest,
 * runs a user space process and that process accesses a page that the
 * host has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process.
 * Without this mechanism the host would have to suspend the whole
 * virtual cpu until the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current
 * task to uninterruptible and also set the need_resched flag. Both
 * happen within interrupt context(!). If we later on want to return to
 * user space we recognize the need_resched flag and then call
 * schedule().  It's not very obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion
 * interrupt (-> host signals that a page of a process has been paged
 * in and the process can continue to run). This interrupt can arrive
 * on any cpu and, since we have virtual cpus, actually appear before
 * the interrupt that signals that a page is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/*
			 * Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults.
			 */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/*
			 * Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts.
			 */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/*
			 * Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit.
			 */
			tsk->thread.pfault_wait = 0;
		} else {
			/*
			 * Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached.
			 */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/*
			 * Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block.
			 */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

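/*
 * A cpu that goes offline may leave tasks on the pfault list whose
 * completion interrupt will never arrive, so wake all of them up;
 * a woken task simply takes the page fault again if needed.
 */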
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)
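/*
 * A secure storage access exception is raised when a page that belongs
 * to a protected virtualization (secure) guest is touched by code that
 * is not entitled to see it. arch_make_page_accessible() asks the
 * ultravisor to export the page so that the access can be retried.
 */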
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	int rc;

	switch (get_fault_type(regs)) {
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	case VDSO_FAULT:
	case GMAP_FAULT:
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

#else
void do_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}

void do_non_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}
#endif
889