xref: /linux/arch/s390/mm/fault.c (revision b37042b2bb7cd751f03b73afb90364a418d870f4)
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

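/*
 * Meaning of the constants below, as they are used in this file:
 * __FAIL_ADDR_MASK selects the failing-address part of the translation
 * exception identification (TEID), __SUBCODE_MASK matches the pfault
 * class of external interrupt subcodes, and __PF_RES_FIELD fills the
 * reserved doubleword of the pfault parameter block.
 */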
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

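/*
 * Fault results private to this file, chosen not to clash with the
 * generic VM_FAULT_* codes.
 */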
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

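/*
 * If facility 75 (access-exception fetch/store indication) is installed,
 * two TEID bits indicate whether the faulting access was a fetch or a
 * store; fault_init() then sets store_indication to the mask 0xc00 so
 * that do_exception() can derive is_write from the TEID (a masked value
 * of 0x400 means a store).
 */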
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 * Bits 62/63 of the translation-exception identification (TEID) encode
 * the address space the fault happened in: 0 primary, 1 access register,
 * 2 secondary, 3 home.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

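/*
 * Walk the page table that @asce points to for @address and print one
 * entry per translation level (region first/second/third, segment,
 * page), stopping at the first invalid or large entry.
 */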
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of the fault handlers small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_no_context(struct pt_regs *regs)
{
	if (fixup_exception(regs))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
					vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;
	address = trans_exc_code & __FAIL_ADDR_MASK;
	is_write = (trans_exc_code & store_indication) == 0x400;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		if (kfence_handle_page_fault(address, is_write, regs))
			return 0;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto out_gmap;
		}
		fault = 0;
		goto out;
	}

	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			(flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
			 * not been released
			 */
			current->thread.gmap_pfault = 1;
			fault = VM_FAULT_PFAULT;
			goto out_up;
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
out_gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, so decrement the psw
	 * address. The exceptions to this rule are aborted transactions;
	 * for these the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
					(regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

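/*
 * Parameter block for the diag 0x258 calls below. As used in this file,
 * reffcode 0 establishes pfault handling (pfault_init()) and reffcode 1
 * cancels it (pfault_fini()); refgaddr points to the LPP lowcore field,
 * so the program parameter, which contains the pid, serves as the token
 * identifying the faulting task (see pfault_interrupt()).
 */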
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
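	/*
	 * If the diag faults, e.g. because the hypervisor does not
	 * support pfault, the exception table entry redirects execution
	 * to label 1, which sets rc to 8; callers treat any nonzero rc
	 * as "pfault not available".
	 */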
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest, runs a
 * user space process, and the user space process accesses a page that the
 * host has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, may actually arrive before the interrupt that signals that a
 * page is missing.
 */
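
/*
 * A rough sketch of the two interrupt orderings handled below
 * (illustration only, derived from the code):
 *
 *   normal:   initial irq    -> pfault_wait = 1, task blocks
 *             completion irq -> pfault_wait = 0, task is woken up
 *
 *   inverted: completion irq -> pfault_wait = -1 (task keeps running)
 *             initial irq    -> pfault_wait = 0, task is not put to sleep
 */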
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)

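/*
 * Handle program check 0x3d: an access to storage that is currently
 * secure, i.e. still owned by a protected virtualization guest. The
 * handler tries to make the page accessible again via
 * arch_make_page_accessible(); on failure the offending user task gets
 * a SIGSEGV (or the kernel BUGs for a kernel fault).
 */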
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 tells us if the address is valid; if it is not, we
	 * have a major problem and should stop the kernel or send a
	 * SIGSEGV to the process. Unfortunately bit 61 is not reliable
	 * without the misc UV feature, so we need to check for that as
	 * well.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr)) {
			do_fault_error(regs, VM_ACCESS_FLAGS, VM_FAULT_BADMAP);
			break;
		}
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

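/*
 * Handle the corresponding non-secure storage access exception: a
 * protected virtualization guest touched one of its own pages while it
 * is currently not secure. Try to convert the page to secure via
 * gmap_convert_to_secure() and send a SIGSEGV if the conversion fails
 * with -EINVAL.
 */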
void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */
887