xref: /linux/arch/s390/mm/fault.c (revision 3d0fe49454652117522f60bfbefb978ba0e5300b)
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *		 Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/fault.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

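/*
 * Facility 75 is the store-indication facility: with it installed
 * the TEID reliably reports whether the failing access was a store,
 * see fault_is_write().
 */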
static DEFINE_STATIC_KEY_FALSE(have_store_indication);

static int __init fault_init(void)
{
	if (test_facility(75))
		static_branch_enable(&have_store_indication);
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	if (likely(teid.as == PSW_BITS_AS_PRIMARY)) {
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (teid.as == PSW_BITS_AS_SECONDARY)
		return USER_FAULT;
	/* Access register mode, not used in the kernel */
	if (teid.as == PSW_BITS_AS_ACCREG)
		return USER_FAULT;
	/* Home space -> access via kernel ASCE */
	return KERNEL_FAULT;
}

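/*
 * The TEID contains the number of the failing page; scale it back
 * up to a byte address.
 */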
static unsigned long get_fault_address(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	return teid.addr * PAGE_SIZE;
}

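/*
 * Without the store-indication facility the fetch/store-indication
 * (FSI) field of the TEID is unpredictable, so treat every fault
 * conservatively as a read.
 */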
static __always_inline bool fault_is_write(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	if (static_branch_likely(&have_store_indication))
		return teid.fsi == TEID_FSI_STORE;
	return false;
}

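/*
 * Walk the page table identified by the given ASCE and print the
 * entry found at each level for the given address. The walk stops
 * at an invalid or large entry, or prints BAD if an entry cannot
 * be read.
 */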
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R1:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R2:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R3:%016lx ", entry);
		if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("S:%016lx ", entry);
		if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(entry & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (get_kernel_nofault(entry, table))
		goto bad;
	pr_cont("P:%016lx ", entry);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

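/*
 * Print the failing address, the TEID, the address space the fault
 * occurred in, and a walk of the page table that was in use.
 */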
static void dump_fault_info(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 get_fault_address(regs), teid.val);
	pr_alert("Fault in ");
	switch (teid.as) {
	case PSW_BITS_AS_HOME:
		pr_cont("home space ");
		break;
	case PSW_BITS_AS_SECONDARY:
		pr_cont("secondary space ");
		break;
	case PSW_BITS_AS_ACCREG:
		pr_cont("access register ");
		break;
	case PSW_BITS_AS_PRIMARY:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce.val;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *)S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce.val;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, get_fault_address(regs));
}

int show_unhandled_signals = 1;

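/*
 * Print a rate-limited report about an unhandled fault of a user
 * process: interruption code, instruction location, fault details
 * (for mm faults) and register contents.
 */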
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!__ratelimit(&rs))
		return;
	pr_alert("User process fault: interruption code %04x ilc:%d ",
		 regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	pr_cont("\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

static void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
}

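/*
 * Handle an unresolvable fault without holding the mmap lock: user
 * mode gets a SIGSEGV; in kernel mode try an exception table fixup
 * and, for kernel-space faults, KFENCE, before giving up and dying
 * with an oops.
 */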
static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
{
	enum fault_type fault_type;
	unsigned long address;
	bool is_write;

	if (user_mode(regs)) {
		if (WARN_ON_ONCE(!si_code))
			si_code = SEGV_MAPERR;
		return do_sigsegv(regs, si_code);
	}
	if (fixup_exception(regs))
		return;
	fault_type = get_fault_type(regs);
	if (fault_type == KERNEL_FAULT) {
		address = get_fault_address(regs);
		is_write = fault_is_write(regs);
		if (kfence_handle_page_fault(address, is_write, regs))
			return;
	}
	if (fault_type == KERNEL_FAULT)
		pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n");
	else
		pr_alert("Unable to handle kernel paging request in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static void handle_fault_error(struct pt_regs *regs, int si_code)
{
	struct mm_struct *mm = current->mm;

	mmap_read_unlock(mm);
	handle_fault_error_nolock(regs, si_code);
}

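/* Send SIGBUS with si_code BUS_ADRERR for the failing address. */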
static void do_sigbus(struct pt_regs *regs)
{
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs));
}

/*
 * This routine handles page faults. It determines the faulting
 * address and the kind of fault, and then passes the work off to
 * one of the appropriate routines.
 *
 * interruption code (int_code):
 *   04       Protection	   ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present	 (nullification)
 *   11       Page translation	   ->  Not present	 (nullification)
 *   3b       Region third trans.  ->  Not present	 (nullification)
 */
static void do_exception(struct pt_regs *regs, int access)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	enum fault_type type;
	unsigned int flags;
	struct gmap *gmap;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);
	if (kprobe_page_fault(regs, 14))
		return;
	mm = tsk->mm;
	address = get_fault_address(regs);
	is_write = fault_is_write(regs);
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		return handle_fault_error_nolock(regs, 0);
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			return handle_fault_error_nolock(regs, 0);
		break;
	}
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
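	/*
	 * First try to resolve the fault under the per-VMA lock
	 * (lockless fast path); fall back to the mmap_lock path if
	 * the VMA cannot be found or used that way.
	 */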
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (!(vma->vm_flags & access)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		if (unlikely(fault & VM_FAULT_ERROR))
			goto error;
		return;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
lock_mmap:
	mmap_read_lock(mm);
	gmap = NULL;
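	/*
	 * For a guest fault the failing guest address must first be
	 * translated to the corresponding host address in the KVM
	 * guest mapping (gmap).
	 */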
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *)S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
retry:
	vma = find_vma(mm, address);
	if (!vma)
		return handle_fault_error(regs, SEGV_MAPERR);
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return handle_fault_error(regs, SEGV_MAPERR);
		vma = expand_stack(mm, address);
		if (!vma)
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
	}
	if (unlikely(!(vma->vm_flags & access)))
		return handle_fault_error(regs, SEGV_ACCERR);
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			mmap_read_unlock(mm);
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto gmap;
		}
		return;
	}
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mmap_read_unlock(mm);
		goto error;
	}
	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set,
			 * mmap_lock has not been released
			 */
			current->thread.gmap_pfault = 1;
			return handle_fault_error(regs, 0);
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
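	/*
	 * Once the host fault is resolved, link the host page into
	 * the guest mapping so the original guest access can make
	 * progress.
	 */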
gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			mmap_read_unlock(mm);
			goto error;
		}
	}
	mmap_read_unlock(mm);
	return;
error:
	if (fault & VM_FAULT_OOM) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			pagefault_out_of_memory();
	} else if (fault & VM_FAULT_SIGSEGV) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigsegv(regs, SEGV_MAPERR);
	} else if (fault & VM_FAULT_SIGBUS) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigbus(regs);
	} else {
		BUG();
	}
}

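/*
 * Handle protection exceptions (interruption code 0x04, see the
 * table above do_exception()).
 */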
void do_protection_exception(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	/*
	 * Protection exceptions are suppressing, decrement the psw address.
	 * The exception to this rule is aborted transactions; for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!teid.b61)) {
		if (user_mode(regs)) {
			/* Low-address protection in user mode: cannot happen */
			die(regs, "Low-address protection");
		}
		/*
		 * Low-address protection in kernel mode means
		 * NULL pointer write access in kernel mode.
		 */
		return handle_fault_error_nolock(regs, 0);
	}
	if (unlikely(MACHINE_HAS_NX && teid.b56)) {
		regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
		return handle_fault_error_nolock(regs, SEGV_ACCERR);
	}
	do_exception(regs, VM_WRITE);
}
NOKPROBE_SYMBOL(do_protection_exception);

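/*
 * Handle DAT (translation) exceptions: segment, page and region
 * translation faults. Any of the VM_ACCESS_FLAGS permissions on the
 * VMA satisfies such a fault; do_exception() upgrades the required
 * access to VM_WRITE when the TEID indicates a store.
 */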
void do_dat_exception(struct pt_regs *regs)
{
	do_exception(regs, VM_ACCESS_FLAGS);
}
NOKPROBE_SYMBOL(do_dat_exception);

#if IS_ENABLED(CONFIG_PGSTE)

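/*
 * Handle secure storage access exceptions (PGM 0x3d): a page of a
 * protected virtualization guest was touched while still secure;
 * make it accessible via the ultravisor before the access is retried.
 */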
void do_secure_storage_access(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long addr = get_fault_address(regs);
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 indicates whether the address is valid; if it is not,
	 * the kernel should be stopped or a SIGSEGV should be sent to
	 * the process. Bit 61 is not reliable without the misc UV
	 * feature, therefore this needs to be checked too.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}
		/*
		 * The kernel should never run into this case and
		 * there is no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}
	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr))
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma)
			return handle_fault_error(regs, SEGV_MAPERR);
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		unreachable();
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

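/*
 * Non-secure storage access exception: a KVM guest touched a page
 * that is currently not secure. Convert the page to secure via the
 * gmap; if that is impossible (-EINVAL), send a SIGSEGV.
 */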
void do_non_secure_storage_access(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
		return handle_fault_error_nolock(regs, SEGV_MAPERR);
	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n",
			    current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */