xref: /linux/arch/s390/mm/fault.c (revision 4b660dbd9ee2059850fd30e0df420ca7a38a1856)
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *		 Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/fault.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

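/*
 * Classify which ASCE the faulting access ran under: the kernel ASCE,
 * the user ASCE, or - with KVM (CONFIG_PGSTE) - a guest mapping (gmap)
 * ASCE.
 */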
enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

static DEFINE_STATIC_KEY_FALSE(have_store_indication);

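/*
 * Facility 75 provides the fetch/store indication in the translation
 * exception identification (TEID), which lets fault_is_write() tell
 * stores apart from fetches. Without it a read access is assumed.
 */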
static int __init fault_init(void)
{
	if (test_facility(75))
		static_branch_enable(&have_store_indication);
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	struct gmap *gmap;

	if (likely(teid.as == PSW_BITS_AS_PRIMARY)) {
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
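		/*
		 * KVM runs the guest with the guest (gmap) ASCE loaded
		 * into CR1; if CR1 at fault time still matches the gmap
		 * ASCE, the fault was taken against the guest mapping.
		 */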
		gmap = (struct gmap *)S390_lowcore.gmap;
		if (regs->cr1 == gmap->asce)
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (teid.as == PSW_BITS_AS_SECONDARY)
		return USER_FAULT;
	/* Access register mode, not used in the kernel */
	if (teid.as == PSW_BITS_AS_ACCREG)
		return USER_FAULT;
	/* Home space -> access via kernel ASCE */
	return KERNEL_FAULT;
}

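/*
 * The TEID contains the failing address without the page offset;
 * scale the page number back up to a byte address.
 */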
static unsigned long get_fault_address(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	return teid.addr * PAGE_SIZE;
}

static __always_inline bool fault_is_write(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	if (static_branch_likely(&have_store_indication))
		return teid.fsi == TEID_FSI_STORE;
	return false;
}

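/*
 * Walk the page tables below the given ASCE and print one entry per
 * translation level. The switch intentionally falls through from the
 * table's top level down to the page table, stopping early at invalid
 * or large entries.
 */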
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R1:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R2:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R3:%016lx ", entry);
		if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("S:%016lx ", entry);
		if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(entry & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (get_kernel_nofault(entry, table))
		goto bad;
	pr_cont("P:%016lx ", entry);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 get_fault_address(regs), teid.val);
	pr_alert("Fault in ");
	switch (teid.as) {
	case PSW_BITS_AS_HOME:
		pr_cont("home space ");
		break;
	case PSW_BITS_AS_SECONDARY:
		pr_cont("secondary space ");
		break;
	case PSW_BITS_AS_ACCREG:
		pr_cont("access register ");
		break;
	case PSW_BITS_AS_PRIMARY:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce.val;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *)S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce.val;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, get_fault_address(regs));
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!__ratelimit(&rs))
		return;
	pr_alert("User process fault: interruption code %04x ilc:%d ",
		 regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	pr_cont("\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

static void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
}

static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
{
	enum fault_type fault_type;
	unsigned long address;
	bool is_write;

	if (user_mode(regs)) {
		if (WARN_ON_ONCE(!si_code))
			si_code = SEGV_MAPERR;
		return do_sigsegv(regs, si_code);
	}
	if (fixup_exception(regs))
		return;
	fault_type = get_fault_type(regs);
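	/* Give KFENCE a chance to claim a fault on one of its guard pages. */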
	if (fault_type == KERNEL_FAULT) {
		address = get_fault_address(regs);
		is_write = fault_is_write(regs);
		if (kfence_handle_page_fault(address, is_write, regs))
			return;
	}
	if (fault_type == KERNEL_FAULT)
		pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n");
	else
		pr_alert("Unable to handle kernel paging request in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static void handle_fault_error(struct pt_regs *regs, int si_code)
{
	struct mm_struct *mm = current->mm;

	mmap_read_unlock(mm);
	handle_fault_error_nolock(regs, si_code);
}

static void do_sigbus(struct pt_regs *regs)
{
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs));
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, then hands it off to the appropriate routine.
 *
 * interruption code (int_code):
 *   04       Protection	   ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present	 (nullification)
 *   11       Page translation	   ->  Not present	 (nullification)
 *   3b       Region third trans.  ->  Not present	 (nullification)
 */
static void do_exception(struct pt_regs *regs, int access)
{
	struct vm_area_struct *vma;
	unsigned long address;
	struct mm_struct *mm;
	enum fault_type type;
	unsigned int flags;
	struct gmap *gmap;
	vm_fault_t fault;
	bool is_write;

	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);
	if (kprobe_page_fault(regs, 14))
		return;
	mm = current->mm;
	address = get_fault_address(regs);
	is_write = fault_is_write(regs);
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		return handle_fault_error_nolock(regs, 0);
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			return handle_fault_error_nolock(regs, 0);
		break;
	}
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
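	/*
	 * If the hardware reported a store, upgrade the required access
	 * to VM_WRITE regardless of what the interruption code implied.
	 */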
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;
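	/*
	 * Fast path: try to handle the fault under the per-VMA lock,
	 * without taking the mmap_lock. Fall back to the locked path
	 * whenever the VMA cannot be found or read-locked.
	 */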
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (!(vma->vm_flags & access)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		if (unlikely(fault & VM_FAULT_ERROR))
			goto error;
		return;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
lock_mmap:
	mmap_read_lock(mm);
	gmap = NULL;
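	/*
	 * For a guest fault, translate the guest address to the host
	 * userspace address first. If the guest has pfault enabled,
	 * don't wait for page-in; the guest gets notified instead.
	 */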
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *)S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
retry:
	vma = find_vma(mm, address);
	if (!vma)
		return handle_fault_error(regs, SEGV_MAPERR);
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return handle_fault_error(regs, SEGV_MAPERR);
		vma = expand_stack(mm, address);
		if (!vma)
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
	}
	if (unlikely(!(vma->vm_flags & access)))
		return handle_fault_error(regs, SEGV_ACCERR);
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			mmap_read_unlock(mm);
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto gmap;
		}
		return;
	}
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mmap_read_unlock(mm);
		goto error;
	}
	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set; the
			 * mmap_lock has not been released.
			 */
			current->thread.gmap_pfault = 1;
			return handle_fault_error(regs, 0);
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
gmap:
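	/*
	 * Link the now resolved host page into the guest's (gmap)
	 * page tables as well.
	 */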
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			mmap_read_unlock(mm);
			goto error;
		}
	}
	mmap_read_unlock(mm);
	return;
error:
	if (fault & VM_FAULT_OOM) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			pagefault_out_of_memory();
	} else if (fault & VM_FAULT_SIGSEGV) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigsegv(regs, SEGV_MAPERR);
	} else if (fault & VM_FAULT_SIGBUS) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigbus(regs);
	} else {
		BUG();
	}
}

void do_protection_exception(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	/*
	 * Protection exceptions are suppressing, therefore decrement the
	 * psw address. The exceptions to this rule are aborted
	 * transactions; for these the PSW already points to the correct
	 * location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!teid.b61)) {
		if (user_mode(regs)) {
			/* Low-address protection in user mode: cannot happen */
			die(regs, "Low-address protection");
		}
		/*
		 * Low-address protection in kernel mode means
		 * NULL pointer write access in kernel mode.
		 */
		return handle_fault_error_nolock(regs, 0);
	}
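	/*
	 * TEID bit 56 set with the NX facility installed means the
	 * fault was an instruction fetch from non-executable memory.
	 */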
	if (unlikely(MACHINE_HAS_NX && teid.b56)) {
		regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
		return handle_fault_error_nolock(regs, SEGV_ACCERR);
	}
	do_exception(regs, VM_WRITE);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	do_exception(regs, VM_ACCESS_FLAGS);
}
NOKPROBE_SYMBOL(do_dat_exception);

#if IS_ENABLED(CONFIG_PGSTE)

void do_secure_storage_access(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long addr = get_fault_address(regs);
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 indicates if the address is valid; if it is not, the
	 * kernel should be stopped or a SIGSEGV should be sent to the
	 * process. Bit 61 is not reliable without the misc UV feature,
	 * therefore this needs to be checked too.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}
		/*
		 * The kernel should never run into this case and
		 * there is no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}
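	/*
	 * The faulting page has to be exported from secure storage
	 * (made accessible) again; how it is looked up depends on the
	 * address space of the access.
	 */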
	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr))
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma)
			return handle_fault_error(regs, SEGV_MAPERR);
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		unreachable();
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

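/*
 * A secure guest touched a page that is currently not secure; import
 * (convert) it to secure storage before the access is retried.
 */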
void do_non_secure_storage_access(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
		return handle_fault_error_nolock(regs, SEGV_MAPERR);
	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n",
			    current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */