// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 */
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/kfence.h>

#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

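/*
 * Gate for copy_from_kernel_nofault(): only kernel addresses may be
 * probed.  "addr >= TASK_SIZE" rejects user pointers, and
 * "ULONG_MAX - addr >= size" rejects ranges that would wrap past the
 * top of the address space.
 */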
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
	unsigned long addr = (unsigned long)unsafe_src;

	return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
}

#ifdef CONFIG_MMU

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pgd = pgd_offset(mm, addr);
	printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));

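	/*
	 * Walk the tables one level at a time; the do { } while (0)
	 * exists only so that "break" can abandon the walk at the
	 * first missing or bad level.
	 */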
	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d))
			break;

		if (p4d_bad(*p4d)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(p4d, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		if (!pte)
			break;

		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		pr_cont(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while (0);

	pr_cont("\n");
}
#else					/* CONFIG_MMU */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */

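/*
 * Treat an abort as a write fault only when the WnR bit is set and the
 * abort was not raised by a cache maintenance operation (FSR_CM):
 * cache maintenance is reported as a "write" by the hardware but must
 * not require write permission on the mapping.
 */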
static inline bool is_write_fault(unsigned int fsr)
{
	return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}

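/*
 * Decode the fault status (FS) field of the FSR.  With LPAE the fault
 * type lives in the upper bits of FS and the low two bits encode the
 * lookup level, so the level is masked off before comparing; the
 * short-descriptor format instead uses distinct codes per level.
 */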
static inline bool is_translation_fault(unsigned int fsr)
{
	int fs = fsr_fs(fsr);
#ifdef CONFIG_ARM_LPAE
	if ((fs & FS_MMU_NOLL_MASK) == FS_TRANS_NOLL)
		return true;
#else
	if (fs == FS_L1_TRANS || fs == FS_L2_TRANS)
		return true;
#endif
	return false;
}

static void die_kernel_fault(const char *msg, struct mm_struct *mm,
			     unsigned long addr, unsigned int fsr,
			     struct pt_regs *regs)
{
	bust_spinlocks(1);
	pr_alert("8<--- cut here ---\n");
	pr_alert("Unable to handle kernel %s at virtual address %08lx when %s\n",
		 msg, addr, fsr & FSR_LNX_PF ? "execute" :
		 fsr & FSR_WRITE ? "write" : "read");

	show_pte(KERN_ALERT, mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	make_task_dead(SIGKILL);
}

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	const char *msg;
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		if (is_translation_fault(fsr) &&
		    kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
			return;

		msg = "paging request";
	}

	die_kernel_fault(msg, mm, addr, fsr, regs);
}

/*
 * Something tried to access memory that isn't in our memory map.
 * User mode accesses just cause a SIGSEGV.
 */
static void
__do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig,
		int code, struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (addr > TASK_SIZE)
		harden_branch_predictor();

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
		pr_err("8<--- cut here ---\n");
		pr_err("%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(KERN_ERR, tsk->mm, addr);
		show_regs(regs);
	}
#endif
#ifndef CONFIG_KUSER_HELPERS
	if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000))
		printk_ratelimited(KERN_DEBUG
				   "%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n",
				   tsk->comm, addr);
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	force_sig_fault(sig, code, (void __user *)addr);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
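/*
 * Arch-private fault codes, kept in bits above the generic VM_FAULT_*
 * values so they can travel in the same vm_fault_t; they are turned
 * into SEGV_MAPERR/SEGV_ACCERR further down before a signal is sent.
 */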
#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)

static inline bool is_permission_fault(unsigned int fsr)
{
	int fs = fsr_fs(fsr);
#ifdef CONFIG_ARM_LPAE
	if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL)
		return true;
#else
	if (fs == FS_L1_PERM || fs == FS_L2_PERM)
		return true;
#endif
	return false;
}

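/*
 * With CONFIG_CPU_TTBR0_PAN, the kernel disables user-space access by
 * setting TTBCR.EPD0, which turns off translation table walks through
 * TTBR0.  If EPD0 was set when a kernel-mode fault on a user address
 * was taken, the access was not a legitimate uaccess and must not be
 * serviced as a normal fault.
 */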
#ifdef CONFIG_CPU_TTBR0_PAN
static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
{
	struct svc_pt_regs *svcregs;

	/* If we are in user mode: permission granted */
	if (user_mode(regs))
		return true;

	/* uaccess state saved above pt_regs on SVC exception entry */
	svcregs = to_svc_pt_regs(regs);

	return !(svcregs->ttbcr & TTBCR_EPD0);
}
#else
static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
{
	return true;
}
#endif

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int sig, code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	unsigned long vm_flags = VM_ACCESS_FLAGS;

	if (kprobe_page_fault(regs, fsr))
		return 0;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	if (is_write_fault(fsr)) {
		flags |= FAULT_FLAG_WRITE;
		vm_flags = VM_WRITE;
	}

	if (fsr & FSR_LNX_PF) {
		vm_flags = VM_EXEC;

		if (is_permission_fault(fsr) && !user_mode(regs))
			die_kernel_fault("execution of memory",
					 mm, addr, fsr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	/*
	 * Privileged access aborts with CONFIG_CPU_TTBR0_PAN enabled are
	 * routed via the translation fault mechanism. Check whether uaccess
	 * is disabled while in kernel mode.
	 */
	if (!ttbr0_usermode_access_allowed(regs))
		goto no_context;

	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

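	/*
	 * Fast path: try to handle the fault under the per-VMA lock,
	 * avoiding mmap_lock contention.  Fall back to the mmap_lock
	 * path below if the VMA cannot be locked or the access is not
	 * permitted.
	 */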
	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	if (!(vma->vm_flags & vm_flags)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}
lock_mmap:

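	/* Slow path: look up the VMA under mmap_lock. */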
retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		fault = VM_FAULT_BADMAP;
		goto bad_area;
	}

	/*
	 * OK, we have a good vm_area for this memory access; check that
	 * the permissions on the VMA allow the access which faulted.
	 */
	if (!(vma->vm_flags & vm_flags))
		fault = VM_FAULT_BADACCESS;
	else
		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return 0;

	if (!(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	mmap_read_unlock(mm);
done:

	/*
	 * Handle the "normal" case first - the fault was handled
	 * successfully (this includes VM_FAULT_MAJOR faults).
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

bad_area:
	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fix up the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

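	/*
	 * Use the translation table base the MMU is actually walking
	 * (read back from the hardware by cpu_get_pgd()) rather than
	 * current->mm, since this fault may be taken in any context.
	 */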
	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);

	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to check
	 * pmd_none() on the entry that actually corresponds to the address,
	 * not on the first entry of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}
#endif /* CONFIG_ARM_LPAE */

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

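/*
 * Platform code can override an FSR entry at boot.  A purely
 * illustrative (hypothetical) example:
 *
 *	hook_fault_code(16 + 6, board_abort_handler, SIGBUS, BUS_OBJERR,
 *			"imprecise external abort");
 */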
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);

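	/*
	 * FSR_LNX_PF is a Linux software flag, not a hardware bit: it
	 * is cleared here for data aborts and set in do_PrefetchAbort()
	 * so that shared handlers can tell execute faults apart.
	 */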
	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	pr_alert("8<--- cut here ---\n");
	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);
	show_pte(KERN_ALERT, current->mm, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       fsr, 0);
}

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);

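	/* Tag the fault as a prefetch abort; see FSR_LNX_PF above. */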
	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	pr_alert("8<--- cut here ---\n");
	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       ifsr, 0);
}

/*
 * Abort handler to be used only during first unmasking of asynchronous aborts
 * on the boot CPU. This makes sure that the machine will not die if the
 * firmware/bootloader left an imprecise abort pending for us to trip over.
 */
static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
				      struct pt_regs *regs)
{
	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
		"first unmask, this is most likely caused by a "
		"firmware/bootloader bug.\n", fsr);

	return 0;
}

void __init early_abt_enable(void)
{
	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
	local_abt_enable();
	fsr_info[FSR_FS_AEA].fn = do_bad;
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults were introduced in ARMv6K.
		 * A runtime check for the 'K' extension is needed.
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif
681