/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

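/*
 * Give kprobes a chance to claim a kernel-mode fault before we do any
 * real work; returns non-zero if a kprobe fault handler dealt with it.
 * Preemption is disabled around the check so the per-CPU kprobe state
 * cannot change underneath us.
 */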
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address);
}

/*
 * Dump out the page table entries associated with 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

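/*
 * Copy the kernel mappings for 'address' from the reference page table
 * (init_mm.pgd) into 'pgd'. Returns the pmd in the reference table on
 * success, or NULL if there was nothing to synchronise, in which case
 * the fault must have some other cause.
 */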
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

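/*
 * Upper bound on the addresses vmalloc_fault() will handle. With store
 * queues enabled the limit is raised to P3_ADDR_MAX so that faults on
 * store queue mappings above VMALLOC_END are handled on demand too.
 */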
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch.
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

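/*
 * A fault in kernel mode that we have no way of resolving from user
 * context: try an exception table fixup (e.g. for a faulting uaccess),
 * then trapped I/O emulation, and failing those, oops and kill the
 * task.
 */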
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

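/*
 * A fault against an unmapped or inaccessible address. The
 * _nosemaphore variants are for callers that do not hold mmap_sem;
 * __bad_area() below drops it before raising the signal.
 */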
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

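/*
 * The mapping was valid but the backing page could not be made
 * available (VM_FAULT_SIGBUS). Kernel-mode faults are routed through
 * no_context(); user-mode faults raise SIGBUS with BUS_ADRERR.
 */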
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

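/*
 * Deal with the error cases from handle_mm_fault(). Returns 1 if the
 * fault has been fully dealt with here (including the task being
 * killed), 0 if the caller should carry on with the normal completion
 * path.
 */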
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * The page fault was interrupted by SIGKILL; there is no reason
	 * to continue handling it.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

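/*
 * Check the access implied by error_code against the permissions of
 * the vma: a write needs VM_WRITE, an instruction fetch (ITLB miss)
 * needs VM_EXEC, and a plain read fails only if the vma carries none
 * of VM_READ/VM_EXEC/VM_WRITE. Returns non-zero if the access is not
 * permitted.
 */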
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* Write to a present or a not-present page: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

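/* Anything at or above TASK_SIZE is a kernel-space address. */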
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

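	/*
	 * Look up the vma covering the faulting address under mmap_sem
	 * and try to handle the fault; we may come back here once if
	 * the first attempt asks for a retry.
	 */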
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

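	/* Record the fault code for later stages of fault handling. */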
	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

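	/*
	 * Major/minor page fault accounting; if handle_mm_fault() asked
	 * for a retry, go around once more with FAULT_FLAG_TRIED set.
	 */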
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}