xref: /linux/arch/sh/mm/tlbex_32.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
/*
 * TLB miss handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/tlb.h>

/*
 * Called with interrupts disabled.
 */
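/*
 * Fast path TLB refill: returns 0 once the faulting address has been
 * resolved from the page tables and the TLB updated, or nonzero to
 * make the caller fall back to the full page fault path.
 */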
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4; these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

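	/*
	 * Walk the page tables by hand. The p4d and pud levels are
	 * folded away on sh, so these steps largely collapse at compile
	 * time; any level that is missing or bad punts to the slow path.
	 */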
	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 1;
	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
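	/*
	 * Punt to the slow path if the PTE is not present, or if this
	 * is a write (nonzero error_code) to a page that is not marked
	 * writable; the generic fault code then deals with protection
	 * faults and COW.
	 */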
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(error_code && !pte_write(entry)))
		return 1;

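	/*
	 * Software dirty/accessed tracking: mark the PTE dirty on a
	 * write and young on any access before it is written back and
	 * loaded into the TLB.
	 */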
	if (error_code)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (error_code == FAULT_CODE_INITIAL)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

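	/*
	 * Record the fault code for this thread and load the updated
	 * PTE into the TLB; the vma argument is unused on this path,
	 * hence NULL here.
	 */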
	set_thread_fault_code(error_code);
	update_mmu_cache(NULL, address, pte);

	return 0;
}