xref: /linux/arch/sh/mm/tlbex_32.c (revision cc04a46f11ea046ed53e2c832ae29e4790f7e35f)
1 /*
2  * TLB miss handler for SH with an MMU.
3  *
4  *  Copyright (C) 1999  Niibe Yutaka
5  *  Copyright (C) 2003 - 2012  Paul Mundt
6  *
7  * This file is subject to the terms and conditions of the GNU General Public
8  * License.  See the file "COPYING" in the main directory of this archive
9  * for more details.
10  */
11 #include <linux/kernel.h>
12 #include <linux/mm.h>
13 #include <linux/kprobes.h>
14 #include <linux/kdebug.h>
15 #include <asm/mmu_context.h>
16 #include <asm/thread_info.h>
17 
18 /*
19  * Called with interrupts disabled.
20  */
/*
 * Software TLB-miss fast path: walk the page tables for @address and,
 * when a present PTE with sufficient permissions is found, refresh its
 * accessed/dirty state and load it into the TLB.
 *
 * Called with interrupts disabled.
 *
 * @regs:	exception-time register state (passed through; not read here)
 * @error_code:	fault code from the exception entry; non-zero appears to
 *		indicate a write access (it gates the pte_write() check and
 *		pte_mkdirty() below) — NOTE(review): confirm against the
 *		FAULT_CODE_* definitions in the arch headers.
 * @address:	faulting virtual address
 *
 * Returns 0 when the miss was handled here, or 1 to make the caller fall
 * back to the full page-fault path (to populate the page tables, deliver
 * a protection fault, etc.).
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		/* P3 (kernel vmalloc) range: walk the kernel page tables. */
		pgd = pgd_offset_k(address);
	} else {
		/*
		 * User range: punt to the slow path if the address is out
		 * of bounds or there is no mm to walk (e.g. the fault hit
		 * in a kernel thread context).
		 */
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	/* Walk down the levels; any missing entry is a real fault. */
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	/* Write access to a non-writable page: protection fault, slow path. */
	if (unlikely(error_code && !pte_write(entry)))
		return 1;

	/* Mark the page accessed — and dirty, on a write access. */
	if (error_code)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (error_code == FAULT_CODE_INITIAL)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	/*
	 * Record the fault code for this thread, then preload the updated
	 * PTE into the TLB.  NOTE(review): update_mmu_cache() is passed a
	 * NULL vma — presumably the vma argument is unused on this arch;
	 * verify before relying on it.
	 */
	set_thread_fault_code(error_code);
	update_mmu_cache(NULL, address, pte);

	return 0;
}
79