// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

void bad_page_fault(struct pt_regs*, unsigned long, int);

static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
#ifdef CONFIG_MMU
	/* Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
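	/* Mappings for the vmalloc region are created only in init_mm.pgd;
	 * each process's page table picks them up lazily, on first touch,
	 * by copying the affected entries from that reference table.
	 */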
	struct mm_struct *act_mm = current->active_mm;
	int index = pgd_index(address);
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;

	if (act_mm == NULL)
		goto bad_page_fault;

	pgd = act_mm->pgd + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		goto bad_page_fault;

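	/* Copy the kernel's top-level entry into this mm's page table. */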
	pgd_val(*pgd) = pgd_val(*pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
		goto bad_page_fault;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud) || !pud_present(*pud_k))
		goto bad_page_fault;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
		goto bad_page_fault;

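	/* Bring the pmd entry in line with the reference table as well,
	 * then check that the final pte really is present.
	 */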
	pmd_val(*pmd) = pmd_val(*pmd_k);
	pte_k = pte_offset_kernel(pmd_k, address);

	if (!pte_present(*pte_k))
		goto bad_page_fault;
	return;

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
#else
	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
#endif
}

/*
 * This routine handles page faults.  It determines the address and
 * the problem, then hands the fault off to the appropriate routine.
 *
 * Note: does not handle Miss and MultiHit.
 */

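/* On entry, the low-level exception code has saved the Xtensa EXCCAUSE
 * and EXCVADDR special registers into pt_regs: exccause identifies the
 * kind of fault, excvaddr holds the faulting virtual address.
 */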
void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code = SEGV_MAPERR;
	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs)) {
		vmalloc_fault(regs, address);
		return;
	}

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

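	/* Decode the exception cause: a prohibited store counts as a write
	 * fault, the instruction-side causes count as an exec fault, and
	 * everything else is treated as a read.
	 */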
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

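	/* lock_mm_and_find_vma() takes the mmap read lock and looks up the
	 * VMA covering the address, expanding a stack VMA if necessary.
	 * On failure it returns NULL with the lock already dropped, hence
	 * bad_area_nosemaphore below.
	 */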
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

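	/* If a fatal signal interrupted the fault, handle_mm_fault() has
	 * already dropped the mmap lock; bail out, going through the
	 * exception-table fixup for a fault in kernel mode.
	 */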
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			bad_page_fault(regs, address, SIGKILL);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
bad_area_nosemaphore:
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? The exception
	 * table maps instruction addresses that are allowed to fault,
	 * such as the user-access helpers, to their fixup handlers.
	 */
	entry = search_exception_tables(regs->pc);
	if (entry) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}