// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

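/* Per-CPU ASID allocator state: the most recently assigned ASID on
 * this CPU, seeded with the first ASID usable for user mappings and
 * consumed by the ASID management in <asm/mmu_context.h>.
 */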
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;

void bad_page_fault(struct pt_regs *, unsigned long, int);

/*
 * This routine handles page faults.  It determines the address and
 * the problem, and then passes it off to the appropriate routine.
 *
 * Note: does not handle Miss and MultiHit.
 */
void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;
	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

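	/* Classify the exception cause: stores are write faults, and
	 * instruction-side TLB miss, privilege, and fetch-attribute
	 * faults are execute faults; anything else is treated as a read.
	 */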
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

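	/* Take the mmap lock shared and look up the vma covering the
	 * faulting address; a VM_FAULT_RETRY from the core VM re-enters
	 * here after the lock has been dropped.
	 */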
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

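	/* A usable vma must either contain the address or lie above it
	 * and be a stack (VM_GROWSDOWN) that can be expanded down to
	 * cover it.
	 */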
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

good_area:
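	/* The mapping exists, so any failure from here on is a
	 * permission problem, not a missing mapping.
	 */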
	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	}

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		/* The mmap lock has already been dropped.  A kernel-mode
		 * fault must still run its exception fixup, or a faulting
		 * user copy would livelock re-taking the fault.
		 */
		if (!user_mode(regs))
			bad_page_fault(regs, address, SIGKILL);
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
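	/* If the core VM dropped the lock and asked for a retry, loop
	 * back with FAULT_FLAG_TRIED set so the next attempt waits for
	 * the page lock instead of dropping the mmap lock again.
	 */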
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory
	 * map.  Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

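		/* Copy the kernel's top-level entry into this mm so the
		 * address resolves through the shared kernel page tables.
		 * With xtensa's folded intermediate levels, the pmd copy
		 * below effectively refreshes the same entry.
		 */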
		pgd_val(*pgd) = pgd_val(*pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
			goto bad_page_fault;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud) || !pud_present(*pud_k))
			goto bad_page_fault;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

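		/* If even the reference table has no pte here, the
		 * kernel touched an unmapped address: fatal.
		 */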
		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char *, struct pt_regs *, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	entry = search_exception_tables(regs->pc);
	if (entry) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}
259